Merge

commit 9573ef8bda

.hgtags
@@ -657,3 +657,4 @@ b0817631d2f4395508cb10e81c3858a94d9ae4de jdk-15+34
 fd60c3146a024037cdd9be34c645bb793995a7cc jdk-15+35
 c075a286cc7df767cce28e8057d6ec5051786490 jdk-16+9
 b01985b4f88f554f97901e53e1ba314681dd9c19 jdk-16+10
+5c18d696c7ce724ca36df13933aa53f50e12b9e0 jdk-16+11
@@ -116,8 +116,11 @@ endif
 ifneq ($(call check-jvm-feature, cds), true)
   JVM_CFLAGS_FEATURES += -DINCLUDE_CDS=0
   JVM_EXCLUDE_FILES += \
+      archiveBuilder.cpp \
+      archiveUtils.cpp \
       classListParser.cpp \
       classLoaderExt.cpp \
+      dumpAllocStats.cpp \
       dynamicArchive.cpp \
       filemap.cpp \
       heapShared.cpp \
@@ -126,8 +129,7 @@ ifneq ($(call check-jvm-feature, cds), true)
       metaspaceShared_$(HOTSPOT_TARGET_CPU_ARCH).cpp \
       sharedClassUtil.cpp \
       sharedPathsMiscInfo.cpp \
-      systemDictionaryShared.cpp \
-      #
+      systemDictionaryShared.cpp
 endif

 ifneq ($(call check-jvm-feature, nmt), true)
@@ -1,7 +1,7 @@
 // Configure cpptools IntelliSense
 "C_Cpp.intelliSenseCachePath": "{{OUTPUTDIR}}/.vscode",
 "C_Cpp.default.compileCommands": "{{OUTPUTDIR}}/compile_commands.json",
-"C_Cpp.default.cppStandard": "c++03",
+"C_Cpp.default.cppStandard": "c++14",
 "C_Cpp.default.compilerPath": "{{COMPILER}}",

 // Configure ccls
@@ -1,7 +1,7 @@
 // Configure cpptools IntelliSense
 "C_Cpp.intelliSenseCachePath": "{{OUTPUTDIR}}/.vscode",
 "C_Cpp.default.compileCommands": "{{OUTPUTDIR}}/compile_commands.json",
-"C_Cpp.default.cppStandard": "c++03",
+"C_Cpp.default.cppStandard": "c++14",
 "C_Cpp.default.compilerPath": "{{COMPILER}}",

 // Configure clangd
@@ -1,5 +1,5 @@
 // Configure cpptools IntelliSense
 "C_Cpp.intelliSenseCachePath": "{{OUTPUTDIR}}/.vscode",
 "C_Cpp.default.compileCommands": "{{OUTPUTDIR}}/compile_commands.json",
-"C_Cpp.default.cppStandard": "c++03",
+"C_Cpp.default.cppStandard": "c++14",
 "C_Cpp.default.compilerPath": "{{COMPILER}}",
@@ -1,7 +1,7 @@
 // Configure cpptools IntelliSense
 "C_Cpp.intelliSenseCachePath": "{{OUTPUTDIR}}/.vscode",
 "C_Cpp.default.compileCommands": "{{OUTPUTDIR}}/compile_commands.json",
-"C_Cpp.default.cppStandard": "c++03",
+"C_Cpp.default.cppStandard": "c++14",
 "C_Cpp.default.compilerPath": "{{COMPILER}}",

 // Configure RTags
@@ -157,7 +157,6 @@ $(eval $(call SetupJdkLibrary, BUILD_LIBJIMAGE, \
     LDFLAGS := $(LDFLAGS_JDKLIB) $(LDFLAGS_CXX_JDK) \
         $(call SET_SHARED_LIBRARY_ORIGIN), \
     LIBS_unix := -ljvm -ldl $(LIBCXX), \
-    LIBS_macosx := -lc++, \
     LIBS_windows := jvm.lib, \
 ))

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -136,7 +136,20 @@ void AbstractInterpreter::layout_activation(Method* method,
   // interpreter_frame_sender_sp interpreter_frame_sender_sp is
   // the original sp of the caller (the unextended_sp) and
   // sender_sp is fp+8/16 (32bit/64bit) XXX
-  intptr_t* locals = interpreter_frame->sender_sp() + max_locals - 1;
+  //
+  // The interpreted method entry on AArch64 aligns SP to 16 bytes
+  // before generating the fixed part of the activation frame. So there
+  // may be a gap between the locals block and the saved sender SP. For
+  // an interpreted caller we need to recreate this gap and exactly
+  // align the incoming parameters with the caller's temporary
+  // expression stack. For other types of caller frame it doesn't
+  // matter.
+  intptr_t* locals;
+  if (caller->is_interpreted_frame()) {
+    locals = caller->interpreter_frame_last_sp() + caller_actual_parameters - 1;
+  } else {
+    locals = interpreter_frame->sender_sp() + max_locals - 1;
+  }

 #ifdef ASSERT
   if (caller->is_interpreted_frame()) {
@@ -291,7 +291,7 @@ void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr bas
 }


-bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, int c, LIR_Opr result, LIR_Opr tmp) {
+bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, jint c, LIR_Opr result, LIR_Opr tmp) {

   if (is_power_of_2(c - 1)) {
     __ shift_left(left, exact_log2(c - 1), tmp);
@@ -61,6 +61,7 @@
 // [last sp               ]
 // [oop temp              ] (only for native calls)

+// [padding               ] (to preserve machine SP alignment)
 // [locals and parameters ]
 //                            <- sender sp
 // ------------------------------ Asm interpreter ----------------------------------------
@@ -1580,6 +1580,9 @@ address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {

   // Make room for locals
   __ sub(rscratch1, esp, r3, ext::uxtx, 3);
+
+  // Padding between locals and fixed part of activation frame to ensure
+  // SP is always 16-byte aligned.
   __ andr(sp, rscratch1, -16);

   // r3 - # of additional locals
@@ -326,7 +326,7 @@ void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr bas
 }


-bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, int c, LIR_Opr result, LIR_Opr tmp) {
+bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, jint c, LIR_Opr result, LIR_Opr tmp) {
   assert(left != result, "should be different registers");
   if (is_power_of_2(c + 1)) {
     LIR_Address::Scale scale = (LIR_Address::Scale) log2_intptr(c + 1);
@@ -290,7 +290,7 @@ void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr bas
 }


-bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, int c, LIR_Opr result, LIR_Opr tmp) {
+bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, jint c, LIR_Opr result, LIR_Opr tmp) {
   assert(left != result, "should be different registers");
   if (is_power_of_2(c + 1)) {
     __ shift_left(left, log2_int(c + 1), result);
@@ -224,7 +224,7 @@ void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr bas
   __ cmp_reg_mem(condition, reg, new LIR_Address(base, disp, type), info);
 }

-bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, int c, LIR_Opr result, LIR_Opr tmp) {
+bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, jint c, LIR_Opr result, LIR_Opr tmp) {
   if (tmp->is_valid()) {
     if (is_power_of_2(c + 1)) {
       __ move(left, tmp);
@@ -921,7 +921,17 @@ bool os::create_thread(Thread* thread, ThreadType thr_type,
   assert(is_aligned(stack_size, os::vm_page_size()), "stack_size not aligned");

   int status = pthread_attr_setstacksize(&attr, stack_size);
-  assert_status(status == 0, status, "pthread_attr_setstacksize");
+  if (status != 0) {
+    // pthread_attr_setstacksize() function can fail
+    // if the stack size exceeds a system-imposed limit.
+    assert_status(status == EINVAL, status, "pthread_attr_setstacksize");
+    log_warning(os, thread)("The %sthread stack size specified is invalid: " SIZE_FORMAT "k",
+                            (thr_type == compiler_thread) ? "compiler " : ((thr_type == java_thread) ? "" : "VM "),
+                            stack_size / K);
+    thread->set_osthread(NULL);
+    delete osthread;
+    return false;
+  }

   ThreadState state;
@@ -3716,6 +3726,10 @@ bool os::pd_release_memory(char* addr, size_t size) {
   return anon_munmap(addr, size);
 }

+#ifdef CAN_SHOW_REGISTERS_ON_ASSERT
+extern char* g_assert_poison; // assertion poison page address
+#endif
+
 static bool linux_mprotect(char* addr, size_t size, int prot) {
   // Linux wants the mprotect address argument to be page aligned.
   char* bottom = (char*)align_down((intptr_t)addr, os::Linux::page_size());
@@ -3728,6 +3742,11 @@ static bool linux_mprotect(char* addr, size_t size, int prot) {
   assert(addr == bottom, "sanity check");

   size = align_up(pointer_delta(addr, bottom, 1) + size, os::Linux::page_size());
+  // Don't log anything if we're executing in the poison page signal handling
+  // context. It can lead to reentrant use of other parts of the VM code.
+#ifdef CAN_SHOW_REGISTERS_ON_ASSERT
+  if (addr != g_assert_poison)
+#endif
   Events::log(NULL, "Protecting memory [" INTPTR_FORMAT "," INTPTR_FORMAT "] with protection modes %x", p2i(bottom), p2i(bottom+size), prot);
   return ::mprotect(bottom, size, prot) == 0;
 }
|
@ -4542,6 +4542,18 @@ bool Interval::has_hole_between(int hole_from, int hole_to) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Check if there is an intersection with any of the split children of 'interval'
|
||||
bool Interval::intersects_any_children_of(Interval* interval) const {
|
||||
if (interval->_split_children != NULL) {
|
||||
for (int i = 0; i < interval->_split_children->length(); i++) {
|
||||
if (intersects(interval->_split_children->at(i))) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
#ifndef PRODUCT
|
||||
void Interval::print(outputStream* out) const {
|
||||
@@ -5722,6 +5734,13 @@ void LinearScanWalker::combine_spilled_intervals(Interval* cur) {
     return;
   }
   assert(register_hint->canonical_spill_slot() != -1, "must be set when part of interval was spilled");
+  assert(!cur->intersects(register_hint), "cur should not intersect register_hint");
+
+  if (cur->intersects_any_children_of(register_hint)) {
+    // Bail out if cur intersects any split children of register_hint, which have the same spill slot as their parent. An overlap of two intervals with
+    // the same spill slot could result in a situation where both intervals are spilled at the same time to the same stack location which is not correct.
+    return;
+  }

   // modify intervals such that cur gets the same stack slot as register_hint
   // delete use positions to prevent the intervals to get a register at beginning
@@ -613,6 +613,7 @@ class Interval : public CompilationResourceObj {
   bool covers(int op_id, LIR_OpVisitState::OprMode mode) const;
   bool has_hole_between(int from, int to);
   bool intersects(Interval* i) const { return _first->intersects(i->_first); }
+  bool intersects_any_children_of(Interval* i) const;
   int intersects_at(Interval* i) const { return _first->intersects_at(i->_first); }

   // range iteration
@@ -513,13 +513,6 @@ Symbol* SymbolTable::do_add_if_needed(const char* name, int len, uintx hash, boo
   }

   assert((sym == NULL) || sym->refcount() != 0, "found dead symbol");
-#if INCLUDE_CDS
-  if (DumpSharedSpaces) {
-    if (sym != NULL) {
-      MetaspaceShared::add_symbol(sym);
-    }
-  }
-#endif
   return sym;
 }

@@ -599,8 +599,8 @@ public:
   }

 private:
-  // ArchiveCompactor::allocate() has reserved a pointer immediately before
-  // archived InstanceKlasses. We can use this slot to do a quick
+  // ArchiveBuilder::make_shallow_copy() has reserved a pointer immediately
+  // before archived InstanceKlasses. We can use this slot to do a quick
   // lookup of InstanceKlass* -> RunTimeSharedClassInfo* without
   // building a new hashtable.
   //
@@ -1345,7 +1345,7 @@ bool SystemDictionaryShared::should_be_excluded(InstanceKlass* k) {
   return false;
 }

-// k is a class before relocating by ArchiveCompactor
+// k is a class before relocating by ArchiveBuilder
 void SystemDictionaryShared::validate_before_archiving(InstanceKlass* k) {
   ResourceMark rm;
   const char* name = k->name()->as_C_string();
@@ -119,6 +119,10 @@ public:
   // No GC threads
   virtual void gc_threads_do(ThreadClosure* tc) const {}

+  // Runs the given AbstractGangTask with the current active workers
+  // No workGang for EpsilonHeap, work serially with thread 0
+  virtual void run_task(AbstractGangTask* task) { task->work(0); }
+
   // No nmethod handling
   virtual void register_nmethod(nmethod* nm) {}
   virtual void unregister_nmethod(nmethod* nm) {}
@@ -89,6 +89,7 @@
 #include "logging/log.hpp"
 #include "memory/allocation.hpp"
 #include "memory/iterator.hpp"
+#include "memory/heapInspection.hpp"
 #include "memory/resourceArea.hpp"
 #include "memory/universe.hpp"
 #include "oops/access.inline.hpp"
@@ -161,9 +162,13 @@ void G1RegionMappingChangedListener::on_commit(uint start_idx, size_t num_region
   reset_from_card_cache(start_idx, num_regions);
 }

-Tickspan G1CollectedHeap::run_task(AbstractGangTask* task) {
-  Ticks start = Ticks::now();
+void G1CollectedHeap::run_task(AbstractGangTask* task) {
   workers()->run_task(task, workers()->active_workers());
+}
+
+Tickspan G1CollectedHeap::run_task_timed(AbstractGangTask* task) {
+  Ticks start = Ticks::now();
+  run_task(task);
   return Ticks::now() - start;
 }
@@ -2301,6 +2306,30 @@ void G1CollectedHeap::object_iterate(ObjectClosure* cl) {
   heap_region_iterate(&blk);
 }

+class G1ParallelObjectIterator : public ParallelObjectIterator {
+private:
+  G1CollectedHeap*  _heap;
+  HeapRegionClaimer _claimer;
+
+public:
+  G1ParallelObjectIterator(uint thread_num) :
+      _heap(G1CollectedHeap::heap()),
+      _claimer(thread_num == 0 ? G1CollectedHeap::heap()->workers()->active_workers() : thread_num) {}
+
+  virtual void object_iterate(ObjectClosure* cl, uint worker_id) {
+    _heap->object_iterate_parallel(cl, worker_id, &_claimer);
+  }
+};
+
+ParallelObjectIterator* G1CollectedHeap::parallel_object_iterator(uint thread_num) {
+  return new G1ParallelObjectIterator(thread_num);
+}
+
+void G1CollectedHeap::object_iterate_parallel(ObjectClosure* cl, uint worker_id, HeapRegionClaimer* claimer) {
+  IterateObjectClosureRegionClosure blk(cl);
+  heap_region_par_iterate_from_worker_offset(&blk, claimer, worker_id);
+}
+
 void G1CollectedHeap::keep_alive(oop obj) {
   G1BarrierSet::enqueue(obj);
 }
@@ -3694,7 +3723,7 @@ void G1CollectedHeap::pre_evacuate_collection_set(G1EvacuationInfo& evacuation_i

   {
     G1PrepareEvacuationTask g1_prep_task(this);
-    Tickspan task_time = run_task(&g1_prep_task);
+    Tickspan task_time = run_task_timed(&g1_prep_task);

     phase_times()->record_register_regions(task_time.seconds() * 1000.0,
                                            g1_prep_task.humongous_total(),
@@ -3843,7 +3872,7 @@ void G1CollectedHeap::evacuate_initial_collection_set(G1ParScanThreadStateSet* p
   {
     G1RootProcessor root_processor(this, num_workers);
     G1EvacuateRegionsTask g1_par_task(this, per_thread_states, _task_queues, &root_processor, num_workers);
-    task_time = run_task(&g1_par_task);
+    task_time = run_task_timed(&g1_par_task);
     // Closing the inner scope will execute the destructor for the G1RootProcessor object.
     // To extract its code root fixup time we measure total time of this scope and
     // subtract from the time the WorkGang task took.
@@ -3882,7 +3911,7 @@ void G1CollectedHeap::evacuate_next_optional_regions(G1ParScanThreadStateSet* pe
   {
     G1MarkScope code_mark_scope;
     G1EvacuateOptionalRegionsTask task(per_thread_states, _task_queues, workers()->active_workers());
-    task_time = run_task(&task);
+    task_time = run_task_timed(&task);
     // See comment in evacuate_collection_set() for the reason of the scope.
   }
   Tickspan total_processing = Ticks::now() - start_processing;
@@ -551,9 +551,12 @@ public:

   WorkGang* workers() const { return _workers; }

-  // Runs the given AbstractGangTask with the current active workers, returning the
-  // total time taken.
-  Tickspan run_task(AbstractGangTask* task);
+  // Runs the given AbstractGangTask with the current active workers.
+  virtual void run_task(AbstractGangTask* task);
+
+  // Runs the given AbstractGangTask with the current active workers,
+  // returning the total time taken.
+  Tickspan run_task_timed(AbstractGangTask* task);

   G1Allocator* allocator() {
     return _allocator;
@@ -1173,9 +1176,13 @@ public:

   // Iteration functions.

+  void object_iterate_parallel(ObjectClosure* cl, uint worker_id, HeapRegionClaimer* claimer);
+
   // Iterate over all objects, calling "cl.do_object" on each.
   virtual void object_iterate(ObjectClosure* cl);

+  virtual ParallelObjectIterator* parallel_object_iterator(uint thread_num);
+
   // Keep alive an object that was loaded with AS_NO_KEEPALIVE.
   virtual void keep_alive(oop obj);

@@ -155,11 +155,131 @@ void G1ParScanThreadState::verify_task(ScannerTask task) const {
 }
 #endif // ASSERT

-void G1ParScanThreadState::trim_queue() {
+template <class T> void G1ParScanThreadState::do_oop_evac(T* p) {
+  // Reference should not be NULL here as such are never pushed to the task queue.
+  oop obj = RawAccess<IS_NOT_NULL>::oop_load(p);
+
+  // Although we never intentionally push references outside of the collection
+  // set, due to (benign) races in the claim mechanism during RSet scanning more
+  // than one thread might claim the same card. So the same card may be
+  // processed multiple times, and so we might get references into old gen here.
+  // So we need to redo this check.
+  const G1HeapRegionAttr region_attr = _g1h->region_attr(obj);
+  // References pushed onto the work stack should never point to a humongous region
+  // as they are not added to the collection set due to above precondition.
+  assert(!region_attr.is_humongous(),
+         "Obj " PTR_FORMAT " should not refer to humongous region %u from " PTR_FORMAT,
+         p2i(obj), _g1h->addr_to_region(cast_from_oop<HeapWord*>(obj)), p2i(p));
+
+  if (!region_attr.is_in_cset()) {
+    // In this case somebody else already did all the work.
+    return;
+  }
+
+  markWord m = obj->mark_raw();
+  if (m.is_marked()) {
+    obj = (oop) m.decode_pointer();
+  } else {
+    obj = do_copy_to_survivor_space(region_attr, obj, m);
+  }
+  RawAccess<IS_NOT_NULL>::oop_store(p, obj);
+
+  assert(obj != NULL, "Must be");
+  if (HeapRegion::is_in_same_region(p, obj)) {
+    return;
+  }
+  HeapRegion* from = _g1h->heap_region_containing(p);
+  if (!from->is_young()) {
+    enqueue_card_if_tracked(_g1h->region_attr(obj), p, obj);
+  }
+}
+
+void G1ParScanThreadState::do_partial_array(PartialArrayScanTask task) {
+  oop from_obj = task.to_source_array();
+
+  assert(_g1h->is_in_reserved(from_obj), "must be in heap.");
+  assert(from_obj->is_objArray(), "must be obj array");
+  objArrayOop from_obj_array = objArrayOop(from_obj);
+  // The from-space object contains the real length.
+  int length = from_obj_array->length();
+
+  assert(from_obj->is_forwarded(), "must be forwarded");
+  oop to_obj = from_obj->forwardee();
+  assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
+  objArrayOop to_obj_array = objArrayOop(to_obj);
+  // We keep track of the next start index in the length field of the
+  // to-space object.
+  int next_index = to_obj_array->length();
+  assert(0 <= next_index && next_index < length,
+         "invariant, next index: %d, length: %d", next_index, length);
+
+  int start = next_index;
+  int end = length;
+  int remainder = end - start;
+  // We'll try not to push a range that's smaller than ParGCArrayScanChunk.
+  if (remainder > 2 * ParGCArrayScanChunk) {
+    end = start + ParGCArrayScanChunk;
+    to_obj_array->set_length(end);
+    // Push the remainder before we process the range in case another
+    // worker has run out of things to do and can steal it.
+    push_on_queue(ScannerTask(PartialArrayScanTask(from_obj)));
+  } else {
+    assert(length == end, "sanity");
+    // We'll process the final range for this object. Restore the length
+    // so that the heap remains parsable in case of evacuation failure.
+    to_obj_array->set_length(end);
+  }
+
+  HeapRegion* hr = _g1h->heap_region_containing(to_obj);
+  G1ScanInYoungSetter x(&_scanner, hr->is_young());
+  // Process indexes [start,end). It will also process the header
+  // along with the first chunk (i.e., the chunk with start == 0).
+  // Note that at this point the length field of to_obj_array is not
+  // correct given that we are using it to keep track of the next
+  // start index. oop_iterate_range() (thankfully!) ignores the length
+  // field and only relies on the start / end parameters. It does
+  // however return the size of the object which will be incorrect. So
+  // we have to ignore it even if we wanted to use it.
+  to_obj_array->oop_iterate_range(&_scanner, start, end);
+}
+
+void G1ParScanThreadState::dispatch_task(ScannerTask task) {
+  verify_task(task);
+  if (task.is_narrow_oop_ptr()) {
+    do_oop_evac(task.to_narrow_oop_ptr());
+  } else if (task.is_oop_ptr()) {
+    do_oop_evac(task.to_oop_ptr());
+  } else {
+    do_partial_array(task.to_partial_array_task());
+  }
+}
+
+// Process tasks until overflow queue is empty and local queue
+// contains no more than threshold entries. NOINLINE to prevent
+// inlining into steal_and_trim_queue.
+ATTRIBUTE_FLATTEN NOINLINE
+void G1ParScanThreadState::trim_queue_to_threshold(uint threshold) {
+  ScannerTask task;
   do {
-    // Fully drain the queue.
-    trim_queue_to_threshold(0);
-  } while (!_task_queue->is_empty());
+    while (_task_queue->pop_overflow(task)) {
+      if (!_task_queue->try_push_to_taskqueue(task)) {
+        dispatch_task(task);
+      }
+    }
+    while (_task_queue->pop_local(task, threshold)) {
+      dispatch_task(task);
+    }
+  } while (!_task_queue->overflow_empty());
+}
+
+ATTRIBUTE_FLATTEN
+void G1ParScanThreadState::steal_and_trim_queue(G1ScannerTasksQueueSet* task_queues) {
+  ScannerTask stolen_task;
+  while (task_queues->steal(_worker_id, stolen_task)) {
+    dispatch_task(stolen_task);
+    // Processing stolen task may have added tasks to our queue.
+    trim_queue();
+  }
 }

 HeapWord* G1ParScanThreadState::allocate_in_next_plab(G1HeapRegionAttr* dest,
@@ -227,18 +347,57 @@ void G1ParScanThreadState::report_promotion_event(G1HeapRegionAttr const dest_at
   }
 }

-oop G1ParScanThreadState::copy_to_survivor_space(G1HeapRegionAttr const region_attr,
-                                                 oop const old,
-                                                 markWord const old_mark) {
+NOINLINE
+HeapWord* G1ParScanThreadState::allocate_copy_slow(G1HeapRegionAttr* dest_attr,
+                                                   oop old,
+                                                   size_t word_sz,
+                                                   uint age,
+                                                   uint node_index) {
+  HeapWord* obj_ptr = NULL;
+  // Try slow-path allocation unless we're allocating old and old is already full.
+  if (!(dest_attr->is_old() && _old_gen_is_full)) {
+    bool plab_refill_failed = false;
+    obj_ptr = _plab_allocator->allocate_direct_or_new_plab(*dest_attr,
+                                                           word_sz,
+                                                           &plab_refill_failed,
+                                                           node_index);
+    if (obj_ptr == NULL) {
+      obj_ptr = allocate_in_next_plab(dest_attr,
+                                      word_sz,
+                                      plab_refill_failed,
+                                      node_index);
+    }
+  }
+  if (obj_ptr != NULL) {
+    update_numa_stats(node_index);
+    if (_g1h->_gc_tracer_stw->should_report_promotion_events()) {
+      // The events are checked individually as part of the actual commit
+      report_promotion_event(*dest_attr, old, word_sz, age, obj_ptr, node_index);
+    }
+  }
+  return obj_ptr;
+}
+
+NOINLINE
+void G1ParScanThreadState::undo_allocation(G1HeapRegionAttr dest_attr,
+                                           HeapWord* obj_ptr,
+                                           size_t word_sz,
+                                           uint node_index) {
+  _plab_allocator->undo_allocation(dest_attr, obj_ptr, word_sz, node_index);
+}
+
+// Private inline function, for direct internal use and providing the
+// implementation of the public not-inline function.
+oop G1ParScanThreadState::do_copy_to_survivor_space(G1HeapRegionAttr const region_attr,
+                                                    oop const old,
+                                                    markWord const old_mark) {
+  assert(region_attr.is_in_cset(),
+         "Unexpected region attr type: %s", region_attr.get_type_str());

   const size_t word_sz = old->size();

   uint age = 0;
   G1HeapRegionAttr dest_attr = next_region_attr(region_attr, old_mark, age);
   // The second clause is to prevent premature evacuation failure in case there
   // is still space in survivor, but old gen is full.
   if (_old_gen_is_full && dest_attr.is_old()) {
     return handle_evacuation_failure_par(old, old_mark);
   }
   HeapRegion* const from_region = _g1h->heap_region_containing(old);
   uint node_index = from_region->node_index();

@@ -247,22 +406,11 @@ oop G1ParScanThreadState::copy_to_survivor_space(G1HeapRegionAttr const region_a
   // PLAB allocations should succeed most of the time, so we'll
   // normally check against NULL once and that's it.
   if (obj_ptr == NULL) {
-    bool plab_refill_failed = false;
-    obj_ptr = _plab_allocator->allocate_direct_or_new_plab(dest_attr, word_sz, &plab_refill_failed, node_index);
+    obj_ptr = allocate_copy_slow(&dest_attr, old, word_sz, age, node_index);
     if (obj_ptr == NULL) {
-      assert(region_attr.is_in_cset(), "Unexpected region attr type: %s", region_attr.get_type_str());
-      obj_ptr = allocate_in_next_plab(&dest_attr, word_sz, plab_refill_failed, node_index);
-      if (obj_ptr == NULL) {
-        // This will either forward-to-self, or detect that someone else has
-        // installed a forwarding pointer.
-        return handle_evacuation_failure_par(old, old_mark);
-      }
-    }
-    update_numa_stats(node_index);
-
-    if (_g1h->_gc_tracer_stw->should_report_promotion_events()) {
-      // The events are checked individually as part of the actual commit
-      report_promotion_event(dest_attr, old, word_sz, age, obj_ptr, node_index);
+      // This will either forward-to-self, or detect that someone else has
+      // installed a forwarding pointer.
+      return handle_evacuation_failure_par(old, old_mark);
     }
   }

@@ -274,7 +422,7 @@ oop G1ParScanThreadState::copy_to_survivor_space(G1HeapRegionAttr const region_a
   if (_g1h->evacuation_should_fail()) {
     // Doing this after all the allocation attempts also tests the
     // undo_allocation() method too.
-    _plab_allocator->undo_allocation(dest_attr, obj_ptr, word_sz, node_index);
+    undo_allocation(dest_attr, obj_ptr, word_sz, node_index);
     return handle_evacuation_failure_par(old, old_mark);
   }
 #endif // !PRODUCT
@@ -287,10 +435,12 @@ oop G1ParScanThreadState::copy_to_survivor_space(G1HeapRegionAttr const region_a
   if (forward_ptr == NULL) {
     Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(old), obj_ptr, word_sz);

-    const uint young_index = from_region->young_index_in_cset();
-
-    assert((from_region->is_young() && young_index > 0) ||
-           (!from_region->is_young() && young_index == 0), "invariant" );
+    {
+      const uint young_index = from_region->young_index_in_cset();
+      assert((from_region->is_young() && young_index > 0) ||
+             (!from_region->is_young() && young_index == 0), "invariant" );
+      _surviving_young_words[young_index] += word_sz;
+    }

     if (dest_attr.is_young()) {
       if (age < markWord::max_age) {
@@ -324,8 +474,6 @@ oop G1ParScanThreadState::copy_to_survivor_space(G1HeapRegionAttr const region_a
                                              obj);
     }

-    _surviving_young_words[young_index] += word_sz;
-
     if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
       // We keep track of the next start index in the length field of
       // the to-space object. The actual length can be found in the
@@ -343,6 +491,14 @@ oop G1ParScanThreadState::copy_to_survivor_space(G1HeapRegionAttr const region_a
     }
   }
 }

+// Public not-inline entry point.
+ATTRIBUTE_FLATTEN
+oop G1ParScanThreadState::copy_to_survivor_space(G1HeapRegionAttr region_attr,
+                                                 oop old,
+                                                 markWord old_mark) {
+  return do_copy_to_survivor_space(region_attr, old, old_mark);
+}
+
 G1ParScanThreadState* G1ParScanThreadStateSet::state_for_worker(uint worker_id) {
   assert(worker_id < _n_workers, "out of bounds access");
   if (_states[worker_id] == NULL) {
@@ -398,6 +554,7 @@ void G1ParScanThreadStateSet::record_unused_optional_region(HeapRegion* hr) {
   }
 }

+NOINLINE
 oop G1ParScanThreadState::handle_evacuation_failure_par(oop old, markWord m) {
   assert(_g1h->is_in_cset(old), "Object " PTR_FORMAT " should be in the CSet", p2i(old));

@@ -428,6 +585,33 @@ oop G1ParScanThreadState::handle_evacuation_failure_par(oop old, markWord m) {
     return forward_ptr;
   }
 }
+
+void G1ParScanThreadState::initialize_numa_stats() {
+  if (_numa->is_enabled()) {
+    LogTarget(Info, gc, heap, numa) lt;
+
+    if (lt.is_enabled()) {
+      uint num_nodes = _numa->num_active_nodes();
+      // Record only if there are multiple active nodes.
+      _obj_alloc_stat = NEW_C_HEAP_ARRAY(size_t, num_nodes, mtGC);
+      memset(_obj_alloc_stat, 0, sizeof(size_t) * num_nodes);
+    }
+  }
+}
+
+void G1ParScanThreadState::flush_numa_stats() {
+  if (_obj_alloc_stat != NULL) {
+    uint node_index = _numa->index_of_current_thread();
+    _numa->copy_statistics(G1NUMAStats::LocalObjProcessAtCopyToSurv, node_index, _obj_alloc_stat);
+  }
+}
+
+void G1ParScanThreadState::update_numa_stats(uint node_index) {
+  if (_obj_alloc_stat != NULL) {
+    _obj_alloc_stat[node_index]++;
+  }
+}

 G1ParScanThreadStateSet::G1ParScanThreadStateSet(G1CollectedHeap* g1h,
                                                  G1RedirtyCardsQueueSet* rdcqs,
                                                  uint n_workers,
@@ -29,7 +29,6 @@
 #include "gc/g1/g1CollectedHeap.hpp"
 #include "gc/g1/g1RedirtyCardsQueue.hpp"
-#include "gc/g1/g1OopClosures.hpp"
 #include "gc/g1/g1Policy.hpp"
 #include "gc/g1/g1RemSet.hpp"
 #include "gc/g1/heapRegionRemSet.hpp"
 #include "gc/shared/ageTable.hpp"
@@ -159,6 +158,21 @@ public:
 private:
   inline void do_partial_array(PartialArrayScanTask task);

+  HeapWord* allocate_copy_slow(G1HeapRegionAttr* dest_attr,
+                               oop old,
+                               size_t word_sz,
+                               uint age,
+                               uint node_index);
+
+  void undo_allocation(G1HeapRegionAttr dest_addr,
+                       HeapWord* obj_ptr,
+                       size_t word_sz,
+                       uint node_index);
+
+  inline oop do_copy_to_survivor_space(G1HeapRegionAttr region_attr,
+                                       oop obj,
+                                       markWord old_mark);
+
   // This method is applied to the fields of the objects that have just been copied.
   template <class T> inline void do_oop_evac(T* p);

@@ -181,27 +195,25 @@ private:
                             oop const old, size_t word_sz, uint age,
                             HeapWord * const obj_ptr, uint node_index) const;

-  inline bool needs_partial_trimming() const;
-  inline bool is_partially_trimmed() const;
+  void trim_queue_to_threshold(uint threshold);

-  inline void trim_queue_to_threshold(uint threshold);
+  inline bool needs_partial_trimming() const;

   // NUMA statistics related methods.
-  inline void initialize_numa_stats();
-  inline void flush_numa_stats();
+  void initialize_numa_stats();
+  void flush_numa_stats();
   inline void update_numa_stats(uint node_index);

 public:
-  oop copy_to_survivor_space(G1HeapRegionAttr const region_attr, oop const obj, markWord const old_mark);
+  oop copy_to_survivor_space(G1HeapRegionAttr region_attr, oop obj, markWord old_mark);

-  void trim_queue();
-  void trim_queue_partially();
+  inline void trim_queue();
+  inline void trim_queue_partially();
+  void steal_and_trim_queue(G1ScannerTasksQueueSet *task_queues);

   Tickspan trim_ticks() const;
   void reset_trim_ticks();

-  inline void steal_and_trim_queue(G1ScannerTasksQueueSet *task_queues);
-
   // An attempt to evacuate "obj" has failed; take necessary steps.
   oop handle_evacuation_failure_par(oop obj, markWord m);
@@ -32,158 +32,34 @@
 #include "oops/access.inline.hpp"
 #include "oops/oop.inline.hpp"

-template <class T> void G1ParScanThreadState::do_oop_evac(T* p) {
-  // Reference should not be NULL here as such are never pushed to the task queue.
-  oop obj = RawAccess<IS_NOT_NULL>::oop_load(p);
-
-  // Although we never intentionally push references outside of the collection
-  // set, due to (benign) races in the claim mechanism during RSet scanning more
-  // than one thread might claim the same card. So the same card may be
-  // processed multiple times, and so we might get references into old gen here.
-  // So we need to redo this check.
-  const G1HeapRegionAttr region_attr = _g1h->region_attr(obj);
-  // References pushed onto the work stack should never point to a humongous region
-  // as they are not added to the collection set due to above precondition.
-  assert(!region_attr.is_humongous(),
-         "Obj " PTR_FORMAT " should not refer to humongous region %u from " PTR_FORMAT,
-         p2i(obj), _g1h->addr_to_region(cast_from_oop<HeapWord*>(obj)), p2i(p));
-
-  if (!region_attr.is_in_cset()) {
-    // In this case somebody else already did all the work.
-    return;
-  }
-
-  markWord m = obj->mark_raw();
-  if (m.is_marked()) {
-    obj = (oop) m.decode_pointer();
-  } else {
-    obj = copy_to_survivor_space(region_attr, obj, m);
-  }
-  RawAccess<IS_NOT_NULL>::oop_store(p, obj);
-
-  assert(obj != NULL, "Must be");
-  if (HeapRegion::is_in_same_region(p, obj)) {
-    return;
-  }
-  HeapRegion* from = _g1h->heap_region_containing(p);
-  if (!from->is_young()) {
-    enqueue_card_if_tracked(_g1h->region_attr(obj), p, obj);
-  }
-}
-
 inline void G1ParScanThreadState::push_on_queue(ScannerTask task) {
   verify_task(task);
   _task_queue->push(task);
 }

-inline void G1ParScanThreadState::do_partial_array(PartialArrayScanTask task) {
-  oop from_obj = task.to_source_array();
-
-  assert(_g1h->is_in_reserved(from_obj), "must be in heap.");
-  assert(from_obj->is_objArray(), "must be obj array");
-  objArrayOop from_obj_array = objArrayOop(from_obj);
-  // The from-space object contains the real length.
-  int length = from_obj_array->length();
-
-  assert(from_obj->is_forwarded(), "must be forwarded");
-  oop to_obj = from_obj->forwardee();
-  assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
-  objArrayOop to_obj_array = objArrayOop(to_obj);
-  // We keep track of the next start index in the length field of the
-  // to-space object.
-  int next_index = to_obj_array->length();
-  assert(0 <= next_index && next_index < length,
-         "invariant, next index: %d, length: %d", next_index, length);
-
-  int start = next_index;
-  int end = length;
-  int remainder = end - start;
-  // We'll try not to push a range that's smaller than ParGCArrayScanChunk.
-  if (remainder > 2 * ParGCArrayScanChunk) {
-    end = start + ParGCArrayScanChunk;
-    to_obj_array->set_length(end);
-    // Push the remainder before we process the range in case another
-    // worker has run out of things to do and can steal it.
-    push_on_queue(ScannerTask(PartialArrayScanTask(from_obj)));
-  } else {
-    assert(length == end, "sanity");
-    // We'll process the final range for this object. Restore the length
-    // so that the heap remains parsable in case of evacuation failure.
-    to_obj_array->set_length(end);
-  }
-
-  HeapRegion* hr = _g1h->heap_region_containing(to_obj);
-  G1ScanInYoungSetter x(&_scanner, hr->is_young());
-  // Process indexes [start,end). It will also process the header
-  // along with the first chunk (i.e., the chunk with start == 0).
-  // Note that at this point the length field of to_obj_array is not
-  // correct given that we are using it to keep track of the next
-  // start index. oop_iterate_range() (thankfully!) ignores the length
-  // field and only relies on the start / end parameters. It does
-  // however return the size of the object which will be incorrect. So
-  // we have to ignore it even if we wanted to use it.
-  to_obj_array->oop_iterate_range(&_scanner, start, end);
-}
-
-inline void G1ParScanThreadState::dispatch_task(ScannerTask task) {
-  verify_task(task);
-  if (task.is_narrow_oop_ptr()) {
-    do_oop_evac(task.to_narrow_oop_ptr());
-  } else if (task.is_oop_ptr()) {
-    do_oop_evac(task.to_oop_ptr());
-  } else {
-    do_partial_array(task.to_partial_array_task());
-  }
-}
-
-void G1ParScanThreadState::steal_and_trim_queue(G1ScannerTasksQueueSet *task_queues) {
-  ScannerTask stolen_task;
-  while (task_queues->steal(_worker_id, stolen_task)) {
-    dispatch_task(stolen_task);
-
-    // We've just processed a task and we might have made
-    // available new entries on the queues. So we have to make sure
-    // we drain the queues as necessary.
-    trim_queue();
-  }
-}
-
-inline bool G1ParScanThreadState::needs_partial_trimming() const {
+bool G1ParScanThreadState::needs_partial_trimming() const {
   return !_task_queue->overflow_empty() ||
          (_task_queue->size() > _stack_trim_upper_threshold);
 }

-inline bool G1ParScanThreadState::is_partially_trimmed() const {
-  return _task_queue->overflow_empty() &&
-         (_task_queue->size() <= _stack_trim_lower_threshold);
-}
-
-inline void G1ParScanThreadState::trim_queue_to_threshold(uint threshold) {
-  ScannerTask task;
-  // Drain the overflow stack first, so other threads can potentially steal.
-  while (_task_queue->pop_overflow(task)) {
-    if (!_task_queue->try_push_to_taskqueue(task)) {
-      dispatch_task(task);
-    }
-  }
-
-  while (_task_queue->pop_local(task, threshold)) {
-    dispatch_task(task);
-  }
-}
-
-inline void G1ParScanThreadState::trim_queue_partially() {
+void G1ParScanThreadState::trim_queue_partially() {
   if (!needs_partial_trimming()) {
     return;
   }

   const Ticks start = Ticks::now();
-  do {
-    trim_queue_to_threshold(_stack_trim_lower_threshold);
-  } while (!is_partially_trimmed());
+  trim_queue_to_threshold(_stack_trim_lower_threshold);
+  assert(_task_queue->overflow_empty(), "invariant");
+  assert(_task_queue->size() <= _stack_trim_lower_threshold, "invariant");
   _trim_ticks += Ticks::now() - start;
 }

+void G1ParScanThreadState::trim_queue() {
+  trim_queue_to_threshold(0);
+  assert(_task_queue->overflow_empty(), "invariant");
+  assert(_task_queue->taskqueue_empty(), "invariant");
+}
+
 inline Tickspan G1ParScanThreadState::trim_ticks() const {
   return _trim_ticks;
 }
@@ -218,30 +94,4 @@ G1OopStarChunkedList* G1ParScanThreadState::oops_into_optional_region(const Heap
   return &_oops_into_optional_regions[hr->index_in_opt_cset()];
 }

-void G1ParScanThreadState::initialize_numa_stats() {
-  if (_numa->is_enabled()) {
-    LogTarget(Info, gc, heap, numa) lt;
-
-    if (lt.is_enabled()) {
-      uint num_nodes = _numa->num_active_nodes();
-      // Record only if there are multiple active nodes.
-      _obj_alloc_stat = NEW_C_HEAP_ARRAY(size_t, num_nodes, mtGC);
-      memset(_obj_alloc_stat, 0, sizeof(size_t) * num_nodes);
-    }
-  }
-}
-
-void G1ParScanThreadState::flush_numa_stats() {
-  if (_obj_alloc_stat != NULL) {
-    uint node_index = _numa->index_of_current_thread();
-    _numa->copy_statistics(G1NUMAStats::LocalObjProcessAtCopyToSurv, node_index, _obj_alloc_stat);
-  }
-}
-
-void G1ParScanThreadState::update_numa_stats(uint node_index) {
-  if (_obj_alloc_stat != NULL) {
-    _obj_alloc_stat[node_index]++;
-  }
-}
-
 #endif // SHARE_GC_G1_G1PARSCANTHREADSTATE_INLINE_HPP
@@ -539,7 +539,6 @@ void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
   old_gen()->object_iterate(cl);
 }

-
 HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
   if (young_gen()->is_in_reserved(addr)) {
     assert(young_gen()->is_in(addr),
@@ -611,6 +610,10 @@ void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
   ParallelScavengeHeap::heap()->workers().threads_do(tc);
 }

+void ParallelScavengeHeap::run_task(AbstractGangTask* task) {
+  _workers.run_task(task);
+}
+
 void ParallelScavengeHeap::print_tracing_info() const {
   AdaptiveSizePolicyOutput::print();
   log_debug(gc, heap, exit)("Accumulated young generation GC time %3.7f secs", PSScavenge::accumulated_time()->seconds());
@@ -218,6 +218,8 @@ class ParallelScavengeHeap : public CollectedHeap {
   virtual void print_on(outputStream* st) const;
   virtual void print_on_error(outputStream* st) const;
   virtual void gc_threads_do(ThreadClosure* tc) const;
+  // Runs the given AbstractGangTask with the current active workers.
+  virtual void run_task(AbstractGangTask* task);
   virtual void print_tracing_info() const;

   virtual WorkGang* get_safepoint_workers() { return &_workers; }
@@ -87,3 +87,8 @@ GrowableArray<MemoryPool*> SerialHeap::memory_pools() {
   memory_pools.append(_old_pool);
   return memory_pools;
 }
+
+// No workGang for SerialHeap, work serially with thread 0.
+void SerialHeap::run_task(AbstractGangTask* task) {
+  task->work(0);
+}
@@ -75,6 +75,10 @@ public:
   template <typename OopClosureType1, typename OopClosureType2>
   void oop_since_save_marks_iterate(OopClosureType1* cur,
                                     OopClosureType2* older);
+
+  // Runs the given AbstractGangTask with the current active workers.
+  // No workGang for SerialHeap, work serially with thread 0.
+  virtual void run_task(AbstractGangTask* task);
 };

 #endif // SHARE_GC_SERIAL_SERIALHEAP_HPP
@@ -29,6 +29,7 @@
 #include "gc/shared/gcWhen.hpp"
 #include "gc/shared/verifyOption.hpp"
 #include "memory/allocation.hpp"
+#include "memory/heapInspection.hpp"
 #include "memory/universe.hpp"
 #include "runtime/handles.hpp"
 #include "runtime/perfData.hpp"
@@ -44,6 +45,7 @@
 // class defines the functions that a heap must implement, and contains
 // infrastructure common to all heaps.

+class AbstractGangTask;
 class AdaptiveSizePolicy;
 class BarrierSet;
 class GCHeapSummary;
@@ -85,6 +87,11 @@ class GCHeapLog : public EventLogBase<GCMessage> {
   }
 };

+class ParallelObjectIterator : public CHeapObj<mtGC> {
+public:
+  virtual void object_iterate(ObjectClosure* cl, uint worker_id) = 0;
+};
+
 //
 // CollectedHeap
 //   GenCollectedHeap
@@ -407,6 +414,10 @@ class CollectedHeap : public CHeapObj<mtInternal> {
   // Iterate over all objects, calling "cl.do_object" on each.
   virtual void object_iterate(ObjectClosure* cl) = 0;

+  virtual ParallelObjectIterator* parallel_object_iterator(uint thread_num) {
+    return NULL;
+  }
+
   // Keep alive an object that was loaded with AS_NO_KEEPALIVE.
   virtual void keep_alive(oop obj) {}

@@ -456,6 +467,9 @@ class CollectedHeap : public CHeapObj<mtInternal> {
   // Iterator for all GC threads (other than VM thread)
   virtual void gc_threads_do(ThreadClosure* tc) const = 0;

+  // Run given task. Possibly in parallel if the GC supports it.
+  virtual void run_task(AbstractGangTask* task) = 0;
+
   // Print any relevant tracing info that flags imply.
   // Default implementation does nothing.
   virtual void print_tracing_info() const = 0;
@@ -149,7 +149,7 @@ void VM_GC_HeapInspection::doit() {
     }
   }
   HeapInspection inspect;
-  inspect.heap_inspection(_out);
+  inspect.heap_inspection(_out, _parallel_thread_num);
 }

@@ -125,12 +125,15 @@ class VM_GC_HeapInspection: public VM_GC_Operation {
  private:
   outputStream* _out;
   bool _full_gc;
+  uint _parallel_thread_num;
  public:
-  VM_GC_HeapInspection(outputStream* out, bool request_full_gc) :
+  VM_GC_HeapInspection(outputStream* out, bool request_full_gc,
+                       uint parallel_thread_num = 1) :
     VM_GC_Operation(0 /* total collections, dummy, ignored */,
                     GCCause::_heap_inspection /* GC Cause */,
                     0 /* total full collections, dummy, ignored */,
-                    request_full_gc), _out(out), _full_gc(request_full_gc) {}
+                    request_full_gc), _out(out), _full_gc(request_full_gc),
+                    _parallel_thread_num(parallel_thread_num) {}

   ~VM_GC_HeapInspection() {}
   virtual VMOp_Type type() const { return VMOp_GC_HeapInspection; }
@@ -77,6 +77,7 @@ inline oop ShenandoahBarrierSet::load_reference_barrier_mutator(oop obj, T* load
 }

 inline void ShenandoahBarrierSet::enqueue(oop obj) {
+  assert(obj != NULL, "checked by caller");
   assert(_satb_mark_queue_set.is_active(), "only get here when SATB active");

   // Filter marked objects before hitting the SATB queues. The same predicate would
@@ -116,6 +117,7 @@ inline void ShenandoahBarrierSet::storeval_barrier(oop obj) {

 inline void ShenandoahBarrierSet::keep_alive_if_weak(DecoratorSet decorators, oop value) {
   assert((decorators & ON_UNKNOWN_OOP_REF) == 0, "Reference strength must be known");
+  assert(value != NULL, "checked by caller");
   const bool on_strong_oop_ref = (decorators & ON_STRONG_OOP_REF) != 0;
   const bool peek = (decorators & AS_NO_KEEPALIVE) != 0;
   if (!peek && !on_strong_oop_ref) {
@@ -125,6 +127,7 @@ inline void ShenandoahBarrierSet::keep_alive_if_weak(DecoratorSet decorators, oo

 template <DecoratorSet decorators>
 inline void ShenandoahBarrierSet::keep_alive_if_weak(oop value) {
+  assert(value != NULL, "checked by caller");
   assert((decorators & ON_UNKNOWN_OOP_REF) == 0, "Reference strength must be known");
   if (!HasDecorator<decorators, ON_STRONG_OOP_REF>::value &&
       !HasDecorator<decorators, AS_NO_KEEPALIVE>::value) {
@@ -256,7 +256,8 @@ class ShenandoahFinalMarkingTask : public AbstractGangTask {
 private:
   ShenandoahConcurrentMark* _cm;
   TaskTerminator*           _terminator;
-  bool _dedup_string;
+  bool                      _dedup_string;
+  ShenandoahSharedFlag      _claimed_syncroots;

 public:
   ShenandoahFinalMarkingTask(ShenandoahConcurrentMark* cm, TaskTerminator* terminator, bool dedup_string) :
@@ -294,6 +295,9 @@ public:
                                                 ShenandoahStoreValEnqueueBarrier ? &resolve_mark_cl : NULL,
                                                 do_nmethods ? &blobsCl : NULL);
       Threads::threads_do(&tc);
+      if (ShenandoahStoreValEnqueueBarrier && _claimed_syncroots.try_set()) {
+        ObjectSynchronizer::oops_do(&resolve_mark_cl);
+      }
     } else {
       ShenandoahMarkRefsClosure mark_cl(q, rp);
       MarkingCodeBlobClosure blobsCl(&mark_cl, !CodeBlobToOopClosure::FixRelocations);
@@ -301,6 +305,9 @@ public:
                                                 ShenandoahStoreValEnqueueBarrier ? &mark_cl : NULL,
                                                 do_nmethods ? &blobsCl : NULL);
       Threads::threads_do(&tc);
+      if (ShenandoahStoreValEnqueueBarrier && _claimed_syncroots.try_set()) {
+        ObjectSynchronizer::oops_do(&mark_cl);
+      }
     }
   }

@@ -1195,6 +1195,10 @@ void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
   }
 }

+void ShenandoahHeap::run_task(AbstractGangTask* task) {
+  workers()->run_task(task, workers()->active_workers());
+}
+
 void ShenandoahHeap::print_tracing_info() const {
   LogTarget(Info, gc, stats) lt;
   if (lt.is_enabled()) {
@@ -1325,7 +1329,7 @@ void ShenandoahHeap::object_iterate(ObjectClosure* cl) {

 // Keep alive an object that was loaded with AS_NO_KEEPALIVE.
 void ShenandoahHeap::keep_alive(oop obj) {
-  if (is_concurrent_mark_in_progress()) {
+  if (is_concurrent_mark_in_progress() && (obj != NULL)) {
     ShenandoahBarrierSet::barrier_set()->enqueue(obj);
   }
 }
@@ -198,6 +198,8 @@ public:
   WorkGang* get_safepoint_workers();

   void gc_threads_do(ThreadClosure* tcl) const;
+  // Runs the given AbstractGangTask with the current active workers.
+  virtual void run_task(AbstractGangTask* task);

   // ---------- Heap regions handling machinery
   //
@@ -253,6 +253,10 @@ void ZCollectedHeap::object_iterate(ObjectClosure* cl) {
   _heap.object_iterate(cl, true /* visit_weaks */);
 }

+void ZCollectedHeap::run_task(AbstractGangTask* task) {
+  return _heap.run_task(task);
+}
+
 void ZCollectedHeap::keep_alive(oop obj) {
   _heap.keep_alive(obj);
 }
@@ -98,6 +98,8 @@ public:

   virtual void object_iterate(ObjectClosure* cl);

+  virtual void run_task(AbstractGangTask* task);
+
   virtual void keep_alive(oop obj);

   virtual void register_nmethod(nmethod* nm);
|
||||
#include "gc/z/zRelocationSetSelector.inline.hpp"
|
||||
#include "gc/z/zResurrection.hpp"
|
||||
#include "gc/z/zStat.hpp"
|
||||
#include "gc/z/zTask.hpp"
|
||||
#include "gc/z/zThread.inline.hpp"
|
||||
#include "gc/z/zVerify.hpp"
|
||||
#include "gc/z/zWorkers.inline.hpp"
|
||||
@ -185,6 +186,26 @@ void ZHeap::threads_do(ThreadClosure* tc) const {
|
||||
_workers.threads_do(tc);
|
||||
}
|
||||
|
||||
// Adapter class from AbstractGangTask to Ztask
|
||||
class ZAbstractGangTaskAdapter : public ZTask {
|
||||
private:
|
||||
AbstractGangTask* _task;
|
||||
|
||||
public:
|
||||
ZAbstractGangTaskAdapter(AbstractGangTask* task) :
|
||||
ZTask(task->name()),
|
||||
_task(task) { }
|
||||
|
||||
virtual void work() {
|
||||
_task->work(ZThread::worker_id());
|
||||
}
|
||||
};
|
||||
|
||||
void ZHeap::run_task(AbstractGangTask* task) {
|
||||
ZAbstractGangTaskAdapter ztask(task);
|
||||
_workers.run_parallel(&ztask);
|
||||
}
|
||||
|
||||
void ZHeap::out_of_memory() {
|
||||
ResourceMark rm;
|
||||
|
||||
|
@ -98,6 +98,7 @@ public:
|
||||
uint nconcurrent_no_boost_worker_threads() const;
|
||||
void set_boost_worker_threads(bool boost);
|
||||
void threads_do(ThreadClosure* tc) const;
|
||||
void run_task(AbstractGangTask* task);
|
||||
|
||||
// Reference processing
|
||||
ReferenceDiscoverer* reference_discoverer();
|
||||
|
src/hotspot/share/memory/archiveBuilder.cpp (new file, 558 lines)
@@ -0,0 +1,558 @@
|
||||
/*
|
||||
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "logging/log.hpp"
#include "logging/logMessage.hpp"
#include "memory/archiveBuilder.hpp"
#include "memory/archiveUtils.hpp"
#include "memory/dumpAllocStats.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oopHandle.inline.hpp"
#include "utilities/align.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/hashtable.inline.hpp"

ArchiveBuilder* ArchiveBuilder::_singleton = NULL;

ArchiveBuilder::OtherROAllocMark::~OtherROAllocMark() {
  char* newtop = ArchiveBuilder::singleton()->_ro_region->top();
  ArchiveBuilder::alloc_stats()->record_other_type(int(newtop - _oldtop), true);
}

ArchiveBuilder::SourceObjList::SourceObjList() : _ptrmap(16 * K) {
  _total_bytes = 0;
  _objs = new (ResourceObj::C_HEAP, mtClassShared) GrowableArray<SourceObjInfo*>(128 * K, mtClassShared);
}

ArchiveBuilder::SourceObjList::~SourceObjList() {
  delete _objs;
}

void ArchiveBuilder::SourceObjList::append(MetaspaceClosure::Ref* enclosing_ref, SourceObjInfo* src_info) {
  // Save this source object for copying
  _objs->append(src_info);

  // Prepare for marking the pointers in this source object
  assert(is_aligned(_total_bytes, sizeof(address)), "must be");
  src_info->set_ptrmap_start(_total_bytes / sizeof(address));
  _total_bytes = align_up(_total_bytes + (uintx)src_info->size_in_bytes(), sizeof(address));
  src_info->set_ptrmap_end(_total_bytes / sizeof(address));

  BitMap::idx_t bitmap_size_needed = BitMap::idx_t(src_info->ptrmap_end());
  if (_ptrmap.size() <= bitmap_size_needed) {
    _ptrmap.resize((bitmap_size_needed + 1) * 2);
  }
}

void ArchiveBuilder::SourceObjList::remember_embedded_pointer(SourceObjInfo* src_info, MetaspaceClosure::Ref* ref) {
  // src_obj contains a pointer. Remember the location of this pointer in _ptrmap,
  // so that we can copy/relocate it later. E.g., if we have
  //    class Foo { intx scalar; Bar* ptr; }
  //    Foo *f = 0x100;
  // To mark the f->ptr pointer on 64-bit platform, this function is called with
  //    src_info->obj() == 0x100
  //    ref->addr()     == 0x108
  address src_obj = src_info->obj();
  address* field_addr = ref->addr();
  assert(src_info->ptrmap_start() < _total_bytes, "sanity");
  assert(src_info->ptrmap_end() <= _total_bytes, "sanity");
  assert(*field_addr != NULL, "should have checked");

  intx field_offset_in_bytes = ((address)field_addr) - src_obj;
  DEBUG_ONLY(int src_obj_size = src_info->size_in_bytes();)
  assert(field_offset_in_bytes >= 0, "must be");
  assert(field_offset_in_bytes + intx(sizeof(intptr_t)) <= intx(src_obj_size), "must be");
  assert(is_aligned(field_offset_in_bytes, sizeof(address)), "must be");

  BitMap::idx_t idx = BitMap::idx_t(src_info->ptrmap_start() + (uintx)(field_offset_in_bytes / sizeof(address)));
  _ptrmap.set_bit(BitMap::idx_t(idx));
}
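
Note (not part of the patch): the ptrmap bookkeeping above is plain address arithmetic — each word-sized slot of every appended source object gets one bit in a shared bitmap. A minimal standalone sketch, with hypothetical sizes and std::vector<bool> standing in for CHeapBitMap:

#include <cassert>
#include <cstddef>
#include <vector>

int main() {
  const size_t word = sizeof(void*);          // 8 on a 64-bit platform
  // Suppose two source objects were appended back to back:
  size_t obj_a_start_bit = 0;                 // ptrmap_start of object A
  size_t obj_a_bytes     = 24;                // 3 words
  size_t obj_b_start_bit = obj_a_start_bit + obj_a_bytes / word;  // == 3

  // Object B has a pointer field at byte offset 8 from its start.
  size_t field_offset = 8;
  size_t bit = obj_b_start_bit + field_offset / word;             // == 4

  std::vector<bool> ptrmap(16, false);        // stand-in for CHeapBitMap
  ptrmap[bit] = true;                         // what remember_embedded_pointer() records
  assert(ptrmap[4]);
  return 0;
}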

class RelocateEmbeddedPointers : public BitMapClosure {
  ArchiveBuilder* _builder;
  address _dumped_obj;
  BitMap::idx_t _start_idx;
public:
  RelocateEmbeddedPointers(ArchiveBuilder* builder, address dumped_obj, BitMap::idx_t start_idx) :
    _builder(builder), _dumped_obj(dumped_obj), _start_idx(start_idx) {}

  bool do_bit(BitMap::idx_t bit_offset) {
    uintx FLAG_MASK = 0x03; // See comments around MetaspaceClosure::FLAG_MASK
    size_t field_offset = size_t(bit_offset - _start_idx) * sizeof(address);
    address* ptr_loc = (address*)(_dumped_obj + field_offset);

    uintx old_p_and_bits = (uintx)(*ptr_loc);
    uintx flag_bits = (old_p_and_bits & FLAG_MASK);
    address old_p = (address)(old_p_and_bits & (~FLAG_MASK));
    address new_p = _builder->get_dumped_addr(old_p);
    uintx new_p_and_bits = ((uintx)new_p) | flag_bits;

    log_trace(cds)("Ref: [" PTR_FORMAT "] -> " PTR_FORMAT " => " PTR_FORMAT,
                   p2i(ptr_loc), p2i(old_p), p2i(new_p));

    ArchivePtrMarker::set_and_mark_pointer(ptr_loc, (address)(new_p_and_bits));
    return true; // keep iterating the bitmap
  }
};

void ArchiveBuilder::SourceObjList::relocate(int i, ArchiveBuilder* builder) {
  SourceObjInfo* src_info = objs()->at(i);
  assert(src_info->should_copy(), "must be");
  BitMap::idx_t start = BitMap::idx_t(src_info->ptrmap_start()); // inclusive
  BitMap::idx_t end = BitMap::idx_t(src_info->ptrmap_end());     // exclusive

  RelocateEmbeddedPointers relocator(builder, src_info->dumped_addr(), start);
  _ptrmap.iterate(&relocator, start, end);
}
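
Note (illustrative, not part of the patch): do_bit() above must preserve the low tag bits that MetaspaceClosure smuggles into pointer values. A self-contained sketch of that mask-and-reapply step, with made-up addresses and the 0x03 mask taken from the comment above:

#include <cassert>
#include <cstdint>

int main() {
  const uintptr_t FLAG_MASK = 0x03;
  uintptr_t old_tagged = 0x7000 | 0x1;        // pointer 0x7000 with a tag bit set
  uintptr_t flags = old_tagged & FLAG_MASK;   // save the tag bits
  uintptr_t old_p = old_tagged & ~FLAG_MASK;  // strip them to recover the real pointer
  uintptr_t new_p = old_p + 0x1000;           // stand-in for get_dumped_addr(old_p)
  uintptr_t new_tagged = new_p | flags;       // reapply the tag bits before storing
  assert(new_tagged == 0x8001);
  return 0;
}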

ArchiveBuilder::ArchiveBuilder(DumpRegion* rw_region, DumpRegion* ro_region)
  : _rw_src_objs(), _ro_src_objs(), _src_obj_table(INITIAL_TABLE_SIZE) {
  assert(_singleton == NULL, "must be");
  _singleton = this;

  _klasses = new (ResourceObj::C_HEAP, mtClassShared) GrowableArray<Klass*>(4 * K, mtClassShared);
  _symbols = new (ResourceObj::C_HEAP, mtClassShared) GrowableArray<Symbol*>(256 * K, mtClassShared);
  _special_refs = new (ResourceObj::C_HEAP, mtClassShared) GrowableArray<SpecialRefInfo>(24 * K, mtClassShared);

  _num_instance_klasses = 0;
  _num_obj_array_klasses = 0;
  _num_type_array_klasses = 0;
  _alloc_stats = new (ResourceObj::C_HEAP, mtClassShared) DumpAllocStats;

  _rw_region = rw_region;
  _ro_region = ro_region;

  _estimated_metsapceobj_bytes = 0;
}

ArchiveBuilder::~ArchiveBuilder() {
  assert(_singleton == this, "must be");
  _singleton = NULL;

  clean_up_src_obj_table();

  delete _klasses;
  delete _symbols;
  delete _special_refs;
  delete _alloc_stats;
}

class GatherKlassesAndSymbols : public UniqueMetaspaceClosure {
  ArchiveBuilder* _builder;

public:
  GatherKlassesAndSymbols(ArchiveBuilder* builder) : _builder(builder) {}

  virtual bool do_unique_ref(Ref* ref, bool read_only) {
    return _builder->gather_klass_and_symbol(ref, read_only);
  }
};

bool ArchiveBuilder::gather_klass_and_symbol(MetaspaceClosure::Ref* ref, bool read_only) {
  if (ref->obj() == NULL) {
    return false;
  }
  if (get_follow_mode(ref) != make_a_copy) {
    return false;
  }
  if (ref->msotype() == MetaspaceObj::ClassType) {
    Klass* klass = (Klass*)ref->obj();
    assert(klass->is_klass(), "must be");
    if (!is_excluded(klass)) {
      _klasses->append(klass);
      if (klass->is_instance_klass()) {
        _num_instance_klasses ++;
      } else if (klass->is_objArray_klass()) {
        _num_obj_array_klasses ++;
      } else {
        assert(klass->is_typeArray_klass(), "sanity");
        _num_type_array_klasses ++;
      }
    }
    _estimated_metsapceobj_bytes += BytesPerWord; // See RunTimeSharedClassInfo::get_for()
  } else if (ref->msotype() == MetaspaceObj::SymbolType) {
    _symbols->append((Symbol*)ref->obj());
  }

  int bytes = ref->size() * BytesPerWord;
  _estimated_metsapceobj_bytes += bytes;

  return true; // recurse
}

void ArchiveBuilder::gather_klasses_and_symbols() {
  ResourceMark rm;
  log_info(cds)("Gathering classes and symbols ... ");
  GatherKlassesAndSymbols doit(this);
  iterate_roots(&doit, /*is_relocating_pointers=*/false);
  doit.finish();

  log_info(cds)("Number of classes %d", _num_instance_klasses + _num_obj_array_klasses + _num_type_array_klasses);
  log_info(cds)("    instance classes   = %5d", _num_instance_klasses);
  log_info(cds)("    obj array classes  = %5d", _num_obj_array_klasses);
  log_info(cds)("    type array classes = %5d", _num_type_array_klasses);

  if (DumpSharedSpaces) {
    // To ensure deterministic contents in the static archive, we need to ensure that
    // we iterate the MetaspaceObjs in a deterministic order. It doesn't matter where
    // the MetaspaceObjs are located originally, as they are copied sequentially into
    // the archive during the iteration.
    //
    // The only issue here is that the symbol table and the system dictionaries may be
    // randomly ordered, so we copy the symbols and klasses into two arrays and sort
    // them deterministically.
    //
    // During -Xshare:dump, the order of Symbol creation is strictly determined by
    // the SharedClassListFile (class loading is done in a single thread and the JIT
    // is disabled). Also, Symbols are allocated in monotonically increasing addresses
    // (see Symbol::operator new(size_t, int)). So if we iterate the Symbols by
    // ascending address order, we ensure that all Symbols are copied into deterministic
    // locations in the archive.
    //
    // TODO: in the future, if we want to produce deterministic contents in the
    // dynamic archive, we might need to sort the symbols alphabetically (also see
    // DynamicArchiveBuilder::sort_methods()).
    sort_symbols_and_fix_hash();
    sort_klasses();
  }
}
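
Note (illustrative, not part of the patch): the determinism argument in the comment above boils down to "sort by a reproducible key, then copy in sorted order". A toy sketch with hypothetical Symbol addresses — whatever order the closure discovers them in, address order gives one canonical copy order:

#include <algorithm>
#include <cstdint>
#include <vector>

int main() {
  // Arbitrary discovery order from hash-table iteration:
  std::vector<uintptr_t> symbol_addrs = {0x9100, 0x9000, 0x9040};
  // Since Symbols are allocated at monotonically increasing addresses during
  // -Xshare:dump, sorting by address recovers creation order deterministically.
  std::sort(symbol_addrs.begin(), symbol_addrs.end());
  return symbol_addrs.front() == 0x9000 ? 0 : 1;
}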

int ArchiveBuilder::compare_symbols_by_address(Symbol** a, Symbol** b) {
  if (a[0] < b[0]) {
    return -1;
  } else {
    assert(a[0] > b[0], "Duplicated symbol %s unexpected", (*a)->as_C_string());
    return 1;
  }
}

void ArchiveBuilder::sort_symbols_and_fix_hash() {
  log_info(cds)("Sorting symbols and fixing identity hash ... ");
  os::init_random(0x12345678);
  _symbols->sort(compare_symbols_by_address);
  for (int i = 0; i < _symbols->length(); i++) {
    assert(_symbols->at(i)->is_permanent(), "archived symbols must be permanent");
    _symbols->at(i)->update_identity_hash();
  }
}

int ArchiveBuilder::compare_klass_by_name(Klass** a, Klass** b) {
  return a[0]->name()->fast_compare(b[0]->name());
}

void ArchiveBuilder::sort_klasses() {
  log_info(cds)("Sorting classes ... ");
  _klasses->sort(compare_klass_by_name);
}

void ArchiveBuilder::iterate_sorted_roots(MetaspaceClosure* it, bool is_relocating_pointers) {
  int i;

  int num_symbols = _symbols->length();
  for (i = 0; i < num_symbols; i++) {
    it->push(&_symbols->at(i));
  }

  int num_klasses = _klasses->length();
  for (i = 0; i < num_klasses; i++) {
    it->push(&_klasses->at(i));
  }

  iterate_roots(it, is_relocating_pointers);
}

class GatherSortedSourceObjs : public MetaspaceClosure {
  ArchiveBuilder* _builder;

public:
  GatherSortedSourceObjs(ArchiveBuilder* builder) : _builder(builder) {}

  virtual bool do_ref(Ref* ref, bool read_only) {
    return _builder->gather_one_source_obj(enclosing_ref(), ref, read_only);
  }

  virtual void push_special(SpecialRef type, Ref* ref, intptr_t* p) {
    assert(type == _method_entry_ref, "only special type allowed for now");
    address src_obj = ref->obj();
    size_t field_offset = pointer_delta(p, src_obj, sizeof(u1));
    _builder->add_special_ref(type, src_obj, field_offset);
  }

  virtual void do_pending_ref(Ref* ref) {
    if (ref->obj() != NULL) {
      _builder->remember_embedded_pointer_in_copied_obj(enclosing_ref(), ref);
    }
  }
};

bool ArchiveBuilder::gather_one_source_obj(MetaspaceClosure::Ref* enclosing_ref,
                                           MetaspaceClosure::Ref* ref, bool read_only) {
  address src_obj = ref->obj();
  if (src_obj == NULL) {
    return false;
  }
  ref->set_keep_after_pushing();
  remember_embedded_pointer_in_copied_obj(enclosing_ref, ref);

  FollowMode follow_mode = get_follow_mode(ref);
  SourceObjInfo src_info(ref, read_only, follow_mode);
  bool created = false;
  SourceObjInfo* p = _src_obj_table.lookup(src_obj);
  if (p == NULL) {
    p = _src_obj_table.add(src_obj, src_info);
    if (_src_obj_table.maybe_grow(MAX_TABLE_SIZE)) {
      log_info(cds, hashtables)("Expanded _src_obj_table table to %d", _src_obj_table.table_size());
    }
    created = true;
  }

  assert(p->read_only() == src_info.read_only(), "must be");

  if (created && src_info.should_copy()) {
    ref->set_user_data((void*)p);
    if (read_only) {
      _ro_src_objs.append(enclosing_ref, p);
    } else {
      _rw_src_objs.append(enclosing_ref, p);
    }
    return true; // Need to recurse into this ref only if we are copying it
  } else {
    return false;
  }
}
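
Note (illustrative, not part of the patch): gather_one_source_obj() is a lookup-or-add dedup over the source-object table. A minimal C++17 sketch of that shape, with std::map standing in for the hypothetical equivalent of KVHashtable:

#include <cstdint>
#include <map>

struct Info { bool read_only; };

int main() {
  std::map<uintptr_t, Info> src_obj_table;
  uintptr_t src_obj = 0x100;
  // try_emplace only inserts on first encounter, like _src_obj_table.add():
  auto [it, created] = src_obj_table.try_emplace(src_obj, Info{true});
  // 'created' plays the role of the flag above: only a first encounter
  // appends the object to the rw/ro lists and recurses into its fields.
  return created ? 0 : 1;
}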

void ArchiveBuilder::add_special_ref(MetaspaceClosure::SpecialRef type, address src_obj, size_t field_offset) {
  _special_refs->append(SpecialRefInfo(type, src_obj, field_offset));
}

void ArchiveBuilder::remember_embedded_pointer_in_copied_obj(MetaspaceClosure::Ref* enclosing_ref,
                                                             MetaspaceClosure::Ref* ref) {
  assert(ref->obj() != NULL, "should have checked");

  if (enclosing_ref != NULL) {
    SourceObjInfo* src_info = (SourceObjInfo*)enclosing_ref->user_data();
    if (src_info == NULL) {
      // source objects of point_to_it/set_to_null types are not copied
      // so we don't need to remember their pointers.
    } else {
      if (src_info->read_only()) {
        _ro_src_objs.remember_embedded_pointer(src_info, ref);
      } else {
        _rw_src_objs.remember_embedded_pointer(src_info, ref);
      }
    }
  }
}

void ArchiveBuilder::gather_source_objs() {
  ResourceMark rm;
  log_info(cds)("Gathering all archivable objects ... ");
  GatherSortedSourceObjs doit(this);
  iterate_sorted_roots(&doit, /*is_relocating_pointers=*/false);
  doit.finish();
}

bool ArchiveBuilder::is_excluded(Klass* klass) {
  if (klass->is_instance_klass()) {
    InstanceKlass* ik = InstanceKlass::cast(klass);
    return SystemDictionaryShared::is_excluded_class(ik);
  } else if (klass->is_objArray_klass()) {
    if (DynamicDumpSharedSpaces) {
      // Don't support archiving of array klasses for now (WHY???)
      return true;
    }
    Klass* bottom = ObjArrayKlass::cast(klass)->bottom_klass();
    if (bottom->is_instance_klass()) {
      return SystemDictionaryShared::is_excluded_class(InstanceKlass::cast(bottom));
    }
  }

  return false;
}

ArchiveBuilder::FollowMode ArchiveBuilder::get_follow_mode(MetaspaceClosure::Ref *ref) {
  address obj = ref->obj();
  if (MetaspaceShared::is_in_shared_metaspace(obj)) {
    // Don't dump existing shared metadata again.
    return point_to_it;
  } else if (ref->msotype() == MetaspaceObj::MethodDataType) {
    return set_to_null;
  } else {
    if (ref->msotype() == MetaspaceObj::ClassType) {
      Klass* klass = (Klass*)ref->obj();
      assert(klass->is_klass(), "must be");
      if (is_excluded(klass)) {
        ResourceMark rm;
        log_debug(cds, dynamic)("Skipping class (excluded): %s", klass->external_name());
        return set_to_null;
      }
    }

    return make_a_copy;
  }
}

void ArchiveBuilder::dump_rw_region() {
  ResourceMark rm;
  log_info(cds)("Allocating RW objects ... ");
  make_shallow_copies(_rw_region, &_rw_src_objs);
}

void ArchiveBuilder::dump_ro_region() {
  ResourceMark rm;
  log_info(cds)("Allocating RO objects ... ");
  make_shallow_copies(_ro_region, &_ro_src_objs);
}

void ArchiveBuilder::make_shallow_copies(DumpRegion *dump_region,
                                         const ArchiveBuilder::SourceObjList* src_objs) {
  for (int i = 0; i < src_objs->objs()->length(); i++) {
    make_shallow_copy(dump_region, src_objs->objs()->at(i));
  }
  log_info(cds)("done (%d objects)", src_objs->objs()->length());
}

void ArchiveBuilder::make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* src_info) {
  MetaspaceClosure::Ref* ref = src_info->ref();
  address src = ref->obj();
  int bytes = src_info->size_in_bytes();
  char* dest;
  size_t alignment = BytesPerWord;
  char* oldtop;
  char* newtop;

  oldtop = dump_region->top();
  if (ref->msotype() == MetaspaceObj::ClassType) {
    // Save a pointer immediate in front of an InstanceKlass, so
    // we can do a quick lookup from InstanceKlass* -> RunTimeSharedClassInfo*
    // without building another hashtable. See RunTimeSharedClassInfo::get_for()
    // in systemDictionaryShared.cpp.
    Klass* klass = (Klass*)src;
    if (klass->is_instance_klass()) {
      SystemDictionaryShared::validate_before_archiving(InstanceKlass::cast(klass));
      dump_region->allocate(sizeof(address), BytesPerWord);
    }
  }
  dest = dump_region->allocate(bytes, alignment);
  newtop = dump_region->top();

  memcpy(dest, src, bytes);

  intptr_t* archived_vtable = MetaspaceShared::get_archived_cpp_vtable(ref->msotype(), (address)dest);
  if (archived_vtable != NULL) {
    *(address*)dest = (address)archived_vtable;
    ArchivePtrMarker::mark_pointer((address*)dest);
  }

  log_trace(cds)("Copy: " PTR_FORMAT " ==> " PTR_FORMAT " %d", p2i(src), p2i(dest), bytes);
  src_info->set_dumped_addr((address)dest);

  _alloc_stats->record(ref->msotype(), int(newtop - oldtop), src_info->read_only());
}
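
Note (illustrative, not part of the patch): the "pointer immediate in front of an InstanceKlass" trick above is a layout decision: bump the region cursor one word before copying the klass, so the info record can later be found at (address)klass - sizeof(address). A standalone sketch with hypothetical sizes:

#include <cassert>
#include <cstdint>

int main() {
  alignas(sizeof(void*)) uint8_t region[64]; // stand-in for the dump region
  uint8_t* top = region;

  uint8_t** info_slot = (uint8_t**)top;  top += sizeof(void*); // reserved slot
  uint8_t*  klass_copy = top;            top += 32;            // shallow copy lands here

  *info_slot = nullptr; // filled in later with the RunTimeSharedClassInfo* record
  assert((uint8_t*)info_slot == klass_copy - sizeof(void*));
  return 0;
}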

address ArchiveBuilder::get_dumped_addr(address src_obj) const {
  SourceObjInfo* p = _src_obj_table.lookup(src_obj);
  assert(p != NULL, "must be");

  return p->dumped_addr();
}

void ArchiveBuilder::relocate_embedded_pointers(ArchiveBuilder::SourceObjList* src_objs) {
  for (int i = 0; i < src_objs->objs()->length(); i++) {
    src_objs->relocate(i, this);
  }
}

void ArchiveBuilder::update_special_refs() {
  for (int i = 0; i < _special_refs->length(); i++) {
    SpecialRefInfo s = _special_refs->at(i);
    size_t field_offset = s.field_offset();
    address src_obj = s.src_obj();
    address dst_obj = get_dumped_addr(src_obj);
    intptr_t* src_p = (intptr_t*)(src_obj + field_offset);
    intptr_t* dst_p = (intptr_t*)(dst_obj + field_offset);
    assert(s.type() == MetaspaceClosure::_method_entry_ref, "only special type allowed for now");

    assert(*src_p == *dst_p, "must be a copy");
    ArchivePtrMarker::mark_pointer((address*)dst_p);
  }
}

class RefRelocator: public MetaspaceClosure {
  ArchiveBuilder* _builder;

public:
  RefRelocator(ArchiveBuilder* builder) : _builder(builder) {}

  virtual bool do_ref(Ref* ref, bool read_only) {
    if (ref->not_null()) {
      ref->update(_builder->get_dumped_addr(ref->obj()));
      ArchivePtrMarker::mark_pointer(ref->addr());
    }
    return false; // Do not recurse.
  }
};

void ArchiveBuilder::relocate_roots() {
  ResourceMark rm;
  RefRelocator doit(this);
  iterate_sorted_roots(&doit, /*is_relocating_pointers=*/true);
  doit.finish();
}

void ArchiveBuilder::relocate_pointers() {
  log_info(cds)("Relocating embedded pointers ... ");
  relocate_embedded_pointers(&_rw_src_objs);
  relocate_embedded_pointers(&_ro_src_objs);
  update_special_refs();

  log_info(cds)("Relocating external roots ... ");
  relocate_roots();

  log_info(cds)("done");
}

// We must relocate SystemDictionary::_well_known_klasses only after the java
// objects have been copied in dump_java_heap_objects(): during the object copy,
// we operate on old objects which assert that their klass is the original klass.
void ArchiveBuilder::relocate_well_known_klasses() {
  log_info(cds)("Relocating SystemDictionary::_well_known_klasses[] ... ");
  ResourceMark rm;
  RefRelocator doit(this);
  SystemDictionary::well_known_klasses_do(&doit);
}

void ArchiveBuilder::print_stats(int ro_all, int rw_all, int mc_all) {
  _alloc_stats->print_stats(ro_all, rw_all, mc_all);
}

void ArchiveBuilder::clean_up_src_obj_table() {
  SrcObjTableCleaner cleaner;
  _src_obj_table.iterate(&cleaner);
}

235
src/hotspot/share/memory/archiveBuilder.hpp
Normal file
@ -0,0 +1,235 @@
/*
 * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_MEMORY_ARCHIVEBUILDER_HPP
#define SHARE_MEMORY_ARCHIVEBUILDER_HPP

#include "memory/metaspaceClosure.hpp"
#include "utilities/bitMap.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/hashtable.hpp"
#include "utilities/resourceHash.hpp"

class CHeapBitMap;
class Klass;
class DumpRegion;
class Symbol;
class DumpAllocStats;

class ArchiveBuilder : public StackObj {
public:
  enum FollowMode {
    make_a_copy, point_to_it, set_to_null
  };

private:
  class SpecialRefInfo {
    // We have a "special pointer" of the given _type at _field_offset of _src_obj.
    // See MetaspaceClosure::push_special().
    MetaspaceClosure::SpecialRef _type;
    address _src_obj;
    size_t _field_offset;

  public:
    SpecialRefInfo() {}
    SpecialRefInfo(MetaspaceClosure::SpecialRef type, address src_obj, size_t field_offset)
      : _type(type), _src_obj(src_obj), _field_offset(field_offset) {}

    MetaspaceClosure::SpecialRef type() const { return _type; }
    address src_obj() const { return _src_obj; }
    size_t field_offset() const { return _field_offset; }
  };

  class SourceObjInfo {
    MetaspaceClosure::Ref* _ref;
    uintx _ptrmap_start;  // The bit-offset of the start of this object (inclusive)
    uintx _ptrmap_end;    // The bit-offset of the end of this object (exclusive)
    bool _read_only;
    FollowMode _follow_mode;
    address _dumped_addr; // Address this->obj(), as used by the dumped archive.

  public:
    SourceObjInfo(MetaspaceClosure::Ref* ref, bool read_only, FollowMode follow_mode) :
      _ref(ref), _ptrmap_start(0), _ptrmap_end(0), _read_only(read_only), _follow_mode(follow_mode) {
      if (follow_mode == point_to_it) {
        _dumped_addr = ref->obj();
      } else {
        _dumped_addr = NULL;
      }
    }

    bool should_copy() const { return _follow_mode == make_a_copy; }
    MetaspaceClosure::Ref* ref() const { return _ref; }
    void set_dumped_addr(address dumped_addr) {
      assert(should_copy(), "must be");
      assert(_dumped_addr == NULL, "cannot be copied twice");
      assert(dumped_addr != NULL, "must be a valid copy");
      _dumped_addr = dumped_addr;
    }
    void set_ptrmap_start(uintx v) { _ptrmap_start = v; }
    void set_ptrmap_end(uintx v)   { _ptrmap_end = v; }
    uintx ptrmap_start() const { return _ptrmap_start; } // inclusive
    uintx ptrmap_end()   const { return _ptrmap_end; }   // exclusive
    bool read_only() const { return _read_only; }
    int size_in_bytes() const { return _ref->size() * BytesPerWord; }
    address dumped_addr() const { return _dumped_addr; }

    // convenience accessor
    address obj() const { return ref()->obj(); }
  };

  class SourceObjList {
    uintx _total_bytes;
    GrowableArray<SourceObjInfo*>* _objs; // Source objects to be archived
    CHeapBitMap _ptrmap;                  // Marks the addresses of the pointer fields
                                          // in the source objects
  public:
    SourceObjList();
    ~SourceObjList();

    GrowableArray<SourceObjInfo*>* objs() const { return _objs; }

    void append(MetaspaceClosure::Ref* enclosing_ref, SourceObjInfo* src_info);
    void remember_embedded_pointer(SourceObjInfo* pointing_obj, MetaspaceClosure::Ref* ref);
    void relocate(int i, ArchiveBuilder* builder);

    // convenience accessor
    SourceObjInfo* at(int i) const { return objs()->at(i); }
  };

  class SrcObjTableCleaner {
  public:
    bool do_entry(address key, const SourceObjInfo* value) {
      delete value->ref();
      return true;
    }
  };

  static const int INITIAL_TABLE_SIZE = 15889;
  static const int MAX_TABLE_SIZE = 1000000;

  DumpRegion* _rw_region;
  DumpRegion* _ro_region;

  SourceObjList _rw_src_objs; // objs to put in rw region
  SourceObjList _ro_src_objs; // objs to put in ro region
  KVHashtable<address, SourceObjInfo, mtClassShared> _src_obj_table;
  GrowableArray<Klass*>* _klasses;
  GrowableArray<Symbol*>* _symbols;
  GrowableArray<SpecialRefInfo>* _special_refs;

  // statistics
  int _num_instance_klasses;
  int _num_obj_array_klasses;
  int _num_type_array_klasses;
  DumpAllocStats* _alloc_stats;

  // For global access.
  static ArchiveBuilder* _singleton;

public:
  // Use this when you allocate space with MetaspaceShared::read_only_space_alloc()
  // outside of ArchiveBuilder::dump_{rw,ro}_region. These are usually for misc tables
  // that are allocated in the RO space.
  class OtherROAllocMark {
    char* _oldtop;
  public:
    OtherROAllocMark() {
      _oldtop = _singleton->_ro_region->top();
    }
    ~OtherROAllocMark();
  };

private:
  FollowMode get_follow_mode(MetaspaceClosure::Ref *ref);

  void iterate_sorted_roots(MetaspaceClosure* it, bool is_relocating_pointers);
  void sort_symbols_and_fix_hash();
  void sort_klasses();
  static int compare_symbols_by_address(Symbol** a, Symbol** b);
  static int compare_klass_by_name(Klass** a, Klass** b);

  void make_shallow_copies(DumpRegion *dump_region, const SourceObjList* src_objs);
  void make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* src_info);

  void update_special_refs();
  void relocate_embedded_pointers(SourceObjList* src_objs);
  void relocate_roots();

  bool is_excluded(Klass* k);
  void clean_up_src_obj_table();

protected:
  virtual void iterate_roots(MetaspaceClosure* it, bool is_relocating_pointers) = 0;

  // Conservative estimate for number of bytes needed for:
  size_t _estimated_metsapceobj_bytes; // all archived MetaspaceObj's.

  void set_dump_regions(DumpRegion* rw_region, DumpRegion* ro_region) {
    assert(_rw_region == NULL && _ro_region == NULL, "do not change");
    _rw_region = rw_region;
    _ro_region = ro_region;
  }

public:
  ArchiveBuilder(DumpRegion* rw_region, DumpRegion* ro_region);
  ~ArchiveBuilder();

  void gather_klasses_and_symbols();
  void gather_source_objs();
  bool gather_klass_and_symbol(MetaspaceClosure::Ref* ref, bool read_only);
  bool gather_one_source_obj(MetaspaceClosure::Ref* enclosing_ref, MetaspaceClosure::Ref* ref, bool read_only);
  void add_special_ref(MetaspaceClosure::SpecialRef type, address src_obj, size_t field_offset);
  void remember_embedded_pointer_in_copied_obj(MetaspaceClosure::Ref* enclosing_ref, MetaspaceClosure::Ref* ref);

  void dump_rw_region();
  void dump_ro_region();
  void relocate_pointers();
  void relocate_well_known_klasses();

  address get_dumped_addr(address src_obj) const;

  // All klasses and symbols that will be copied into the archive
  GrowableArray<Klass*>* klasses() const { return _klasses; }
  GrowableArray<Symbol*>* symbols() const { return _symbols; }

  static ArchiveBuilder* singleton() {
    assert(_singleton != NULL, "ArchiveBuilder must be active");
    return _singleton;
  }

  static DumpAllocStats* alloc_stats() {
    return singleton()->_alloc_stats;
  }

  static Klass* get_relocated_klass(Klass* orig_klass) {
    Klass* klass = (Klass*)singleton()->get_dumped_addr((address)orig_klass);
    assert(klass != NULL && klass->is_klass(), "must be");
    return klass;
  }

  void print_stats(int ro_all, int rw_all, int mc_all);
};

#endif // SHARE_MEMORY_ARCHIVEBUILDER_HPP

@ -72,11 +72,23 @@ void ArchivePtrMarker::mark_pointer(address* ptr_loc) {
      }
      assert(idx < _ptrmap->size(), "must be");
      _ptrmap->set_bit(idx);
      //tty->print_cr("Marking pointer [%p] -> %p @ " SIZE_FORMAT_W(9), ptr_loc, *ptr_loc, idx);
      //tty->print_cr("Marking pointer [" PTR_FORMAT "] -> " PTR_FORMAT " @ " SIZE_FORMAT_W(5), p2i(ptr_loc), p2i(*ptr_loc), idx);
    }
  }
}

void ArchivePtrMarker::clear_pointer(address* ptr_loc) {
  assert(_ptrmap != NULL, "not initialized");
  assert(!_compacted, "cannot clear anymore");

  assert(_ptr_base <= ptr_loc && ptr_loc < _ptr_end, "must be");
  assert(uintx(ptr_loc) % sizeof(intptr_t) == 0, "pointers must be stored in aligned addresses");
  size_t idx = ptr_loc - _ptr_base;
  assert(idx < _ptrmap->size(), "cannot clear pointers that have not been marked");
  _ptrmap->clear_bit(idx);
  //tty->print_cr("Clearing pointer [" PTR_FORMAT "] -> " PTR_FORMAT " @ " SIZE_FORMAT_W(5), p2i(ptr_loc), p2i(*ptr_loc), idx);
}

class ArchivePtrBitmapCleaner: public BitMapClosure {
  CHeapBitMap* _ptrmap;
  address* _ptr_base;

@ -1,5 +1,5 @@
/*
 * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -44,6 +44,7 @@ class ArchivePtrMarker : AllStatic {
public:
  static void initialize(CHeapBitMap* ptrmap, address* ptr_base, address* ptr_end);
  static void mark_pointer(address* ptr_loc);
  static void clear_pointer(address* ptr_loc);
  static void compact(address relocatable_base, address relocatable_end);
  static void compact(size_t max_non_null_offset);

@ -52,6 +53,12 @@ public:
    mark_pointer((address*)ptr_loc);
  }

  template <typename T>
  static void set_and_mark_pointer(T* ptr_loc, T ptr_value) {
    *ptr_loc = ptr_value;
    mark_pointer(ptr_loc);
  }
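
Note (illustrative, not part of the patch): set_and_mark_pointer() above bundles "store the relocated value" with "record that this slot holds a pointer". A simplified standalone model — the arrays and the explicit index parameter are stand-ins for the real _ptr_base/CHeapBitMap machinery, where the index is derived from the slot address:

#include <cstddef>

static bool marked[64];     // stand-in for the pointer bitmap
static int* slot_base[64];  // stand-in for the archive's pointer slots

template <typename T>
void set_and_mark_pointer(T* ptr_loc, T ptr_value, size_t idx) {
  *ptr_loc = ptr_value;     // store the relocated value
  marked[idx] = true;       // record "this slot holds a pointer"
}

int main() {
  int x = 42;
  set_and_mark_pointer(&slot_base[3], &x, 3);
  return (marked[3] && *slot_base[3] == 42) ? 0 : 1;
}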

  static void expand_ptr_end(address *new_ptr_end) {
    assert(_ptr_end <= new_ptr_end, "must be");
    _ptr_end = new_ptr_end;

@ -34,7 +34,11 @@ inline bool SharedDataRelocator<COMPACTING>::do_bit(size_t offset) {
  assert(_patch_base <= p && p < _patch_end, "must be");

  address old_ptr = *p;
  assert(_valid_old_base <= old_ptr && old_ptr < _valid_old_end, "must be");
  if (old_ptr == NULL) {
    assert(COMPACTING, "NULL pointers should not be marked when relocating at run-time");
  } else {
    assert(_valid_old_base <= old_ptr && old_ptr < _valid_old_end, "must be");
  }

  if (COMPACTING) {
    // Start-up performance: use a template parameter to elide this block for run-time archive

118
src/hotspot/share/memory/dumpAllocStats.cpp
Normal file
@ -0,0 +1,118 @@
/*
 * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "logging/log.hpp"
#include "logging/logMessage.hpp"
#include "memory/dumpAllocStats.hpp"
#include "memory/metaspaceShared.hpp"

void DumpAllocStats::print_stats(int ro_all, int rw_all, int mc_all) {
  // Calculate size of data that was not allocated by Metaspace::allocate()
  MetaspaceSharedStats *stats = MetaspaceShared::stats();

  // symbols
  _counts[RO][SymbolHashentryType] = stats->symbol.hashentry_count;
  _bytes [RO][SymbolHashentryType] = stats->symbol.hashentry_bytes;

  _counts[RO][SymbolBucketType] = stats->symbol.bucket_count;
  _bytes [RO][SymbolBucketType] = stats->symbol.bucket_bytes;

  // strings
  _counts[RO][StringHashentryType] = stats->string.hashentry_count;
  _bytes [RO][StringHashentryType] = stats->string.hashentry_bytes;

  _counts[RO][StringBucketType] = stats->string.bucket_count;
  _bytes [RO][StringBucketType] = stats->string.bucket_bytes;

  // TODO: count things like dictionary, vtable, etc
  _bytes[RW][OtherType] += mc_all;
  rw_all += mc_all; // mc is mapped Read/Write

  // prevent divide-by-zero
  if (ro_all < 1) {
    ro_all = 1;
  }
  if (rw_all < 1) {
    rw_all = 1;
  }

  int all_ro_count = 0;
  int all_ro_bytes = 0;
  int all_rw_count = 0;
  int all_rw_bytes = 0;

// To make fmt_stats be a syntactic constant (for format warnings), use #define.
#define fmt_stats "%-20s: %8d %10d %5.1f | %8d %10d %5.1f | %8d %10d %5.1f"
  const char *sep = "--------------------+---------------------------+---------------------------+--------------------------";
  const char *hdr = "                        ro_cnt   ro_bytes     % |   rw_cnt   rw_bytes     % |  all_cnt  all_bytes     %";

  LogMessage(cds) msg;

  msg.debug("Detailed metadata info (excluding st regions; rw stats include mc regions):");
  msg.debug("%s", hdr);
  msg.debug("%s", sep);
  for (int type = 0; type < int(_number_of_types); type ++) {
    const char *name = type_name((Type)type);
    int ro_count = _counts[RO][type];
    int ro_bytes = _bytes [RO][type];
    int rw_count = _counts[RW][type];
    int rw_bytes = _bytes [RW][type];
    int count = ro_count + rw_count;
    int bytes = ro_bytes + rw_bytes;

    double ro_perc = percent_of(ro_bytes, ro_all);
    double rw_perc = percent_of(rw_bytes, rw_all);
    double perc    = percent_of(bytes, ro_all + rw_all);

    msg.debug(fmt_stats, name,
                         ro_count, ro_bytes, ro_perc,
                         rw_count, rw_bytes, rw_perc,
                         count, bytes, perc);

    all_ro_count += ro_count;
    all_ro_bytes += ro_bytes;
    all_rw_count += rw_count;
    all_rw_bytes += rw_bytes;
  }

  int all_count = all_ro_count + all_rw_count;
  int all_bytes = all_ro_bytes + all_rw_bytes;

  double all_ro_perc = percent_of(all_ro_bytes, ro_all);
  double all_rw_perc = percent_of(all_rw_bytes, rw_all);
  double all_perc    = percent_of(all_bytes, ro_all + rw_all);

  msg.debug("%s", sep);
  msg.debug(fmt_stats, "Total",
                       all_ro_count, all_ro_bytes, all_ro_perc,
                       all_rw_count, all_rw_bytes, all_rw_perc,
                       all_count, all_bytes, all_perc);

  assert(all_ro_bytes == ro_all, "everything should have been counted");
  assert(all_rw_bytes == rw_all, "everything should have been counted");

#undef fmt_stats
}

84
src/hotspot/share/memory/dumpAllocStats.hpp
Normal file
@ -0,0 +1,84 @@
/*
 * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_MEMORY_DUMPALLOCSTATS_HPP
#define SHARE_MEMORY_DUMPALLOCSTATS_HPP

#include "memory/allocation.hpp"

// This is for dumping detailed statistics for the allocations
// in the shared spaces.
class DumpAllocStats : public ResourceObj {
public:

  // Here's poor man's enum inheritance
#define SHAREDSPACE_OBJ_TYPES_DO(f) \
  METASPACE_OBJ_TYPES_DO(f) \
  f(SymbolHashentry) \
  f(SymbolBucket) \
  f(StringHashentry) \
  f(StringBucket) \
  f(Other)

  enum Type {
    // Types are MetaspaceObj::ClassType, MetaspaceObj::SymbolType, etc
    SHAREDSPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_DECLARE)
    _number_of_types
  };

  static const char* type_name(Type type) {
    switch(type) {
    SHAREDSPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_NAME_CASE)
    default:
      ShouldNotReachHere();
      return NULL;
    }
  }

public:
  enum { RO = 0, RW = 1 };

  int _counts[2][_number_of_types];
  int _bytes [2][_number_of_types];

  DumpAllocStats() {
    memset(_counts, 0, sizeof(_counts));
    memset(_bytes, 0, sizeof(_bytes));
  };

  void record(MetaspaceObj::Type type, int byte_size, bool read_only) {
    assert(int(type) >= 0 && type < MetaspaceObj::_number_of_types, "sanity");
    int which = (read_only) ? RO : RW;
    _counts[which][type] ++;
    _bytes [which][type] += byte_size;
  }

  void record_other_type(int byte_size, bool read_only) {
    int which = (read_only) ? RO : RW;
    _bytes [which][OtherType] += byte_size;
  }
  void print_stats(int ro_all, int rw_all, int mc_all);
};

#endif // SHARE_MEMORY_DUMPALLOCSTATS_HPP
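
Note (illustrative, not part of the patch): the RO/RW split in DumpAllocStats falls out of indexing a two-dimensional counter table by (read_only, type). A minimal standalone sketch with hypothetical enum values — the real Type enum is macro-generated:

#include <cstring>

enum { RO = 0, RW = 1, SymbolType = 0, ClassType = 1, NumTypes = 2 };

int counts[2][NumTypes];
int bytes_ [2][NumTypes];

void record(int type, int byte_size, bool read_only) {
  int which = read_only ? RO : RW;  // same dispatch as DumpAllocStats::record()
  counts[which][type]++;
  bytes_[which][type] += byte_size;
}

int main() {
  std::memset(counts, 0, sizeof(counts));
  std::memset(bytes_, 0, sizeof(bytes_));
  record(SymbolType, 16, true);     // a read-only Symbol
  record(ClassType, 480, false);    // a writable Klass
  return (bytes_[RO][SymbolType] == 16 && counts[RW][ClassType] == 1) ? 0 : 1;
}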
|
@ -26,46 +26,25 @@
|
||||
#include "jvm.h"
|
||||
#include "classfile/classLoaderData.inline.hpp"
|
||||
#include "classfile/symbolTable.hpp"
|
||||
#include "classfile/systemDictionary.hpp"
|
||||
#include "classfile/systemDictionaryShared.hpp"
|
||||
#include "logging/log.hpp"
|
||||
#include "memory/archiveBuilder.hpp"
|
||||
#include "memory/archiveUtils.inline.hpp"
|
||||
#include "memory/dynamicArchive.hpp"
|
||||
#include "memory/metadataFactory.hpp"
|
||||
#include "memory/metaspace.hpp"
|
||||
#include "memory/metaspaceClosure.hpp"
|
||||
#include "memory/metaspaceShared.hpp"
|
||||
#include "memory/resourceArea.hpp"
|
||||
#include "oops/compressedOops.hpp"
|
||||
#include "oops/objArrayKlass.hpp"
|
||||
#include "prims/jvmtiRedefineClasses.hpp"
|
||||
#include "runtime/handles.inline.hpp"
|
||||
#include "runtime/os.inline.hpp"
|
||||
#include "runtime/sharedRuntime.hpp"
|
||||
#include "runtime/vmThread.hpp"
|
||||
#include "runtime/vmOperations.hpp"
|
||||
#include "utilities/align.hpp"
|
||||
#include "utilities/bitMap.inline.hpp"
|
||||
|
||||
#ifndef O_BINARY // if defined (Win32) use binary files.
|
||||
#define O_BINARY 0 // otherwise do nothing.
|
||||
#endif
|
||||
|
||||
class DynamicArchiveBuilder : ResourceObj {
|
||||
static unsigned my_hash(const address& a) {
|
||||
return primitive_hash<address>(a);
|
||||
}
|
||||
static bool my_equals(const address& a0, const address& a1) {
|
||||
return primitive_equals<address>(a0, a1);
|
||||
}
|
||||
typedef ResourceHashtable<
|
||||
address, address,
|
||||
DynamicArchiveBuilder::my_hash, // solaris compiler doesn't like: primitive_hash<address>
|
||||
DynamicArchiveBuilder::my_equals, // solaris compiler doesn't like: primitive_equals<address>
|
||||
16384, ResourceObj::C_HEAP> RelocationTable;
|
||||
RelocationTable _new_loc_table;
|
||||
|
||||
class DynamicArchiveBuilder : public ArchiveBuilder {
|
||||
public:
|
||||
static intx _buffer_to_target_delta;
|
||||
|
||||
DumpRegion* _current_dump_space;
|
||||
|
||||
static size_t reserve_alignment() {
|
||||
@ -106,23 +85,8 @@ public:
|
||||
return (T)(address(obj) + _buffer_to_target_delta);
|
||||
}
|
||||
|
||||
template <typename T> T get_new_loc(T obj) {
|
||||
address* pp = _new_loc_table.get((address)obj);
|
||||
if (pp == NULL) {
|
||||
// Excluded klasses are not copied
|
||||
return NULL;
|
||||
} else {
|
||||
return (T)*pp;
|
||||
}
|
||||
}
|
||||
|
||||
address get_new_loc(MetaspaceClosure::Ref* ref) {
|
||||
return get_new_loc(ref->obj());
|
||||
}
|
||||
|
||||
template <typename T> bool has_new_loc(T obj) {
|
||||
address* pp = _new_loc_table.get((address)obj);
|
||||
return pp != NULL;
|
||||
template <typename T> T get_dumped_addr(T obj) {
|
||||
return (T)ArchiveBuilder::get_dumped_addr((address)obj);
|
||||
}
|
||||
|
||||
static int dynamic_dump_method_comparator(Method* a, Method* b) {
|
||||
@ -147,345 +111,13 @@ public:
|
||||
return a_name->fast_compare(b_name);
|
||||
}
|
||||
|
||||
protected:
|
||||
enum FollowMode {
|
||||
make_a_copy, point_to_it, set_to_null
|
||||
};
|
||||
|
||||
public:
|
||||
void copy(MetaspaceClosure::Ref* ref, bool read_only) {
|
||||
int bytes = ref->size() * BytesPerWord;
|
||||
address old_obj = ref->obj();
|
||||
address new_obj = copy_impl(ref, read_only, bytes);
|
||||
|
||||
assert(new_obj != NULL, "must be");
|
||||
assert(new_obj != old_obj, "must be");
|
||||
bool isnew = _new_loc_table.put(old_obj, new_obj);
|
||||
assert(isnew, "must be");
|
||||
}
|
||||
|
||||
// Make a shallow copy of each eligible MetaspaceObj into the buffer.
|
||||
class ShallowCopier: public UniqueMetaspaceClosure {
|
||||
DynamicArchiveBuilder* _builder;
|
||||
bool _read_only;
|
||||
public:
|
||||
ShallowCopier(DynamicArchiveBuilder* shuffler, bool read_only)
|
||||
: _builder(shuffler), _read_only(read_only) {}
|
||||
|
||||
virtual bool do_unique_ref(Ref* orig_obj, bool read_only) {
|
||||
// This method gets called on each *original* object
|
||||
// reachable from _builder->iterate_roots(). Each orig_obj is
|
||||
// called exactly once.
|
||||
FollowMode mode = _builder->follow_ref(orig_obj);
|
||||
|
||||
if (mode == point_to_it) {
|
||||
if (read_only == _read_only) {
|
||||
log_debug(cds, dynamic)("ptr : " PTR_FORMAT " %s", p2i(orig_obj->obj()),
|
||||
MetaspaceObj::type_name(orig_obj->msotype()));
|
||||
address p = orig_obj->obj();
|
||||
bool isnew = _builder->_new_loc_table.put(p, p);
|
||||
assert(isnew, "must be");
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
if (mode == set_to_null) {
|
||||
log_debug(cds, dynamic)("nul : " PTR_FORMAT " %s", p2i(orig_obj->obj()),
|
||||
MetaspaceObj::type_name(orig_obj->msotype()));
|
||||
return false;
|
||||
}
|
||||
|
||||
if (read_only == _read_only) {
|
||||
// Make a shallow copy of orig_obj in a buffer (maintained
|
||||
// by copy_impl in a subclass of DynamicArchiveBuilder).
|
||||
_builder->copy(orig_obj, read_only);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
};
|
||||
|
||||
// Relocate all embedded pointer fields within a MetaspaceObj's shallow copy
|
||||
class ShallowCopyEmbeddedRefRelocator: public UniqueMetaspaceClosure {
|
||||
DynamicArchiveBuilder* _builder;
|
||||
public:
|
||||
ShallowCopyEmbeddedRefRelocator(DynamicArchiveBuilder* shuffler)
|
||||
: _builder(shuffler) {}
|
||||
|
||||
// This method gets called on each *original* object reachable
|
||||
// from _builder->iterate_roots(). Each orig_obj is
|
||||
// called exactly once.
|
||||
virtual bool do_unique_ref(Ref* orig_ref, bool read_only) {
|
||||
FollowMode mode = _builder->follow_ref(orig_ref);
|
||||
|
||||
if (mode == point_to_it) {
|
||||
// We did not make a copy of this object
|
||||
// and we have nothing to update
|
||||
assert(_builder->get_new_loc(orig_ref) == NULL ||
|
||||
_builder->get_new_loc(orig_ref) == orig_ref->obj(), "must be");
|
||||
return false;
|
||||
}
|
||||
|
||||
if (mode == set_to_null) {
|
||||
// We did not make a copy of this object
|
||||
// and we have nothing to update
|
||||
assert(!_builder->has_new_loc(orig_ref->obj()), "must not be copied or pointed to");
|
||||
return false;
|
||||
}
|
||||
|
||||
// - orig_obj points to the original object.
|
||||
// - new_obj points to the shallow copy (created by ShallowCopier)
|
||||
// of orig_obj. new_obj is NULL if the orig_obj is excluded
|
||||
address orig_obj = orig_ref->obj();
|
||||
address new_obj = _builder->get_new_loc(orig_ref);
|
||||
|
||||
assert(new_obj != orig_obj, "must be");
|
||||
#ifdef ASSERT
|
||||
if (new_obj == NULL) {
|
||||
if (orig_ref->msotype() == MetaspaceObj::ClassType) {
|
||||
Klass* k = (Klass*)orig_obj;
|
||||
assert(k->is_instance_klass() &&
|
||||
SystemDictionaryShared::is_excluded_class(InstanceKlass::cast(k)),
|
||||
"orig_obj must be excluded Class");
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
log_debug(cds, dynamic)("Relocating " PTR_FORMAT " %s", p2i(new_obj),
|
||||
MetaspaceObj::type_name(orig_ref->msotype()));
|
||||
if (new_obj != NULL) {
|
||||
EmbeddedRefUpdater updater(_builder, orig_obj, new_obj);
|
||||
orig_ref->metaspace_pointers_do(&updater);
|
||||
}
|
||||
|
||||
return true; // keep recursing until every object is visited exactly once.
|
||||
}
|
||||
|
||||
virtual void push_special(SpecialRef type, Ref* ref, intptr_t* p) {
|
||||
assert(type == _method_entry_ref, "only special type allowed for now");
|
||||
address obj = ref->obj();
|
||||
address new_obj = _builder->get_new_loc(ref);
|
||||
size_t offset = pointer_delta(p, obj, sizeof(u1));
|
||||
intptr_t* new_p = (intptr_t*)(new_obj + offset);
|
||||
assert(*p == *new_p, "must be a copy");
|
||||
ArchivePtrMarker::mark_pointer((address*)new_p);
|
||||
}
|
||||
};
|
||||
|
||||
class EmbeddedRefUpdater: public MetaspaceClosure {
|
||||
DynamicArchiveBuilder* _builder;
|
||||
address _orig_obj;
|
||||
address _new_obj;
|
||||
public:
|
||||
EmbeddedRefUpdater(DynamicArchiveBuilder* shuffler, address orig_obj, address new_obj) :
|
||||
_builder(shuffler), _orig_obj(orig_obj), _new_obj(new_obj) {}
|
||||
|
||||
// This method gets called once for each pointer field F of orig_obj.
|
||||
// We update new_obj->F to point to the new location of orig_obj->F.
|
||||
//
|
||||
// Example: Klass* 0x100 is copied to 0x400
|
||||
// Symbol* 0x200 is copied to 0x500
|
||||
//
|
||||
// Let orig_obj == 0x100; and
|
||||
// new_obj == 0x400; and
|
||||
// ((Klass*)orig_obj)->_name == 0x200;
|
||||
// Then this function effectively assigns
|
||||
// ((Klass*)new_obj)->_name = 0x500;
|
||||
virtual bool do_ref(Ref* ref, bool read_only) {
|
||||
address new_pointee = NULL;
|
||||
|
||||
if (ref->not_null()) {
|
||||
address old_pointee = ref->obj();
|
||||
|
||||
FollowMode mode = _builder->follow_ref(ref);
|
||||
if (mode == point_to_it) {
|
||||
new_pointee = old_pointee;
|
||||
} else if (mode == set_to_null) {
|
||||
new_pointee = NULL;
|
||||
} else {
|
||||
new_pointee = _builder->get_new_loc(old_pointee);
|
||||
}
|
||||
}
|
||||
|
||||
const char* kind = MetaspaceObj::type_name(ref->msotype());
|
||||
// offset of this field inside the original object
|
||||
intx offset = (address)ref->addr() - _orig_obj;
|
||||
_builder->update_pointer((address*)(_new_obj + offset), new_pointee, kind, offset);
|
||||
|
||||
// We can't mark the pointer here, because DynamicArchiveBuilder::sort_methods
|
||||
// may re-layout the [iv]tables, which would change the offset(s) in an InstanceKlass
|
||||
// that would contain pointers. Therefore, we must mark the pointers after
|
||||
// sort_methods(), using PointerMarker.
|
||||
return false; // Do not recurse.
|
||||
}
|
||||
};
|
||||
|
||||
class ExternalRefUpdater: public MetaspaceClosure {
|
||||
DynamicArchiveBuilder* _builder;
|
||||
|
||||
public:
|
||||
ExternalRefUpdater(DynamicArchiveBuilder* shuffler) : _builder(shuffler) {}
|
||||
|
||||
virtual bool do_ref(Ref* ref, bool read_only) {
|
||||
// ref is a pointer that lives OUTSIDE of the buffer, but points to an object inside the buffer
|
||||
if (ref->not_null()) {
|
||||
address new_loc = _builder->get_new_loc(ref);
|
||||
const char* kind = MetaspaceObj::type_name(ref->msotype());
|
||||
_builder->update_pointer(ref->addr(), new_loc, kind, 0);
|
||||
_builder->mark_pointer(ref->addr());
|
||||
}
|
||||
return false; // Do not recurse.
|
||||
}
|
||||
};
|
||||
|
||||
class PointerMarker: public UniqueMetaspaceClosure {
|
||||
DynamicArchiveBuilder* _builder;
|
||||
|
||||
public:
|
||||
PointerMarker(DynamicArchiveBuilder* shuffler) : _builder(shuffler) {}
|
||||
|
||||
virtual bool do_unique_ref(Ref* ref, bool read_only) {
|
||||
if (_builder->is_in_buffer_space(ref->obj())) {
|
||||
EmbeddedRefMarker ref_marker(_builder);
|
||||
ref->metaspace_pointers_do(&ref_marker);
|
||||
return true; // keep recursing until every buffered object is visited exactly once.
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
class EmbeddedRefMarker: public MetaspaceClosure {
|
||||
DynamicArchiveBuilder* _builder;
|
||||
|
||||
public:
|
||||
EmbeddedRefMarker(DynamicArchiveBuilder* shuffler) : _builder(shuffler) {}
|
||||
virtual bool do_ref(Ref* ref, bool read_only) {
|
||||
if (ref->not_null()) {
|
||||
_builder->mark_pointer(ref->addr());
|
||||
}
|
||||
return false; // Do not recurse.
|
||||
}
|
||||
};
|
||||
|
||||
void update_pointer(address* addr, address value, const char* kind, uintx offset, bool is_mso_pointer=true) {
|
||||
// Propagate the the mask bits to the new value -- see comments above MetaspaceClosure::obj()
|
||||
if (is_mso_pointer) {
|
||||
const uintx FLAG_MASK = 0x03;
|
||||
uintx mask_bits = uintx(*addr) & FLAG_MASK;
|
||||
value = (address)(uintx(value) | mask_bits);
|
||||
}
|
||||
|
||||
if (*addr != value) {
|
||||
log_debug(cds, dynamic)("Update (%18s*) %3d [" PTR_FORMAT "] " PTR_FORMAT " -> " PTR_FORMAT,
|
||||
kind, int(offset), p2i(addr), p2i(*addr), p2i(value));
|
||||
*addr = value;
|
||||
}
|
||||
}
|
||||
|
||||
private:
|
||||
GrowableArray<Symbol*>* _symbols; // symbols to dump
|
||||
GrowableArray<InstanceKlass*>* _klasses; // klasses to dump
|
||||
|
||||
void append(InstanceKlass* k) { _klasses->append(k); }
|
||||
void append(Symbol* s) { _symbols->append(s); }
|
||||
|
||||
class GatherKlassesAndSymbols : public UniqueMetaspaceClosure {
|
||||
DynamicArchiveBuilder* _builder;
|
||||
bool _read_only;
|
||||
|
||||
public:
|
||||
GatherKlassesAndSymbols(DynamicArchiveBuilder* builder)
|
||||
: _builder(builder) {}
|
||||
|
||||
virtual bool do_unique_ref(Ref* ref, bool read_only) {
|
||||
if (_builder->follow_ref(ref) != make_a_copy) {
|
||||
return false;
|
||||
}
|
||||
if (ref->msotype() == MetaspaceObj::ClassType) {
|
||||
Klass* klass = (Klass*)ref->obj();
|
||||
assert(klass->is_klass(), "must be");
|
||||
if (klass->is_instance_klass()) {
|
||||
InstanceKlass* ik = InstanceKlass::cast(klass);
|
||||
assert(!SystemDictionaryShared::is_excluded_class(ik), "must be");
|
||||
_builder->append(ik);
|
||||
_builder->_estimated_metsapceobj_bytes += BytesPerWord; // See RunTimeSharedClassInfo::get_for()
|
||||
}
|
||||
} else if (ref->msotype() == MetaspaceObj::SymbolType) {
|
||||
_builder->append((Symbol*)ref->obj());
|
||||
}
|
||||
|
||||
int bytes = ref->size() * BytesPerWord;
|
||||
_builder->_estimated_metsapceobj_bytes += bytes;
|
||||
|
||||
return true;
|
||||
}
|
||||
};
|
||||
|
||||
FollowMode follow_ref(MetaspaceClosure::Ref *ref) {
|
||||
address obj = ref->obj();
|
||||
if (MetaspaceShared::is_in_shared_metaspace(obj)) {
|
||||
// Don't dump existing shared metadata again.
|
||||
return point_to_it;
|
||||
} else if (ref->msotype() == MetaspaceObj::MethodDataType) {
|
||||
return set_to_null;
|
||||
} else {
|
||||
if (ref->msotype() == MetaspaceObj::ClassType) {
|
||||
Klass* klass = (Klass*)ref->obj();
|
||||
assert(klass->is_klass(), "must be");
|
||||
if (klass->is_instance_klass()) {
|
||||
InstanceKlass* ik = InstanceKlass::cast(klass);
|
||||
if (SystemDictionaryShared::is_excluded_class(ik)) {
|
||||
ResourceMark rm;
|
||||
log_debug(cds, dynamic)("Skipping class (excluded): %s", klass->external_name());
|
||||
return set_to_null;
|
||||
}
|
||||
} else if (klass->is_array_klass()) {
|
||||
// Don't support archiving of array klasses for now.
|
||||
ResourceMark rm;
|
||||
log_debug(cds, dynamic)("Skipping class (array): %s", klass->external_name());
|
||||
return set_to_null;
|
||||
}
|
||||
}
|
||||
|
||||
return make_a_copy;
|
||||
}
|
||||
}
|
||||
|
||||
address copy_impl(MetaspaceClosure::Ref* ref, bool read_only, int bytes) {
|
||||
if (ref->msotype() == MetaspaceObj::ClassType) {
|
||||
// Save a pointer immediate in front of an InstanceKlass, so
|
||||
// we can do a quick lookup from InstanceKlass* -> RunTimeSharedClassInfo*
|
||||
// without building another hashtable. See RunTimeSharedClassInfo::get_for()
|
||||
// in systemDictionaryShared.cpp.
|
||||
address obj = ref->obj();
|
||||
Klass* klass = (Klass*)obj;
|
||||
if (klass->is_instance_klass()) {
|
||||
SystemDictionaryShared::validate_before_archiving(InstanceKlass::cast(klass));
|
||||
current_dump_space()->allocate(sizeof(address), BytesPerWord);
|
||||
}
|
||||
}
|
||||
address p = (address)current_dump_space()->allocate(bytes);
|
||||
address obj = ref->obj();
|
||||
log_debug(cds, dynamic)("COPY: " PTR_FORMAT " ==> " PTR_FORMAT " %5d %s",
|
||||
p2i(obj), p2i(p), bytes,
|
||||
MetaspaceObj::type_name(ref->msotype()));
|
||||
memcpy(p, obj, bytes);
|
||||
intptr_t* archived_vtable = MetaspaceShared::get_archived_cpp_vtable(ref->msotype(), p);
|
||||
if (archived_vtable != NULL) {
|
||||
update_pointer((address*)p, (address)archived_vtable, "vtb", 0, /*is_mso_pointer*/false);
|
||||
mark_pointer((address*)p);
|
||||
}
|
||||
|
||||
return (address)p;
|
||||
}

  DynamicArchiveHeader *_header;
  address _alloc_bottom;
  address _last_verified_top;
  size_t _other_region_used_bytes;

  // Conservative estimate for number of bytes needed for:
  size_t _estimated_metsapceobj_bytes;   // all archived MetaspaceObj's.
  size_t _estimated_hashtable_bytes;     // symbol table and dictionaries
  size_t _estimated_trampoline_bytes;    // method entry trampolines

@ -498,7 +130,7 @@ private:
  void make_trampolines();
  void make_klasses_shareable();
  void sort_methods(InstanceKlass* ik) const;
  void set_symbols_permanent();
  void remark_pointers_for_instance_klass(InstanceKlass* k, bool should_mark) const;
  void relocate_buffer_to_target();
  void write_archive(char* serialized_data);

@ -520,11 +152,7 @@ private:
  }

public:
  DynamicArchiveBuilder() {
    _klasses = new (ResourceObj::C_HEAP, mtClass) GrowableArray<InstanceKlass*>(100, mtClass);
    _symbols = new (ResourceObj::C_HEAP, mtClass) GrowableArray<Symbol*>(1000, mtClass);

    _estimated_metsapceobj_bytes = 0;
  DynamicArchiveBuilder() : ArchiveBuilder(NULL, NULL) {
    _estimated_hashtable_bytes = 0;
    _estimated_trampoline_bytes = 0;

@ -572,19 +200,11 @@ public:
    DEBUG_ONLY(SystemDictionaryShared::NoClassLoadingMark nclm);
    SystemDictionaryShared::check_excluded_classes();

    {
      ResourceMark rm;
      GatherKlassesAndSymbols gatherer(this);

      SystemDictionaryShared::dumptime_classes_do(&gatherer);
      SymbolTable::metaspace_pointers_do(&gatherer);
      FileMapInfo::metaspace_pointers_do(&gatherer);

      gatherer.finish();
    }
    gather_klasses_and_symbols();

    // rw space starts ...
    address reserved_bottom = reserve_space_and_init_buffer_to_target_delta();
    set_dump_regions(MetaspaceShared::read_write_dump_space(), MetaspaceShared::read_only_dump_space());
    init_header(reserved_bottom);

    CHeapBitMap ptrmap;

@ -593,54 +213,26 @@ public:
    reserve_buffers_for_trampolines();
    verify_estimate_size(_estimated_trampoline_bytes, "Trampolines");

    gather_source_objs();
    start_dump_space(MetaspaceShared::read_write_dump_space());

    log_info(cds, dynamic)("Copying %d klasses and %d symbols",
                           _klasses->length(), _symbols->length());
                           klasses()->length(), symbols()->length());

    {
      assert(current_dump_space() == MetaspaceShared::read_write_dump_space(),
             "Current dump space is not rw space");
      // shallow-copy RW objects, if necessary
      ResourceMark rm;
      ShallowCopier rw_copier(this, false);
      iterate_roots(&rw_copier);
    }
    dump_rw_region();

    // ro space starts ...
    DumpRegion* ro_space = MetaspaceShared::read_only_dump_space();
    {
      start_dump_space(ro_space);

      // shallow-copy RO objects, if necessary
      ResourceMark rm;
      ShallowCopier ro_copier(this, true);
      iterate_roots(&ro_copier);
    }

    {
      log_info(cds)("Relocating embedded pointers ... ");
      ResourceMark rm;
      ShallowCopyEmbeddedRefRelocator emb_reloc(this);
      iterate_roots(&emb_reloc);
    }

    {
      log_info(cds)("Relocating external roots ... ");
      ResourceMark rm;
      ExternalRefUpdater ext_reloc(this);
      iterate_roots(&ext_reloc);
    }
    start_dump_space(ro_space);
    dump_ro_region();
    relocate_pointers();

    verify_estimate_size(_estimated_metsapceobj_bytes, "MetaspaceObjs");

    char* serialized_data;
    {
      set_symbols_permanent();

      // Write the symbol table and system dictionaries to the RO space.
      // Note that these tables still point to the *original* objects
      // (because they were not processed by ExternalRefUpdater), so
      // Note that these tables still point to the *original* objects, so
      // they would need to call DynamicArchive::original_to_target() to
      // get the correct addresses.
      assert(current_dump_space() == ro_space, "Must be RO space");

@ -656,20 +248,15 @@ public:
      verify_estimate_size(_estimated_hashtable_bytes, "Hashtables");

      make_trampolines();

      log_info(cds)("Make classes shareable");
      make_klasses_shareable();

      {
        log_info(cds)("Adjust lambda proxy class dictionary");
        SystemDictionaryShared::adjust_lambda_proxy_class_dictionary();
      }
      log_info(cds)("Adjust lambda proxy class dictionary");
      SystemDictionaryShared::adjust_lambda_proxy_class_dictionary();

      {
        log_info(cds)("Final relocation of pointers ... ");
        ResourceMark rm;
        PointerMarker marker(this);
        iterate_roots(&marker);
        relocate_buffer_to_target();
      }
      log_info(cds)("Final relocation of pointers ... ");
      relocate_buffer_to_target();

      write_archive(serialized_data);
      release_header();

@ -678,34 +265,17 @@ public:
    verify_universe("After CDS dynamic dump");
  }

  void iterate_roots(MetaspaceClosure* it) {
    int i;
    int num_klasses = _klasses->length();
    for (i = 0; i < num_klasses; i++) {
      it->push(&_klasses->at(i));
  virtual void iterate_roots(MetaspaceClosure* it, bool is_relocating_pointers) {
    if (!is_relocating_pointers) {
      SystemDictionaryShared::dumptime_classes_do(it);
      SymbolTable::metaspace_pointers_do(it);
    }

    int num_symbols = _symbols->length();
    for (i = 0; i < num_symbols; i++) {
      it->push(&_symbols->at(i));
    }

    FileMapInfo::metaspace_pointers_do(it);

    // Do not call these again, as we have already collected all the classes and symbols
    // that we want to archive. Also, these calls would corrupt the tables when
    // ExternalRefUpdater is used.
    //
    // SystemDictionaryShared::dumptime_classes_do(it);
    // SymbolTable::metaspace_pointers_do(it);

    it->finish();
  }
};

intx DynamicArchiveBuilder::_buffer_to_target_delta;


size_t DynamicArchiveBuilder::estimate_archive_size() {
  // size of the symbol table and two dictionaries, plus the RunTimeSharedClassInfo's
  _estimated_hashtable_bytes = 0;

@ -795,10 +365,12 @@ size_t DynamicArchiveBuilder::estimate_trampoline_size() {
    align_up(SharedRuntime::trampoline_size(), BytesPerWord) +
    align_up(sizeof(AdapterHandlerEntry*), BytesPerWord);

  for (int i = 0; i < _klasses->length(); i++) {
    InstanceKlass* ik = _klasses->at(i);
    Array<Method*>* methods = ik->methods();
    total += each_method_bytes * methods->length();
  for (int i = 0; i < klasses()->length(); i++) {
    Klass* k = klasses()->at(i);
    if (k->is_instance_klass()) {
      Array<Method*>* methods = InstanceKlass::cast(k)->methods();
      total += each_method_bytes * methods->length();
    }
  }
  if (total == 0) {
    // We have nothing to archive, but let's avoid having an empty region.

@ -810,8 +382,12 @@ size_t DynamicArchiveBuilder::estimate_trampoline_size() {
void DynamicArchiveBuilder::make_trampolines() {
  DumpRegion* mc_space = MetaspaceShared::misc_code_dump_space();
  char* p = mc_space->base();
  for (int i = 0; i < _klasses->length(); i++) {
    InstanceKlass* ik = _klasses->at(i);
  for (int i = 0; i < klasses()->length(); i++) {
    Klass* k = klasses()->at(i);
    if (!k->is_instance_klass()) {
      continue;
    }
    InstanceKlass* ik = InstanceKlass::cast(k);
    Array<Method*>* methods = ik->methods();
    for (int j = 0; j < methods->length(); j++) {
      Method* m = methods->at(j);

@ -832,16 +408,22 @@ void DynamicArchiveBuilder::make_trampolines() {
}

void DynamicArchiveBuilder::make_klasses_shareable() {
  int i, count = _klasses->length();
  int i, count = klasses()->length();

  InstanceKlass::disable_method_binary_search();
  for (i = 0; i < count; i++) {
    InstanceKlass* ik = _klasses->at(i);
    sort_methods(ik);
    Klass* k = klasses()->at(i);
    if (k->is_instance_klass()) {
      sort_methods(InstanceKlass::cast(k));
    }
  }

  for (i = 0; i < count; i++) {
    InstanceKlass* ik = _klasses->at(i);
    Klass* k = klasses()->at(i);
    if (!k->is_instance_klass()) {
      continue;
    }
    InstanceKlass* ik = InstanceKlass::cast(k);
    ik->assign_class_loader_type();

    MetaspaceShared::rewrite_nofast_bytecodes_and_calculate_fingerprints(Thread::current(), ik);

@ -876,6 +458,11 @@ void DynamicArchiveBuilder::sort_methods(InstanceKlass* ik) const {
    log_debug(cds, dynamic)("sorting methods for " PTR_FORMAT " %s", p2i(to_target(ik)), ik->external_name());
  }

  // Method sorting may re-layout the [iv]tables, which would change the offset(s)
  // of the locations in an InstanceKlass that would contain pointers. Let's clear
  // all the existing pointer marking bits, and re-mark the pointers after sorting.
  remark_pointers_for_instance_klass(ik, false);

  // Make sure all supertypes have been sorted
  sort_methods(ik->java_super());
  Array<InstanceKlass*>* interfaces = ik->local_interfaces();

@ -906,18 +493,33 @@ void DynamicArchiveBuilder::sort_methods(InstanceKlass* ik) const {
  }
  ik->vtable().initialize_vtable(true, THREAD); assert(!HAS_PENDING_EXCEPTION, "cannot fail");
  ik->itable().initialize_itable(true, THREAD); assert(!HAS_PENDING_EXCEPTION, "cannot fail");

  // Set all the pointer marking bits after sorting.
  remark_pointers_for_instance_klass(ik, true);
}

void DynamicArchiveBuilder::set_symbols_permanent() {
  int count = _symbols->length();
  for (int i=0; i<count; i++) {
    Symbol* s = _symbols->at(i);
    s->set_permanent();

    if (log_is_enabled(Trace, cds, dynamic)) {
      ResourceMark rm;
      log_trace(cds, dynamic)("symbols[%4i] = " PTR_FORMAT " %s", i, p2i(to_target(s)), s->as_quoted_ascii());
template<bool should_mark>
class PointerRemarker: public MetaspaceClosure {
public:
  virtual bool do_ref(Ref* ref, bool read_only) {
    if (should_mark) {
      ArchivePtrMarker::mark_pointer(ref->addr());
    } else {
      ArchivePtrMarker::clear_pointer(ref->addr());
    }
    return false; // don't recurse
  }
};

void DynamicArchiveBuilder::remark_pointers_for_instance_klass(InstanceKlass* k, bool should_mark) const {
  if (should_mark) {
    PointerRemarker<true> marker;
    k->metaspace_pointers_do(&marker);
    marker.finish();
  } else {
    PointerRemarker<false> marker;
    k->metaspace_pointers_do(&marker);
    marker.finish();
  }
}
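PointerRemarker takes should_mark as a template parameter rather than a constructor argument, so the branch is resolved at compile time and each instantiation contains only one of the two calls. A standalone sketch of the same pattern; mark_pointer/clear_pointer here are stand-ins, not the ArchivePtrMarker API:

#include <cassert>
#include <set>

static std::set<void**> marked;                       // stand-in for the mark bitmap
static void mark_pointer(void** p)  { marked.insert(p); }
static void clear_pointer(void** p) { marked.erase(p); }

// should_mark is a compile-time constant: the if below folds away in each
// instantiation, mirroring PointerRemarker<true> / PointerRemarker<false>.
template <bool should_mark>
struct Remarker {
  void do_ref(void** addr) {
    if (should_mark) {
      mark_pointer(addr);
    } else {
      clear_pointer(addr);
    }
  }
};

int main() {
  void* slot = nullptr;
  Remarker<true>{}.do_ref(&slot);
  assert(marked.count(&slot) == 1);
  Remarker<false>{}.do_ref(&slot);
  assert(marked.empty());
  return 0;
}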

@ -1003,8 +605,8 @@ void DynamicArchiveBuilder::relocate_buffer_to_target() {
}

void DynamicArchiveBuilder::write_archive(char* serialized_data) {
  int num_klasses = _klasses->length();
  int num_symbols = _symbols->length();
  int num_klasses = klasses()->length();
  int num_symbols = symbols()->length();

  _header->set_serialized_data(to_target(serialized_data));

@ -1032,7 +634,6 @@ void DynamicArchiveBuilder::write_archive(char* serialized_data) {
  log_info(cds, dynamic)("%d klasses; %d symbols", num_klasses, num_symbols);
}


class VM_PopulateDynamicDumpSharedSpace: public VM_Operation {
  DynamicArchiveBuilder* _builder;
public:

@ -1070,7 +671,7 @@ void DynamicArchive::dump() {

address DynamicArchive::original_to_buffer_impl(address orig_obj) {
  assert(DynamicDumpSharedSpaces, "must be");
  address buff_obj = _builder->get_new_loc(orig_obj);
  address buff_obj = _builder->get_dumped_addr(orig_obj);
  assert(buff_obj != NULL, "orig_obj must be used by the dynamic archive");
  assert(buff_obj != orig_obj, "call this only when you know orig_obj must be copied and not just referenced");
  assert(_builder->is_in_buffer_space(buff_obj), "must be");

@ -1089,7 +690,7 @@ address DynamicArchive::original_to_target_impl(address orig_obj) {
    // This happens when the top archive points to a Symbol* in the base archive.
    return orig_obj;
  }
  address buff_obj = _builder->get_new_loc(orig_obj);
  address buff_obj = _builder->get_dumped_addr(orig_obj);
  assert(buff_obj != NULL, "orig_obj must be used by the dynamic archive");
  if (buff_obj == orig_obj) {
    // We are storing a pointer to an original object into the dynamic buffer. E.g.,

@ -35,6 +35,7 @@
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "oops/reflectionAccessorImplKlassHelper.hpp"
#include "runtime/atomic.hpp"
#include "runtime/os.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"

@ -237,6 +238,41 @@ size_t KlassInfoTable::size_of_instances_in_words() const {
  return _size_of_instances_in_words;
}

// Return false if the entry could not be recorded on account
// of running out of space required to create a new entry.
bool KlassInfoTable::merge_entry(const KlassInfoEntry* cie) {
  Klass* k = cie->klass();
  KlassInfoEntry* elt = lookup(k);
  // elt may be NULL if it's a new klass for which we
  // could not allocate space for a new entry in the hashtable.
  if (elt != NULL) {
    elt->set_count(elt->count() + cie->count());
    elt->set_words(elt->words() + cie->words());
    _size_of_instances_in_words += cie->words();
    return true;
  }
  return false;
}

class KlassInfoTableMergeClosure : public KlassInfoClosure {
private:
  KlassInfoTable* _dest;
  bool _success;
public:
  KlassInfoTableMergeClosure(KlassInfoTable* table) : _dest(table), _success(true) {}
  void do_cinfo(KlassInfoEntry* cie) {
    _success &= _dest->merge_entry(cie);
  }
  bool success() { return _success; }
};

// merge from table
bool KlassInfoTable::merge(KlassInfoTable* table) {
  KlassInfoTableMergeClosure closure(this);
  table->iterate(&closure);
  return closure.success();
}
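merge_entry()/merge() fold one table into another entry by entry, with the closure AND-ing the per-entry results so that a single allocation failure poisons the whole merge. A rough standalone equivalent using std::map; Counts and the function names are illustrative, not the HotSpot types:

#include <cassert>
#include <cstdint>
#include <map>

struct Counts { std::uint64_t count = 0; std::uint64_t words = 0; };
using Table = std::map<const void*, Counts>;   // keyed by a Klass*-like id

// Fold one entry into dest; a real implementation would return false when a
// new entry cannot be allocated, exactly like KlassInfoTable::merge_entry().
bool merge_entry(Table& dest, const void* klass, const Counts& c) {
  Counts& e = dest[klass];
  e.count += c.count;
  e.words += c.words;
  return true;
}

bool merge(Table& dest, const Table& src) {
  bool ok = true;
  for (const auto& kv : src) {
    ok &= merge_entry(dest, kv.first, kv.second);  // one failure poisons all
  }
  return ok;
}

int main() {
  Table shared, local;
  int k1 = 0;
  local[&k1] = {2, 10};
  assert(merge(shared, local) && shared[&k1].count == 2);
  return 0;
}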

int KlassInfoHisto::sort_helper(KlassInfoEntry** e1, KlassInfoEntry** e2) {
  return (*e1)->compare(*e1,*e2);
}

@ -482,7 +518,7 @@ class HistoClosure : public KlassInfoClosure {
class RecordInstanceClosure : public ObjectClosure {
 private:
  KlassInfoTable* _cit;
  size_t _missed_count;
  uintx _missed_count;
  BoolObjectClosure* _filter;
 public:
  RecordInstanceClosure(KlassInfoTable* cit, BoolObjectClosure* filter) :

@ -496,7 +532,7 @@ class RecordInstanceClosure : public ObjectClosure {
    }
  }

  size_t missed_count() { return _missed_count; }
  uintx missed_count() { return _missed_count; }

 private:
  bool should_visit(oop obj) {

@ -504,23 +540,68 @@ class RecordInstanceClosure : public ObjectClosure {
  }
};

size_t HeapInspection::populate_table(KlassInfoTable* cit, BoolObjectClosure *filter) {
  ResourceMark rm;
// Heap inspection work for each worker.
// If a native OOM occurs while allocating the KlassInfoTable, set _success to false.
void ParHeapInspectTask::work(uint worker_id) {
  uintx missed_count = 0;
  bool merge_success = true;
  if (!Atomic::load(&_success)) {
    // Another worker has already failed during the parallel iteration.
    return;
  }

  KlassInfoTable cit(false);
  if (cit.allocation_failed()) {
    // Failed to allocate memory; stop the parallel mode.
    Atomic::store(&_success, false);
    return;
  }
  RecordInstanceClosure ric(&cit, _filter);
  _poi->object_iterate(&ric, worker_id);
  missed_count = ric.missed_count();
  {
    MutexLocker x(&_mutex);
    merge_success = _shared_cit->merge(&cit);
  }
  if (merge_success) {
    Atomic::add(&_missed_count, missed_count);
  } else {
    Atomic::store(&_success, false);
  }
}

uintx HeapInspection::populate_table(KlassInfoTable* cit, BoolObjectClosure *filter, uint parallel_thread_num) {

  // Try parallel first.
  if (parallel_thread_num > 1) {
    ResourceMark rm;
    ParallelObjectIterator* poi = Universe::heap()->parallel_object_iterator(parallel_thread_num);
    if (poi != NULL) {
      ParHeapInspectTask task(poi, cit, filter);
      Universe::heap()->run_task(&task);
      delete poi;
      if (task.success()) {
        return task.missed_count();
      }
    }
  }

  ResourceMark rm;
  // If no parallel iteration available, run serially.
  RecordInstanceClosure ric(cit, filter);
  Universe::heap()->object_iterate(&ric);
  return ric.missed_count();
}
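The control flow above is "parallel first, serial fallback": each worker fills a thread-local table, merges it into the shared one under a mutex, and any failure flips a shared flag so the caller reruns serially. A self-contained sketch of that shape with std::thread; the histogram type and the chunking are assumptions, not the HotSpot types:

#include <atomic>
#include <cassert>
#include <map>
#include <mutex>
#include <thread>
#include <vector>

using Table = std::map<int, long>;         // per-class instance histogram

static std::mutex merge_lock;
static std::atomic<bool> ok{true};

void worker(const std::vector<int>& chunk, Table& shared) {
  if (!ok.load()) return;                  // another worker already failed
  Table local;                             // thread-local table, as in work()
  for (int k : chunk) local[k]++;
  std::lock_guard<std::mutex> g(merge_lock);
  for (const auto& kv : local) shared[kv.first] += kv.second;
}

void populate(const std::vector<std::vector<int>>& chunks, Table& shared) {
  std::vector<std::thread> pool;
  for (const auto& c : chunks) pool.emplace_back(worker, std::cref(c), std::ref(shared));
  for (auto& t : pool) t.join();
  if (!ok.load()) {                        // parallel pass failed: run serially
    shared.clear();
    for (const auto& c : chunks) for (int k : c) shared[k]++;
  }
}

int main() {
  Table t;
  populate({{1, 2, 2}, {2, 3}}, t);
  assert(t[2] == 3);
  return 0;
}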

void HeapInspection::heap_inspection(outputStream* st) {
void HeapInspection::heap_inspection(outputStream* st, uint parallel_thread_num) {
  ResourceMark rm;

  KlassInfoTable cit(false);
  if (!cit.allocation_failed()) {
    // populate table with object allocation info
    size_t missed_count = populate_table(&cit);
    uintx missed_count = populate_table(&cit, NULL, parallel_thread_num);
    if (missed_count != 0) {
      log_info(gc, classhisto)("WARNING: Ran out of C-heap; undercounted " SIZE_FORMAT
      log_info(gc, classhisto)("WARNING: Ran out of C-heap; undercounted " UINTX_FORMAT
                               " total instances in data below",
                               missed_count);
    }

@ -30,6 +30,9 @@
#include "oops/oop.hpp"
#include "oops/annotations.hpp"
#include "utilities/macros.hpp"
#include "gc/shared/workgroup.hpp"

class ParallelObjectIterator;

#if INCLUDE_SERVICES

@ -122,6 +125,8 @@ class KlassInfoTable: public StackObj {
  void iterate(KlassInfoClosure* cic);
  bool allocation_failed() { return _buckets == NULL; }
  size_t size_of_instances_in_words() const;
  bool merge(KlassInfoTable* table);
  bool merge_entry(const KlassInfoEntry* cie);

  friend class KlassInfoHisto;
  friend class KlassHierarchy;

@ -211,11 +216,46 @@ class KlassInfoClosure;

class HeapInspection : public StackObj {
 public:
  void heap_inspection(outputStream* st) NOT_SERVICES_RETURN;
  size_t populate_table(KlassInfoTable* cit, BoolObjectClosure* filter = NULL) NOT_SERVICES_RETURN_(0);
  void heap_inspection(outputStream* st, uint parallel_thread_num = 1) NOT_SERVICES_RETURN;
  uintx populate_table(KlassInfoTable* cit, BoolObjectClosure* filter = NULL, uint parallel_thread_num = 1) NOT_SERVICES_RETURN_(0);
  static void find_instances_at_safepoint(Klass* k, GrowableArray<oop>* result) NOT_SERVICES_RETURN;
 private:
  void iterate_over_heap(KlassInfoTable* cit, BoolObjectClosure* filter = NULL);
};

// Parallel heap inspection task. Parallel inspection can fail due to
// a native OOM when allocating memory for a thread-local KlassInfoTable.
// _success will be set to false on such an OOM, and serial inspection is tried instead.
class ParHeapInspectTask : public AbstractGangTask {
 private:
  ParallelObjectIterator* _poi;
  KlassInfoTable* _shared_cit;
  BoolObjectClosure* _filter;
  uintx _missed_count;
  bool _success;
  Mutex _mutex;

 public:
  ParHeapInspectTask(ParallelObjectIterator* poi,
                     KlassInfoTable* shared_cit,
                     BoolObjectClosure* filter) :
    AbstractGangTask("Iterating heap"),
    _poi(poi),
    _shared_cit(shared_cit),
    _filter(filter),
    _missed_count(0),
    _success(true),
    _mutex(Mutex::leaf, "Parallel heap iteration data merge lock") {}

  uintx missed_count() const {
    return _missed_count;
  }

  bool success() {
    return _success;
  }

  virtual void work(uint worker_id);
};

#endif // SHARE_MEMORY_HEAPINSPECTION_HPP

@ -136,17 +136,23 @@ oop HeapShared::archive_heap_object(oop obj, Thread* THREAD) {
    return NULL;
  }

  // Pre-compute object identity hash at CDS dump time.
  obj->identity_hash();

  oop archived_oop = (oop)G1CollectedHeap::heap()->archive_mem_allocate(len);
  if (archived_oop != NULL) {
    Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(obj), cast_from_oop<HeapWord*>(archived_oop), len);
    MetaspaceShared::relocate_klass_ptr(archived_oop);
    // Clear age -- it might have been set if a GC happened during -Xshare:dump
    markWord mark = archived_oop->mark_raw();
    mark = mark.set_age(0);
    archived_oop->set_mark_raw(mark);
    // Reinitialize markword to remove age/marking/locking/etc.
    //
    // We need to retain the identity_hash, because it may have been used by some hashtables
    // in the shared heap. This also has the side effect of pre-initializing the
    // identity_hash for all shared objects, so they are less likely to be written
    // into during run time, increasing the potential of memory sharing.
    int hash_original = obj->identity_hash();
    archived_oop->set_mark_raw(markWord::prototype().copy_set_hash(hash_original));
    assert(archived_oop->mark().is_unlocked(), "sanity");

    DEBUG_ONLY(int hash_archived = archived_oop->identity_hash());
    assert(hash_original == hash_archived, "Different hash codes: original %x, archived %x", hash_original, hash_archived);

    ArchivedObjectCache* cache = archived_object_cache();
    cache->put(obj, archived_oop);
    log_debug(cds, heap)("Archived heap object " PTR_FORMAT " ==> " PTR_FORMAT,
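The hunk above replaces the age-clearing logic with a full markword reset that deliberately carries the identity hash over, since shared hashtables may already depend on it. A toy model of copy_set_hash() over a synthetic header word; the bit layout is illustrative only, not HotSpot's markWord encoding:

#include <cassert>
#include <cstdint>

constexpr int           kHashShift = 8;
constexpr std::uint64_t kHashMask  = 0x7FFFFFFFull << kHashShift;

std::uint64_t prototype() { return 0x1; }        // "unlocked, age 0" pattern

std::uint64_t copy_set_hash(std::uint64_t mark, std::uint64_t hash) {
  return (mark & ~kHashMask) | ((hash << kHashShift) & kHashMask);
}

int main() {
  // A "dirty" header: a hash plus some age bits picked up during a GC.
  std::uint64_t dirty = copy_set_hash(prototype(), 0xCAFE) | (0xFull << 3);
  std::uint64_t hash  = (dirty & kHashMask) >> kHashShift;
  // Rebuild from a clean prototype, carrying only the hash over.
  std::uint64_t clean = copy_set_hash(prototype(), hash);
  assert(((clean & kHashMask) >> kHashShift) == 0xCAFE);
  assert((clean & ~kHashMask) == prototype());   // everything else reset
  return 0;
}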

@ -1,5 +1,5 @@
/*
 * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it

@ -37,8 +37,11 @@ void MetaspaceClosure::Ref::update(address new_loc) const {
void MetaspaceClosure::push_impl(MetaspaceClosure::Ref* ref) {
  if (_nest_level < MAX_NEST_LEVEL) {
    do_push(ref);
    delete ref;
    if (!ref->keep_after_pushing()) {
      delete ref;
    }
  } else {
    do_pending_ref(ref);
    ref->set_next(_pending_refs);
    _pending_refs = ref;
  }

@ -59,9 +62,15 @@ void MetaspaceClosure::do_push(MetaspaceClosure::Ref* ref) {
    assert(w == _default, "must be");
    read_only = ref->is_read_only_by_default();
  }
  if (_nest_level == 0) {
    assert(_enclosing_ref == NULL, "must be");
  }
  _nest_level ++;
  if (do_ref(ref, read_only)) { // true means we want to iterate the embedded pointer in <ref>
    Ref* saved = _enclosing_ref;
    _enclosing_ref = ref;
    ref->metaspace_pointers_do(this);
    _enclosing_ref = saved;
  }
  _nest_level --;
}

@ -73,7 +82,9 @@ void MetaspaceClosure::finish() {
    Ref* ref = _pending_refs;
    _pending_refs = _pending_refs->next();
    do_push(ref);
    delete ref;
    if (!ref->keep_after_pushing()) {
      delete ref;
    }
  }
}

@ -1,5 +1,5 @@
/*
 * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it

@ -60,8 +60,7 @@
// root references (such as all Klass'es in the SystemDictionary).
//
// Currently it is used for compacting the CDS archive by eliminating temporary
// objects allocated during archive creation time. See ArchiveCompactor in
// metaspaceShared.cpp for an example.
// objects allocated during archive creation time. See ArchiveBuilder for an example.
//
// To support MetaspaceClosure, each subclass of MetaspaceObj must provide
// a method of the type void metaspace_pointers_do(MetaspaceClosure*). This method

@ -108,12 +107,14 @@ public:
  // [2] All Array<T> dimensions are statically declared.
  class Ref : public CHeapObj<mtInternal> {
    Writability _writability;
    bool _keep_after_pushing;
    Ref* _next;
    void* _user_data;
    NONCOPYABLE(Ref);

  protected:
    virtual void** mpp() const = 0;
    Ref(Writability w) : _writability(w), _next(NULL) {}
    Ref(Writability w) : _writability(w), _keep_after_pushing(false), _next(NULL), _user_data(NULL) {}
  public:
    virtual bool not_null() const = 0;
    virtual int size() const = 0;

@ -137,6 +138,10 @@ public:
    void update(address new_loc) const;

    Writability writability() const { return _writability; };
    void set_keep_after_pushing() { _keep_after_pushing = true; }
    bool keep_after_pushing() { return _keep_after_pushing; }
    void set_user_data(void* data) { _user_data = data; }
    void* user_data() { return _user_data; }
    void set_next(Ref* n) { _next = n; }
    Ref* next() const { return _next; }

@ -243,21 +248,43 @@ private:
    }
  };

  // If recursion is too deep, save the Refs in _pending_refs, and push them later using
  // MetaspaceClosure::finish()
  // Normally, chains of references like a->b->c->d are iterated recursively. However,
  // if recursion is too deep, we save the Refs in _pending_refs, and push them later in
  // MetaspaceClosure::finish(). This avoids overflowing the C stack.
  static const int MAX_NEST_LEVEL = 5;
  Ref* _pending_refs;
  int _nest_level;
  Ref* _enclosing_ref;

  void push_impl(Ref* ref);
  void do_push(Ref* ref);

public:
  MetaspaceClosure(): _pending_refs(NULL), _nest_level(0) {}
  MetaspaceClosure(): _pending_refs(NULL), _nest_level(0), _enclosing_ref(NULL) {}
  ~MetaspaceClosure();

  void finish();

  // enclosing_ref() is used to compute the offset of a field in a C++ class. For example
  // class Foo { intx scala; Bar* ptr; }
  // Foo *f = 0x100;
  // when the f->ptr field is iterated with do_ref() on 64-bit platforms, we will have
  // do_ref(Ref* r) {
  //   r->addr() == 0x108;              // == &f->ptr;
  //   enclosing_ref()->obj() == 0x100; // == foo
  // So we know that we are iterating upon a field at offset 8 of the object at 0x100.
  //
  // Note that if we have stack overflow, do_pending_ref(r) will be called first and
  // do_ref(r) will be called later, for the same r. In this case, enclosing_ref() is valid only
  // when do_pending_ref(r) is called, and will return NULL when do_ref(r) is called.
  Ref* enclosing_ref() const {
    return _enclosing_ref;
  }
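  // The offset computation that enclosing_ref() enables is ordinary pointer
  // arithmetic: field address minus object address. A compilable restatement
  // of the Foo example from the comment above, as a hedged sketch (the assert
  // just confirms the difference matches offsetof on a typical layout):
  //
  //   #include <cassert>
  //   #include <cstddef>
  //   #include <cstdint>
  //
  //   struct Bar;
  //   struct Foo { std::intptr_t scala; Bar* ptr; };
  //
  //   int main() {
  //     Foo f{};
  //     const char* field = reinterpret_cast<const char*>(&f.ptr);  // r->addr()
  //     const char* obj   = reinterpret_cast<const char*>(&f);      // enclosing_ref()->obj()
  //     // On a typical 64-bit layout this is 8, matching 0x108 - 0x100 above.
  //     assert(static_cast<std::size_t>(field - obj) == offsetof(Foo, ptr));
  //     return 0;
  //   }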

  // This is called when a reference is placed in _pending_refs. Override this
  // function if you're using enclosing_ref(). See notes above.
  virtual void do_pending_ref(Ref* ref) {}

  // returns true if we want to keep iterating the pointers embedded inside <ref>
  virtual bool do_ref(Ref* ref, bool read_only) = 0;

@ -285,7 +312,11 @@ public:
  }

  template <class T> void push_method_entry(T** mpp, intptr_t* p) {
    push_special(_method_entry_ref, new ObjectRef<T>(mpp, _default), (intptr_t*)p);
    Ref* ref = new ObjectRef<T>(mpp, _default);
    push_special(_method_entry_ref, ref, (intptr_t*)p);
    if (!ref->keep_after_pushing()) {
      delete ref;
    }
  }

  // This is for tagging special pointers that are not a reference to MetaspaceObj. It's currently

@ -23,11 +23,9 @@
 */

#include "precompiled.hpp"
#include "jvm.h"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/classListParser.hpp"
#include "classfile/classLoaderExt.hpp"
#include "classfile/dictionary.hpp"
#include "classfile/loaderConstraints.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/placeholders.hpp"

@ -36,11 +34,11 @@
#include "classfile/systemDictionary.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "code/codeCache.hpp"
#include "gc/shared/softRefPolicy.hpp"
#include "interpreter/bytecodeStream.hpp"
#include "interpreter/bytecodes.hpp"
#include "logging/log.hpp"
#include "logging/logMessage.hpp"
#include "memory/archiveBuilder.hpp"
#include "memory/archiveUtils.inline.hpp"
#include "memory/dynamicArchive.hpp"
#include "memory/filemap.hpp"

@ -59,11 +57,9 @@
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "oops/typeArrayKlass.hpp"
#include "prims/jvmtiRedefineClasses.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/signature.hpp"
#include "runtime/timerTrace.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vmOperations.hpp"

@ -110,8 +106,8 @@ bool MetaspaceShared::_use_optimized_module_handling = true;
// temporarily allocated outside of the shared regions. Only the method entry
// trampolines are written into the mc region.
// [2] C++ vtables are copied into the mc region.
// [3] ArchiveCompactor copies RW metadata into the rw region.
// [4] ArchiveCompactor copies RO metadata into the ro region.
// [3] ArchiveBuilder copies RW metadata into the rw region.
// [4] ArchiveBuilder copies RO metadata into the ro region.
// [5] SymbolTable, StringTable, SystemDictionary, and a few other read-only data
//     are copied into the ro region as read-only tables.
//

@ -383,7 +379,7 @@ void MetaspaceShared::initialize_dumptime_shared_and_meta_spaces() {
  //   + The upper 1 GB is used as the "temporary compressed class space"
  //     -- preload_classes() will store Klasses into this space.
  //   + The lower 3 GB is used for the archive -- when preload_classes()
  //     is done, ArchiveCompactor will copy the class metadata into this
  //     is done, ArchiveBuilder will copy the class metadata into this
  //     space, first the RW parts, then the RO parts.

  // Starting address of ccs must be aligned to Metaspace::reserve_alignment()...

@ -624,72 +620,10 @@ uintx MetaspaceShared::object_delta_uintx(void* obj) {
// is run at a safepoint just before exit, this is the entire set of classes.
static GrowableArray<Klass*>* _global_klass_objects;

static int global_klass_compare(Klass** a, Klass **b) {
  return a[0]->name()->fast_compare(b[0]->name());
}

GrowableArray<Klass*>* MetaspaceShared::collected_klasses() {
  return _global_klass_objects;
}

static void collect_array_classes(Klass* k) {
  _global_klass_objects->append_if_missing(k);
  if (k->is_array_klass()) {
    // Add in the array classes too
    ArrayKlass* ak = ArrayKlass::cast(k);
    Klass* h = ak->higher_dimension();
    if (h != NULL) {
      h->array_klasses_do(collect_array_classes);
    }
  }
}

class CollectClassesClosure : public KlassClosure {
  void do_klass(Klass* k) {
    if (k->is_instance_klass() &&
        SystemDictionaryShared::is_excluded_class(InstanceKlass::cast(k))) {
      // Don't add to the _global_klass_objects
    } else {
      _global_klass_objects->append_if_missing(k);
    }
    if (k->is_array_klass()) {
      // Add in the array classes too
      ArrayKlass* ak = ArrayKlass::cast(k);
      Klass* h = ak->higher_dimension();
      if (h != NULL) {
        h->array_klasses_do(collect_array_classes);
      }
    }
  }
};

// Global object for holding symbols that are created during class loading. See SymbolTable::new_symbol
static GrowableArray<Symbol*>* _global_symbol_objects = NULL;

static int compare_symbols_by_address(Symbol** a, Symbol** b) {
  if (a[0] < b[0]) {
    return -1;
  } else if (a[0] == b[0]) {
    ResourceMark rm;
    log_warning(cds)("Duplicated symbol %s unexpected", (*a)->as_C_string());
    return 0;
  } else {
    return 1;
  }
}

void MetaspaceShared::add_symbol(Symbol* sym) {
  MutexLocker ml(CDSAddSymbol_lock, Mutex::_no_safepoint_check_flag);
  if (_global_symbol_objects == NULL) {
    _global_symbol_objects = new (ResourceObj::C_HEAP, mtSymbol) GrowableArray<Symbol*>(2048, mtSymbol);
  }
  _global_symbol_objects->append(sym);
}

GrowableArray<Symbol*>* MetaspaceShared::collected_symbols() {
  return _global_symbol_objects;
}

static void remove_unshareable_in_classes() {
  for (int i = 0; i < _global_klass_objects->length(); i++) {
    Klass* k = _global_klass_objects->at(i);

@ -1071,148 +1005,6 @@ void WriteClosure::do_region(u_char* start, size_t size) {
  }
}

// This is for dumping detailed statistics for the allocations
// in the shared spaces.
class DumpAllocStats : public ResourceObj {
public:

  // Here's poor man's enum inheritance
#define SHAREDSPACE_OBJ_TYPES_DO(f) \
  METASPACE_OBJ_TYPES_DO(f) \
  f(SymbolHashentry) \
  f(SymbolBucket) \
  f(StringHashentry) \
  f(StringBucket) \
  f(Other)

  enum Type {
    // Types are MetaspaceObj::ClassType, MetaspaceObj::SymbolType, etc
    SHAREDSPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_DECLARE)
    _number_of_types
  };

  static const char * type_name(Type type) {
    switch(type) {
    SHAREDSPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_NAME_CASE)
    default:
      ShouldNotReachHere();
      return NULL;
    }
  }

public:
  enum { RO = 0, RW = 1 };

  int _counts[2][_number_of_types];
  int _bytes [2][_number_of_types];

  DumpAllocStats() {
    memset(_counts, 0, sizeof(_counts));
    memset(_bytes, 0, sizeof(_bytes));
  };

  void record(MetaspaceObj::Type type, int byte_size, bool read_only) {
    assert(int(type) >= 0 && type < MetaspaceObj::_number_of_types, "sanity");
    int which = (read_only) ? RO : RW;
    _counts[which][type] ++;
    _bytes [which][type] += byte_size;
  }

  void record_other_type(int byte_size, bool read_only) {
    int which = (read_only) ? RO : RW;
    _bytes [which][OtherType] += byte_size;
  }
  void print_stats(int ro_all, int rw_all, int mc_all);
};

void DumpAllocStats::print_stats(int ro_all, int rw_all, int mc_all) {
  // Calculate size of data that was not allocated by Metaspace::allocate()
  MetaspaceSharedStats *stats = MetaspaceShared::stats();

  // symbols
  _counts[RO][SymbolHashentryType] = stats->symbol.hashentry_count;
  _bytes [RO][SymbolHashentryType] = stats->symbol.hashentry_bytes;

  _counts[RO][SymbolBucketType] = stats->symbol.bucket_count;
  _bytes [RO][SymbolBucketType] = stats->symbol.bucket_bytes;

  // strings
  _counts[RO][StringHashentryType] = stats->string.hashentry_count;
  _bytes [RO][StringHashentryType] = stats->string.hashentry_bytes;

  _counts[RO][StringBucketType] = stats->string.bucket_count;
  _bytes [RO][StringBucketType] = stats->string.bucket_bytes;

  // TODO: count things like dictionary, vtable, etc
  _bytes[RW][OtherType] += mc_all;
  rw_all += mc_all; // mc is mapped Read/Write

  // prevent divide-by-zero
  if (ro_all < 1) {
    ro_all = 1;
  }
  if (rw_all < 1) {
    rw_all = 1;
  }

  int all_ro_count = 0;
  int all_ro_bytes = 0;
  int all_rw_count = 0;
  int all_rw_bytes = 0;

// To make fmt_stats be a syntactic constant (for format warnings), use #define.
#define fmt_stats "%-20s: %8d %10d %5.1f | %8d %10d %5.1f | %8d %10d %5.1f"
  const char *sep = "--------------------+---------------------------+---------------------------+--------------------------";
  const char *hdr = "                        ro_cnt   ro_bytes     % |   rw_cnt   rw_bytes     % |  all_cnt  all_bytes     %";

  LogMessage(cds) msg;

  msg.debug("Detailed metadata info (excluding st regions; rw stats include mc regions):");
  msg.debug("%s", hdr);
  msg.debug("%s", sep);
  for (int type = 0; type < int(_number_of_types); type ++) {
    const char *name = type_name((Type)type);
    int ro_count = _counts[RO][type];
    int ro_bytes = _bytes [RO][type];
    int rw_count = _counts[RW][type];
    int rw_bytes = _bytes [RW][type];
    int count = ro_count + rw_count;
    int bytes = ro_bytes + rw_bytes;

    double ro_perc = percent_of(ro_bytes, ro_all);
    double rw_perc = percent_of(rw_bytes, rw_all);
    double perc    = percent_of(bytes, ro_all + rw_all);

    msg.debug(fmt_stats, name,
              ro_count, ro_bytes, ro_perc,
              rw_count, rw_bytes, rw_perc,
              count, bytes, perc);

    all_ro_count += ro_count;
    all_ro_bytes += ro_bytes;
    all_rw_count += rw_count;
    all_rw_bytes += rw_bytes;
  }

  int all_count = all_ro_count + all_rw_count;
  int all_bytes = all_ro_bytes + all_rw_bytes;

  double all_ro_perc = percent_of(all_ro_bytes, ro_all);
  double all_rw_perc = percent_of(all_rw_bytes, rw_all);
  double all_perc    = percent_of(all_bytes, ro_all + rw_all);

  msg.debug("%s", sep);
  msg.debug(fmt_stats, "Total",
            all_ro_count, all_ro_bytes, all_ro_perc,
            all_rw_count, all_rw_bytes, all_rw_perc,
            all_count, all_bytes, all_perc);

  assert(all_ro_bytes == ro_all, "everything should have been counted");
  assert(all_rw_bytes == rw_all, "everything should have been counted");

#undef fmt_stats
}

// Populate the shared space.

class VM_PopulateDumpSharedSpace: public VM_Operation {

@ -1229,7 +1021,6 @@ private:
                            GrowableArray<ArchiveHeapOopmapInfo>* oopmaps);
  void dump_symbols();
  char* dump_read_only_tables();
  void print_class_stats();
  void print_region_stats(FileMapInfo* map_info);
  void print_bitmap_region_stats(size_t size, size_t total_size);
  void print_heap_region_stats(GrowableArray<MemRegion> *heap_mem,

@ -1243,278 +1034,20 @@ public:
  bool allow_nested_vm_operations() const { return true; }
}; // class VM_PopulateDumpSharedSpace

// ArchiveCompactor --
//
// This class is the central piece of shared archive compaction -- all metaspace data are
// initially allocated outside of the shared regions. ArchiveCompactor copies the
// metaspace data into their final location in the shared regions.

class ArchiveCompactor : AllStatic {
  static const int INITIAL_TABLE_SIZE = 8087;
  static const int MAX_TABLE_SIZE = 1000000;

  static DumpAllocStats* _alloc_stats;

  typedef KVHashtable<address, address, mtInternal> RelocationTable;
  static RelocationTable* _new_loc_table;

class StaticArchiveBuilder : public ArchiveBuilder {
public:
  static void initialize() {
    _alloc_stats = new(ResourceObj::C_HEAP, mtInternal)DumpAllocStats;
    _new_loc_table = new RelocationTable(INITIAL_TABLE_SIZE);
  }
  static DumpAllocStats* alloc_stats() {
    return _alloc_stats;
  }
  StaticArchiveBuilder(DumpRegion* rw_region, DumpRegion* ro_region)
    : ArchiveBuilder(rw_region, ro_region) {}

  // Use this when you allocate space with MetaspaceShare::read_only_space_alloc()
  // outside of ArchiveCompactor::allocate(). These are usually for misc tables
  // that are allocated in the RO space.
  class OtherROAllocMark {
    char* _oldtop;
  public:
    OtherROAllocMark() {
      _oldtop = _ro_region.top();
    }
    ~OtherROAllocMark() {
      char* newtop = _ro_region.top();
      ArchiveCompactor::alloc_stats()->record_other_type(int(newtop - _oldtop), true);
    }
  };

  static void allocate(MetaspaceClosure::Ref* ref, bool read_only) {
    address obj = ref->obj();
    int bytes = ref->size() * BytesPerWord;
    char* p;
    size_t alignment = BytesPerWord;
    char* oldtop;
    char* newtop;

    if (read_only) {
      oldtop = _ro_region.top();
      p = _ro_region.allocate(bytes, alignment);
      newtop = _ro_region.top();
    } else {
      oldtop = _rw_region.top();
      if (ref->msotype() == MetaspaceObj::ClassType) {
        // Save a pointer immediate in front of an InstanceKlass, so
        // we can do a quick lookup from InstanceKlass* -> RunTimeSharedClassInfo*
        // without building another hashtable. See RunTimeSharedClassInfo::get_for()
        // in systemDictionaryShared.cpp.
        Klass* klass = (Klass*)obj;
        if (klass->is_instance_klass()) {
          SystemDictionaryShared::validate_before_archiving(InstanceKlass::cast(klass));
          _rw_region.allocate(sizeof(address), BytesPerWord);
        }
      }
      p = _rw_region.allocate(bytes, alignment);
      newtop = _rw_region.top();
    }
    memcpy(p, obj, bytes);

    intptr_t* archived_vtable = MetaspaceShared::get_archived_cpp_vtable(ref->msotype(), (address)p);
    if (archived_vtable != NULL) {
      *(address*)p = (address)archived_vtable;
      ArchivePtrMarker::mark_pointer((address*)p);
    }

    assert(_new_loc_table->lookup(obj) == NULL, "each object can be relocated at most once");
    _new_loc_table->add(obj, (address)p);
    log_trace(cds)("Copy: " PTR_FORMAT " ==> " PTR_FORMAT " %d", p2i(obj), p2i(p), bytes);
    if (_new_loc_table->maybe_grow(MAX_TABLE_SIZE)) {
      log_info(cds, hashtables)("Expanded _new_loc_table to %d", _new_loc_table->table_size());
    }
    _alloc_stats->record(ref->msotype(), int(newtop - oldtop), read_only);
  }

  static address get_new_loc(MetaspaceClosure::Ref* ref) {
    address* pp = _new_loc_table->lookup(ref->obj());
    assert(pp != NULL, "must be");
    return *pp;
  }
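allocate()/get_new_loc() implement a classic two-pass compaction: pass one copies every object and records original -> copy in a relocation table; pass two rewrites embedded pointers through that table. A minimal sketch with std::unordered_map standing in for the KVHashtable:

#include <cassert>
#include <unordered_map>

struct Node { Node* next; };

int main() {
  Node a{nullptr};
  Node b{&a};                                  // b points at the "original" a
  std::unordered_map<Node*, Node*> new_loc;    // stands in for _new_loc_table

  Node copies[2] = {a, b};                     // pass 1: shallow copies
  new_loc[&a] = &copies[0];
  new_loc[&b] = &copies[1];

  for (Node& c : copies) {                     // pass 2: relocate embedded refs
    if (c.next != nullptr) {
      c.next = new_loc.at(c.next);
    }
  }
  assert(copies[1].next == &copies[0]);        // the copy now points at the copy
  return 0;
}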

private:
  // Makes a shallow copy of visited MetaspaceObj's
  class ShallowCopier: public UniqueMetaspaceClosure {
    bool _read_only;
  public:
    ShallowCopier(bool read_only) : _read_only(read_only) {}

    virtual bool do_unique_ref(Ref* ref, bool read_only) {
      if (read_only == _read_only) {
        allocate(ref, read_only);
      }
      return true; // recurse into ref.obj()
    }
  };

  // Relocate embedded pointers within a MetaspaceObj's shallow copy
  class ShallowCopyEmbeddedRefRelocator: public UniqueMetaspaceClosure {
  public:
    virtual bool do_unique_ref(Ref* ref, bool read_only) {
      address new_loc = get_new_loc(ref);
      RefRelocator refer;
      ref->metaspace_pointers_do_at(&refer, new_loc);
      return true; // recurse into ref.obj()
    }
    virtual void push_special(SpecialRef type, Ref* ref, intptr_t* p) {
      assert(type == _method_entry_ref, "only special type allowed for now");
      address obj = ref->obj();
      address new_obj = get_new_loc(ref);
      size_t offset = pointer_delta(p, obj, sizeof(u1));
      intptr_t* new_p = (intptr_t*)(new_obj + offset);
      assert(*p == *new_p, "must be a copy");
      ArchivePtrMarker::mark_pointer((address*)new_p);
    }
  };

  // Relocate a reference to point to its shallow copy
  class RefRelocator: public MetaspaceClosure {
  public:
    virtual bool do_ref(Ref* ref, bool read_only) {
      if (ref->not_null()) {
        ref->update(get_new_loc(ref));
        ArchivePtrMarker::mark_pointer(ref->addr());
      }
      return false; // Do not recurse.
    }
  };

#ifdef ASSERT
  class IsRefInArchiveChecker: public MetaspaceClosure {
  public:
    virtual bool do_ref(Ref* ref, bool read_only) {
      if (ref->not_null()) {
        char* obj = (char*)ref->obj();
        assert(_ro_region.contains(obj) || _rw_region.contains(obj),
               "must be relocated to point to CDS archive");
      }
      return false; // Do not recurse.
    }
  };
#endif

public:
  static void copy_and_compact() {
    ResourceMark rm;

    log_info(cds)("Scanning all metaspace objects ... ");
    {
      // allocate and shallow-copy RW objects, immediately following the MC region
      log_info(cds)("Allocating RW objects ... ");
      _mc_region.pack(&_rw_region);

      ResourceMark rm;
      ShallowCopier rw_copier(false);
      iterate_roots(&rw_copier);
    }
    {
      // allocate and shallow-copy of RO object, immediately following the RW region
      log_info(cds)("Allocating RO objects ... ");
      _rw_region.pack(&_ro_region);

      ResourceMark rm;
      ShallowCopier ro_copier(true);
      iterate_roots(&ro_copier);
    }
    {
      log_info(cds)("Relocating embedded pointers ... ");
      ResourceMark rm;
      ShallowCopyEmbeddedRefRelocator emb_reloc;
      iterate_roots(&emb_reloc);
    }
    {
      log_info(cds)("Relocating external roots ... ");
      ResourceMark rm;
      RefRelocator ext_reloc;
      iterate_roots(&ext_reloc);
    }
    {
      log_info(cds)("Fixing symbol identity hash ... ");
      os::init_random(0x12345678);
      GrowableArray<Symbol*>* all_symbols = MetaspaceShared::collected_symbols();
      all_symbols->sort(compare_symbols_by_address);
      for (int i = 0; i < all_symbols->length(); i++) {
        assert(all_symbols->at(i)->is_permanent(), "archived symbols must be permanent");
        all_symbols->at(i)->update_identity_hash();
      }
    }
#ifdef ASSERT
    {
      log_info(cds)("Verifying external roots ... ");
      ResourceMark rm;
      IsRefInArchiveChecker checker;
      iterate_roots(&checker);
    }
#endif
  }

  // We must relocate the System::_well_known_klasses only after we have copied the
  // java objects in during dump_java_heap_objects(): during the object copy, we operate on
  // old objects which assert that their klass is the original klass.
  static void relocate_well_known_klasses() {
    {
      log_info(cds)("Relocating SystemDictionary::_well_known_klasses[] ... ");
      ResourceMark rm;
      RefRelocator ext_reloc;
      SystemDictionary::well_known_klasses_do(&ext_reloc);
    }
    // NOTE: after this point, we shouldn't have any globals that can reach the old
    // objects.

    // We cannot use any of the objects in the heap anymore (except for the
    // shared strings) because their headers no longer point to valid Klasses.
  }

  static void iterate_roots(MetaspaceClosure* it) {
    // To ensure deterministic contents in the archive, we just need to ensure that
    // we iterate the MetaspaceObjs in a deterministic order. It doesn't matter where
    // the MetaspaceObjs are located originally, as they are copied sequentially into
    // the archive during the iteration.
    //
    // The only issue here is that the symbol table and the system dictionaries may be
    // randomly ordered, so we copy the symbols and klasses into two arrays and sort
    // them deterministically.
    //
    // During -Xshare:dump, the order of Symbol creation is strictly determined by
    // the SharedClassListFile (class loading is done in a single thread and the JIT
    // is disabled). Also, Symbols are allocated in monotonically increasing addresses
    // (see Symbol::operator new(size_t, int)). So if we iterate the Symbols by
    // ascending address order, we ensure that all Symbols are copied into deterministic
    // locations in the archive.
    GrowableArray<Symbol*>* symbols = _global_symbol_objects;
    for (int i = 0; i < symbols->length(); i++) {
      it->push(symbols->adr_at(i));
    }
    if (_global_klass_objects != NULL) {
      // Need to fix up the pointers
      for (int i = 0; i < _global_klass_objects->length(); i++) {
        // NOTE -- this requires that the vtable is NOT yet patched, or else we are hosed.
        it->push(_global_klass_objects->adr_at(i));
      }
    }
  virtual void iterate_roots(MetaspaceClosure* it, bool is_relocating_pointers) {
    FileMapInfo::metaspace_pointers_do(it, false);
    SystemDictionaryShared::dumptime_classes_do(it);
    Universe::metaspace_pointers_do(it);
    SymbolTable::metaspace_pointers_do(it);
    vmSymbols::metaspace_pointers_do(it);

    it->finish();
  }
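The removed comment block is the key constraint behind both versions of iterate_roots(): archive contents are deterministic only if the roots are visited in a deterministic order, and sorting by address is valid here purely because dump-time allocation is itself deterministic. A content-based sort key is the unconditional alternative, sketched below (Sym is a stand-in for Symbol, not a HotSpot type):

#include <algorithm>
#include <cassert>
#include <string>
#include <vector>

struct Sym { std::string name; };

// Visit order by name is reproducible across runs regardless of where the
// allocator happened to place each Sym.
void sort_roots(std::vector<Sym*>& roots) {
  std::sort(roots.begin(), roots.end(),
            [](const Sym* a, const Sym* b) { return a->name < b->name; });
}

int main() {
  Sym x{"java/lang/Object"}, y{"java/lang/String"};
  std::vector<Sym*> roots{&y, &x};
  sort_roots(roots);
  assert(roots[0] == &x);
  return 0;
}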
|
||||
|
||||
static Klass* get_relocated_klass(Klass* orig_klass) {
|
||||
assert(DumpSharedSpaces, "dump time only");
|
||||
address* pp = _new_loc_table->lookup((address)orig_klass);
|
||||
assert(pp != NULL, "must be");
|
||||
Klass* klass = (Klass*)(*pp);
|
||||
assert(klass->is_klass(), "must be");
|
||||
return klass;
|
||||
}
|
||||
};
|
||||
|
||||
DumpAllocStats* ArchiveCompactor::_alloc_stats;
|
||||
ArchiveCompactor::RelocationTable* ArchiveCompactor::_new_loc_table;
|
||||
|
||||
void VM_PopulateDumpSharedSpace::dump_symbols() {
|
||||
log_info(cds)("Dumping symbol table ...");
|
||||
|
||||
@ -1523,7 +1056,7 @@ void VM_PopulateDumpSharedSpace::dump_symbols() {
|
||||
}
|
||||
|
||||
char* VM_PopulateDumpSharedSpace::dump_read_only_tables() {
|
||||
ArchiveCompactor::OtherROAllocMark mark;
|
||||
ArchiveBuilder::OtherROAllocMark mark;
|
||||
|
||||
log_info(cds)("Removing java_mirror ... ");
|
||||
if (!HeapShared::is_heap_object_archiving_allowed()) {
|
||||
@ -1547,27 +1080,6 @@ char* VM_PopulateDumpSharedSpace::dump_read_only_tables() {
|
||||
return start;
|
||||
}
|
||||
|
||||
void VM_PopulateDumpSharedSpace::print_class_stats() {
|
||||
log_info(cds)("Number of classes %d", _global_klass_objects->length());
|
||||
{
|
||||
int num_type_array = 0, num_obj_array = 0, num_inst = 0;
|
||||
for (int i = 0; i < _global_klass_objects->length(); i++) {
|
||||
Klass* k = _global_klass_objects->at(i);
|
||||
if (k->is_instance_klass()) {
|
||||
num_inst ++;
|
||||
} else if (k->is_objArray_klass()) {
|
||||
num_obj_array ++;
|
||||
} else {
|
||||
assert(k->is_typeArray_klass(), "sanity");
|
||||
num_type_array ++;
|
||||
}
|
||||
}
|
||||
log_info(cds)(" instance classes = %5d", num_inst);
|
||||
log_info(cds)(" obj array classes = %5d", num_obj_array);
|
||||
log_info(cds)(" type array classes = %5d", num_type_array);
|
||||
}
|
||||
}
|
||||
|
||||
void VM_PopulateDumpSharedSpace::relocate_to_requested_base_address(CHeapBitMap* ptrmap) {
|
||||
intx addr_delta = MetaspaceShared::final_delta();
|
||||
if (addr_delta == 0) {
|
||||
@ -1618,7 +1130,7 @@ void VM_PopulateDumpSharedSpace::doit() {
|
||||
// (1) Metaspace::allocate might trigger GC if we have run out of
|
||||
// committed metaspace, but we can't GC because we're running
|
||||
// in the VM thread.
|
||||
// (2) ArchiveCompactor needs to work with a stable set of MetaspaceObjs.
|
||||
// (2) ArchiveBuilder needs to work with a stable set of MetaspaceObjs.
|
||||
Metaspace::freeze();
|
||||
DEBUG_ONLY(SystemDictionaryShared::NoClassLoadingMark nclm);
|
||||
|
||||
@ -1640,12 +1152,10 @@ void VM_PopulateDumpSharedSpace::doit() {
|
||||
// Gather systemDictionary classes in a global array and do everything to
|
||||
// that so we don't have to walk the SystemDictionary again.
|
||||
SystemDictionaryShared::check_excluded_classes();
|
||||
_global_klass_objects = new GrowableArray<Klass*>(1000);
|
||||
CollectClassesClosure collect_classes;
|
||||
ClassLoaderDataGraph::loaded_classes_do(&collect_classes);
|
||||
_global_klass_objects->sort(global_klass_compare);
|
||||
|
||||
print_class_stats();
|
||||
StaticArchiveBuilder builder(&_rw_region, &_ro_region);
|
||||
builder.gather_klasses_and_symbols();
|
||||
_global_klass_objects = builder.klasses();
|
||||
|
||||
// Ensure the ConstMethods won't be modified at run-time
|
||||
log_info(cds)("Updating ConstMethods ... ");
|
||||
@ -1657,12 +1167,17 @@ void VM_PopulateDumpSharedSpace::doit() {
|
||||
remove_unshareable_in_classes();
|
||||
log_info(cds)("done. ");
|
||||
|
||||
builder.gather_source_objs();
|
||||
|
||||
MetaspaceShared::allocate_cloned_cpp_vtptrs();
|
||||
char* cloned_vtables = _mc_region.top();
|
||||
MetaspaceShared::allocate_cpp_vtable_clones();
|
||||
|
||||
ArchiveCompactor::initialize();
|
||||
ArchiveCompactor::copy_and_compact();
|
||||
_mc_region.pack(&_rw_region);
|
||||
builder.dump_rw_region();
|
||||
_rw_region.pack(&_ro_region);
|
||||
builder.dump_ro_region();
|
||||
builder.relocate_pointers();
|
||||
|
||||
dump_symbols();
|
||||
|
||||
@ -1671,7 +1186,7 @@ void VM_PopulateDumpSharedSpace::doit() {
|
||||
_open_archive_heap_regions = NULL;
|
||||
dump_java_heap_objects();
|
||||
|
||||
ArchiveCompactor::relocate_well_known_klasses();
|
||||
builder.relocate_well_known_klasses();
|
||||
|
||||
char* serialized_data = dump_read_only_tables();
|
||||
_ro_region.pack();
|
||||
@ -1714,8 +1229,7 @@ void VM_PopulateDumpSharedSpace::doit() {
|
||||
mapinfo->close();
|
if (log_is_enabled(Info, cds)) {
ArchiveCompactor::alloc_stats()->print_stats(int(_ro_region.used()), int(_rw_region.used()),
int(_mc_region.used()));
builder.print_stats(int(_ro_region.used()), int(_rw_region.used()), int(_mc_region.used()));
}

if (PrintSystemDictionaryAtExit) {

@ -1801,13 +1315,13 @@ void MetaspaceShared::write_region(FileMapInfo* mapinfo, int region_idx, DumpReg
// shared archive has been compacted.
void MetaspaceShared::relocate_klass_ptr(oop o) {
assert(DumpSharedSpaces, "sanity");
Klass* k = ArchiveCompactor::get_relocated_klass(o->klass());
Klass* k = ArchiveBuilder::get_relocated_klass(o->klass());
o->set_klass(k);
}

Klass* MetaspaceShared::get_relocated_klass(Klass *k, bool is_final) {
assert(DumpSharedSpaces, "sanity");
k = ArchiveCompactor::get_relocated_klass(k);
k = ArchiveBuilder::get_relocated_klass(k);
if (is_final) {
k = (Klass*)(address(k) + final_delta());
}

@ -2014,7 +1528,7 @@ void VM_PopulateDumpSharedSpace::dump_java_heap_objects() {
_open_archive_heap_regions = new GrowableArray<MemRegion>(2);
HeapShared::archive_java_heap_objects(_closed_archive_heap_regions,
_open_archive_heap_regions);
ArchiveCompactor::OtherROAllocMark mark;
ArchiveBuilder::OtherROAllocMark mark;
HeapShared::write_subgraph_info_table();
}

@ -30,7 +30,6 @@
#include "memory/memRegion.hpp"
#include "memory/virtualspace.hpp"
#include "oops/oop.hpp"
#include "utilities/exceptions.hpp"
#include "utilities/macros.hpp"
#include "utilities/resourceHash.hpp"

@ -213,8 +212,6 @@ class MetaspaceShared : AllStatic {
TRAPS) NOT_CDS_RETURN_(0);

static GrowableArray<Klass*>* collected_klasses();
static GrowableArray<Symbol*>* collected_symbols();
static void add_symbol(Symbol* sym) NOT_CDS_RETURN;

static ReservedSpace* shared_rs() {
CDS_ONLY(return &_shared_rs);

@ -33,18 +33,15 @@
#include "classfile/vmSymbols.hpp"
#include "code/codeBehaviours.hpp"
#include "code/codeCache.hpp"
#include "code/dependencies.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/gcArguments.hpp"
#include "gc/shared/gcConfig.hpp"
#include "gc/shared/gcLogPrecious.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/oopStorageSet.hpp"
#include "interpreter/interpreter.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/heapShared.hpp"
#include "memory/filemap.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/metaspaceCounters.hpp"

@ -53,11 +50,8 @@
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.hpp"
#include "oops/constantPool.hpp"
#include "oops/instanceClassLoaderKlass.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oopHandle.inline.hpp"

@ -65,25 +59,17 @@
#include "prims/resolvedMethodTable.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomic.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/flags/jvmFlagConstraintList.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/timerTrace.hpp"
#include "runtime/vmOperations.hpp"
#include "services/memoryService.hpp"
#include "utilities/align.hpp"
#include "utilities/autoRestore.hpp"
#include "utilities/copy.hpp"
#include "utilities/debug.hpp"
#include "utilities/events.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/hashtable.inline.hpp"
#include "utilities/macros.hpp"
#include "utilities/ostream.hpp"
#include "utilities/preserveException.hpp"
@ -1890,7 +1890,7 @@ void PhaseChaitin::add_reference(const Node *node, const Node *old_node) {
}

#ifndef PRODUCT
void PhaseChaitin::dump(const Node *n) const {
void PhaseChaitin::dump(const Node* n) const {
uint r = (n->_idx < _lrg_map.size()) ? _lrg_map.find_const(n) : 0;
tty->print("L%d",r);
if (r && n->Opcode() != Op_Phi) {

@ -1968,7 +1968,7 @@ void PhaseChaitin::dump(const Node *n) const {
tty->print("\n");
}

void PhaseChaitin::dump(const Block *b) const {
void PhaseChaitin::dump(const Block* b) const {
b->dump_head(&_cfg);

// For all instructions

@ -2064,7 +2064,7 @@ void PhaseChaitin::dump_simplified() const {
tty->cr();
}

static char *print_reg( OptoReg::Name reg, const PhaseChaitin *pc, char *buf ) {
static char *print_reg(OptoReg::Name reg, const PhaseChaitin* pc, char* buf) {
if ((int)reg < 0)
sprintf(buf, "<OptoReg::%d>", (int)reg);
else if (OptoReg::is_reg(reg))

@ -2077,7 +2077,7 @@ static char *print_reg( OptoReg::Name reg, const PhaseChaitin *pc, char *buf ) {

// Dump a register name into a buffer. Be intelligent if we get called
// before allocation is complete.
char *PhaseChaitin::dump_register( const Node *n, char *buf ) const {
char *PhaseChaitin::dump_register(const Node* n, char* buf) const {
if( _node_regs ) {
// Post allocation, use direct mappings, no LRG info available
print_reg( get_reg_first(n), this, buf );

@ -2226,7 +2226,7 @@ void PhaseChaitin::dump_frame() const {
tty->print_cr("#");
}

void PhaseChaitin::dump_bb( uint pre_order ) const {
void PhaseChaitin::dump_bb(uint pre_order) const {
tty->print_cr("---dump of B%d---",pre_order);
for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
Block* block = _cfg.get_block(i);

@ -2236,7 +2236,7 @@ void PhaseChaitin::dump_bb( uint pre_order ) const {
}
}

void PhaseChaitin::dump_lrg( uint lidx, bool defs_only ) const {
void PhaseChaitin::dump_lrg(uint lidx, bool defs_only) const {
tty->print_cr("---dump of L%d---",lidx);

if (_ifg) {

@ -2294,6 +2294,102 @@ void PhaseChaitin::dump_lrg( uint lidx, bool defs_only ) const {
}
#endif // not PRODUCT

#ifdef ASSERT
// Verify that base pointers and derived pointers are still sane.
void PhaseChaitin::verify_base_ptrs(ResourceArea* a) const {
Unique_Node_List worklist(a);
for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
Block* block = _cfg.get_block(i);
for (uint j = block->end_idx() + 1; j > 1; j--) {
Node* n = block->get_node(j-1);
if (n->is_Phi()) {
break;
}
// Found a safepoint?
if (n->is_MachSafePoint()) {
MachSafePointNode* sfpt = n->as_MachSafePoint();
JVMState* jvms = sfpt->jvms();
if (jvms != NULL) {
// Now scan for a live derived pointer
if (jvms->oopoff() < sfpt->req()) {
// Check each derived/base pair
for (uint idx = jvms->oopoff(); idx < sfpt->req(); idx++) {
Node* check = sfpt->in(idx);
bool is_derived = ((idx - jvms->oopoff()) & 1) == 0;
// search upwards through spills and spill phis for AddP
worklist.clear();
worklist.push(check);
uint k = 0;
while (k < worklist.size()) {
check = worklist.at(k);
assert(check, "Bad base or derived pointer");
// See PhaseChaitin::find_base_for_derived() for all cases.
int isc = check->is_Copy();
if (isc) {
worklist.push(check->in(isc));
} else if (check->is_Phi()) {
for (uint m = 1; m < check->req(); m++) {
worklist.push(check->in(m));
}
} else if (check->is_Con()) {
if (is_derived) {
// Derived is NULL+offset
assert(!is_derived || check->bottom_type()->is_ptr()->ptr() == TypePtr::Null, "Bad derived pointer");
} else {
assert(check->bottom_type()->is_ptr()->_offset == 0, "Bad base pointer");
// Base either ConP(NULL) or loadConP
if (check->is_Mach()) {
assert(check->as_Mach()->ideal_Opcode() == Op_ConP, "Bad base pointer");
} else {
assert(check->Opcode() == Op_ConP &&
check->bottom_type()->is_ptr()->ptr() == TypePtr::Null, "Bad base pointer");
}
}
} else if (check->bottom_type()->is_ptr()->_offset == 0) {
if (check->is_Proj() || (check->is_Mach() &&
(check->as_Mach()->ideal_Opcode() == Op_CreateEx ||
check->as_Mach()->ideal_Opcode() == Op_ThreadLocal ||
check->as_Mach()->ideal_Opcode() == Op_CMoveP ||
check->as_Mach()->ideal_Opcode() == Op_CheckCastPP ||
#ifdef _LP64
(UseCompressedOops && check->as_Mach()->ideal_Opcode() == Op_CastPP) ||
(UseCompressedOops && check->as_Mach()->ideal_Opcode() == Op_DecodeN) ||
(UseCompressedClassPointers && check->as_Mach()->ideal_Opcode() == Op_DecodeNKlass) ||
#endif // _LP64
check->as_Mach()->ideal_Opcode() == Op_LoadP ||
check->as_Mach()->ideal_Opcode() == Op_LoadKlass))) {
// Valid nodes
} else {
check->dump();
assert(false, "Bad base or derived pointer");
}
} else {
assert(is_derived, "Bad base pointer");
assert(check->is_Mach() && check->as_Mach()->ideal_Opcode() == Op_AddP, "Bad derived pointer");
}
k++;
assert(k < 100000, "Derived pointer checking in infinite loop");
} // End while
}
} // End of check for derived pointers
} // End of Kcheck for debug info
} // End of if found a safepoint
} // End of forall instructions in block
} // End of forall blocks
}

// Verify that graphs and base pointers are still sane.
void PhaseChaitin::verify(ResourceArea* a, bool verify_ifg) const {
if (VerifyRegisterAllocator) {
_cfg.verify();
verify_base_ptrs(a);
if (verify_ifg) {
_ifg->verify(this);
}
}
}
#endif // ASSERT

int PhaseChaitin::_final_loads = 0;
int PhaseChaitin::_final_stores = 0;
int PhaseChaitin::_final_memoves= 0;
@ -751,34 +751,34 @@ private:
static int _used_cisc_instructions, _unused_cisc_instructions;
static int _allocator_attempts, _allocator_successes;

#ifdef ASSERT
// Verify that base pointers and derived pointers are still sane
void verify_base_ptrs(ResourceArea* a) const;
void verify(ResourceArea* a, bool verify_ifg = false) const;
#endif // ASSERT

#ifndef PRODUCT
static uint _high_pressure, _low_pressure;

void dump() const;
void dump( const Node *n ) const;
void dump( const Block * b ) const;
void dump(const Node* n) const;
void dump(const Block* b) const;
void dump_degree_lists() const;
void dump_simplified() const;
void dump_lrg( uint lidx, bool defs_only) const;
void dump_lrg( uint lidx) const {
void dump_lrg(uint lidx, bool defs_only) const;
void dump_lrg(uint lidx) const {
// dump defs and uses by default
dump_lrg(lidx, false);
}
void dump_bb( uint pre_order ) const;

// Verify that base pointers and derived pointers are still sane
void verify_base_ptrs( ResourceArea *a ) const;

void verify( ResourceArea *a, bool verify_ifg = false ) const;

void dump_bb(uint pre_order) const;
void dump_for_spill_split_recycle() const;

public:
void dump_frame() const;
char *dump_register( const Node *n, char *buf ) const;
char *dump_register(const Node* n, char* buf) const;
private:
static void print_chaitin_statistics();
#endif
#endif // not PRODUCT
friend class PhaseCoalesce;
friend class PhaseAggressiveCoalesce;
friend class PhaseConservativeCoalesce;
@ -301,100 +301,4 @@ void PhaseLive::dump(const Block *b) const {
tty->print("\n");
}

// Verify that base pointers and derived pointers are still sane.
void PhaseChaitin::verify_base_ptrs(ResourceArea *a) const {
#ifdef ASSERT
Unique_Node_List worklist(a);
for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
Block* block = _cfg.get_block(i);
for (uint j = block->end_idx() + 1; j > 1; j--) {
Node* n = block->get_node(j-1);
if (n->is_Phi()) {
break;
}
// Found a safepoint?
if (n->is_MachSafePoint()) {
MachSafePointNode *sfpt = n->as_MachSafePoint();
JVMState* jvms = sfpt->jvms();
if (jvms != NULL) {
// Now scan for a live derived pointer
if (jvms->oopoff() < sfpt->req()) {
// Check each derived/base pair
for (uint idx = jvms->oopoff(); idx < sfpt->req(); idx++) {
Node *check = sfpt->in(idx);
bool is_derived = ((idx - jvms->oopoff()) & 1) == 0;
// search upwards through spills and spill phis for AddP
worklist.clear();
worklist.push(check);
uint k = 0;
while (k < worklist.size()) {
check = worklist.at(k);
assert(check,"Bad base or derived pointer");
// See PhaseChaitin::find_base_for_derived() for all cases.
int isc = check->is_Copy();
if (isc) {
worklist.push(check->in(isc));
} else if (check->is_Phi()) {
for (uint m = 1; m < check->req(); m++)
worklist.push(check->in(m));
} else if (check->is_Con()) {
if (is_derived) {
// Derived is NULL+offset
assert(!is_derived || check->bottom_type()->is_ptr()->ptr() == TypePtr::Null,"Bad derived pointer");
} else {
assert(check->bottom_type()->is_ptr()->_offset == 0,"Bad base pointer");
// Base either ConP(NULL) or loadConP
if (check->is_Mach()) {
assert(check->as_Mach()->ideal_Opcode() == Op_ConP,"Bad base pointer");
} else {
assert(check->Opcode() == Op_ConP &&
check->bottom_type()->is_ptr()->ptr() == TypePtr::Null,"Bad base pointer");
}
}
} else if (check->bottom_type()->is_ptr()->_offset == 0) {
if (check->is_Proj() || (check->is_Mach() &&
(check->as_Mach()->ideal_Opcode() == Op_CreateEx ||
check->as_Mach()->ideal_Opcode() == Op_ThreadLocal ||
check->as_Mach()->ideal_Opcode() == Op_CMoveP ||
check->as_Mach()->ideal_Opcode() == Op_CheckCastPP ||
#ifdef _LP64
(UseCompressedOops && check->as_Mach()->ideal_Opcode() == Op_CastPP) ||
(UseCompressedOops && check->as_Mach()->ideal_Opcode() == Op_DecodeN) ||
(UseCompressedClassPointers && check->as_Mach()->ideal_Opcode() == Op_DecodeNKlass) ||
#endif
check->as_Mach()->ideal_Opcode() == Op_LoadP ||
check->as_Mach()->ideal_Opcode() == Op_LoadKlass))) {
// Valid nodes
} else {
check->dump();
assert(false,"Bad base or derived pointer");
}
} else {
assert(is_derived,"Bad base pointer");
assert(check->is_Mach() && check->as_Mach()->ideal_Opcode() == Op_AddP,"Bad derived pointer");
}
k++;
assert(k < 100000,"Derived pointer checking in infinite loop");
} // End while
}
} // End of check for derived pointers
} // End of Kcheck for debug info
} // End of if found a safepoint
} // End of forall instructions in block
} // End of forall blocks
#endif
}

// Verify that graphs and base pointers are still sane.
void PhaseChaitin::verify(ResourceArea *a, bool verify_ifg) const {
#ifdef ASSERT
if (VerifyRegisterAllocator) {
_cfg.verify();
verify_base_ptrs(a);
if(verify_ifg)
_ifg->verify(this);
}
#endif
}

#endif
@ -530,32 +530,9 @@ bool MemNode::detect_ptr_independence(Node* p1, AllocateNode* a1,
// Find an arraycopy that must have set (can_see_stored_value=true) or
// could have set (can_see_stored_value=false) the value for this load
Node* LoadNode::find_previous_arraycopy(PhaseTransform* phase, Node* ld_alloc, Node*& mem, bool can_see_stored_value) const {
if (mem->is_Proj() && mem->in(0) != NULL && (mem->in(0)->Opcode() == Op_MemBarStoreStore ||
mem->in(0)->Opcode() == Op_MemBarCPUOrder)) {
if (ld_alloc != NULL) {
// Check if there is an array copy for a clone
Node* mb = mem->in(0);
ArrayCopyNode* ac = NULL;
if (mb->in(0) != NULL && mb->in(0)->is_Proj() &&
mb->in(0)->in(0) != NULL && mb->in(0)->in(0)->is_ArrayCopy()) {
ac = mb->in(0)->in(0)->as_ArrayCopy();
} else {
// Step over GC barrier when ReduceInitialCardMarks is disabled
BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
Node* control_proj_ac = bs->step_over_gc_barrier(mb->in(0));

if (control_proj_ac->is_Proj() && control_proj_ac->in(0)->is_ArrayCopy()) {
ac = control_proj_ac->in(0)->as_ArrayCopy();
}
}

if (ac != NULL && ac->is_clonebasic()) {
AllocateNode* alloc = AllocateNode::Ideal_allocation(ac->in(ArrayCopyNode::Dest), phase);
if (alloc != NULL && alloc == ld_alloc) {
return ac;
}
}
}
ArrayCopyNode* ac = find_array_copy_clone(phase, ld_alloc, mem);
if (ac != NULL) {
return ac;
} else if (mem->is_Proj() && mem->in(0) != NULL && mem->in(0)->is_ArrayCopy()) {
ArrayCopyNode* ac = mem->in(0)->as_ArrayCopy();

@ -584,6 +561,37 @@ Node* LoadNode::find_previous_arraycopy(PhaseTransform* phase, Node* ld_alloc, N
return NULL;
}

ArrayCopyNode* MemNode::find_array_copy_clone(PhaseTransform* phase, Node* ld_alloc, Node* mem) const {
if (mem->is_Proj() && mem->in(0) != NULL && (mem->in(0)->Opcode() == Op_MemBarStoreStore ||
mem->in(0)->Opcode() == Op_MemBarCPUOrder)) {
if (ld_alloc != NULL) {
// Check if there is an array copy for a clone
Node* mb = mem->in(0);
ArrayCopyNode* ac = NULL;
if (mb->in(0) != NULL && mb->in(0)->is_Proj() &&
mb->in(0)->in(0) != NULL && mb->in(0)->in(0)->is_ArrayCopy()) {
ac = mb->in(0)->in(0)->as_ArrayCopy();
} else {
// Step over GC barrier when ReduceInitialCardMarks is disabled
BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
Node* control_proj_ac = bs->step_over_gc_barrier(mb->in(0));

if (control_proj_ac->is_Proj() && control_proj_ac->in(0)->is_ArrayCopy()) {
ac = control_proj_ac->in(0)->as_ArrayCopy();
}
}

if (ac != NULL && ac->is_clonebasic()) {
AllocateNode* alloc = AllocateNode::Ideal_allocation(ac->in(ArrayCopyNode::Dest), phase);
if (alloc != NULL && alloc == ld_alloc) {
return ac;
}
}
}
}
return NULL;
}

// The logic for reordering loads and stores uses four steps:
// (a) Walk carefully past stores and initializations which we
// can prove are independent of this load.

@ -1103,7 +1111,12 @@ Node* MemNode::can_see_stored_value(Node* st, PhaseTransform* phase) const {
// (This is one of the few places where a generic PhaseTransform
// can create new nodes. Think of it as lazily manifesting
// virtually pre-existing constants.)
return phase->zerocon(memory_type());
if (ReduceBulkZeroing || find_array_copy_clone(phase, ld_alloc, in(MemNode::Memory)) == NULL) {
// If ReduceBulkZeroing is disabled, we need to check if the allocation does not belong to an
// ArrayCopyNode clone. If it does, then we cannot assume zero since the initialization is done
// by the ArrayCopyNode.
return phase->zerocon(memory_type());
}
}

// A load from an initialization barrier can match a captured store.
@ -93,6 +93,7 @@ protected:
}

virtual Node* find_previous_arraycopy(PhaseTransform* phase, Node* ld_alloc, Node*& mem, bool can_see_stored_value) const { return NULL; }
ArrayCopyNode* find_array_copy_clone(PhaseTransform* phase, Node* ld_alloc, Node* mem) const;
static bool check_if_adr_maybe_raw(Node* adr);

public:
@ -528,7 +528,8 @@ void Parse::do_lookupswitch() {
for (int j = 0; j < len; j++) {
table[3*j+0] = iter().get_int_table(2+2*j);
table[3*j+1] = iter().get_dest_table(2+2*j+1);
table[3*j+2] = profile == NULL ? 1 : profile->count_at(j);
// Handle overflow when converting from uint to jint
table[3*j+2] = (profile == NULL) ? 1 : MIN2<uint>(max_jint, profile->count_at(j));
}
qsort(table, len, 3*sizeof(table[0]), jint_cmp);
}
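The new line clamps the unsigned profile count to the signed 32-bit range before storing it in the jint switch table; without the clamp, a count above max_jint would wrap negative and corrupt the qsort order. A minimal Java sketch of the same saturation idiom (names are illustrative, not taken from the JDK sources):

public class SaturatingCast {
    // Mirror MIN2<uint>(max_jint, count): clamp an unsigned 32-bit count
    // (carried in a long) to the int range instead of letting it wrap.
    static int saturateToInt(long unsignedCount) {
        return (int) Math.min(Integer.MAX_VALUE, unsignedCount);
    }

    public static void main(String[] args) {
        long big = 0xFFFF_FFF0L;                // a uint value above max_jint
        System.out.println((int) big);          // naive cast wraps to -16
        System.out.println(saturateToInt(big)); // clamps to 2147483647
    }
}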
@ -449,12 +449,6 @@ class Arguments : AllStatic {
static ArgsRange check_memory_size(julong size, julong min_size, julong max_size);
static ArgsRange parse_memory_size(const char* s, julong* long_arg,
julong min_size, julong max_size = max_uintx);
// Parse a string for a unsigned integer. Returns true if value
// is an unsigned integer greater than or equal to the minimum
// parameter passed and returns the value in uintx_arg. Returns
// false otherwise, with uintx_arg undefined.
static bool parse_uintx(const char* value, uintx* uintx_arg,
uintx min_size);

// methods to build strings from individual args
static void build_jvm_args(const char* arg);

@ -498,6 +492,12 @@ class Arguments : AllStatic {
public:
// Parses the arguments, first phase
static jint parse(const JavaVMInitArgs* args);
// Parse a string for a unsigned integer. Returns true if value
// is an unsigned integer greater than or equal to the minimum
// parameter passed and returns the value in uintx_arg. Returns
// false otherwise, with uintx_arg undefined.
static bool parse_uintx(const char* value, uintx* uintx_arg,
uintx min_size);
// Apply ergonomics
static jint apply_ergo();
// Adjusts the arguments after the OS have adjusted the arguments
@ -908,7 +908,6 @@ void BiasedLocking::preserve_marks() {

Thread* cur = Thread::current();
ResourceMark rm(cur);
HandleMark hm(cur);

for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next(); ) {
if (thread->has_last_Java_frame()) {
@ -151,7 +151,6 @@ Mutex* CDSClassFileStream_lock = NULL;
#endif
Mutex* DumpTimeTable_lock = NULL;
Mutex* CDSLambda_lock = NULL;
Mutex* CDSAddSymbol_lock = NULL;
#endif // INCLUDE_CDS

#if INCLUDE_JVMCI

@ -347,7 +346,6 @@ void mutex_init() {
#endif
def(DumpTimeTable_lock , PaddedMutex , leaf - 1, true, _safepoint_check_never);
def(CDSLambda_lock , PaddedMutex , leaf, true, _safepoint_check_never);
def(CDSAddSymbol_lock , PaddedMutex , leaf - 1, true, _safepoint_check_never);
#endif // INCLUDE_CDS

#if INCLUDE_JVMCI

@ -130,7 +130,6 @@ extern Mutex* CDSClassFileStream_lock; // FileMapInfo::open_stream_for
#endif
extern Mutex* DumpTimeTable_lock; // SystemDictionaryShared::find_or_allocate_info_for
extern Mutex* CDSLambda_lock; // SystemDictionaryShared::get_shared_lambda_proxy_class
extern Mutex* CDSAddSymbol_lock; // SystemDictionaryShared::add_symbol
#endif // INCLUDE_CDS
#if INCLUDE_JFR
extern Mutex* JfrStacktrace_lock; // used to guard access to the JFR stacktrace table
@ -69,10 +69,17 @@ class OopHandleList : public CHeapObj<mtInternal> {
static OopHandleList* _oop_handle_list = NULL;

static void release_oop_handles() {
assert_lock_strong(Service_lock);
while (_oop_handle_list != NULL) {
OopHandleList* l = _oop_handle_list;
_oop_handle_list = l->next();
OopHandleList* list;
{
MutexLocker ml(Service_lock, Mutex::_no_safepoint_check_flag);
list = _oop_handle_list;
_oop_handle_list = NULL;
}
assert(!SafepointSynchronize::is_at_safepoint(), "cannot be called at a safepoint");

while (list != NULL) {
OopHandleList* l = list;
list = l->next();
delete l;
}
}

@ -137,6 +144,7 @@ void ServiceThread::service_thread_entry(JavaThread* jt, TRAPS) {
bool oopstorage_work = false;
bool deflate_idle_monitors = false;
JvmtiDeferredEvent jvmti_event;
bool oop_handles_to_release = false;
{
// Need state transition ThreadBlockInVM so that this thread
// will be handled by safepoint correctly when this thread is

@ -163,7 +171,7 @@ void ServiceThread::service_thread_entry(JavaThread* jt, TRAPS) {
(thread_id_table_work = ThreadIdTable::has_work()) |
(protection_domain_table_work = SystemDictionary::pd_cache_table()->has_work()) |
(oopstorage_work = OopStorage::has_cleanup_work_and_reset()) |
(_oop_handle_list != NULL) |
(oop_handles_to_release = (_oop_handle_list != NULL)) |
(deflate_idle_monitors = ObjectSynchronizer::is_async_deflation_needed())
) == 0) {
// Wait until notified that there is some work to do.

@ -177,11 +185,6 @@ void ServiceThread::service_thread_entry(JavaThread* jt, TRAPS) {
jvmti_event = _jvmti_service_queue.dequeue();
_jvmti_event = &jvmti_event;
}

// Release thread OopHandles in lock
if (_oop_handle_list != NULL) {
release_oop_handles();
}
}

if (stringtable_work) {

@ -230,6 +233,10 @@ void ServiceThread::service_thread_entry(JavaThread* jt, TRAPS) {
if (deflate_idle_monitors) {
ObjectSynchronizer::deflate_idle_monitors_using_JT();
}

if (oop_handles_to_release) {
release_oop_handles();
}
}
}
@ -1007,11 +1007,11 @@ intptr_t ObjectSynchronizer::FastHashCode(Thread* self, oop obj) {
if (obj->mark().has_bias_pattern()) {
// Handle for oop obj in case of STW safepoint
Handle hobj(self, obj);
// Relaxing assertion for bug 6320749.
assert(Universe::verify_in_progress() ||
!SafepointSynchronize::is_at_safepoint(),
"biases should not be seen by VM thread here");
BiasedLocking::revoke(hobj, JavaThread::current());
if (SafepointSynchronize::is_at_safepoint()) {
BiasedLocking::revoke_at_safepoint(hobj);
} else {
BiasedLocking::revoke(hobj, self);
}
obj = hobj();
assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
}
@ -30,7 +30,7 @@
#include "code/location.hpp"
#include "oops/oop.hpp"
#include "runtime/frame.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/handles.hpp"
#include "runtime/stackValue.hpp"
#include "runtime/stackValueCollection.hpp"
#include "utilities/growableArray.hpp"
@ -248,11 +248,13 @@ jint dump_heap(AttachOperation* op, outputStream* out) {
// Input arguments :-
// arg0: "-live" or "-all"
// arg1: Name of the dump file or NULL
// arg2: parallel thread number
static jint heap_inspection(AttachOperation* op, outputStream* out) {
bool live_objects_only = true; // default is true to retain the behavior before this change is made
outputStream* os = out; // if path not specified or path is NULL, use out
fileStream* fs = NULL;
const char* arg0 = op->arg(0);
uint parallel_thread_num = MAX2<uint>(1, (uint)os::initial_active_processor_count() * 3 / 8);
if (arg0 != NULL && (strlen(arg0) > 0)) {
if (strcmp(arg0, "-all") != 0 && strcmp(arg0, "-live") != 0) {
out->print_cr("Invalid argument to inspectheap operation: %s", arg0);

@ -262,21 +264,26 @@ static jint heap_inspection(AttachOperation* op, outputStream* out) {
}

const char* path = op->arg(1);
if (path != NULL) {
if (path[0] == '\0') {
out->print_cr("No dump file specified");
} else {
// create file
fs = new (ResourceObj::C_HEAP, mtInternal) fileStream(path);
if (fs == NULL) {
out->print_cr("Failed to allocate space for file: %s", path);
return JNI_ERR;
}
os = fs;
if (path != NULL && path[0] != '\0') {
// create file
fs = new (ResourceObj::C_HEAP, mtInternal) fileStream(path);
if (fs == NULL) {
out->print_cr("Failed to allocate space for file: %s", path);
}
os = fs;
}

VM_GC_HeapInspection heapop(os, live_objects_only /* request full gc */);
const char* num_str = op->arg(2);
if (num_str != NULL && num_str[0] != '\0') {
uintx num;
if (!Arguments::parse_uintx(num_str, &num, 0)) {
out->print_cr("Invalid parallel thread number: [%s]", num_str);
return JNI_ERR;
}
parallel_thread_num = num == 0 ? parallel_thread_num : (uint)num;
}

VM_GC_HeapInspection heapop(os, live_objects_only /* request full gc */, parallel_thread_num);
VMThread::execute(&heapop);
if (os != NULL && os != out) {
out->print_cr("Heap inspection file created: %s", path);
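The default above sizes heap-inspection parallelism at three eighths of the initial processor count with a floor of one; a non-zero arg2 overrides it, while an explicit 0 keeps the computed default. A small Java sketch of that sizing rule (illustrative names only):

public class InspectHeapThreads {
    // Mirrors MAX2<uint>(1, procs * 3 / 8) plus the override rule above:
    // an override of 0 keeps the computed default.
    static int threadCount(int availableProcessors, int override) {
        int computed = Math.max(1, availableProcessors * 3 / 8);
        return (override == 0) ? computed : override;
    }

    public static void main(String[] args) {
        System.out.println(threadCount(2, 0));  // 1: floored default
        System.out.println(threadCount(16, 0)); // 6: computed default
        System.out.println(threadCount(16, 4)); // 4: explicit override
    }
}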
@ -46,6 +46,10 @@
#define ATTRIBUTE_ALIGNED(x)
#endif

#ifndef ATTRIBUTE_FLATTEN
#define ATTRIBUTE_FLATTEN
#endif

// These are #defines to selectively turn on/off the Print(Opto)Assembly
// capabilities. Choices should be led by a tradeoff between
// code size and improved supportability.

@ -159,6 +159,7 @@ inline int wcslen(const jchar* x) { return wcslen((const wchar_t*)x); }
// Inlining support
#define NOINLINE __attribute__ ((noinline))
#define ALWAYSINLINE inline __attribute__ ((always_inline))
#define ATTRIBUTE_FLATTEN __attribute__ ((flatten))

// Alignment
//
@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@ -301,13 +301,14 @@ protected:
public:
KVHashtable(int table_size) : BasicHashtable<F>(table_size, sizeof(KVHashtableEntry)) {}

void add(K key, V value) {
V* add(K key, V value) {
unsigned int hash = HASH(key);
KVHashtableEntry* entry = new_entry(hash, key, value);
BasicHashtable<F>::add_entry(BasicHashtable<F>::hash_to_index(hash), entry);
return &(entry->_value);
}

V* lookup(K key) {
V* lookup(K key) const {
unsigned int hash = HASH(key);
int index = BasicHashtable<F>::hash_to_index(hash);
for (KVHashtableEntry* e = bucket(index); e != NULL; e = e->next()) {

@ -317,6 +318,23 @@ public:
}
return NULL;
}

int table_size() const {
return BasicHashtable<F>::table_size();
}

// ITER contains bool do_entry(K, V const&), which will be
// called for each entry in the table. If do_entry() returns false,
// the iteration is cancelled.
template<class ITER>
void iterate(ITER* iter) const {
for (int index = 0; index < table_size(); index++) {
for (KVHashtableEntry* e = bucket(index); e != NULL; e = e->next()) {
bool cont = iter->do_entry(e->_key, &e->_value);
if (!cont) { return; }
}
}
}
};
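Two small API changes here: add() now returns a pointer to the stored value, so a caller can populate or update the mapping without a second lookup, and iterate() lets the visitor cancel early by returning false from do_entry(). A rough Java analog of both ideas (a sketch only; the real type is a C++ template over a BasicHashtable):

import java.util.HashMap;
import java.util.Map;
import java.util.function.BiPredicate;

class KVTable<K, V> {
    private final Map<K, V> map = new HashMap<>();

    // Like the new add(): hand back the stored value so the caller can
    // keep working with it without a follow-up lookup.
    V add(K key, V value) {
        map.put(key, value);
        return value;
    }

    // Like iterate(): visit entries until the visitor returns false.
    void iterate(BiPredicate<K, V> visitor) {
        for (Map.Entry<K, V> e : map.entrySet()) {
            if (!visitor.test(e.getKey(), e.getValue())) {
                return;
            }
        }
    }
}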
@ -283,14 +283,6 @@ public class BigDecimal extends Number implements Comparable<BigDecimal> {
@java.io.Serial
private static final long serialVersionUID = 6108874887143696463L;

private static final ThreadLocal<StringBuilderHelper>
threadLocalStringBuilderHelper = new ThreadLocal<StringBuilderHelper>() {
@Override
protected StringBuilderHelper initialValue() {
return new StringBuilderHelper();
}
};

// Cache of common small BigDecimal values.
private static final BigDecimal ZERO_THROUGH_TEN[] = {
new BigDecimal(BigInteger.ZERO, 0, 0, 1),

@ -3798,19 +3790,17 @@ public class BigDecimal extends Number implements Comparable<BigDecimal> {
return BigDecimal.valueOf(1, this.scale(), 1);
}

// Private class to build a string representation for BigDecimal object.
// "StringBuilderHelper" is constructed as a thread local variable so it is
// thread safe. The StringBuilder field acts as a buffer to hold the temporary
// representation of BigDecimal. The cmpCharArray holds all the characters for
// the compact representation of BigDecimal (except for '-' sign' if it is
// negative) if its intCompact field is not INFLATED. It is shared by all
// calls to toString() and its variants in that particular thread.
// Private class to build a string representation for BigDecimal object. The
// StringBuilder field acts as a buffer to hold the temporary representation
// of BigDecimal. The cmpCharArray holds all the characters for the compact
// representation of BigDecimal (except for '-' sign' if it is negative) if
// its intCompact field is not INFLATED.
static class StringBuilderHelper {
final StringBuilder sb; // Placeholder for BigDecimal string
final char[] cmpCharArray; // character array to place the intCompact

StringBuilderHelper() {
sb = new StringBuilder();
sb = new StringBuilder(32);
// All non negative longs can be made to fit into 19 character array.
cmpCharArray = new char[19];
}

@ -3921,7 +3911,7 @@ public class BigDecimal extends Number implements Comparable<BigDecimal> {
StringBuilderHelper.DIGIT_ONES[lowInt]) ;
}

StringBuilderHelper sbHelper = threadLocalStringBuilderHelper.get();
StringBuilderHelper sbHelper = new StringBuilderHelper();
char[] coeff;
int offset; // offset is the starting index for coeff array
// Get the significand as an absolute value
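The change trades a ThreadLocal cache for a short-lived per-call helper: a small young allocation is cheap for modern collectors, while a static ThreadLocal pins one helper per thread for the thread's lifetime. A minimal sketch contrasting the two buffer styles (illustrative, not the BigDecimal code itself):

class BufferStyles {
    // Old style: one buffer cached per thread, retained until the thread dies.
    private static final ThreadLocal<StringBuilder> CACHED =
            ThreadLocal.withInitial(() -> new StringBuilder(32));

    static String withThreadLocal(Object value) {
        StringBuilder sb = CACHED.get();
        sb.setLength(0); // shared state must be reset on every call
        return sb.append(value).toString();
    }

    // New style: a fresh buffer per call; no reset, no per-thread retention.
    static String withFreshBuffer(Object value) {
        return new StringBuilder(32).append(value).toString();
    }
}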
@ -1,5 +1,5 @@
/*
* Copyright (c) 1995, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1995, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@ -33,67 +33,204 @@ package java.util.zip;
* @since 1.1
*/
interface ZipConstants {
/*
* Header signatures

/**
* Local file (LOC) header signature.
*/
static long LOCSIG = 0x04034b50L; // "PK\003\004"

/**
* Extra local (EXT) header signature.
*/
static long EXTSIG = 0x08074b50L; // "PK\007\008"

/**
* Central directory (CEN) header signature.
*/
static long CENSIG = 0x02014b50L; // "PK\001\002"

/**
* End of central directory (END) header signature.
*/
static long ENDSIG = 0x06054b50L; // "PK\005\006"

/*
* Header sizes in bytes (including signatures)
/**
* Local file (LOC) header size in bytes (including signature).
*/
static final int LOCHDR = 30; // LOC header size
static final int EXTHDR = 16; // EXT header size
static final int CENHDR = 46; // CEN header size
static final int ENDHDR = 22; // END header size
static final int LOCHDR = 30;

/*
* Local file (LOC) header field offsets
/**
* Extra local (EXT) header size in bytes (including signature).
*/
static final int LOCVER = 4; // version needed to extract
static final int LOCFLG = 6; // general purpose bit flag
static final int LOCHOW = 8; // compression method
static final int LOCTIM = 10; // modification time
static final int LOCCRC = 14; // uncompressed file crc-32 value
static final int LOCSIZ = 18; // compressed size
static final int LOCLEN = 22; // uncompressed size
static final int LOCNAM = 26; // filename length
static final int LOCEXT = 28; // extra field length
static final int EXTHDR = 16;

/*
* Extra local (EXT) header field offsets
/**
* Central directory (CEN) header size in bytes (including signature).
*/
static final int EXTCRC = 4; // uncompressed file crc-32 value
static final int EXTSIZ = 8; // compressed size
static final int EXTLEN = 12; // uncompressed size
static final int CENHDR = 46;

/*
* Central directory (CEN) header field offsets
/**
* End of central directory (END) header size in bytes (including signature).
*/
static final int CENVEM = 4; // version made by
static final int CENVER = 6; // version needed to extract
static final int CENFLG = 8; // encrypt, decrypt flags
static final int CENHOW = 10; // compression method
static final int CENTIM = 12; // modification time
static final int CENCRC = 16; // uncompressed file crc-32 value
static final int CENSIZ = 20; // compressed size
static final int CENLEN = 24; // uncompressed size
static final int CENNAM = 28; // filename length
static final int CENEXT = 30; // extra field length
static final int CENCOM = 32; // comment length
static final int CENDSK = 34; // disk number start
static final int CENATT = 36; // internal file attributes
static final int CENATX = 38; // external file attributes
static final int CENOFF = 42; // LOC header offset
static final int ENDHDR = 22;

/*
* End of central directory (END) header field offsets
/**
* Local file (LOC) header version needed to extract field offset.
*/
static final int ENDSUB = 8; // number of entries on this disk
static final int ENDTOT = 10; // total number of entries
static final int ENDSIZ = 12; // central directory size in bytes
static final int ENDOFF = 16; // offset of first CEN header
static final int ENDCOM = 20; // zip file comment length
static final int LOCVER = 4;

/**
* Local file (LOC) header general purpose bit flag field offset.
*/
static final int LOCFLG = 6;

/**
* Local file (LOC) header compression method field offset.
*/
static final int LOCHOW = 8;

/**
* Local file (LOC) header modification time field offset.
*/
static final int LOCTIM = 10;

/**
* Local file (LOC) header uncompressed file crc-32 value field offset.
*/
static final int LOCCRC = 14;

/**
* Local file (LOC) header compressed size field offset.
*/
static final int LOCSIZ = 18;

/**
* Local file (LOC) header uncompressed size field offset.
*/
static final int LOCLEN = 22;

/**
* Local file (LOC) header filename length field offset.
*/
static final int LOCNAM = 26;

/**
* Local file (LOC) header extra field length field offset.
*/
static final int LOCEXT = 28;

/**
* Extra local (EXT) header uncompressed file crc-32 value field offset.
*/
static final int EXTCRC = 4;

/**
* Extra local (EXT) header compressed size field offset.
*/
static final int EXTSIZ = 8;

/**
* Extra local (EXT) header uncompressed size field offset.
*/
static final int EXTLEN = 12;

/**
* Central directory (CEN) header version made by field offset.
*/
static final int CENVEM = 4;

/**
* Central directory (CEN) header version needed to extract field offset.
*/
static final int CENVER = 6;

/**
* Central directory (CEN) header encrypt, decrypt flags field offset.
*/
static final int CENFLG = 8;

/**
* Central directory (CEN) header compression method field offset.
*/
static final int CENHOW = 10;

/**
* Central directory (CEN) header modification time field offset.
*/
static final int CENTIM = 12;

/**
* Central directory (CEN) header uncompressed file crc-32 value field offset.
*/
static final int CENCRC = 16;

/**
* Central directory (CEN) header compressed size field offset.
*/
static final int CENSIZ = 20;

/**
* Central directory (CEN) header uncompressed size field offset.
*/
static final int CENLEN = 24;

/**
* Central directory (CEN) header filename length field offset.
*/
static final int CENNAM = 28;

/**
* Central directory (CEN) header extra field length field offset.
*/
static final int CENEXT = 30;

/**
* Central directory (CEN) header comment length field offset.
*/
static final int CENCOM = 32;

/**
* Central directory (CEN) header disk number start field offset.
*/
static final int CENDSK = 34;

/**
* Central directory (CEN) header internal file attributes field offset.
*/
static final int CENATT = 36;

/**
* Central directory (CEN) header external file attributes field offset.
*/
static final int CENATX = 38;

/**
* Central directory (CEN) header LOC header offset field offset.
*/
static final int CENOFF = 42;

/**
* End of central directory (END) header number of entries on this disk field offset.
*/
static final int ENDSUB = 8;

/**
* End of central directory (END) header total number of entries field offset.
*/
static final int ENDTOT = 10;

/**
* End of central directory (END) header central directory size in bytes field offset.
*/
static final int ENDSIZ = 12;

/**
* End of central directory (END) header offset for the first CEN header field offset.
*/
static final int ENDOFF = 16;

/**
* End of central directory (END) header zip file comment length field offset.
*/
static final int ENDCOM = 20;
}
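The constants above are byte offsets into the fixed-size ZIP headers. As a quick illustration of how they are meant to be used, this sketch reads two little-endian fields out of the end-of-central-directory record; it assumes the archive has no trailing comment (my simplification, not something the class guarantees) and omits real error handling:

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.file.Files;
import java.nio.file.Path;

public class EndHeaderPeek {
    static final long ENDSIG = 0x06054b50L; // "PK\005\006"
    static final int ENDHDR = 22;           // END header size in bytes
    static final int ENDTOT = 10;           // offset: total number of entries
    static final int ENDOFF = 16;           // offset: first CEN header

    public static void main(String[] args) throws IOException {
        byte[] zip = Files.readAllBytes(Path.of(args[0]));
        ByteBuffer buf = ByteBuffer.wrap(zip).order(ByteOrder.LITTLE_ENDIAN);
        int base = zip.length - ENDHDR; // END record ends the file when there is no comment
        if ((buf.getInt(base) & 0xFFFFFFFFL) != ENDSIG) {
            throw new IOException("END header signature not found");
        }
        int entries = buf.getShort(base + ENDTOT) & 0xFFFF;       // unsigned 16-bit
        long cenOffset = buf.getInt(base + ENDOFF) & 0xFFFFFFFFL; // unsigned 32-bit
        System.out.println(entries + " entries, central directory at " + cenOffset);
    }
}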
@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@ -324,8 +324,7 @@ class SocketChannelImpl
/**
* Marks the beginning of a read operation that might block.
*
* @throws ClosedChannelException if the channel is closed
* @throws NotYetConnectedException if the channel is not yet connected
* @throws ClosedChannelException if blocking and the channel is closed
*/
private void beginRead(boolean blocking) throws ClosedChannelException {
if (blocking) {

@ -333,12 +332,10 @@ class SocketChannelImpl
begin();

synchronized (stateLock) {
ensureOpenAndConnected();
ensureOpen();
// record thread so it can be signalled if needed
readerThread = NativeThread.current();
}
} else {
ensureOpenAndConnected();
}
}

@ -373,6 +370,7 @@ class SocketChannelImpl

readLock.lock();
try {
ensureOpenAndConnected();
boolean blocking = isBlocking();
int n = 0;
try {

@ -415,6 +413,7 @@ class SocketChannelImpl

readLock.lock();
try {
ensureOpenAndConnected();
boolean blocking = isBlocking();
long n = 0;
try {

@ -452,8 +451,7 @@ class SocketChannelImpl
/**
* Marks the beginning of a write operation that might block.
*
* @throws ClosedChannelException if the channel is closed or output shutdown
* @throws NotYetConnectedException if the channel is not yet connected
* @throws ClosedChannelException if blocking and the channel is closed
*/
private void beginWrite(boolean blocking) throws ClosedChannelException {
if (blocking) {

@ -461,14 +459,12 @@ class SocketChannelImpl
begin();

synchronized (stateLock) {
ensureOpenAndConnected();
ensureOpen();
if (isOutputClosed)
throw new ClosedChannelException();
// record thread so it can be signalled if needed
writerThread = NativeThread.current();
}
} else {
ensureOpenAndConnected();
}
}

@ -496,9 +492,9 @@ class SocketChannelImpl
@Override
public int write(ByteBuffer buf) throws IOException {
Objects.requireNonNull(buf);

writeLock.lock();
try {
ensureOpenAndConnected();
boolean blocking = isBlocking();
int n = 0;
try {

@ -529,6 +525,7 @@ class SocketChannelImpl

writeLock.lock();
try {
ensureOpenAndConnected();
boolean blocking = isBlocking();
long n = 0;
try {

@ -557,6 +554,7 @@ class SocketChannelImpl
int sendOutOfBandData(byte b) throws IOException {
writeLock.lock();
try {
ensureOpenAndConnected();
boolean blocking = isBlocking();
int n = 0;
try {

@ -1177,6 +1175,8 @@ class SocketChannelImpl

readLock.lock();
try {
ensureOpenAndConnected();

// check that channel is configured blocking
if (!isBlocking())
throw new IllegalBlockingModeException();

@ -1254,6 +1254,8 @@ class SocketChannelImpl

writeLock.lock();
try {
ensureOpenAndConnected();

// check that channel is configured blocking
if (!isBlocking())
throw new IllegalBlockingModeException();

@ -1261,8 +1263,8 @@ class SocketChannelImpl
// loop until all bytes have been written
int pos = off;
int end = off + len;
beginWrite(true);
try {
beginWrite(true);
while (pos < end && isOpen()) {
int size = end - pos;
int n = tryWrite(b, pos, size);
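The last hunk moves beginWrite(true) inside the try block, and the earlier hunks hoist the connectivity check into a single ensureOpenAndConnected() call under the I/O lock. Placing begin() inside the try whose finally balances it is the pairing idiom documented for interruptible channels; schematically (a sketch, not the JDK code):

abstract class PairedIo {
    abstract void begin();                 // register the blocking operation
    abstract void end();                   // must balance every begin()
    abstract boolean hasWork();
    abstract void doOneStep() throws java.io.IOException;

    void blockingOperation() throws java.io.IOException {
        try {
            begin();          // inside the try: even if the body throws,
            while (hasWork()) {
                doOneStep();  // ...the finally below still balances it
            }
        } finally {
            end();
        }
    }
}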
@ -30,26 +30,42 @@ import java.io.InputStream;
import java.io.Writer;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.nio.file.FileSystems;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.PathMatcher;
import java.text.MessageFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.UUID;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.ParserConfigurationException;
import javax.xml.xpath.XPath;
import javax.xml.xpath.XPathConstants;
import javax.xml.xpath.XPathExpressionException;
import javax.xml.xpath.XPathFactory;

import static jdk.incubator.jpackage.internal.OverridableResource.createResource;
import static jdk.incubator.jpackage.internal.StandardBundlerParam.APP_NAME;
import static jdk.incubator.jpackage.internal.StandardBundlerParam.CONFIG_ROOT;
import static jdk.incubator.jpackage.internal.StandardBundlerParam.DESCRIPTION;
import static jdk.incubator.jpackage.internal.StandardBundlerParam.LICENSE_FILE;
import static jdk.incubator.jpackage.internal.StandardBundlerParam.RESOURCE_DIR;
import static jdk.incubator.jpackage.internal.StandardBundlerParam.TEMP_ROOT;
import static jdk.incubator.jpackage.internal.StandardBundlerParam.VENDOR;
import static jdk.incubator.jpackage.internal.StandardBundlerParam.VERSION;
import org.w3c.dom.Document;
import org.w3c.dom.NodeList;
import org.xml.sax.SAXException;

/**
* WinMsiBundler

@ -416,7 +432,7 @@ public class WinMsiBundler extends AbstractBundler {
}
}

// Copy l10n files.
// Copy standard l10n files.
for (String loc : Arrays.asList("en", "ja", "zh_CN")) {
String fname = "MsiInstallerStrings_" + loc + ".wxl";
try (InputStream is = OverridableResource.readDefault(fname)) {

@ -470,9 +486,23 @@ public class WinMsiBundler extends AbstractBundler {
wixPipeline.addLightOptions("-ext", "WixUIExtension");
}

wixPipeline.addLightOptions("-loc",
CONFIG_ROOT.fetchFrom(params).resolve(I18N.getString(
"resource.wxl-file-name")).toAbsolutePath().toString());
final Path primaryWxlFile = CONFIG_ROOT.fetchFrom(params).resolve(
I18N.getString("resource.wxl-file-name")).toAbsolutePath();

wixPipeline.addLightOptions("-loc", primaryWxlFile.toString());

List<String> cultures = new ArrayList<>();
for (var wxl : getCustomWxlFiles(params)) {
wixPipeline.addLightOptions("-loc", wxl.toAbsolutePath().toString());
cultures.add(getCultureFromWxlFile(wxl));
}
cultures.add(getCultureFromWxlFile(primaryWxlFile));

// Build ordered list of unique cultures.
Set<String> uniqueCultures = new LinkedHashSet<>();
uniqueCultures.addAll(cultures);
wixPipeline.addLightOptions(uniqueCultures.stream().collect(
Collectors.joining(";", "-cultures:", "")));

// Only needed if we using CA dll, so Wix can find it
if (enableInstalldirUI) {

@ -485,6 +515,52 @@ public class WinMsiBundler extends AbstractBundler {
return msiOut;
}

private static List<Path> getCustomWxlFiles(Map<String, ? super Object> params)
throws IOException {
Path resourceDir = RESOURCE_DIR.fetchFrom(params);
if (resourceDir == null) {
return Collections.emptyList();
}

final String glob = "glob:**/*.wxl";
final PathMatcher pathMatcher = FileSystems.getDefault().getPathMatcher(
glob);

try (var walk = Files.walk(resourceDir, 1)) {
return walk
.filter(Files::isReadable)
.filter(pathMatcher::matches)
.sorted((a, b) -> a.getFileName().toString().compareToIgnoreCase(b.getFileName().toString()))
.collect(Collectors.toList());
}
}

private static String getCultureFromWxlFile(Path wxlPath) throws IOException {
try {
DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
factory.setNamespaceAware(false);
DocumentBuilder builder = factory.newDocumentBuilder();

Document doc = builder.parse(wxlPath.toFile());

XPath xPath = XPathFactory.newInstance().newXPath();
NodeList nodes = (NodeList) xPath.evaluate(
"//WixLocalization/@Culture", doc,
XPathConstants.NODESET);
if (nodes.getLength() != 1) {
throw new IOException(MessageFormat.format(I18N.getString(
"error.extract-culture-from-wix-l10n-file"),
wxlPath.toAbsolutePath()));
}

return nodes.item(0).getNodeValue();
} catch (XPathExpressionException | ParserConfigurationException
| SAXException ex) {
throw new IOException(MessageFormat.format(I18N.getString(
"error.read-wix-l10n-file"), wxlPath.toAbsolutePath()), ex);
}
}

private static void ensureByMutationFileIsRTF(Path f) {
if (f == null || !Files.isRegularFile(f)) return;
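WiX's light linker takes its culture list as one semicolon-separated -cultures: option, ordered and without duplicates; the code above gets that by funneling the collected cultures through a LinkedHashSet before joining. The joining step in isolation:

import java.util.LinkedHashSet;
import java.util.List;
import java.util.stream.Collectors;

public class CulturesOption {
    public static void main(String[] args) {
        List<String> cultures = List.of("de-de", "en-us", "de-de", "ja-jp");
        // LinkedHashSet keeps first-seen order while dropping duplicates.
        String option = new LinkedHashSet<>(cultures).stream()
                .collect(Collectors.joining(";", "-cultures:", ""));
        System.out.println(option); // -cultures:de-de;en-us;ja-jp
    }
}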
@ -48,6 +48,8 @@ error.msi-product-version-minor-out-of-range=Minor version must be in the range
error.version-swap=Failed to update version information for {0}
error.invalid-envvar=Invalid value of {0} environment variable
error.lock-resource=Failed to lock: {0}
error.read-wix-l10n-file=Failed to parse {0} file
error.extract-culture-from-wix-l10n-file=Failed to read value of culture from {0} file

message.icon-not-ico=The specified icon "{0}" is not an ICO file and will not be used. The default icon will be used in it's place.
message.potential.windows.defender.issue=Warning: Windows Defender may prevent jpackage from functioning. If there is an issue, it can be addressed by either disabling realtime monitoring, or adding an exclusion for the directory "{0}".

@ -48,6 +48,8 @@ error.msi-product-version-minor-out-of-range=\u30DE\u30A4\u30CA\u30FC\u30FB\u30D
error.version-swap={0}\u306E\u30D0\u30FC\u30B8\u30E7\u30F3\u60C5\u5831\u306E\u66F4\u65B0\u306B\u5931\u6557\u3057\u307E\u3057\u305F
error.invalid-envvar={0}\u74B0\u5883\u5909\u6570\u306E\u5024\u304C\u7121\u52B9\u3067\u3059
error.lock-resource=\u30ED\u30C3\u30AF\u306B\u5931\u6557\u3057\u307E\u3057\u305F: {0}
error.read-wix-l10n-file=Failed to parse {0} file
error.extract-culture-from-wix-l10n-file=Failed to read value of culture from {0} file

message.icon-not-ico=\u6307\u5B9A\u3057\u305F\u30A2\u30A4\u30B3\u30F3"{0}"\u306FICO\u30D5\u30A1\u30A4\u30EB\u3067\u306F\u306A\u304F\u3001\u4F7F\u7528\u3055\u308C\u307E\u305B\u3093\u3002\u30C7\u30D5\u30A9\u30EB\u30C8\u30FB\u30A2\u30A4\u30B3\u30F3\u304C\u305D\u306E\u4F4D\u7F6E\u306B\u4F7F\u7528\u3055\u308C\u307E\u3059\u3002
message.potential.windows.defender.issue=\u8B66\u544A: Windows Defender\u304C\u539F\u56E0\u3067jpackage\u304C\u6A5F\u80FD\u3057\u306A\u3044\u3053\u3068\u304C\u3042\u308A\u307E\u3059\u3002\u554F\u984C\u304C\u767A\u751F\u3057\u305F\u5834\u5408\u306F\u3001\u30EA\u30A2\u30EB\u30BF\u30A4\u30E0\u30FB\u30E2\u30CB\u30BF\u30EA\u30F3\u30B0\u3092\u7121\u52B9\u306B\u3059\u308B\u304B\u3001\u30C7\u30A3\u30EC\u30AF\u30C8\u30EA"{0}"\u306E\u9664\u5916\u3092\u8FFD\u52A0\u3059\u308B\u3053\u3068\u306B\u3088\u308A\u3001\u554F\u984C\u306B\u5BFE\u51E6\u3067\u304D\u307E\u3059\u3002

@ -48,6 +48,8 @@ error.msi-product-version-minor-out-of-range=\u6B21\u7248\u672C\u5FC5\u987B\u4F4
error.version-swap=\u65E0\u6CD5\u66F4\u65B0 {0} \u7684\u7248\u672C\u4FE1\u606F
error.invalid-envvar={0} \u73AF\u5883\u53D8\u91CF\u7684\u503C\u65E0\u6548
error.lock-resource=\u65E0\u6CD5\u9501\u5B9A\uFF1A{0}
error.read-wix-l10n-file=Failed to parse {0} file
error.extract-culture-from-wix-l10n-file=Failed to read value of culture from {0} file

message.icon-not-ico=\u6307\u5B9A\u7684\u56FE\u6807 "{0}" \u4E0D\u662F ICO \u6587\u4EF6, \u4E0D\u4F1A\u4F7F\u7528\u3002\u5C06\u4F7F\u7528\u9ED8\u8BA4\u56FE\u6807\u4EE3\u66FF\u3002
message.potential.windows.defender.issue=\u8B66\u544A\uFF1AWindows Defender \u53EF\u80FD\u4F1A\u963B\u6B62 jpackage \u6B63\u5E38\u5DE5\u4F5C\u3002\u5982\u679C\u5B58\u5728\u95EE\u9898\uFF0C\u53EF\u4EE5\u901A\u8FC7\u7981\u7528\u5B9E\u65F6\u76D1\u89C6\u6216\u8005\u4E3A\u76EE\u5F55 "{0}" \u6DFB\u52A0\u6392\u9664\u9879\u6765\u89E3\u51B3\u3002
@ -169,6 +169,7 @@ public class JMap {
UnsupportedEncodingException {
String liveopt = "-all";
String filename = null;
String parallel = null;
String subopts[] = options.split(",");

for (int i = 0; i < subopts.length; i++) {

@ -180,9 +181,17 @@ public class JMap {
} else if (subopt.startsWith("file=")) {
filename = parseFileName(subopt);
if (filename == null) {
usage(1); // invalid options or no filename
System.err.println("Fail: invalid option or no file name '" + subopt +"'");
usage(1);
}
} else if (subopt.startsWith("parallel=")) {
parallel = subopt.substring("parallel=".length());
if (parallel == null) {
System.err.println("Fail: no number provided in option: '" + subopt + "'");
usage(1);
}
} else {
System.err.println("Fail: invalid option: '" + subopt + "'");
usage(1);
}
}

@ -190,7 +199,7 @@ public class JMap {
System.out.flush();

// inspectHeap is not the same as jcmd GC.class_histogram
executeCommandForPid(pid, "inspectheap", liveopt, filename);
executeCommandForPid(pid, "inspectheap", liveopt, filename, parallel);
}

private static void dump(String pid, String options)

@ -207,11 +216,17 @@ public class JMap {
liveopt = "-live";
} else if (subopt.startsWith("file=")) {
filename = parseFileName(subopt);
} else if (subopt.equals("format=b")) {
// ignore format (not needed at this time)
} else {
System.err.println("Fail: invalid option: '" + subopt + "'");
usage(1);
}
}

if (filename == null) {
usage(1); // invalid options or no filename
System.err.println("Fail: invalid option or no file name");
usage(1);
}

// dumpHeap is not the same as jcmd GC.heap_dump

@ -287,6 +302,10 @@ public class JMap {
System.err.println(" live count only live objects");
System.err.println(" all count all objects in the heap (default if one of \"live\" or \"all\" is not specified)");
System.err.println(" file=<file> dump data to <file>");
System.err.println(" parallel=<number> parallel threads number for heap iteration:");
System.err.println(" parallel=0 default behavior, use predefined number of threads");
System.err.println(" parallel=1 disable parallel heap iteration");
System.err.println(" parallel=<N> use N threads for parallel heap iteration");
System.err.println("");
System.err.println(" Example: jmap -histo:live,file=/tmp/histo.data <pid>");
System.exit(exit);
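Per the help text, parallel=0 keeps the tool's predefined thread count, parallel=1 disables parallel iteration, and parallel=N requests N threads. A tiny sketch of that decoding rule (hypothetical helper, not the JMap source):

public class ParallelOption {
    // Decode the -histo "parallel=" suboption described above:
    // 0 keeps the default, 1 means serial, N means N threads.
    static int decode(String value, int defaultThreads) {
        int n = Integer.parseInt(value);
        if (n < 0) throw new IllegalArgumentException("negative: " + value);
        return (n == 0) ? defaultThreads : n;
    }

    public static void main(String[] args) {
        System.out.println(decode("0", 6)); // 6: keep default
        System.out.println(decode("1", 6)); // 1: disable parallelism
        System.out.println(decode("4", 6)); // 4: explicit thread count
    }
}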
@ -326,6 +326,7 @@ hotspot_appcds_dynamic = \
-runtime/cds/appcds/javaldr/ArrayTest.java \
-runtime/cds/appcds/javaldr/GCSharedStringsDuringDump.java \
-runtime/cds/appcds/javaldr/HumongousDuringDump.java \
-runtime/cds/appcds/javaldr/LockDuringDump.java \
-runtime/cds/appcds/sharedStrings \
-runtime/cds/appcds/ArchiveRelocationTest.java \
-runtime/cds/appcds/DumpClassList.java \
@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@ -26,7 +26,7 @@
* @requires vm.aot
* @library / /test/lib /testlibrary
* @compile IllegalClass.jasm
* @run driver compiler.aot.cli.jaotc.IgnoreErrorsTest
* @run driver/timeout=360 compiler.aot.cli.jaotc.IgnoreErrorsTest
*/

package compiler.aot.cli.jaotc;
test/hotspot/jtreg/compiler/arraycopy/TestCloneAccess.java (new file, 101 lines)
@ -0,0 +1,101 @@
/*
 * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

/*
 * @test
 * @bug 8248791
 * @summary Test cloning with more than 8 (=ArrayCopyLoadStoreMaxElem) fields where loads are wrongly replaced by zero.
 * @run main/othervm -XX:-ReduceBulkZeroing
 *                   -XX:CompileCommand=dontinline,compiler.arraycopy.TestCloneAccess::*
 *                   compiler.arraycopy.TestCloneAccess
 * @run main/othervm -XX:-ReduceBulkZeroing -XX:-ReduceInitialCardMarks
 *                   -XX:CompileCommand=dontinline,compiler.arraycopy.TestCloneAccess::*
 *                   compiler.arraycopy.TestCloneAccess
 */
package compiler.arraycopy;

public class TestCloneAccess {
    static int test(E src) throws CloneNotSupportedException {
        // ArrayCopyNode for this clone is not inlined since there are more than 8 (=ArrayCopyLoadStoreMaxElem) fields
        src.i1 = 3;
        E dest = (E)src.clone();
        dontInline(dest.i1, dest.i2);

        // Both loads are wrongly optimized and replaced by a constant zero. LoadNode::Value() tries to find out if a load
        // is done from a freshly-allocated object. If that is the case, the load can be replaced by the default value zero.
        // However, in this case, the Allocation node belongs to an ArrayCopyNode which is responsible for initializing 'dest'.
        // If -XX:-ReduceBulkZeroing is set, the InitializationNode of the allocation does not bail out of this optimization
        // which results in a replacement of both loads by zero. This is addressed by this fix. If -XX:+ReduceBulkZeroing is
        // set, then we already bail out and perform the load correctly.
        return dest.i1 + dest.i2;
    }

    public static void main(String[] args) throws Exception {
        E e = new E();
        e.i2 = 4;
        int res = 0;
        for (int i = 0; i < 20000; i++) {
            res = test(e);
            if (res != 7 || e.i1 != 3 || e.i2 != 4) {
                throw new RuntimeException("Wrong result! Expected: res = 7, e.i1 = 3, e.i2 = 4 "
                                           + "but got: res = " + res + ", e.i1 = " + e.i1 + ", e.i2 = " + e.i2);
            }
        }
    }

    // Don't inline this method
    public static void dontInline(int i1, int i2) {
    }
}

class E implements Cloneable {
    /*
     * Need more than 8 (=ArrayCopyLoadStoreMaxElem) fields
     */
    int i1;
    int i2;
    int i3;
    int i4;
    int i5;
    int i6;
    int i7;
    int i8;
    int i9;

    E() {
        i1 = 0;
        i2 = 1;
        i3 = 2;
        i4 = 3;
        i5 = 4;
        i6 = 5;
        i7 = 6;
        i8 = 7;
        i9 = 8;
    }

    public Object clone() throws CloneNotSupportedException {
        return super.clone();
    }
}
@ -0,0 +1,62 @@
/*
 * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

/**
 * @test
 * @bug 8251458
 * @summary Test int range overflow of MultiBranchData counter.
 * @run main/othervm -XX:CompileCommand=dontinline,compiler.profiling.TestMultiBranchDataOverflow::test
 *                   -Xbatch -XX:Tier4BackEdgeThreshold=2147483647
 *                   compiler.profiling.TestMultiBranchDataOverflow
 */

package compiler.profiling;

public class TestMultiBranchDataOverflow {

    public static int test(int val, long max) {
        int res = 0;
        for (long l = 0; l < max; ++l) {
            switch (val) {
                case 0:
                    return 0;
                case 42:
                    res++;
                    break;
            }
        }
        return res;
    }

    public static void main(String[] args) {
        // Warmup to generate profile information that has a MultiBranchData
        // counter > Integer.MAX_VALUE for the val == 42 lookupswitch branch.
        long max = Integer.MAX_VALUE + 100_000L;
        test(42, max);

        // Trigger C2 compilation
        for (int i = 0; i < 10_000; ++i) {
            test(42, 1);
        }
    }
}
@ -0,0 +1,101 @@
/*
 * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

/*
 * @test
 * @bug 8249603
 * @summary The C1 register allocator uses a register hint interval i as the spill location for an interval j that
 *          overlaps with one of i's split children sharing the same spill location, which causes verification to fail.
 *
 * @run main/othervm -Xcomp -XX:CompileCommand=compileonly,compiler.regalloc.TestC1OverlappingRegisterHint::*
 *                   compiler.regalloc.TestC1OverlappingRegisterHint
 */
package compiler.regalloc;

public class TestC1OverlappingRegisterHint {

    public static int iFldStatic = 10;
    public int iFld = 11;

    public int test() {
        int a = 1;
        int b = 2;
        int c = 3;
        int v = 4;
        int w = 5;
        int x = 6;
        int y = 7;
        int z = 8;
        int iArr[] = new int[400];

        double d = 1.5;

        int k = 0;
        for (a = 9; a < 283; a += 2) {
            for (int i = 8; i < 183; i++) {
            }
        }

        for (int i = 12; i < 283; i++) {
            iFldStatic += i;
            for (int j = 1; 93 > j; j += 2) {
                x += (j - z);
                c -= iFld;
                k = 3;
                while ((k -= 2) > 0) {
                }
                switch ((i % 8) + 52) {
                    case 52:
                        iArr[8] = 5;
                        for (int i20 = 1; i20 < 3; ++i20) {
                            x *= (int)d;
                            w += 5;
                        }
                        break;
                    case 53:
                    case 55:
                        v *= iFldStatic;
                        break;
                    case 56:
                    case 57:
                        try {
                            iArr[5] = a;
                            v = (a / b);
                        } catch (ArithmeticException a_e) {}
                        break;
                    default:
                        iFldStatic += iFldStatic;
                }
            }
        }
        return y + k;
    }

    public static void main(String[] strArr) {
        TestC1OverlappingRegisterHint _instance = new TestC1OverlappingRegisterHint();
        for (int i = 0; i < 10; i++) {
            _instance.test();
        }
    }
}
@ -26,7 +26,7 @@
 * @test
 * @bug 8193518 8249608
 * @summary C2: Vector registers are sometimes corrupted at safepoint
 * @run main/othervm -XX:-BackgroundCompilation -XX:+UseCountedLoopSafepoints -XX:LoopStripMiningIter=1000 TestVectorsNotSavedAtSafepoint test1
 * @run main/othervm -XX:-BackgroundCompilation -XX:+UseCountedLoopSafepoints -XX:LoopStripMiningIter=2 -XX:-TieredCompilation TestVectorsNotSavedAtSafepoint test1
 * @run main/othervm -XX:-BackgroundCompilation TestVectorsNotSavedAtSafepoint test2
 */

@ -58,10 +58,13 @@ public class TestVectorsNotSavedAtSafepoint {
    static class GarbageProducerThread extends Thread {
        public void run() {
            for(;;) {
                // Produce some garbage and then let the GC do its work which will
                // corrupt vector registers if they are not saved at safepoints.
                Object[] arrays = new Object[1024];
                for (int i = 0; i < arrays.length; i++) {
                    arrays[i] = new int[1024];
                }
                System.gc();
            }
        }
    }
@ -72,23 +75,20 @@ public class TestVectorsNotSavedAtSafepoint {
        garbage_producer.start();

        if (args[0].equals("test1")) {
            byte[] barray = new byte[10];
            long[] larray1 = new long[1000];
            long[] larray2 = new long[100_000_000];
            for (int i = 0; i < 20_000; i++) {
                test1(barray, barray, barray, larray1, -1);
            }
            for (int i = 0; i < 100; i++) {
                test1(barray, barray, barray, larray2, -1);
                if (larray2[larray2.length-1] != -1) {
                    System.out.println("Iter " + i + " Failed with " + Long.toHexString(larray2[larray2.length-1]));
                    throw new RuntimeException("Test1 failed");
            byte[] bArr = new byte[10];
            long[] lArr = new long[1000];
            for (int i = 0; i < 10_000; ++i) {
                test1(bArr, bArr, bArr, lArr, -1);
                for (int j = 0; j < lArr.length; ++j) {
                    if (bArr[j % 10] != 0 || lArr[j] != -1) {
                        throw new RuntimeException("Test1 failed at iteration " + i + ": bArr[" + (j % 10) + "] = " + bArr[j % 10] + ", lArr[" + j + "] = " + lArr[j]);
                    }
                }
            }
        } else {
            int iArr[] = new int[100];
            long lArr[] = new long[100];
            for (int i = 0; i < 600_000; ++i) {
            for (int i = 0; i < 10_000; ++i) {
                test2(iArr, lArr);
                for (int j = 0; j < lArr.length; ++j) {
                    if (iArr[j] != 1 || lArr[j] != 1) {
@ -25,10 +25,11 @@ package gc;

/*
 * @test TestFullGCALot
 * @bug 4187687 8187819
 * @bug 4187687 8187819 8251118
 * @summary Ensure no access violation when using FullGCALot
 * @requires vm.debug
 * @run main/othervm -XX:NewSize=10m -XX:+FullGCALot -XX:FullGCALotInterval=120 gc.TestFullGCALot
 * @run main/othervm -XX:NewSize=10m -XX:+FullGCALot -XX:FullGCALotInterval=120 -XX:+UseBiasedLocking gc.TestFullGCALot
 */

public class TestFullGCALot {
@ -0,0 +1,84 @@
/*
 * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

/*
 * @test
 * @bug 8249276
 * @summary When dumping the CDS archive, try to lock some objects. These objects should be archived
 *          without the locking bits in the markWord.
 * @library /test/lib /test/hotspot/jtreg/runtime/cds/appcds
 * @requires vm.cds
 * @requires vm.flavor != "minimal"
 * @modules java.instrument
 * @run driver LockDuringDump
 */

import jdk.test.lib.process.OutputAnalyzer;

public class LockDuringDump {
    public static String appClasses[] = {
        LockDuringDumpApp.class.getName(),
    };
    public static String agentClasses[] = {
        LockDuringDumpAgent.class.getName(),
    };

    private static final String MANIFEST =
        "Manifest-Version: 1.0\nPremain-Class: LockDuringDumpAgent\n";

    public static void main(String[] args) throws Throwable {
        String agentJar =
            ClassFileInstaller.writeJar("LockDuringDumpAgent.jar",
                                        ClassFileInstaller.Manifest.fromString(MANIFEST),
                                        agentClasses);

        String appJar =
            ClassFileInstaller.writeJar("LockDuringDumpApp.jar", appClasses);

        for (int i = 0; i < 3; i++) {
            // i = 0 -- dump without agent
            // i = 1 -- dump with agent = disable BiasedLocking
            // i = 2 -- dump with agent = enable BiasedLocking

            String agentArg = (i == 0) ? "-showversion" : "-javaagent:" + agentJar;
            String agentArg2 = (i == 0) ? "-showversion" : "-XX:+AllowArchivingWithJavaAgent";
            String biasedLock = (i != 2) ? "-showversion" : "-XX:+UseBiasedLocking";

            OutputAnalyzer out =
                TestCommon.testDump(appJar, TestCommon.list(LockDuringDumpApp.class.getName()),
                                    "-XX:+UnlockDiagnosticVMOptions",
                                    agentArg, agentArg2, biasedLock);
            if (i != 0) {
                out.shouldContain("Let's hold the lock on the literal string");
            }

            TestCommon.run(
                "-cp", appJar,
                "-XX:+UnlockDiagnosticVMOptions", agentArg2, biasedLock,
                LockDuringDumpApp.class.getName())
                .assertNormalExit("I am able to lock the literal string");
        }
    }
}
@ -0,0 +1,85 @@
/*
 * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

import java.lang.instrument.Instrumentation;

public class LockDuringDumpAgent implements Runnable {
    static boolean threadStarted = false;
    static Object lock = new Object();

    // The following literal string will be stored into the VM's interned string table when this
    // class (or the LockDuringDumpApp class) is loaded during -Xshare:dump. As a result it will be
    // stored in the CDS archived heap (all strings in the dump-time interned string table are archived).
    //
    // We try to make sure this string is locked while the archived heap is dumped. CDS should
    // clear the lock states in this string's object header. See JDK-8249276.
    //
    // At run time, when LockDuringDumpApp loads this literal string (from the shared string table)
    // it should be able to lock it without problems.
    static String LITERAL = "@@LockDuringDump@@LITERAL"; // must be the same as in LockDuringDumpApp

    public static void premain(String agentArg, Instrumentation instrumentation) {
        System.out.println("inside LockDuringDumpAgent: " + LockDuringDumpAgent.class.getClassLoader());

        Thread t = new Thread(new LockDuringDumpAgent());
        t.setDaemon(true);
        t.start();

        waitForThreadStart();
    }

    static void waitForThreadStart() {
        try {
            synchronized (lock) {
                while (!threadStarted) {
                    lock.wait();
                }
                System.out.println("Thread has started");
            }
        } catch (Throwable t) {
            System.err.println("Unexpected: " + t);
            throw new RuntimeException(t);
        }
    }

    public void run() {
        try {
            synchronized (LITERAL) {
                System.out.println("Let's hold the lock on the literal string \"" + LITERAL + "\" forever .....");
                synchronized (lock) {
                    threadStarted = true;
                    lock.notifyAll();
                }
                //if (false) {
                while (true) {
                    Thread.sleep(1);
                }
                //}
            }
        } catch (Throwable t) {
            System.err.println("Unexpected: " + t);
            throw new RuntimeException(t);
        }
    }
}
@ -0,0 +1,33 @@
/*
 * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

public class LockDuringDumpApp {
    static String LITERAL = "@@LockDuringDump@@LITERAL"; // must be the same as in LockDuringDumpAgent

    public static void main(String args[]) {
        synchronized (LITERAL) { // See comments in LockDuringDumpAgent.java
            System.out.println("I am able to lock the literal string \"" + LITERAL + "\"");
        }
    }
}
@ -45,6 +45,7 @@ SRC_FILES = $(shell find $(SRC_DIR) -name '*.java')
LIB_FILES = $(shell find $(TESTLIBRARY_DIR)/jdk/test/lib/ \
    $(TESTLIBRARY_DIR)/jdk/test/lib/process \
    $(TESTLIBRARY_DIR)/jdk/test/lib/util \
    $(TESTLIBRARY_DIR)/jtreg \
    -maxdepth 1 -name '*.java')
WB_SRC_FILES = $(shell find $(TESTLIBRARY_DIR)/sun/hotspot -name '*.java')
EXPORTS=--add-exports java.base/jdk.internal.jimage=ALL-UNNAMED \
@ -111,6 +111,13 @@ class ProviderTest implements Callable<Boolean> {
            env.put(Context.PROVIDER_URL, url);
        }

        // Set the JNDI LDAP connect timeout property. It helps to prevent the
        // initial bind operation from blocking in case of a local process
        // listening on the port specified in the URL. With the property set,
        // the bind operation will fail with a timeout exception, and then it
        // could be retried with another port number.
        env.put("com.sun.jndi.ldap.connect.timeout", "1000");

        try {
            ctx = new InitialDirContext(env);
            SearchControls scl = new SearchControls();
@ -119,8 +126,13 @@ class ProviderTest implements Callable<Boolean> {
                "ou=People,o=Test", "(objectClass=*)", scl);
            throw new RuntimeException("Search should not complete");
        } catch (NamingException e) {
            e.printStackTrace();
            passed = e.toString().contains(expected);
            System.err.println((passed ? "Expected" : "Unexpected") +
                " NamingException observed: " + e.toString());
            // Print stack trace only for unexpected exceptions
            if (!passed) {
                e.printStackTrace();
            }
        } finally {
            shutItDown(ctx);
        }
@ -243,7 +255,8 @@ public class LdapDnsProviderTest {
        // Construct text expected to be present in Exception message
        String expected = "localhost:" + port;

        System.err.printf("Iteration %d: Testing: %s, %s%n", attempt, url, expected);
        System.err.printf("Iteration %d: Testing: url='%s', expected content='%s'%n",
            attempt, url, expected);

        FutureTask<Boolean> future = new FutureTask<>(
            new ProviderTest(url, expected));
@ -278,7 +291,7 @@ public class LdapDnsProviderTest {
            new ProviderTest(url, expected));
        new Thread(future).start();

        System.err.println("Testing: " + url + ", " + expected);
        System.err.printf("Testing: url='%s', expected content='%s'%n", url, expected);
        while (!future.isDone()) {
            try {
                if (!future.get()) {
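A minimal standalone sketch (not part of the patch) of the timeout behavior the comment above describes; the class name, URL, and port are placeholders. With com.sun.jndi.ldap.connect.timeout set, an initial bind against a port where an unrelated local process happens to listen fails with a NamingException after roughly the configured number of milliseconds instead of blocking indefinitely.

import java.util.Hashtable;
import javax.naming.Context;
import javax.naming.NamingException;
import javax.naming.directory.DirContext;
import javax.naming.directory.InitialDirContext;

public class ConnectTimeoutSketch {
    public static void main(String[] args) {
        Hashtable<String, String> env = new Hashtable<>();
        env.put(Context.INITIAL_CONTEXT_FACTORY, "com.sun.jndi.ldap.LdapCtxFactory");
        env.put(Context.PROVIDER_URL, "ldap://localhost:1389"); // placeholder port
        // Without this property the initial bind can block; with it, the bind
        // fails with a timeout-related NamingException after ~1 second.
        env.put("com.sun.jndi.ldap.connect.timeout", "1000"); // milliseconds
        try {
            DirContext ctx = new InitialDirContext(env);
            ctx.close();
        } catch (NamingException e) {
            System.err.println("Bind failed (could be retried with another port): " + e);
        }
    }
}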
@ -31,6 +31,7 @@

import javax.naming.Context;
import javax.naming.NamingException;
import javax.naming.ServiceUnavailableException;
import javax.naming.directory.InitialDirContext;
import java.io.IOException;
import java.io.OutputStream;
@ -67,8 +68,16 @@ public class NamingExceptionMessageTest {
        ldapServer.awaitStartup();
        var env = ldapServer.getInitialLdapCtxEnvironment(0);
        var namingException = Assert.expectThrows(NamingException.class, () -> new InitialDirContext(env));
        System.out.println("Got naming exception:" + namingException);
        Assert.assertEquals(namingException.getMessage(), EXPECTED_CLOSURE_MESSAGE);
        if (namingException instanceof ServiceUnavailableException) {
            // If the naming exception is a ServiceUnavailableException, it could mean
            // that the connection was closed on the test server side before the LDAP client
            // started reading the reply message. Such runs are considered successful.
            System.out.println("Got ServiceUnavailableException: Test PASSED");
        } else {
            // If the exception is not a ServiceUnavailableException, check the exception message
            System.out.println("Got NamingException:" + namingException);
            Assert.assertEquals(namingException.getMessage(), EXPECTED_CLOSURE_MESSAGE);
        }
    }
}

@ -140,6 +149,12 @@ public class NamingExceptionMessageTest {
        switch (msg.getOperation()) {
            case BIND_REQUEST:
                if (closeConnections) {
                    // Give the LDAP client some time to start up
                    try {
                        TimeUnit.MILLISECONDS.sleep(100);
                    } catch (InterruptedException e) {
                    }
                    // Close the socket
                    closeSilently(socket);
                } else {
                    try {
@ -23,7 +23,8 @@

/*
 * @test
 * @bug 8242885 8250886
 * @bug 8242885 8250886 8240901
 * @key randomness
 * @summary This test verifies that on macOS, the send buffer size is configured
 *          by default so that none of our implementations of the UDP protocol
 *          will fail with a "packet too large" exception when trying to send a
@ -32,7 +33,6 @@
 *          limit.
 * @library /test/lib
 * @build jdk.test.lib.net.IPSupport
 * @requires os.family == "mac"
 * @run testng/othervm SendReceiveMaxSize
 * @run testng/othervm -Djava.net.preferIPv4Stack=true SendReceiveMaxSize
 * @run testng/othervm -Djava.net.preferIPv6Addresses=true SendReceiveMaxSize
@ -41,6 +41,8 @@
 * @run testng/othervm -Djdk.net.usePlainDatagramSocketImpl -Djava.net.preferIPv6Addresses=true SendReceiveMaxSize
 */

import jdk.test.lib.RandomFactory;
import jdk.test.lib.Platform;
import jdk.test.lib.net.IPSupport;
import org.testng.annotations.BeforeTest;
import org.testng.annotations.DataProvider;
@ -54,7 +56,9 @@ import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.MulticastSocket;
import java.nio.channels.DatagramChannel;
import java.util.Random;

import static org.testng.Assert.assertEquals;
import static org.testng.Assert.expectThrows;

public class SendReceiveMaxSize {
@ -63,6 +67,7 @@ public class SendReceiveMaxSize {
    private final static int IPV4_SNDBUF = 65507;
    private final static int IPV6_SNDBUF = 65527;
    private final static Class<IOException> IOE = IOException.class;
    private final static Random random = RandomFactory.getRandom();

    public interface DatagramSocketSupplier {
        DatagramSocket open() throws IOException;
@ -102,7 +107,14 @@ public class SendReceiveMaxSize {
        var port = receiver.getLocalPort();
        var addr = new InetSocketAddress(HOST_ADDR, port);
        try (var sender = supplier.open()) {
            var sendPkt = new DatagramPacket(new byte[capacity], capacity, addr);
            if (!Platform.isOSX()) {
                if (sender.getSendBufferSize() < capacity)
                    sender.setSendBufferSize(capacity);
            }
            byte[] testData = new byte[capacity];
            random.nextBytes(testData);
            var sendPkt = new DatagramPacket(testData, capacity, addr);

            if (exception != null) {
                Exception ex = expectThrows(IOE, () -> sender.send(sendPkt));
                System.out.println(name + " got expected exception: " + ex);
@ -110,6 +122,10 @@ public class SendReceiveMaxSize {
                sender.send(sendPkt);
                var receivePkt = new DatagramPacket(new byte[capacity], capacity);
                receiver.receive(receivePkt);

                // check packet data has been fragmented and re-assembled correctly at receiver
                assertEquals(receivePkt.getLength(), capacity);
                assertEquals(receivePkt.getData(), testData);
            }
        }
    }
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -23,7 +23,7 @@

/*
 * @test
 * @bug 4503641 8130394
 * @bug 4503641 8130394 8249773
 * @summary Check that DatagramChannel.receive returns a new SocketAddress
 *          when it receives a packet from the same source address but
 *          different endpoint.
@ -31,58 +31,91 @@
import java.nio.*;
import java.nio.channels.*;
import java.net.*;
import static java.lang.System.out;

public class ReceiveISA {

    public static void main(String args[]) throws Exception {

        // clients
        DatagramChannel dc1 = DatagramChannel.open();
        DatagramChannel dc2 = DatagramChannel.open();
        String regex = "Dia duit![0-2]";

        // bind server to any port
        DatagramChannel dc3 = DatagramChannel.open();
        dc3.socket().bind((SocketAddress)null);
        try (DatagramChannel dc1 = DatagramChannel.open(); // client
             DatagramChannel dc2 = DatagramChannel.open(); // client
             DatagramChannel dc3 = DatagramChannel.open();
             DatagramChannel dc4 = DatagramChannel.open()) { // client

        // get server address
        InetAddress lh = InetAddress.getLocalHost();
        InetSocketAddress isa
            = new InetSocketAddress( lh, dc3.socket().getLocalPort() );
            dc3.socket().bind((SocketAddress) null); // bind server to any port

        ByteBuffer bb = ByteBuffer.allocateDirect(100);
        bb.put("Dia duit!".getBytes());
        bb.flip();
            // get server address
            InetAddress lh = InetAddress.getLocalHost();
            InetSocketAddress isa = new InetSocketAddress(lh, dc3.socket().getLocalPort());

        dc1.send(bb, isa); // packet 1 from dc1
        dc1.send(bb, isa); // packet 2 from dc1
        dc2.send(bb, isa); // packet 3 from dc2
            ByteBuffer bb = ByteBuffer.allocateDirect(100);
            bb.put("Dia duit!0".getBytes());
            bb.flip();

        // receive 3 packets
        dc3.socket().setSoTimeout(1000);
        ByteBuffer rb = ByteBuffer.allocateDirect(100);
        SocketAddress sa[] = new SocketAddress[3];
        for (int i=0; i<3; i++) {
            sa[i] = dc3.receive(rb);
            System.out.println("received "+ sa[i] );
            rb.clear();
        }
            ByteBuffer bb1 = ByteBuffer.allocateDirect(100);
            bb1.put("Dia duit!1".getBytes());
            bb1.flip();

        dc1.close();
        dc2.close();
        dc3.close();
            ByteBuffer bb2 = ByteBuffer.allocateDirect(100);
            bb2.put("Dia duit!2".getBytes());
            bb2.flip();

        /*
         * Check that sa[0] equals sa[1] (both from dc1)
         * Check that sa[1] not equal to sa[2] (one from dc1, one from dc2)
         */
            ByteBuffer bb3 = ByteBuffer.allocateDirect(100);
            bb3.put("garbage".getBytes());
            bb3.flip();

        if (!sa[0].equals(sa[1])) {
            throw new Exception("Source address for packets 1 & 2 should be equal");
        }
            dc1.send(bb, isa); // packet 1 from dc1
            dc4.send(bb3, isa); // interference, packet 4 from dc4
            dc1.send(bb1, isa); // packet 2 from dc1
            dc2.send(bb2, isa); // packet 3 from dc2

        if (sa[1].equals(sa[2])) {
            throw new Exception("Source address for packets 2 & 3 should be different");

            // receive 4 packets
            dc3.socket().setSoTimeout(1000);
            ByteBuffer rb = ByteBuffer.allocateDirect(100);
            SocketAddress sa[] = new SocketAddress[3];

            for (int i = 0; i < 3;) {
                SocketAddress receiver = dc3.receive(rb);
                rb.flip();
                byte[] bytes = new byte[rb.limit()];
                rb.get(bytes, 0, rb.limit());
                String msg = new String(bytes);

                if (msg.matches("Dia duit![0-2]")) {
                    if (msg.equals("Dia duit!0")) {
                        sa[0] = receiver;
                        i++;
                    }
                    if (msg.equals("Dia duit!1")) {
                        sa[1] = receiver;
                        i++;
                    }
                    if (msg.equals("Dia duit!2")) {
                        sa[2] = receiver;
                        i++;
                    }
                } else {
                    out.println("Interfered packet sender address is : " + receiver);
                    out.println("random interfered packet is : " + msg);
                }
                rb.clear();
            }

            /*
             * Check that sa[0] equals sa[1] (both from dc1)
             * Check that sa[1] not equal to sa[2] (one from dc1, one from dc2)
             */

            if (!sa[0].equals(sa[1])) {
                throw new Exception("Source address for packets 1 & 2 should be equal");
            }

            if (sa[1].equals(sa[2])) {
                throw new Exception("Source address for packets 2 & 3 should be different");
            }
        }
    }

}
@ -23,19 +23,21 @@

/*
 * @test
 * @bug 8239355 8242885
 * @bug 8239355 8242885 8240901
 * @key randomness
 * @summary Check that it is possible to send and receive datagrams of
 *          maximum size on macOS.
 * @library /test/lib
 * @build jdk.test.lib.net.IPSupport
 * @requires os.family == "mac"
 * @run testng/othervm SendReceiveMaxSize
 * @run testng/othervm -Djava.net.preferIPv4Stack=true SendReceiveMaxSize
 * @run testng/othervm -Djdk.net.usePlainDatagramSocketImpl SendReceiveMaxSize
 * @run testng/othervm -Djdk.net.usePlainDatagramSocketImpl -Djava.net.preferIPv4Stack=true SendReceiveMaxSize
 */

import jdk.test.lib.RandomFactory;
import jdk.test.lib.NetworkConfiguration;
import jdk.test.lib.Platform;
import jdk.test.lib.net.IPSupport;
import org.testng.annotations.BeforeTest;
import org.testng.annotations.DataProvider;
@ -49,6 +51,7 @@ import java.net.InetSocketAddress;
import java.nio.ByteBuffer;
import java.nio.channels.DatagramChannel;
import java.util.ArrayList;
import java.util.Random;
import java.util.function.Predicate;

import static java.net.StandardProtocolFamily.INET;
@ -65,6 +68,7 @@ public class SendReceiveMaxSize {
    private final static int IPV4_SNDBUF = 65507;
    private final static int IPV6_SNDBUF = 65527;
    private final static Class<IOException> IOE = IOException.class;
    private final static Random random = RandomFactory.getRandom();

    public interface DatagramChannelSupplier {
        DatagramChannel open() throws IOException;
@ -118,8 +122,10 @@ public class SendReceiveMaxSize {
    @Test(dataProvider = "invariants")
    public void testGetOption(DatagramChannelSupplier supplier, int capacity, InetAddress host)
            throws IOException {
        try (var dc = supplier.open()) {
            assertTrue(dc.getOption(SO_SNDBUF) >= capacity);
        if (Platform.isOSX()) {
            try (var dc = supplier.open()){
                assertTrue(dc.getOption(SO_SNDBUF) >= capacity);
            }
        }
    }

@ -133,17 +139,43 @@ public class SendReceiveMaxSize {

        try (var sender = supplier.open()) {
            sender.bind(null);
            var sendBuf = ByteBuffer.allocate(capacity);
            if (!Platform.isOSX()) {
                if (sender.getOption(SO_SNDBUF) < capacity)
                    sender.setOption(SO_SNDBUF, capacity);
            }
            byte[] testData = new byte[capacity];
            random.nextBytes(testData);

            var sendBuf = ByteBuffer.wrap(testData);
            sender.send(sendBuf, addr);
            var receiveBuf = ByteBuffer.allocate(capacity);
            receiver.receive(receiveBuf);
            assertEquals(sendBuf, receiveBuf);

            sendBuf = ByteBuffer.allocate(capacity - 1);
            sendBuf.flip();
            receiveBuf.flip();

            // check that data has been fragmented and re-assembled correctly at receiver
            System.out.println("sendBuf: " + sendBuf);
            System.out.println("receiveBuf: " + receiveBuf);
            assertEquals(sendBuf, receiveBuf);
            assertEquals(sendBuf.compareTo(receiveBuf), 0);

            testData = new byte[capacity - 1];
            random.nextBytes(testData);

            sendBuf = ByteBuffer.wrap(testData);
            sender.send(sendBuf, addr);
            receiveBuf = ByteBuffer.allocate(capacity - 1);
            receiver.receive(receiveBuf);
            assertTrue(sendBuf.compareTo(receiveBuf) == 0);

            sendBuf.flip();
            receiveBuf.flip();

            // check that data has been fragmented and re-assembled correctly at receiver
            System.out.println("sendBuf: " + sendBuf);
            System.out.println("receiveBuf: " + receiveBuf);
            assertEquals(sendBuf, receiveBuf);
            assertEquals(sendBuf.compareTo(receiveBuf), 0);

            var failSendBuf = ByteBuffer.allocate(capacity + 1);
            assertThrows(IOE, () -> sender.send(failSendBuf, addr));
@ -0,0 +1,127 @@
/*
 * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

import org.testng.annotations.AfterTest;
import org.testng.annotations.BeforeTest;
import org.testng.annotations.Test;

import java.io.IOException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.SocketAddress;
import java.nio.ByteBuffer;
import java.nio.channels.ClosedChannelException;
import java.nio.channels.ServerSocketChannel;
import java.nio.channels.SocketChannel;

import static org.testng.Assert.*;

/*
 * @test
 * @bug 8246707
 * @library /test/lib
 * @summary Reading or Writing to a closed SocketChannel should throw a ClosedChannelException
 * @run testng/othervm ReadWriteAfterClose
 */

public class ReadWriteAfterClose {

    private ServerSocketChannel listener;
    private SocketAddress saddr;
    private static final int bufCapacity = 4;
    private static final int bufArraySize = 4;
    private static final Class<ClosedChannelException> CCE = ClosedChannelException.class;

    @BeforeTest
    public void setUp() throws IOException {
        listener = ServerSocketChannel.open();
        listener.bind(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0));
        saddr = listener.getLocalAddress();
    }

    @Test
    public void testWriteAfterClose1() throws IOException {
        SocketChannel sc = SocketChannel.open(saddr);
        sc.close();
        ByteBuffer bufWrite = ByteBuffer.allocate(bufCapacity);
        Throwable ex = expectThrows(CCE, () -> sc.write(bufWrite));
        assertEquals(ex.getClass(), CCE);
    }

    @Test
    public void testWriteAfterClose2() throws IOException {
        SocketChannel sc = SocketChannel.open(saddr);
        sc.close();
        ByteBuffer[] bufArrayWrite = allocateBufArray();
        Throwable ex = expectThrows(CCE, () -> sc.write(bufArrayWrite));
        assertEquals(ex.getClass(), CCE);
    }

    @Test
    public void testWriteAfterClose3() throws IOException {
        SocketChannel sc = SocketChannel.open(saddr);
        sc.close();
        ByteBuffer[] bufArrayWrite = allocateBufArray();
        Throwable ex = expectThrows(CCE, () -> sc.write(bufArrayWrite, 0, bufArraySize));
        assertEquals(ex.getClass(), CCE);
    }

    @Test
    public void testReadAfterClose1() throws IOException {
        SocketChannel sc = SocketChannel.open(saddr);
        sc.close();
        ByteBuffer dst = ByteBuffer.allocate(bufCapacity);
        Throwable ex = expectThrows(CCE, () -> sc.read(dst));
        assertEquals(ex.getClass(), CCE);
    }

    @Test
    public void testReadAfterClose2() throws IOException {
        SocketChannel sc = SocketChannel.open(saddr);
        sc.close();
        ByteBuffer[] dstArray = allocateBufArray();
        Throwable ex = expectThrows(CCE, () -> sc.read(dstArray));
        assertEquals(ex.getClass(), CCE);
    }

    @Test
    public void testReadAfterClose3() throws IOException {
        SocketChannel sc = SocketChannel.open(saddr);
        sc.close();
        ByteBuffer[] dstArray = allocateBufArray();
        Throwable ex = expectThrows(CCE, () -> sc.read(dstArray, 0, bufArraySize));
        assertEquals(ex.getClass(), CCE);
    }

    public ByteBuffer[] allocateBufArray() {
        ByteBuffer[] bufArr = new ByteBuffer[bufArraySize];
        for (int i = 0; i < bufArraySize; i++)
            bufArr[i] = ByteBuffer.allocate(bufCapacity);
        return bufArr;
    }

    @AfterTest
    public void tearDown() throws IOException {
        listener.close();
    }
}
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -26,10 +26,15 @@
|
||||
|
||||
/*
|
||||
* @test
|
||||
* @bug 1234567
|
||||
* @summary SSLEngine has not yet caused Solaris kernel to panic
|
||||
* @bug 8250839
|
||||
* @summary Improve test template SSLEngineTemplate with SSLContextTemplate
|
||||
* @build SSLContextTemplate
|
||||
* @run main/othervm SSLEngineTemplate
|
||||
*/
|
||||
import javax.net.ssl.*;
|
||||
import javax.net.ssl.SSLEngineResult.HandshakeStatus;
|
||||
import java.nio.ByteBuffer;
|
||||
|
||||
/**
|
||||
* A SSLEngine usage example which simplifies the presentation
|
||||
* by removing the I/O and multi-threading concerns.
|
||||
@ -44,231 +49,139 @@
|
||||
*
|
||||
* When this application runs, notice that several messages
|
||||
* (wrap/unwrap) pass before any application data is consumed or
|
||||
* produced. (For more information, please see the SSL/TLS
|
||||
* specifications.) There may several steps for a successful handshake,
|
||||
* so it's typical to see the following series of operations:
|
||||
*
|
||||
* client server message
|
||||
* ====== ====== =======
|
||||
* wrap() ... ClientHello
|
||||
* ... unwrap() ClientHello
|
||||
* ... wrap() ServerHello/Certificate
|
||||
* unwrap() ... ServerHello/Certificate
|
||||
* wrap() ... ClientKeyExchange
|
||||
* wrap() ... ChangeCipherSpec
|
||||
* wrap() ... Finished
|
||||
* ... unwrap() ClientKeyExchange
|
||||
* ... unwrap() ChangeCipherSpec
|
||||
* ... unwrap() Finished
|
||||
* ... wrap() ChangeCipherSpec
|
||||
* ... wrap() Finished
|
||||
* unwrap() ... ChangeCipherSpec
|
||||
* unwrap() ... Finished
|
||||
* produced.
|
||||
*/
|
||||
import javax.net.ssl.*;
|
||||
import javax.net.ssl.SSLEngineResult.*;
|
||||
import java.io.*;
|
||||
import java.security.*;
|
||||
import java.nio.*;
|
||||
public class SSLEngineTemplate implements SSLContextTemplate {
|
||||
private final SSLEngine clientEngine; // client Engine
|
||||
private final ByteBuffer clientOut; // write side of clientEngine
|
||||
private final ByteBuffer clientIn; // read side of clientEngine
|
||||
|
||||
public class SSLEngineTemplate {
|
||||
private final SSLEngine serverEngine; // server Engine
|
||||
private final ByteBuffer serverOut; // write side of serverEngine
|
||||
private final ByteBuffer serverIn; // read side of serverEngine
|
||||
|
||||
// For data transport, this example uses local ByteBuffers. This
|
||||
// isn't really useful, but the purpose of this example is to show
|
||||
// SSLEngine concepts, not how to do network transport.
|
||||
private final ByteBuffer cTOs; // "reliable" transport client->server
|
||||
private final ByteBuffer sTOc; // "reliable" transport server->client
|
||||
|
||||
private SSLEngineTemplate() throws Exception {
|
||||
serverEngine = configureServerEngine(
|
||||
createServerSSLContext().createSSLEngine());
|
||||
|
||||
clientEngine = configureClientEngine(
|
||||
createClientSSLContext().createSSLEngine());
|
||||
|
||||
// We'll assume the buffer sizes are the same
|
||||
// between client and server.
|
||||
SSLSession session = clientEngine.getSession();
|
||||
int appBufferMax = session.getApplicationBufferSize();
|
||||
int netBufferMax = session.getPacketBufferSize();
|
||||
|
||||
// We'll make the input buffers a bit bigger than the max needed
|
||||
// size, so that unwrap()s following a successful data transfer
|
||||
// won't generate BUFFER_OVERFLOWS.
|
||||
//
|
||||
// We'll use a mix of direct and indirect ByteBuffers for
|
||||
// tutorial purposes only. In reality, only use direct
|
||||
// ByteBuffers when they give a clear performance enhancement.
|
||||
clientIn = ByteBuffer.allocate(appBufferMax + 50);
|
||||
serverIn = ByteBuffer.allocate(appBufferMax + 50);
|
||||
|
||||
cTOs = ByteBuffer.allocateDirect(netBufferMax);
|
||||
sTOc = ByteBuffer.allocateDirect(netBufferMax);
|
||||
|
||||
clientOut = ByteBuffer.wrap("Hi Server, I'm Client".getBytes());
|
||||
serverOut = ByteBuffer.wrap("Hello Client, I'm Server".getBytes());
|
||||
}
|
||||
|
||||
//
|
||||
// Protected methods could be used to customize the test case.
|
||||
//
|
||||
|
||||
/*
|
||||
* Enables logging of the SSLEngine operations.
|
||||
* Configure the client side engine.
|
||||
*/
|
||||
private static final boolean logging = true;
|
||||
protected SSLEngine configureClientEngine(SSLEngine clientEngine) {
|
||||
clientEngine.setUseClientMode(true);
|
||||
|
||||
/*
|
||||
* Enables the JSSE system debugging system property:
|
||||
*
|
||||
* -Djavax.net.debug=all
|
||||
*
|
||||
* This gives a lot of low-level information about operations underway,
|
||||
* including specific handshake messages, and might be best examined
|
||||
* after gaining some familiarity with this application.
|
||||
*/
|
||||
private static final boolean debug = false;
|
||||
// Get/set parameters if needed
|
||||
// SSLParameters paramsClient = clientEngine.getSSLParameters();
|
||||
// clientEngine.setSSLParameters(paramsClient);
|
||||
|
||||
private final SSLContext sslc;
|
||||
|
||||
private SSLEngine clientEngine; // client Engine
|
||||
private ByteBuffer clientOut; // write side of clientEngine
|
||||
private ByteBuffer clientIn; // read side of clientEngine
|
||||
|
||||
private SSLEngine serverEngine; // server Engine
|
||||
private ByteBuffer serverOut; // write side of serverEngine
|
||||
private ByteBuffer serverIn; // read side of serverEngine
|
||||
|
||||
/*
|
||||
* For data transport, this example uses local ByteBuffers. This
|
||||
* isn't really useful, but the purpose of this example is to show
|
||||
* SSLEngine concepts, not how to do network transport.
|
||||
*/
|
||||
private ByteBuffer cTOs; // "reliable" transport client->server
|
||||
private ByteBuffer sTOc; // "reliable" transport server->client
|
||||
|
||||
/*
|
||||
* The following is to set up the keystores.
|
||||
*/
|
||||
private static final String pathToStores = "../etc";
|
||||
private static final String keyStoreFile = "keystore";
|
||||
private static final String trustStoreFile = "truststore";
|
||||
private static final char[] passphrase = "passphrase".toCharArray();
|
||||
|
||||
private static final String keyFilename =
|
||||
System.getProperty("test.src", ".") + "/" + pathToStores +
|
||||
"/" + keyStoreFile;
|
||||
private static final String trustFilename =
|
||||
System.getProperty("test.src", ".") + "/" + pathToStores +
|
||||
"/" + trustStoreFile;
|
||||
|
||||
/*
|
||||
* Main entry point for this test.
|
||||
*/
|
||||
public static void main(String args[]) throws Exception {
|
||||
if (debug) {
|
||||
System.setProperty("javax.net.debug", "all");
|
||||
}
|
||||
|
||||
SSLEngineTemplate test = new SSLEngineTemplate();
|
||||
test.runTest();
|
||||
|
||||
System.out.println("Test Passed.");
|
||||
return clientEngine;
|
||||
}
|
||||
|
||||
/*
|
||||
* Create an initialized SSLContext to use for these tests.
|
||||
* Configure the server side engine.
|
||||
*/
|
||||
public SSLEngineTemplate() throws Exception {
|
||||
protected SSLEngine configureServerEngine(SSLEngine serverEngine) {
|
||||
serverEngine.setUseClientMode(false);
|
||||
serverEngine.setNeedClientAuth(true);
|
||||
|
||||
KeyStore ks = KeyStore.getInstance("JKS");
|
||||
KeyStore ts = KeyStore.getInstance("JKS");
|
||||
// Get/set parameters if needed
|
||||
//
|
||||
// SSLParameters paramsServer = serverEngine.getSSLParameters();
|
||||
// serverEngine.setSSLParameters(paramsServer);
|
||||
|
||||
ks.load(new FileInputStream(keyFilename), passphrase);
|
||||
ts.load(new FileInputStream(trustFilename), passphrase);
|
||||
|
||||
KeyManagerFactory kmf = KeyManagerFactory.getInstance("SunX509");
|
||||
kmf.init(ks, passphrase);
|
||||
|
||||
TrustManagerFactory tmf = TrustManagerFactory.getInstance("SunX509");
|
||||
tmf.init(ts);
|
||||
|
||||
SSLContext sslCtx = SSLContext.getInstance("TLS");
|
||||
|
||||
sslCtx.init(kmf.getKeyManagers(), tmf.getTrustManagers(), null);
|
||||
|
||||
sslc = sslCtx;
|
||||
return serverEngine;
|
||||
}
|
||||
|
||||
/*
|
||||
* Run the test.
|
||||
*
|
||||
* Sit in a tight loop, both engines calling wrap/unwrap regardless
|
||||
* of whether data is available or not. We do this until both engines
|
||||
* report back they are closed.
|
||||
*
|
||||
* The main loop handles all of the I/O phases of the SSLEngine's
|
||||
* lifetime:
|
||||
*
|
||||
* initial handshaking
|
||||
* application data transfer
|
||||
* engine closing
|
||||
*
|
||||
* One could easily separate these phases into separate
|
||||
* sections of code.
|
||||
*/
|
||||
public static void main(String[] args) throws Exception {
|
||||
new SSLEngineTemplate().runTest();
|
||||
}
|
||||
|
||||
//
|
||||
// Private methods that used to build the common part of the test.
|
||||
//
|
||||
|
||||
private void runTest() throws Exception {
|
||||
boolean dataDone = false;
|
||||
|
||||
createSSLEngines();
|
||||
createBuffers();
|
||||
|
||||
// results from client's last operation
|
||||
SSLEngineResult clientResult;
|
||||
|
||||
// results from server's last operation
|
||||
SSLEngineResult serverResult;
|
||||
|
||||
/*
|
||||
* Examining the SSLEngineResults could be much more involved,
|
||||
* and may alter the overall flow of the application.
|
||||
*
|
||||
* For example, if we received a BUFFER_OVERFLOW when trying
|
||||
* to write to the output pipe, we could reallocate a larger
|
||||
* pipe, but instead we wait for the peer to drain it.
|
||||
*/
|
||||
Exception clientException = null;
|
||||
Exception serverException = null;
|
||||
boolean dataDone = false;
|
||||
while (isOpen(clientEngine) || isOpen(serverEngine)) {
|
||||
log("=================");
|
||||
|
||||
while (!isEngineClosed(clientEngine)
|
||||
|| !isEngineClosed(serverEngine)) {
|
||||
|
||||
log("================");
|
||||
|
||||
try {
|
||||
clientResult = clientEngine.wrap(clientOut, cTOs);
|
||||
log("client wrap: ", clientResult);
|
||||
} catch (Exception e) {
|
||||
clientException = e;
|
||||
System.out.println("Client wrap() threw: " + e.getMessage());
|
||||
}
|
||||
logEngineStatus(clientEngine);
|
||||
// client wrap
|
||||
log("---Client Wrap---");
|
||||
clientResult = clientEngine.wrap(clientOut, cTOs);
|
||||
logEngineStatus(clientEngine, clientResult);
|
||||
runDelegatedTasks(clientEngine);
|
||||
|
||||
log("----");
|
||||
|
||||
try {
|
||||
serverResult = serverEngine.wrap(serverOut, sTOc);
|
||||
log("server wrap: ", serverResult);
|
||||
} catch (Exception e) {
|
||||
serverException = e;
|
||||
System.out.println("Server wrap() threw: " + e.getMessage());
|
||||
}
|
||||
logEngineStatus(serverEngine);
|
||||
// server wrap
|
||||
log("---Server Wrap---");
|
||||
serverResult = serverEngine.wrap(serverOut, sTOc);
|
||||
logEngineStatus(serverEngine, serverResult);
|
||||
runDelegatedTasks(serverEngine);
|
||||
|
||||
cTOs.flip();
|
||||
sTOc.flip();
|
||||
|
||||
log("--------");
|
||||
|
||||
try {
|
||||
clientResult = clientEngine.unwrap(sTOc, clientIn);
|
||||
log("client unwrap: ", clientResult);
|
||||
} catch (Exception e) {
|
||||
clientException = e;
|
||||
System.out.println("Client unwrap() threw: " + e.getMessage());
|
||||
}
|
||||
logEngineStatus(clientEngine);
|
||||
// client unwrap
|
||||
log("---Client Unwrap---");
|
||||
clientResult = clientEngine.unwrap(sTOc, clientIn);
|
||||
logEngineStatus(clientEngine, clientResult);
|
||||
runDelegatedTasks(clientEngine);
|
||||
|
||||
log("----");
|
||||
|
||||
try {
|
||||
serverResult = serverEngine.unwrap(cTOs, serverIn);
|
||||
log("server unwrap: ", serverResult);
|
||||
} catch (Exception e) {
|
||||
serverException = e;
|
||||
System.out.println("Server unwrap() threw: " + e.getMessage());
|
||||
}
|
||||
logEngineStatus(serverEngine);
|
||||
// server unwrap
|
||||
log("---Server Unwrap---");
|
||||
serverResult = serverEngine.unwrap(cTOs, serverIn);
|
||||
logEngineStatus(serverEngine, serverResult);
|
||||
runDelegatedTasks(serverEngine);
|
||||
|
||||
cTOs.compact();
|
||||
sTOc.compact();
|
||||
|
||||
/*
|
||||
* After we've transfered all application data between the client
|
||||
* and server, we close the clientEngine's outbound stream.
|
||||
* This generates a close_notify handshake message, which the
|
||||
* server engine receives and responds by closing itself.
|
||||
*/
|
||||
// After we've transferred all application data between the client
|
||||
// and server, we close the clientEngine's outbound stream.
|
||||
// This generates a close_notify handshake message, which the
|
||||
// server engine receives and responds by closing itself.
|
||||
if (!dataDone && (clientOut.limit() == serverIn.position()) &&
|
||||
(serverOut.limit() == clientIn.position())) {
|
||||
|
||||
/*
|
||||
* A sanity check to ensure we got what was sent.
|
||||
*/
|
||||
// A sanity check to ensure we got what was sent.
|
||||
checkTransfer(serverOut, clientIn);
|
||||
checkTransfer(clientOut, serverIn);
|
||||
|
||||
@ -284,78 +197,33 @@ public class SSLEngineTemplate {
        }
    }

    private static boolean isOpen(SSLEngine engine) {
        return (!engine.isOutboundDone() || !engine.isInboundDone());
    }

    private static void logEngineStatus(SSLEngine engine) {
        log("\tCurrent HS State " + engine.getHandshakeStatus().toString());
        log("\tisInboundDone(): " + engine.isInboundDone());
        log("\tCurrent HS State: " + engine.getHandshakeStatus());
        log("\tisInboundDone() : " + engine.isInboundDone());
        log("\tisOutboundDone(): " + engine.isOutboundDone());
    }

    /*
     * Using the SSLContext created during object creation,
     * create/configure the SSLEngines we'll use for this test.
     */
    private void createSSLEngines() throws Exception {
        /*
         * Configure the serverEngine to act as a server in the SSL/TLS
         * handshake. Also, require SSL client authentication.
         */
        serverEngine = sslc.createSSLEngine();
        serverEngine.setUseClientMode(false);
        serverEngine.setNeedClientAuth(true);

        // Get/set parameters if needed
        SSLParameters paramsServer = serverEngine.getSSLParameters();
        serverEngine.setSSLParameters(paramsServer);

        /*
         * Similar to above, but using client mode instead.
         */
        clientEngine = sslc.createSSLEngine("client", 80);
        clientEngine.setUseClientMode(true);

        // Get/set parameters if needed
        SSLParameters paramsClient = clientEngine.getSSLParameters();
        clientEngine.setSSLParameters(paramsClient);
    private static void logEngineStatus(
            SSLEngine engine, SSLEngineResult result) {
        log("\tResult Status    : " + result.getStatus());
        log("\tResult HS Status : " + result.getHandshakeStatus());
        log("\tEngine HS Status : " + engine.getHandshakeStatus());
        log("\tisInboundDone()  : " + engine.isInboundDone());
        log("\tisOutboundDone() : " + engine.isOutboundDone());
        log("\tMore Result      : " + result);
    }

    /*
     * Create and size the buffers appropriately.
     */
    private void createBuffers() {

        /*
         * We'll assume the buffer sizes are the same
         * between client and server.
         */
        SSLSession session = clientEngine.getSession();
        int appBufferMax = session.getApplicationBufferSize();
        int netBufferMax = session.getPacketBufferSize();

        /*
         * We'll make the input buffers a bit bigger than the max needed
         * size, so that unwrap()s following a successful data transfer
         * won't generate BUFFER_OVERFLOWS.
         *
         * We'll use a mix of direct and indirect ByteBuffers for
         * tutorial purposes only. In reality, only use direct
         * ByteBuffers when they give a clear performance enhancement.
         */
        clientIn = ByteBuffer.allocate(appBufferMax + 50);
        serverIn = ByteBuffer.allocate(appBufferMax + 50);

        cTOs = ByteBuffer.allocateDirect(netBufferMax);
        sTOc = ByteBuffer.allocateDirect(netBufferMax);

        clientOut = ByteBuffer.wrap("Hi Server, I'm Client".getBytes());
        serverOut = ByteBuffer.wrap("Hello Client, I'm Server".getBytes());
    private static void log(String message) {
        System.err.println(message);
    }

    /*
     * If the result indicates that we have outstanding tasks to do,
     * go ahead and run them in this thread.
     */
    // If the result indicates that we have outstanding tasks to do,
    // go ahead and run them in this thread.
    private static void runDelegatedTasks(SSLEngine engine) throws Exception {

        if (engine.getHandshakeStatus() == HandshakeStatus.NEED_TASK) {
            Runnable runnable;
            while ((runnable = engine.getDelegatedTask()) != null) {
@ -365,19 +233,13 @@ public class SSLEngineTemplate {
            HandshakeStatus hsStatus = engine.getHandshakeStatus();
            if (hsStatus == HandshakeStatus.NEED_TASK) {
                throw new Exception(
                    "handshake shouldn't need additional tasks");
                        "handshake shouldn't need additional tasks");
            }
            logEngineStatus(engine);
        }
    }

    private static boolean isEngineClosed(SSLEngine engine) {
        return (engine.isOutboundDone() && engine.isInboundDone());
    }

    /*
     * Simple check to make sure everything came across as expected.
     */
    // Simple check to make sure everything came across as expected.
    private static void checkTransfer(ByteBuffer a, ByteBuffer b)
            throws Exception {
        a.flip();
@ -394,35 +256,4 @@ public class SSLEngineTemplate {
        a.limit(a.capacity());
        b.limit(b.capacity());
    }

    /*
     * Logging code
     */
    private static boolean resultOnce = true;

    private static void log(String str, SSLEngineResult result) {
        if (!logging) {
            return;
        }
        if (resultOnce) {
            resultOnce = false;
            System.out.println("The format of the SSLEngineResult is: \n" +
                "\t\"getStatus() / getHandshakeStatus()\" +\n" +
                "\t\"bytesConsumed() / bytesProduced()\"\n");
        }
        HandshakeStatus hsStatus = result.getHandshakeStatus();
        log(str +
            result.getStatus() + "/" + hsStatus + ", " +
            result.bytesConsumed() + "/" + result.bytesProduced() +
            " bytes");
        if (hsStatus == HandshakeStatus.FINISHED) {
            log("\t...ready for application data");
        }
    }

    private static void log(String str) {
        if (logging) {
            System.out.println(str);
        }
    }
}

@ -79,40 +79,68 @@ public class BasicJMapTest {
        output.shouldHaveExitValue(0);
    }

    private static void testHistoParallelZero() throws Exception {
        OutputAnalyzer output = jmap("-histo:parallel=0");
        output.shouldHaveExitValue(0);
    }

    private static void testHistoParallel() throws Exception {
        OutputAnalyzer output = jmap("-histo:parallel=2");
        output.shouldHaveExitValue(0);
    }

    private static void testHistoNonParallel() throws Exception {
        OutputAnalyzer output = jmap("-histo:parallel=1");
        output.shouldHaveExitValue(0);
    }

    private static void testHistoToFile() throws Exception {
        histoToFile(false);
        histoToFile(false, false, 1);
    }

    private static void testHistoLiveToFile() throws Exception {
        histoToFile(true);
        histoToFile(true, false, 1);
    }

    private static void testHistoAllToFile() throws Exception {
        boolean explicitAll = true;
        histoToFile(false, explicitAll);
        histoToFile(false, true, 1);
    }

    private static void histoToFile(boolean live) throws Exception {
        boolean explicitAll = false;
        histoToFile(live, explicitAll);
    private static void testHistoFileParallelZero() throws Exception {
        histoToFile(false, false, 0);
    }

    private static void histoToFile(boolean live, boolean explicitAll) throws Exception {
        if (live == true && explicitAll == true) {
    private static void testHistoFileParallel() throws Exception {
        histoToFile(false, false, 2);
    }

    private static void histoToFile(boolean live,
                                    boolean explicitAll,
                                    int parallelThreadNum) throws Exception {
        String liveArg = "";
        String fileArg = "";
        String parArg = "parallel=" + parallelThreadNum;
        String allArgs = "-histo:";

        if (live && explicitAll) {
            fail("Illegal argument setting for jmap -histo");
        }
        if (live) {
            liveArg = "live,";
        }
        if (explicitAll) {
            liveArg = "all,";
        }

        File file = new File("jmap.histo.file" + System.currentTimeMillis() + ".histo");
        if (file.exists()) {
            file.delete();
        }
        fileArg = "file=" + file.getName();

        OutputAnalyzer output;
        if (live) {
            output = jmap("-histo:live,file=" + file.getName());
        } else if (explicitAll == true) {
            output = jmap("-histo:all,file=" + file.getName());
        } else {
            output = jmap("-histo:file=" + file.getName());
        }
        allArgs = allArgs + liveArg + fileArg + ',' + parArg;
        output = jmap(allArgs);
        output.shouldHaveExitValue(0);
        output.shouldContain("Heap inspection file created");
        file.delete();
@ -129,43 +157,45 @@ public class BasicJMapTest {
    }

    private static void testDump() throws Exception {
        dump(false);
        dump(false, false);
    }

    private static void testDumpLive() throws Exception {
        dump(true);
        dump(true, false);
    }

    private static void testDumpAll() throws Exception {
        boolean explicitAll = true;
        dump(false, explicitAll);
    }

    private static void dump(boolean live) throws Exception {
        boolean explicitAll = false;
        dump(live, explicitAll);
        dump(false, true);
    }

    private static void dump(boolean live, boolean explicitAll) throws Exception {
        if (live == true && explicitAll == true) {
            fail("Illegal argument setting for jmap -dump");
        String liveArg = "";
        String fileArg = "";
        String allArgs = "-dump:";

        if (live && explicitAll) {
            fail("Illegal argument setting for jmap -dump");
        }
        File dump = new File("jmap.dump." + System.currentTimeMillis() + ".hprof");
        if (dump.exists()) {
            dump.delete();
        }
        OutputAnalyzer output;
        if (live) {
            output = jmap("-dump:live,format=b,file=" + dump.getName());
        } else if (explicitAll == true) {
            output = jmap("-dump:all,format=b,file=" + dump.getName());
        } else {
            output = jmap("-dump:format=b,file=" + dump.getName());
            liveArg = "live,";
        }
        if (explicitAll) {
            liveArg = "all,";
        }

        File file = new File("jmap.dump" + System.currentTimeMillis() + ".hprof");
        if (file.exists()) {
            file.delete();
        }
        fileArg = "file=" + file.getName();

        OutputAnalyzer output;
        allArgs = allArgs + liveArg + "format=b," + fileArg;
        output = jmap(allArgs);
        output.shouldHaveExitValue(0);
        output.shouldContain("Heap dump file created");
        verifyDumpFile(dump);
        dump.delete();
        verifyDumpFile(file);
        file.delete();
    }

    private static void verifyDumpFile(File dump) {
@ -195,5 +225,4 @@ public class BasicJMapTest {

        return output;
    }

}
Some files were not shown because too many files have changed in this diff