Merge

commit c200fc1f34
@@ -1,5 +1,5 @@
/*
 * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -30,8 +30,8 @@ import java.io.*;
class AbstractCommandNode extends AbstractNamedNode {

    void document(PrintWriter writer) {
        writer.println("<h5 id=\"" + context.whereC + "\">" + name +
                       " Command (" + nameNode.value() + ")</h5>");
        writer.println("<h3 id=\"" + context.whereC + "\">" + name +
                       " Command (" + nameNode.value() + ")</h3>");
        writer.println(comment());
        writer.println("<dl>");
        for (Node node : components) {

@@ -1,5 +1,5 @@
/*
 * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -62,8 +62,8 @@ abstract class AbstractNamedNode extends Node {
    }

    void document(PrintWriter writer) {
        writer.println("<h4 id=\"" + name + "\">" + name +
                       " Command Set</h4>");
        writer.println("<h2 id=\"" + name + "\">" + name +
                       " Command Set</h2>");
        for (Node node : components) {
            node.document(writer);
        }

@@ -1,5 +1,5 @@
/*
 * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -38,9 +38,9 @@ class CommandSetNode extends AbstractNamedNode {
    }

    void document(PrintWriter writer) {
        writer.println("<h4 id=\"" + context.whereC + "\">" + name +
        writer.println("<h2 id=\"" + context.whereC + "\">" + name +
                       " Command Set (" +
                       nameNode.value() + ")</h4>");
                       nameNode.value() + ")</h2>");
        writer.println(comment());
        for (Node node : components) {
            node.document(writer);

@@ -1,5 +1,5 @@
/*
 * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -54,8 +54,8 @@ class ConstantSetNode extends AbstractNamedNode {
    }

    void document(PrintWriter writer) {
        writer.println("<h4 id=\"" + context.whereC + "\">" + name +
                       " Constants</h4>");
        writer.println("<h2 id=\"" + context.whereC + "\">" + name +
                       " Constants</h2>");
        writer.println(comment());
        writer.println("<table><tr>");
        writer.println("<th style=\"width: 20%\"><th style=\"width: 5%\"><th style=\"width: 65%\">");

@@ -1,5 +1,5 @@
/*
 * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -52,11 +52,16 @@ class RootNode extends AbstractNamedNode {
        writer.println("</style>");
        writer.println("</head>");
        writer.println("<body>");
        writer.println("<ul role=\"navigation\">");
        writer.println("<div class=\"centered\" role=\"banner\">");
        writer.println("<h1 id=\"Protocol Details\">Java Debug Wire Protocol Details</h1>");
        writer.println("</div>");
        writer.println("<nav>");
        writer.println("<ul>");
        for (Node node : components) {
            node.documentIndex(writer);
        }
        writer.println("</ul>");
        writer.println("</nav>");
        writer.println("<div role=\"main\">");
        for (Node node : components) {
            node.document(writer);
@ -61,7 +61,7 @@ static void z_load_barrier_slow_reg(MacroAssembler& _masm, Register dst,
|
||||
//
|
||||
// Execute ZGC load barrier (strong) slow path
|
||||
//
|
||||
instruct loadBarrierSlowReg(iRegP dst, memory mem, rFlagsReg cr,
|
||||
instruct loadBarrierSlowReg(iRegP dst, memory src, rFlagsReg cr,
|
||||
vRegD_V0 v0, vRegD_V1 v1, vRegD_V2 v2, vRegD_V3 v3, vRegD_V4 v4,
|
||||
vRegD_V5 v5, vRegD_V6 v6, vRegD_V7 v7, vRegD_V8 v8, vRegD_V9 v9,
|
||||
vRegD_V10 v10, vRegD_V11 v11, vRegD_V12 v12, vRegD_V13 v13, vRegD_V14 v14,
|
||||
@ -69,20 +69,22 @@ instruct loadBarrierSlowReg(iRegP dst, memory mem, rFlagsReg cr,
|
||||
vRegD_V20 v20, vRegD_V21 v21, vRegD_V22 v22, vRegD_V23 v23, vRegD_V24 v24,
|
||||
vRegD_V25 v25, vRegD_V26 v26, vRegD_V27 v27, vRegD_V28 v28, vRegD_V29 v29,
|
||||
vRegD_V30 v30, vRegD_V31 v31) %{
|
||||
match(Set dst (LoadBarrierSlowReg mem));
|
||||
match(Set dst (LoadBarrierSlowReg src dst));
|
||||
predicate(!n->as_LoadBarrierSlowReg()->is_weak());
|
||||
|
||||
effect(DEF dst, KILL cr,
|
||||
effect(KILL cr,
|
||||
KILL v0, KILL v1, KILL v2, KILL v3, KILL v4, KILL v5, KILL v6, KILL v7,
|
||||
KILL v8, KILL v9, KILL v10, KILL v11, KILL v12, KILL v13, KILL v14,
|
||||
KILL v15, KILL v16, KILL v17, KILL v18, KILL v19, KILL v20, KILL v21,
|
||||
KILL v22, KILL v23, KILL v24, KILL v25, KILL v26, KILL v27, KILL v28,
|
||||
KILL v29, KILL v30, KILL v31);
|
||||
|
||||
format %{"LoadBarrierSlowReg $dst, $mem" %}
|
||||
format %{ "lea $dst, $src\n\t"
|
||||
"call #ZLoadBarrierSlowPath" %}
|
||||
|
||||
ins_encode %{
|
||||
z_load_barrier_slow_reg(_masm, $dst$$Register, $mem$$base$$Register,
|
||||
$mem$$index, $mem$$scale, $mem$$disp, false);
|
||||
z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$base$$Register,
|
||||
$src$$index, $src$$scale, $src$$disp, false);
|
||||
%}
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
@ -90,7 +92,7 @@ instruct loadBarrierSlowReg(iRegP dst, memory mem, rFlagsReg cr,
|
||||
//
|
||||
// Execute ZGC load barrier (weak) slow path
|
||||
//
|
||||
instruct loadBarrierWeakSlowReg(iRegP dst, memory mem, rFlagsReg cr,
|
||||
instruct loadBarrierWeakSlowReg(iRegP dst, memory src, rFlagsReg cr,
|
||||
vRegD_V0 v0, vRegD_V1 v1, vRegD_V2 v2, vRegD_V3 v3, vRegD_V4 v4,
|
||||
vRegD_V5 v5, vRegD_V6 v6, vRegD_V7 v7, vRegD_V8 v8, vRegD_V9 v9,
|
||||
vRegD_V10 v10, vRegD_V11 v11, vRegD_V12 v12, vRegD_V13 v13, vRegD_V14 v14,
|
||||
@ -98,20 +100,22 @@ instruct loadBarrierWeakSlowReg(iRegP dst, memory mem, rFlagsReg cr,
|
||||
vRegD_V20 v20, vRegD_V21 v21, vRegD_V22 v22, vRegD_V23 v23, vRegD_V24 v24,
|
||||
vRegD_V25 v25, vRegD_V26 v26, vRegD_V27 v27, vRegD_V28 v28, vRegD_V29 v29,
|
||||
vRegD_V30 v30, vRegD_V31 v31) %{
|
||||
match(Set dst (LoadBarrierSlowReg mem));
|
||||
match(Set dst (LoadBarrierSlowReg src dst));
|
||||
predicate(n->as_LoadBarrierSlowReg()->is_weak());
|
||||
|
||||
effect(DEF dst, KILL cr,
|
||||
effect(KILL cr,
|
||||
KILL v0, KILL v1, KILL v2, KILL v3, KILL v4, KILL v5, KILL v6, KILL v7,
|
||||
KILL v8, KILL v9, KILL v10, KILL v11, KILL v12, KILL v13, KILL v14,
|
||||
KILL v15, KILL v16, KILL v17, KILL v18, KILL v19, KILL v20, KILL v21,
|
||||
KILL v22, KILL v23, KILL v24, KILL v25, KILL v26, KILL v27, KILL v28,
|
||||
KILL v29, KILL v30, KILL v31);
|
||||
|
||||
format %{"LoadBarrierWeakSlowReg $dst, $mem" %}
|
||||
format %{ "lea $dst, $src\n\t"
|
||||
"call #ZLoadBarrierSlowPath" %}
|
||||
|
||||
ins_encode %{
|
||||
z_load_barrier_slow_reg(_masm, $dst$$Register, $mem$$base$$Register,
|
||||
$mem$$index, $mem$$scale, $mem$$disp, true);
|
||||
z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$base$$Register,
|
||||
$src$$index, $src$$scale, $src$$disp, true);
|
||||
%}
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
@ -45,32 +45,31 @@ static void z_load_barrier_slow_reg(MacroAssembler& _masm, Register dst, Address
|
||||
|
||||
// For XMM and YMM enabled processors
|
||||
instruct zLoadBarrierSlowRegXmmAndYmm(rRegP dst, memory src, rFlagsReg cr,
|
||||
rxmm0 x0, rxmm1 x1, rxmm2 x2,rxmm3 x3,
|
||||
rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3,
|
||||
rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
|
||||
rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
|
||||
rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15) %{
|
||||
match(Set dst (LoadBarrierSlowReg src dst));
|
||||
predicate(UseAVX <= 2 && !n->as_LoadBarrierSlowReg()->is_weak());
|
||||
|
||||
match(Set dst (LoadBarrierSlowReg src));
|
||||
predicate((UseAVX <= 2) && !n->as_LoadBarrierSlowReg()->is_weak());
|
||||
|
||||
effect(DEF dst, KILL cr,
|
||||
effect(KILL cr,
|
||||
KILL x0, KILL x1, KILL x2, KILL x3,
|
||||
KILL x4, KILL x5, KILL x6, KILL x7,
|
||||
KILL x8, KILL x9, KILL x10, KILL x11,
|
||||
KILL x12, KILL x13, KILL x14, KILL x15);
|
||||
|
||||
format %{ "zLoadBarrierSlowRegXmmAndYmm $dst, $src" %}
|
||||
format %{ "lea $dst, $src\n\t"
|
||||
"call #ZLoadBarrierSlowPath" %}
|
||||
|
||||
ins_encode %{
|
||||
z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$Address, false /* weak */);
|
||||
%}
|
||||
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
// For ZMM enabled processors
|
||||
instruct zLoadBarrierSlowRegZmm(rRegP dst, memory src, rFlagsReg cr,
|
||||
rxmm0 x0, rxmm1 x1, rxmm2 x2,rxmm3 x3,
|
||||
rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3,
|
||||
rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
|
||||
rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
|
||||
rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15,
|
||||
@ -79,10 +78,10 @@ instruct zLoadBarrierSlowRegZmm(rRegP dst, memory src, rFlagsReg cr,
|
||||
rxmm24 x24, rxmm25 x25, rxmm26 x26, rxmm27 x27,
|
||||
rxmm28 x28, rxmm29 x29, rxmm30 x30, rxmm31 x31) %{
|
||||
|
||||
match(Set dst (LoadBarrierSlowReg src));
|
||||
predicate((UseAVX == 3) && !n->as_LoadBarrierSlowReg()->is_weak());
|
||||
match(Set dst (LoadBarrierSlowReg src dst));
|
||||
predicate(UseAVX == 3 && !n->as_LoadBarrierSlowReg()->is_weak());
|
||||
|
||||
effect(DEF dst, KILL cr,
|
||||
effect(KILL cr,
|
||||
KILL x0, KILL x1, KILL x2, KILL x3,
|
||||
KILL x4, KILL x5, KILL x6, KILL x7,
|
||||
KILL x8, KILL x9, KILL x10, KILL x11,
|
||||
@ -92,43 +91,42 @@ instruct zLoadBarrierSlowRegZmm(rRegP dst, memory src, rFlagsReg cr,
|
||||
KILL x24, KILL x25, KILL x26, KILL x27,
|
||||
KILL x28, KILL x29, KILL x30, KILL x31);
|
||||
|
||||
format %{ "zLoadBarrierSlowRegZmm $dst, $src" %}
|
||||
format %{ "lea $dst, $src\n\t"
|
||||
"call #ZLoadBarrierSlowPath" %}
|
||||
|
||||
ins_encode %{
|
||||
z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$Address, false /* weak */);
|
||||
%}
|
||||
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
// For XMM and YMM enabled processors
|
||||
instruct zLoadBarrierWeakSlowRegXmmAndYmm(rRegP dst, memory src, rFlagsReg cr,
|
||||
rxmm0 x0, rxmm1 x1, rxmm2 x2,rxmm3 x3,
|
||||
rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3,
|
||||
rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
|
||||
rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
|
||||
rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15) %{
|
||||
match(Set dst (LoadBarrierSlowReg src dst));
|
||||
predicate(UseAVX <= 2 && n->as_LoadBarrierSlowReg()->is_weak());
|
||||
|
||||
match(Set dst (LoadBarrierSlowReg src));
|
||||
predicate((UseAVX <= 2) && n->as_LoadBarrierSlowReg()->is_weak());
|
||||
|
||||
effect(DEF dst, KILL cr,
|
||||
effect(KILL cr,
|
||||
KILL x0, KILL x1, KILL x2, KILL x3,
|
||||
KILL x4, KILL x5, KILL x6, KILL x7,
|
||||
KILL x8, KILL x9, KILL x10, KILL x11,
|
||||
KILL x12, KILL x13, KILL x14, KILL x15);
|
||||
|
||||
format %{ "zLoadBarrierWeakSlowRegXmmAndYmm $dst, $src" %}
|
||||
format %{ "lea $dst, $src\n\t"
|
||||
"call #ZLoadBarrierSlowPath" %}
|
||||
|
||||
ins_encode %{
|
||||
z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$Address, true /* weak */);
|
||||
%}
|
||||
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
// For ZMM enabled processors
|
||||
instruct zLoadBarrierWeakSlowRegZmm(rRegP dst, memory src, rFlagsReg cr,
|
||||
rxmm0 x0, rxmm1 x1, rxmm2 x2,rxmm3 x3,
|
||||
rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3,
|
||||
rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
|
||||
rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
|
||||
rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15,
|
||||
@ -137,10 +135,10 @@ instruct zLoadBarrierWeakSlowRegZmm(rRegP dst, memory src, rFlagsReg cr,
|
||||
rxmm24 x24, rxmm25 x25, rxmm26 x26, rxmm27 x27,
|
||||
rxmm28 x28, rxmm29 x29, rxmm30 x30, rxmm31 x31) %{
|
||||
|
||||
match(Set dst (LoadBarrierSlowReg src));
|
||||
predicate((UseAVX == 3) && n->as_LoadBarrierSlowReg()->is_weak());
|
||||
match(Set dst (LoadBarrierSlowReg src dst));
|
||||
predicate(UseAVX == 3 && n->as_LoadBarrierSlowReg()->is_weak());
|
||||
|
||||
effect(DEF dst, KILL cr,
|
||||
effect(KILL cr,
|
||||
KILL x0, KILL x1, KILL x2, KILL x3,
|
||||
KILL x4, KILL x5, KILL x6, KILL x7,
|
||||
KILL x8, KILL x9, KILL x10, KILL x11,
|
||||
@ -150,12 +148,12 @@ instruct zLoadBarrierWeakSlowRegZmm(rRegP dst, memory src, rFlagsReg cr,
|
||||
KILL x24, KILL x25, KILL x26, KILL x27,
|
||||
KILL x28, KILL x29, KILL x30, KILL x31);
|
||||
|
||||
format %{ "zLoadBarrierWeakSlowRegZmm $dst, $src" %}
|
||||
format %{ "lea $dst, $src\n\t"
|
||||
"call #ZLoadBarrierSlowPath" %}
|
||||
|
||||
ins_encode %{
|
||||
z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$Address, true /* weak */);
|
||||
%}
|
||||
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
|
@ -3513,7 +3513,7 @@ int MatchNode::needs_ideal_memory_edge(FormDict &globals) const {
|
||||
"GetAndSetB", "GetAndSetS", "GetAndAddI", "GetAndSetI", "GetAndSetP",
|
||||
"GetAndAddB", "GetAndAddS", "GetAndAddL", "GetAndSetL", "GetAndSetN",
|
||||
#if INCLUDE_ZGC
|
||||
"LoadBarrierSlowReg", "ZGetAndSetP", "ZCompareAndSwapP", "ZCompareAndExchangeP", "ZWeakCompareAndSwapP",
|
||||
"ZGetAndSetP", "ZCompareAndSwapP", "ZCompareAndExchangeP", "ZWeakCompareAndSwapP",
|
||||
#endif
|
||||
"ClearArray"
|
||||
};
|
||||
|
@ -1774,10 +1774,9 @@ void nmethod::do_unloading(bool unloading_occurred) {
|
||||
}
|
||||
}
|
||||
|
||||
void nmethod::oops_do(OopClosure* f, bool allow_zombie) {
|
||||
void nmethod::oops_do(OopClosure* f, bool allow_dead) {
|
||||
// make sure the oops ready to receive visitors
|
||||
assert(allow_zombie || !is_zombie(), "should not call follow on zombie nmethod");
|
||||
assert(!is_unloaded(), "should not call follow on unloaded nmethod");
|
||||
assert(allow_dead || is_alive(), "should not call follow on dead nmethod");
|
||||
|
||||
// Prevent extra code cache walk for platforms that don't have immediate oops.
|
||||
if (relocInfo::mustIterateImmediateOopsInCode()) {
|
||||
|
@ -473,7 +473,7 @@ public:
|
||||
|
||||
public:
|
||||
void oops_do(OopClosure* f) { oops_do(f, false); }
|
||||
void oops_do(OopClosure* f, bool allow_zombie);
|
||||
void oops_do(OopClosure* f, bool allow_dead);
|
||||
|
||||
bool test_set_oops_do_mark();
|
||||
static void oops_do_marking_prologue();
|
||||
|
@ -64,7 +64,7 @@ public:
|
||||
bool ClosureIsUnloadingBehaviour::is_unloading(CompiledMethod* cm) const {
|
||||
if (cm->is_nmethod()) {
|
||||
IsCompiledMethodUnloadingOopClosure cl(_cl);
|
||||
static_cast<nmethod*>(cm)->oops_do(&cl);
|
||||
static_cast<nmethod*>(cm)->oops_do(&cl, true /* allow_dead */);
|
||||
return cl.is_unloading();
|
||||
} else {
|
||||
return false;
|
||||
|
@ -35,6 +35,7 @@
|
||||
#include "runtime/mutex.hpp"
|
||||
#include "runtime/mutexLocker.hpp"
|
||||
#include "runtime/orderAccess.hpp"
|
||||
#include "runtime/os.hpp"
|
||||
#include "runtime/safepoint.hpp"
|
||||
#include "runtime/stubRoutines.hpp"
|
||||
#include "runtime/thread.hpp"
|
||||
@ -414,14 +415,6 @@ OopStorage::Block::block_for_ptr(const OopStorage* owner, const oop* ptr) {
|
||||
oop* OopStorage::allocate() {
|
||||
MutexLocker ml(_allocation_mutex, Mutex::_no_safepoint_check_flag);
|
||||
|
||||
// Note: Without this we might never perform cleanup. As it is,
|
||||
// cleanup is only requested here, when completing a concurrent
|
||||
// iteration, or when someone entirely else wakes up the service
|
||||
// thread, which isn't ideal. But we can't notify in release().
|
||||
if (reduce_deferred_updates()) {
|
||||
notify_needs_cleanup();
|
||||
}
|
||||
|
||||
Block* block = block_for_allocation();
|
||||
if (block == NULL) return NULL; // Block allocation failed.
|
||||
assert(!block->is_full(), "invariant");
|
||||
@ -474,23 +467,20 @@ bool OopStorage::try_add_block() {
|
||||
|
||||
OopStorage::Block* OopStorage::block_for_allocation() {
|
||||
assert_lock_strong(_allocation_mutex);
|
||||
|
||||
while (true) {
|
||||
// Use the first block in _allocation_list for the allocation.
|
||||
Block* block = _allocation_list.head();
|
||||
if (block != NULL) {
|
||||
return block;
|
||||
} else if (reduce_deferred_updates()) {
|
||||
MutexUnlocker ul(_allocation_mutex, Mutex::_no_safepoint_check_flag);
|
||||
notify_needs_cleanup();
|
||||
// Might have added a block to the _allocation_list, so retry.
|
||||
} else if (try_add_block()) {
|
||||
block = _allocation_list.head();
|
||||
assert(block != NULL, "invariant");
|
||||
return block;
|
||||
} else if (reduce_deferred_updates()) { // Once more before failure.
|
||||
MutexUnlocker ul(_allocation_mutex, Mutex::_no_safepoint_check_flag);
|
||||
notify_needs_cleanup();
|
||||
} else {
|
||||
// Successfully added a new block to the list, so retry.
|
||||
assert(_allocation_list.chead() != NULL, "invariant");
|
||||
} else if (_allocation_list.chead() != NULL) {
|
||||
// Trying to add a block failed, but some other thread added to the
|
||||
// list while we'd dropped the lock over the new block allocation.
|
||||
} else if (!reduce_deferred_updates()) { // Once more before failure.
|
||||
// Attempt to add a block failed, no other thread added a block,
|
||||
// and no deferred updated added a block, then allocation failed.
|
||||
log_debug(oopstorage, blocks)("%s: failed block allocation", name());
|
||||
@ -635,7 +625,14 @@ void OopStorage::Block::release_entries(uintx releasing, OopStorage* owner) {
|
||||
if (fetched == head) break; // Successful update.
|
||||
head = fetched; // Retry with updated head.
|
||||
}
|
||||
owner->record_needs_cleanup();
|
||||
// Only request cleanup for to-empty transitions, not for from-full.
|
||||
// There isn't any rush to process from-full transitions. Allocation
|
||||
// will reduce deferrals before allocating new blocks, so may process
|
||||
// some. And the service thread will drain the entire deferred list
|
||||
// if there are any pending to-empty transitions.
|
||||
if (releasing == old_allocated) {
|
||||
owner->record_needs_cleanup();
|
||||
}
|
||||
log_debug(oopstorage, blocks)("%s: deferred update " PTR_FORMAT,
|
||||
_owner->name(), p2i(this));
|
||||
}
|
||||
@ -684,7 +681,6 @@ bool OopStorage::reduce_deferred_updates() {
|
||||
if (is_empty_bitmask(allocated)) {
|
||||
_allocation_list.unlink(*block);
|
||||
_allocation_list.push_back(*block);
|
||||
notify_needs_cleanup();
|
||||
}
|
||||
|
||||
log_debug(oopstorage, blocks)("%s: processed deferred update " PTR_FORMAT,
|
||||
@ -740,11 +736,6 @@ const char* dup_name(const char* name) {
|
||||
return dup;
|
||||
}
|
||||
|
||||
// Possible values for OopStorage::_needs_cleanup.
|
||||
const uint needs_cleanup_none = 0; // No cleanup needed.
|
||||
const uint needs_cleanup_marked = 1; // Requested, but no notification made.
|
||||
const uint needs_cleanup_notified = 2; // Requested and Service thread notified.
|
||||
|
||||
const size_t initial_active_array_size = 8;
|
||||
|
||||
OopStorage::OopStorage(const char* name,
|
||||
@ -758,7 +749,7 @@ OopStorage::OopStorage(const char* name,
|
||||
_active_mutex(active_mutex),
|
||||
_allocation_count(0),
|
||||
_concurrent_iteration_count(0),
|
||||
_needs_cleanup(needs_cleanup_none)
|
||||
_needs_cleanup(false)
|
||||
{
|
||||
_active_array->increment_refcount();
|
||||
assert(_active_mutex->rank() < _allocation_mutex->rank(),
|
||||
@@ -796,40 +787,89 @@ OopStorage::~OopStorage() {
  FREE_C_HEAP_ARRAY(char, _name);
}

// Called by service thread to check for pending work.
bool OopStorage::needs_delete_empty_blocks() const {
  return Atomic::load(&_needs_cleanup) != needs_cleanup_none;
// Managing service thread notifications.
//
// We don't want cleanup work to linger indefinitely, but we also don't want
// to run the service thread too often. We're also very limited in what we
// can do in a release operation, where cleanup work is created.
//
// When a release operation changes a block's state to empty, it records the
// need for cleanup in both the associated storage object and in the global
// request state. A safepoint cleanup task notifies the service thread when
// there may be cleanup work for any storage object, based on the global
// request state. But that notification is deferred if the service thread
// has run recently, and we also avoid duplicate notifications. The service
// thread updates the timestamp and resets the state flags on every iteration.

// Global cleanup request state.
static volatile bool needs_cleanup_requested = false;

// Flag for avoiding duplicate notifications.
static bool needs_cleanup_triggered = false;

// Time after which a notification can be made.
static jlong cleanup_trigger_permit_time = 0;

// Minimum time since last service thread check before notification is
// permitted. The value of 500ms was an arbitrary choice; frequent, but not
// too frequent.
const jlong cleanup_trigger_defer_period = 500 * NANOSECS_PER_MILLISEC;

void OopStorage::trigger_cleanup_if_needed() {
  MonitorLocker ml(Service_lock, Monitor::_no_safepoint_check_flag);
  if (Atomic::load(&needs_cleanup_requested) &&
      !needs_cleanup_triggered &&
      (os::javaTimeNanos() > cleanup_trigger_permit_time)) {
    needs_cleanup_triggered = true;
    ml.notify_all();
  }
}

bool OopStorage::has_cleanup_work_and_reset() {
  assert_lock_strong(Service_lock);
  cleanup_trigger_permit_time =
    os::javaTimeNanos() + cleanup_trigger_defer_period;
  needs_cleanup_triggered = false;
  // Set the request flag false and return its old value.
  // Needs to be atomic to avoid dropping a concurrent request.
  // Can't use Atomic::xchg, which may not support bool.
  return Atomic::cmpxchg(false, &needs_cleanup_requested, true);
}

// Record that cleanup is needed, without notifying the Service thread.
// Used by release(), where we can't lock even Service_lock.
void OopStorage::record_needs_cleanup() {
  Atomic::cmpxchg(needs_cleanup_marked, &_needs_cleanup, needs_cleanup_none);
}

// Record that cleanup is needed, and notify the Service thread.
void OopStorage::notify_needs_cleanup() {
  // Avoid re-notification if already notified.
  const uint notified = needs_cleanup_notified;
  if (Atomic::xchg(notified, &_needs_cleanup) != notified) {
    MonitorLocker ml(Service_lock, Monitor::_no_safepoint_check_flag);
    ml.notify_all();
  }
  // Set local flag first, else service thread could wake up and miss
  // the request. This order may instead (rarely) unnecessarily notify.
  OrderAccess::release_store(&_needs_cleanup, true);
  OrderAccess::release_store_fence(&needs_cleanup_requested, true);
}

bool OopStorage::delete_empty_blocks() {
  // Service thread might have oopstorage work, but not for this object.
  // Check for deferred updates even though that's not a service thread
  // trigger; since we're here, we might as well process them.
  if (!OrderAccess::load_acquire(&_needs_cleanup) &&
      (OrderAccess::load_acquire(&_deferred_updates) == NULL)) {
    return false;
  }

  MutexLocker ml(_allocation_mutex, Mutex::_no_safepoint_check_flag);

  // Clear the request before processing.
  Atomic::store(needs_cleanup_none, &_needs_cleanup);
  OrderAccess::fence();
  OrderAccess::release_store_fence(&_needs_cleanup, false);

  // Other threads could be adding to the empty block count or the
  // deferred update list while we're working. Set an upper bound on
  // how many updates we'll process and blocks we'll try to release,
  // so other threads can't cause an unbounded stay in this function.
  size_t limit = block_count();
  if (limit == 0) return false; // Empty storage; nothing at all to do.
  // We add a bit of slop because the reduce_deferred_updates clause
  // can cause blocks to be double counted. If there are few blocks
  // and many of them are deferred and empty, we might hit the limit
  // and spin the caller without doing very much work. Otherwise,
  // we don't normally hit the limit anyway, instead running out of
  // work to do.
  size_t limit = block_count() + 10;

  for (size_t i = 0; i < limit; ++i) {
    // Process deferred updates, which might make empty blocks available.
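The comment block above replaces the old per-storage notify_needs_cleanup() path with a throttled request/trigger scheme: release operations only record a request, a safepoint cleanup task turns pending requests into at most one notification per defer period, and the service thread resets the state each time it runs. The sketch below models that protocol with standard C++ in place of HotSpot's Atomic, MonitorLocker, Service_lock and os::javaTimeNanos(); the function names mirror the diff, but the std::condition_variable, steady_clock timing and main() driver are illustrative assumptions, and the waiting side of the service thread is elided.

// A minimal, self-contained model of the request/trigger/reset protocol
// described above, using standard C++ in place of HotSpot primitives.
#include <atomic>
#include <chrono>
#include <condition_variable>
#include <cstdio>
#include <mutex>

using Clock = std::chrono::steady_clock;

static std::atomic<bool> needs_cleanup_requested{false};   // global request state
static std::mutex service_lock;                            // stands in for Service_lock
static std::condition_variable service_cv;
static bool needs_cleanup_triggered = false;               // avoid duplicate notifications
static Clock::time_point cleanup_trigger_permit_time = Clock::now();
static const std::chrono::milliseconds cleanup_trigger_defer_period{500};

// Release side: cheap recording that some storage object has cleanup work.
void record_needs_cleanup() {
  needs_cleanup_requested.store(true, std::memory_order_release);
}

// Safepoint-cleanup side: notify only if requested, not already triggered,
// and the defer period since the last service-thread check has elapsed.
void trigger_cleanup_if_needed() {
  std::lock_guard<std::mutex> guard(service_lock);
  if (needs_cleanup_requested.load(std::memory_order_acquire) &&
      !needs_cleanup_triggered &&
      Clock::now() > cleanup_trigger_permit_time) {
    needs_cleanup_triggered = true;
    service_cv.notify_all();
  }
}

// Service-thread side (caller holds service_lock): consume the request,
// reset the trigger flag, and re-arm the defer timer.
bool has_cleanup_work_and_reset() {
  cleanup_trigger_permit_time = Clock::now() + cleanup_trigger_defer_period;
  needs_cleanup_triggered = false;
  return needs_cleanup_requested.exchange(false, std::memory_order_acq_rel);
}

int main() {
  record_needs_cleanup();        // e.g. a release() emptied a block
  trigger_cleanup_if_needed();   // e.g. a safepoint cleanup task ran
  std::lock_guard<std::mutex> guard(service_lock);
  std::printf("pending work: %s\n", has_cleanup_work_and_reset() ? "yes" : "no");
}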
@ -946,8 +986,8 @@ OopStorage::BasicParState::~BasicParState() {
|
||||
_storage->relinquish_block_array(_active_array);
|
||||
update_concurrent_iteration_count(-1);
|
||||
if (_concurrent) {
|
||||
// We may have deferred some work.
|
||||
const_cast<OopStorage*>(_storage)->notify_needs_cleanup();
|
||||
// We may have deferred some cleanup work.
|
||||
const_cast<OopStorage*>(_storage)->record_needs_cleanup();
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -152,18 +152,26 @@ public:
  template<bool concurrent, bool is_const> class ParState;

  // Service thread cleanup support.
  // Stops deleting if there is an in-progress concurrent iteration.
  // Locks both the _allocation_mutex and the _active_mutex, and may
  // safepoint. Deletion may be throttled, with only some available
  // work performed, in order to allow other Service thread subtasks
  // to run. Returns true if there may be more work to do, false if
  // nothing to do.

  // Called by the service thread to process any pending cleanups for this
  // storage object. Drains the _deferred_updates list, and deletes empty
  // blocks. Stops deleting if there is an in-progress concurrent
  // iteration. Locks both the _allocation_mutex and the _active_mutex, and
  // may safepoint. Deletion may be throttled, with only some available
  // work performed, in order to allow other Service thread subtasks to run.
  // Returns true if there may be more work to do, false if nothing to do.
  bool delete_empty_blocks();

  // Service thread cleanup support.
  // Called by the service thread (while holding Service_lock) to test
  // whether a call to delete_empty_blocks should be made.
  bool needs_delete_empty_blocks() const;
  // Called by safepoint cleanup to notify the service thread (via
  // Service_lock) that there may be some OopStorage objects with pending
  // cleanups to process.
  static void trigger_cleanup_if_needed();

  // Called by the service thread (while holding Service_lock) to test
  // for pending cleanup requests, and resets the request state to allow
  // recognition of new requests. Returns true if there was a pending
  // request.
  static bool has_cleanup_work_and_reset();

  // Debugging and logging support.
  const char* name() const;
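The rewritten comments above pin down the cleanup contract: has_cleanup_work_and_reset() is checked by the service thread under Service_lock, and delete_empty_blocks() is then invoked per storage object, returning true while throttled work remains. A small self-contained sketch of a consumer honoring that return value follows; StorageStub and its counter are assumptions for illustration, not the JDK's service thread code.

#include <cstdio>
#include <vector>

// Illustrative stand-in for an OopStorage instance; only the cleanup entry
// point declared in the hunk above is modeled, and its body is a stub.
struct StorageStub {
  int pending = 2;
  // Returns true if there may be more work to do, false if nothing to do.
  bool delete_empty_blocks() { return pending > 0 && --pending > 0; }
};

int main() {
  std::vector<StorageStub> storages(3);
  bool more_work = true;
  while (more_work) {                  // one service-thread "iteration" per pass
    more_work = false;
    for (StorageStub& s : storages) {  // visit every storage object
      more_work |= s.delete_empty_blocks();
    }
    std::puts(more_work ? "more cleanup pending" : "cleanup drained");
  }
}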
@ -232,7 +240,7 @@ AIX_ONLY(private:)
|
||||
// mutable because this gets set even for const iteration.
|
||||
mutable int _concurrent_iteration_count;
|
||||
|
||||
volatile uint _needs_cleanup;
|
||||
volatile bool _needs_cleanup;
|
||||
|
||||
bool try_add_block();
|
||||
Block* block_for_allocation();
|
||||
@ -240,7 +248,6 @@ AIX_ONLY(private:)
|
||||
Block* find_block_or_null(const oop* ptr) const;
|
||||
void delete_empty_block(const Block& block);
|
||||
bool reduce_deferred_updates();
|
||||
void notify_needs_cleanup();
|
||||
AIX_ONLY(public:) // xlC 12 on AIX doesn't implement C++ DR45.
|
||||
void record_needs_cleanup();
|
||||
AIX_ONLY(private:)
|
||||
|
@ -170,7 +170,7 @@ void ShenandoahCodeRoots::remove_nmethod(nmethod* nm) {
|
||||
ShenandoahLocker locker(CodeCache_lock->owned_by_self() ? NULL : &_recorded_nms_lock);
|
||||
|
||||
ShenandoahNMethodOopDetector detector;
|
||||
nm->oops_do(&detector, /* allow_zombie = */ true);
|
||||
nm->oops_do(&detector, /* allow_dead = */ true);
|
||||
|
||||
if (detector.has_oops()) {
|
||||
int idx = _recorded_nms->find(nm, ShenandoahNMethod::find_with_nmethod);
|
||||
|
@ -545,8 +545,8 @@ void ZBarrierSetC2::expand_loadbarrier_node(PhaseMacroExpand* phase, LoadBarrier
|
||||
Node* then = igvn.transform(new IfTrueNode(iff));
|
||||
Node* elsen = igvn.transform(new IfFalseNode(iff));
|
||||
|
||||
Node* new_loadp = igvn.transform(new LoadBarrierSlowRegNode(then, in_mem, in_adr, in_val->adr_type(),
|
||||
(const TypePtr*) in_val->bottom_type(), MemNode::unordered, barrier->is_weak()));
|
||||
Node* new_loadp = igvn.transform(new LoadBarrierSlowRegNode(then, in_adr, in_val,
|
||||
(const TypePtr*) in_val->bottom_type(), barrier->is_weak()));
|
||||
|
||||
// Create the final region/phi pair to converge cntl/data paths to downstream code
|
||||
Node* result_region = igvn.transform(new RegionNode(3));
|
||||
@ -672,7 +672,6 @@ bool ZBarrierSetC2::final_graph_reshaping(Compile* compile, Node* n, uint opcode
|
||||
case Op_ZCompareAndExchangeP:
|
||||
case Op_ZCompareAndSwapP:
|
||||
case Op_ZWeakCompareAndSwapP:
|
||||
case Op_LoadBarrierSlowReg:
|
||||
#ifdef ASSERT
|
||||
if (VerifyOptoOopOffsets) {
|
||||
MemNode *mem = n->as_Mem();
|
||||
|
@ -104,22 +104,25 @@ public:
|
||||
}
|
||||
};
|
||||
|
||||
class LoadBarrierSlowRegNode : public LoadPNode {
|
||||
class LoadBarrierSlowRegNode : public TypeNode {
|
||||
private:
|
||||
bool _is_weak;
|
||||
bool _is_weak;
|
||||
public:
|
||||
LoadBarrierSlowRegNode(Node *c,
|
||||
Node *mem,
|
||||
Node *adr,
|
||||
const TypePtr *at,
|
||||
Node *src,
|
||||
const TypePtr* t,
|
||||
MemOrd mo,
|
||||
bool weak = false,
|
||||
ControlDependency control_dependency = DependsOnlyOnTest) :
|
||||
LoadPNode(c, mem, adr, at, t, mo, control_dependency), _is_weak(weak) {
|
||||
bool weak) :
|
||||
TypeNode(t, 3), _is_weak(weak) {
|
||||
init_req(1, adr);
|
||||
init_req(2, src);
|
||||
init_class_id(Class_LoadBarrierSlowReg);
|
||||
}
|
||||
|
||||
virtual uint size_of() const {
|
||||
return sizeof(*this);
|
||||
}
|
||||
|
||||
virtual const char * name() {
|
||||
return "LoadBarrierSlowRegNode";
|
||||
}
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -99,7 +99,6 @@ void BFSClosure::log_dfs_fallback() const {
|
||||
}
|
||||
|
||||
void BFSClosure::process() {
|
||||
|
||||
process_root_set();
|
||||
process_queue();
|
||||
}
|
||||
@ -138,7 +137,6 @@ void BFSClosure::closure_impl(const oop* reference, const oop pointee) {
|
||||
|
||||
// if we are processinig initial root set, don't add to queue
|
||||
if (_current_parent != NULL) {
|
||||
assert(_current_parent->distance_to_root() == _current_frontier_level, "invariant");
|
||||
_edge_queue->add(_current_parent, reference);
|
||||
}
|
||||
|
||||
@ -151,20 +149,8 @@ void BFSClosure::closure_impl(const oop* reference, const oop pointee) {
|
||||
void BFSClosure::add_chain(const oop* reference, const oop pointee) {
|
||||
assert(pointee != NULL, "invariant");
|
||||
assert(NULL == pointee->mark(), "invariant");
|
||||
|
||||
const size_t length = _current_parent == NULL ? 1 : _current_parent->distance_to_root() + 2;
|
||||
ResourceMark rm;
|
||||
Edge* const chain = NEW_RESOURCE_ARRAY(Edge, length);
|
||||
size_t idx = 0;
|
||||
chain[idx++] = Edge(NULL, reference);
|
||||
// aggregate from breadth-first search
|
||||
const Edge* current = _current_parent;
|
||||
while (current != NULL) {
|
||||
chain[idx++] = Edge(NULL, current->reference());
|
||||
current = current->parent();
|
||||
}
|
||||
assert(length == idx, "invariant");
|
||||
_edge_store->add_chain(chain, length);
|
||||
Edge leak_edge(_current_parent, reference);
|
||||
_edge_store->put_chain(&leak_edge, _current_parent == NULL ? 1 : _current_frontier_level + 2);
|
||||
}
|
||||
|
||||
void BFSClosure::dfs_fallback() {
|
||||
@ -241,3 +227,12 @@ void BFSClosure::do_oop(narrowOop* ref) {
|
||||
closure_impl(UnifiedOop::encode(ref), pointee);
|
||||
}
|
||||
}
|
||||
|
||||
void BFSClosure::do_root(const oop* ref) {
|
||||
assert(ref != NULL, "invariant");
|
||||
assert(is_aligned(ref, HeapWordSize), "invariant");
|
||||
assert(*ref != NULL, "invariant");
|
||||
if (!_edge_queue->is_full()) {
|
||||
_edge_queue->add(NULL, ref);
|
||||
}
|
||||
}
|
||||
|
@ -26,7 +26,6 @@
|
||||
#define SHARE_JFR_LEAKPROFILER_CHAINS_BFSCLOSURE_HPP
|
||||
|
||||
#include "memory/iterator.hpp"
|
||||
#include "oops/oop.hpp"
|
||||
|
||||
class BitSet;
|
||||
class Edge;
|
||||
@ -65,6 +64,7 @@ class BFSClosure : public BasicOopIterateClosure {
|
||||
public:
|
||||
BFSClosure(EdgeQueue* edge_queue, EdgeStore* edge_store, BitSet* mark_bits);
|
||||
void process();
|
||||
void do_root(const oop* ref);
|
||||
|
||||
virtual void do_oop(oop* ref);
|
||||
virtual void do_oop(narrowOop* ref);
|
||||
|
@@ -47,7 +47,7 @@ class BitSet : public CHeapObj<mtTracing> {

  BitMap::idx_t mark_obj(const HeapWord* addr) {
    const BitMap::idx_t bit = addr_to_bit(addr);
    _bits.par_set_bit(bit);
    _bits.set_bit(bit);
    return bit;
  }

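The hunk above switches mark_obj() from par_set_bit to a plain set_bit, meaning the marking bitmap no longer needs to tolerate concurrent writers of the same word. As a generic illustration of that distinction (not HotSpot's BitMap implementation; the word layout and memory orderings are assumptions):

#include <atomic>
#include <cassert>
#include <cstddef>
#include <cstdint>

// Plain set: sufficient when only one thread marks at a time.
inline void set_bit(uint64_t* words, size_t bit) {
  words[bit / 64] |= uint64_t(1) << (bit % 64);
}

// "Parallel" set: a CAS loop, so two threads marking different bits of the
// same word cannot lose each other's update.
inline void par_set_bit(std::atomic<uint64_t>* words, size_t bit) {
  std::atomic<uint64_t>& word = words[bit / 64];
  const uint64_t mask = uint64_t(1) << (bit % 64);
  uint64_t old_word = word.load(std::memory_order_relaxed);
  while (!(old_word & mask) &&
         !word.compare_exchange_weak(old_word, old_word | mask,
                                     std::memory_order_relaxed)) {
    // old_word was refreshed by the failed CAS; retry until the bit is set.
  }
}

int main() {
  uint64_t plain[1] = {0};
  std::atomic<uint64_t> shared[1];
  shared[0].store(0);
  set_bit(plain, 3);
  par_set_bit(shared, 3);
  assert(plain[0] == shared[0].load());
}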
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -23,14 +23,14 @@
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "jfr/leakprofiler/chains/bitset.hpp"
|
||||
#include "jfr/leakprofiler/chains/dfsClosure.hpp"
|
||||
#include "jfr/leakprofiler/chains/edge.hpp"
|
||||
#include "jfr/leakprofiler/chains/edgeStore.hpp"
|
||||
#include "jfr/leakprofiler/utilities/granularTimer.hpp"
|
||||
#include "jfr/leakprofiler/chains/bitset.hpp"
|
||||
#include "jfr/leakprofiler/utilities/unifiedOop.hpp"
|
||||
#include "jfr/leakprofiler/utilities/rootType.hpp"
|
||||
#include "jfr/leakprofiler/chains/rootSetClosure.hpp"
|
||||
#include "jfr/leakprofiler/utilities/granularTimer.hpp"
|
||||
#include "jfr/leakprofiler/utilities/rootType.hpp"
|
||||
#include "jfr/leakprofiler/utilities/unifiedOop.hpp"
|
||||
#include "memory/iterator.inline.hpp"
|
||||
#include "memory/resourceArea.hpp"
|
||||
#include "oops/access.inline.hpp"
|
||||
@ -88,15 +88,15 @@ void DFSClosure::find_leaks_from_root_set(EdgeStore* edge_store,
|
||||
// Mark root set, to avoid going sideways
|
||||
_max_depth = 1;
|
||||
_ignore_root_set = false;
|
||||
DFSClosure dfs1;
|
||||
RootSetClosure::process_roots(&dfs1);
|
||||
DFSClosure dfs;
|
||||
RootSetClosure<DFSClosure> rs(&dfs);
|
||||
rs.process();
|
||||
|
||||
// Depth-first search
|
||||
_max_depth = max_dfs_depth;
|
||||
_ignore_root_set = true;
|
||||
assert(_start_edge == NULL, "invariant");
|
||||
DFSClosure dfs2;
|
||||
RootSetClosure::process_roots(&dfs2);
|
||||
rs.process();
|
||||
}
|
||||
|
||||
void DFSClosure::closure_impl(const oop* reference, const oop pointee) {
|
||||
@ -133,30 +133,29 @@ void DFSClosure::closure_impl(const oop* reference, const oop pointee) {
|
||||
}
|
||||
|
||||
void DFSClosure::add_chain() {
|
||||
const size_t length = _start_edge == NULL ? _depth + 1 :
|
||||
_start_edge->distance_to_root() + 1 + _depth + 1;
|
||||
const size_t array_length = _depth + 2;
|
||||
|
||||
ResourceMark rm;
|
||||
Edge* const chain = NEW_RESOURCE_ARRAY(Edge, length);
|
||||
Edge* const chain = NEW_RESOURCE_ARRAY(Edge, array_length);
|
||||
size_t idx = 0;
|
||||
|
||||
// aggregate from depth-first search
|
||||
const DFSClosure* c = this;
|
||||
while (c != NULL) {
|
||||
chain[idx++] = Edge(NULL, c->reference());
|
||||
const size_t next = idx + 1;
|
||||
chain[idx++] = Edge(&chain[next], c->reference());
|
||||
c = c->parent();
|
||||
}
|
||||
|
||||
assert(idx == _depth + 1, "invariant");
|
||||
assert(_depth + 1 == idx, "invariant");
|
||||
assert(array_length == idx + 1, "invariant");
|
||||
|
||||
// aggregate from breadth-first search
|
||||
const Edge* current = _start_edge;
|
||||
while (current != NULL) {
|
||||
chain[idx++] = Edge(NULL, current->reference());
|
||||
current = current->parent();
|
||||
if (_start_edge != NULL) {
|
||||
chain[idx++] = *_start_edge;
|
||||
} else {
|
||||
chain[idx - 1] = Edge(NULL, chain[idx - 1].reference());
|
||||
}
|
||||
assert(idx == length, "invariant");
|
||||
_edge_store->add_chain(chain, length);
|
||||
_edge_store->put_chain(chain, idx + (_start_edge != NULL ? _start_edge->distance_to_root() : 0));
|
||||
}
|
||||
|
||||
void DFSClosure::do_oop(oop* ref) {
|
||||
@ -176,3 +175,11 @@ void DFSClosure::do_oop(narrowOop* ref) {
|
||||
closure_impl(UnifiedOop::encode(ref), pointee);
|
||||
}
|
||||
}
|
||||
|
||||
void DFSClosure::do_root(const oop* ref) {
|
||||
assert(ref != NULL, "invariant");
|
||||
assert(is_aligned(ref, HeapWordSize), "invariant");
|
||||
const oop pointee = *ref;
|
||||
assert(pointee != NULL, "invariant");
|
||||
closure_impl(ref, pointee);
|
||||
}
|
||||
|
@ -26,7 +26,6 @@
|
||||
#define SHARE_JFR_LEAKPROFILER_CHAINS_DFSCLOSURE_HPP
|
||||
|
||||
#include "memory/iterator.hpp"
|
||||
#include "oops/oop.hpp"
|
||||
|
||||
class BitSet;
|
||||
class Edge;
|
||||
@ -34,7 +33,7 @@ class EdgeStore;
|
||||
class EdgeQueue;
|
||||
|
||||
// Class responsible for iterating the heap depth-first
|
||||
class DFSClosure: public BasicOopIterateClosure {
|
||||
class DFSClosure : public BasicOopIterateClosure {
|
||||
private:
|
||||
static EdgeStore* _edge_store;
|
||||
static BitSet* _mark_bits;
|
||||
@ -57,6 +56,7 @@ class DFSClosure: public BasicOopIterateClosure {
|
||||
public:
|
||||
static void find_leaks_from_edge(EdgeStore* edge_store, BitSet* mark_bits, const Edge* start_edge);
|
||||
static void find_leaks_from_root_set(EdgeStore* edge_store, BitSet* mark_bits);
|
||||
void do_root(const oop* ref);
|
||||
|
||||
virtual void do_oop(oop* ref);
|
||||
virtual void do_oop(narrowOop* ref);
|
||||
|
@ -29,7 +29,7 @@
|
||||
#include "oops/oopsHierarchy.hpp"
|
||||
|
||||
class Edge {
|
||||
private:
|
||||
protected:
|
||||
const Edge* _parent;
|
||||
const oop* _reference;
|
||||
public:
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -27,37 +27,17 @@
|
||||
#include "jfr/leakprofiler/chains/edgeUtils.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
|
||||
RoutableEdge::RoutableEdge() : Edge() {}
|
||||
RoutableEdge::RoutableEdge(const Edge* parent, const oop* reference) : Edge(parent, reference),
|
||||
_skip_edge(NULL),
|
||||
_skip_length(0),
|
||||
_processed(false) {}
|
||||
StoredEdge::StoredEdge() : Edge() {}
|
||||
StoredEdge::StoredEdge(const Edge* parent, const oop* reference) : Edge(parent, reference), _gc_root_id(0), _skip_length(0) {}
|
||||
|
||||
RoutableEdge::RoutableEdge(const Edge& edge) : Edge(edge),
|
||||
_skip_edge(NULL),
|
||||
_skip_length(0),
|
||||
_processed(false) {}
|
||||
StoredEdge::StoredEdge(const Edge& edge) : Edge(edge), _gc_root_id(0), _skip_length(0) {}
|
||||
|
||||
RoutableEdge::RoutableEdge(const RoutableEdge& edge) : Edge(edge),
|
||||
_skip_edge(edge._skip_edge),
|
||||
_skip_length(edge._skip_length),
|
||||
_processed(edge._processed) {}
|
||||
StoredEdge::StoredEdge(const StoredEdge& edge) : Edge(edge), _gc_root_id(edge._gc_root_id), _skip_length(edge._skip_length) {}
|
||||
|
||||
void RoutableEdge::operator=(const RoutableEdge& edge) {
|
||||
void StoredEdge::operator=(const StoredEdge& edge) {
|
||||
Edge::operator=(edge);
|
||||
_skip_edge = edge._skip_edge;
|
||||
_gc_root_id = edge._gc_root_id;
|
||||
_skip_length = edge._skip_length;
|
||||
_processed = edge._processed;
|
||||
}
|
||||
|
||||
size_t RoutableEdge::logical_distance_to_root() const {
|
||||
size_t depth = 0;
|
||||
const RoutableEdge* current = logical_parent();
|
||||
while (current != NULL) {
|
||||
depth++;
|
||||
current = current->logical_parent();
|
||||
}
|
||||
return depth;
|
||||
}
|
||||
|
||||
traceid EdgeStore::_edge_id_counter = 0;
|
||||
@ -69,79 +49,12 @@ EdgeStore::EdgeStore() : _edges(NULL) {
|
||||
EdgeStore::~EdgeStore() {
|
||||
assert(_edges != NULL, "invariant");
|
||||
delete _edges;
|
||||
_edges = NULL;
|
||||
}
|
||||
|
||||
const Edge* EdgeStore::get_edge(const Edge* edge) const {
|
||||
assert(edge != NULL, "invariant");
|
||||
EdgeEntry* const entry = _edges->lookup_only(*edge, (uintptr_t)edge->reference());
|
||||
return entry != NULL ? entry->literal_addr() : NULL;
|
||||
}
|
||||
|
||||
const Edge* EdgeStore::put(const Edge* edge) {
|
||||
assert(edge != NULL, "invariant");
|
||||
const RoutableEdge e = *edge;
|
||||
assert(NULL == _edges->lookup_only(e, (uintptr_t)e.reference()), "invariant");
|
||||
EdgeEntry& entry = _edges->put(e, (uintptr_t)e.reference());
|
||||
return entry.literal_addr();
|
||||
}
|
||||
|
||||
traceid EdgeStore::get_id(const Edge* edge) const {
|
||||
assert(edge != NULL, "invariant");
|
||||
EdgeEntry* const entry = _edges->lookup_only(*edge, (uintptr_t)edge->reference());
|
||||
assert(entry != NULL, "invariant");
|
||||
return entry->id();
|
||||
}
|
||||
|
||||
traceid EdgeStore::get_root_id(const Edge* edge) const {
|
||||
assert(edge != NULL, "invariant");
|
||||
const Edge* root = EdgeUtils::root(*edge);
|
||||
assert(root != NULL, "invariant");
|
||||
return get_id(root);
|
||||
}
|
||||
|
||||
void EdgeStore::add_chain(const Edge* chain, size_t length) {
|
||||
assert(chain != NULL, "invariant");
|
||||
assert(length > 0, "invariant");
|
||||
|
||||
size_t bottom_index = length - 1;
|
||||
const size_t top_index = 0;
|
||||
|
||||
const Edge* stored_parent_edge = NULL;
|
||||
|
||||
// determine level of shared ancestry
|
||||
for (; bottom_index > top_index; --bottom_index) {
|
||||
const Edge* stored_edge = get_edge(&chain[bottom_index]);
|
||||
if (stored_edge != NULL) {
|
||||
stored_parent_edge = stored_edge;
|
||||
continue;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
// insertion of new Edges
|
||||
for (int i = (int)bottom_index; i >= (int)top_index; --i) {
|
||||
Edge edge(stored_parent_edge, chain[i].reference());
|
||||
stored_parent_edge = put(&edge);
|
||||
}
|
||||
|
||||
const oop sample_object = stored_parent_edge->pointee();
|
||||
assert(sample_object != NULL, "invariant");
|
||||
assert(NULL == sample_object->mark(), "invariant");
|
||||
|
||||
// Install the "top" edge of the chain into the sample object mark oop.
|
||||
// This associates the sample object with its navigable reference chain.
|
||||
sample_object->set_mark(markOop(stored_parent_edge));
|
||||
}
|
||||
|
||||
bool EdgeStore::is_empty() const {
|
||||
return !_edges->has_entries();
|
||||
}
|
||||
|
||||
size_t EdgeStore::number_of_entries() const {
|
||||
return _edges->cardinality();
|
||||
}
|
||||
|
||||
void EdgeStore::assign_id(EdgeEntry* entry) {
|
||||
assert(entry != NULL, "invariant");
|
||||
assert(entry->id() == 0, "invariant");
|
||||
@ -153,3 +66,254 @@ bool EdgeStore::equals(const Edge& query, uintptr_t hash, const EdgeEntry* entry
|
||||
assert(entry->hash() == hash, "invariant");
|
||||
return true;
|
||||
}
|
||||
|
||||
#ifdef ASSERT
|
||||
bool EdgeStore::contains(const oop* reference) const {
|
||||
return get(reference) != NULL;
|
||||
}
|
||||
#endif
|
||||
|
||||
StoredEdge* EdgeStore::get(const oop* reference) const {
|
||||
assert(reference != NULL, "invariant");
|
||||
const StoredEdge e(NULL, reference);
|
||||
EdgeEntry* const entry = _edges->lookup_only(e, (uintptr_t)reference);
|
||||
return entry != NULL ? entry->literal_addr() : NULL;
|
||||
}
|
||||
|
||||
StoredEdge* EdgeStore::put(const oop* reference) {
|
||||
assert(reference != NULL, "invariant");
|
||||
const StoredEdge e(NULL, reference);
|
||||
assert(NULL == _edges->lookup_only(e, (uintptr_t)reference), "invariant");
|
||||
EdgeEntry& entry = _edges->put(e, (uintptr_t)reference);
|
||||
return entry.literal_addr();
|
||||
}
|
||||
|
||||
traceid EdgeStore::get_id(const Edge* edge) const {
|
||||
assert(edge != NULL, "invariant");
|
||||
EdgeEntry* const entry = _edges->lookup_only(*edge, (uintptr_t)edge->reference());
|
||||
assert(entry != NULL, "invariant");
|
||||
return entry->id();
|
||||
}
|
||||
|
||||
traceid EdgeStore::gc_root_id(const Edge* edge) const {
|
||||
assert(edge != NULL, "invariant");
|
||||
const traceid gc_root_id = static_cast<const StoredEdge*>(edge)->gc_root_id();
|
||||
if (gc_root_id != 0) {
|
||||
return gc_root_id;
|
||||
}
|
||||
// not cached
|
||||
assert(edge != NULL, "invariant");
|
||||
const Edge* const root = EdgeUtils::root(*edge);
|
||||
assert(root != NULL, "invariant");
|
||||
assert(root->parent() == NULL, "invariant");
|
||||
return get_id(root);
|
||||
}
|
||||
|
||||
static const Edge* get_skip_ancestor(const Edge** current, size_t distance_to_root, size_t* skip_length) {
|
||||
assert(distance_to_root >= EdgeUtils::root_context, "invariant");
|
||||
assert(*skip_length == 0, "invariant");
|
||||
*skip_length = distance_to_root - (EdgeUtils::root_context - 1);
|
||||
const Edge* const target = EdgeUtils::ancestor(**current, *skip_length);
|
||||
assert(target != NULL, "invariant");
|
||||
assert(target->distance_to_root() + 1 == EdgeUtils::root_context, "invariant");
|
||||
return target;
|
||||
}
|
||||
|
||||
bool EdgeStore::put_skip_edge(StoredEdge** previous, const Edge** current, size_t distance_to_root) {
|
||||
assert(*previous != NULL, "invariant");
|
||||
assert((*previous)->parent() == NULL, "invariant");
|
||||
assert(*current != NULL, "invariant");
|
||||
assert((*current)->distance_to_root() == distance_to_root, "invariant");
|
||||
|
||||
if (distance_to_root < EdgeUtils::root_context) {
|
||||
// nothing to skip
|
||||
return false;
|
||||
}
|
||||
|
||||
size_t skip_length = 0;
|
||||
const Edge* const skip_ancestor = get_skip_ancestor(current, distance_to_root, &skip_length);
|
||||
assert(skip_ancestor != NULL, "invariant");
|
||||
(*previous)->set_skip_length(skip_length);
|
||||
|
||||
// lookup target
|
||||
StoredEdge* stored_target = get(skip_ancestor->reference());
|
||||
if (stored_target != NULL) {
|
||||
(*previous)->set_parent(stored_target);
|
||||
// linked to existing, complete
|
||||
return true;
|
||||
}
|
||||
|
||||
assert(stored_target == NULL, "invariant");
|
||||
stored_target = put(skip_ancestor->reference());
|
||||
assert(stored_target != NULL, "invariant");
|
||||
(*previous)->set_parent(stored_target);
|
||||
*previous = stored_target;
|
||||
*current = skip_ancestor->parent();
|
||||
return false;
|
||||
}
|
||||
|
||||
static void link_edge(const StoredEdge* current_stored, StoredEdge** previous) {
|
||||
assert(current_stored != NULL, "invariant");
|
||||
assert(*previous != NULL, "invariant");
|
||||
assert((*previous)->parent() == NULL, "invariant");
|
||||
(*previous)->set_parent(current_stored);
|
||||
}
|
||||
|
||||
static const StoredEdge* find_closest_skip_edge(const StoredEdge* edge, size_t* distance) {
|
||||
assert(edge != NULL, "invariant");
|
||||
assert(distance != NULL, "invariant");
|
||||
const StoredEdge* current = edge;
|
||||
*distance = 1;
|
||||
while (current != NULL && !current->is_skip_edge()) {
|
||||
++(*distance);
|
||||
current = current->parent();
|
||||
}
|
||||
return current;
|
||||
}
|
||||
|
||||
void EdgeStore::link_with_existing_chain(const StoredEdge* current_stored, StoredEdge** previous, size_t previous_length) {
|
||||
assert(current_stored != NULL, "invariant");
|
||||
assert((*previous)->parent() == NULL, "invariant");
|
||||
size_t distance_to_skip_edge; // including the skip edge itself
|
||||
const StoredEdge* const closest_skip_edge = find_closest_skip_edge(current_stored, &distance_to_skip_edge);
|
||||
if (closest_skip_edge == NULL) {
|
||||
// no found skip edge implies root
|
||||
if (distance_to_skip_edge + previous_length <= EdgeUtils::max_ref_chain_depth) {
|
||||
link_edge(current_stored, previous);
|
||||
return;
|
||||
}
|
||||
assert(current_stored->distance_to_root() == distance_to_skip_edge - 2, "invariant");
|
||||
put_skip_edge(previous, reinterpret_cast<const Edge**>(¤t_stored), distance_to_skip_edge - 2);
|
||||
return;
|
||||
}
|
||||
assert(closest_skip_edge->is_skip_edge(), "invariant");
|
||||
if (distance_to_skip_edge + previous_length <= EdgeUtils::leak_context) {
|
||||
link_edge(current_stored, previous);
|
||||
return;
|
||||
}
|
||||
// create a new skip edge with derived information from closest skip edge
|
||||
(*previous)->set_skip_length(distance_to_skip_edge + closest_skip_edge->skip_length());
|
||||
(*previous)->set_parent(closest_skip_edge->parent());
|
||||
}
|
||||
|
||||
StoredEdge* EdgeStore::link_new_edge(StoredEdge** previous, const Edge** current) {
|
||||
assert(*previous != NULL, "invariant");
|
||||
assert((*previous)->parent() == NULL, "invariant");
|
||||
assert(*current != NULL, "invariant");
|
||||
assert(!contains((*current)->reference()), "invariant");
|
||||
StoredEdge* const stored_edge = put((*current)->reference());
|
||||
assert(stored_edge != NULL, "invariant");
|
||||
link_edge(stored_edge, previous);
|
||||
return stored_edge;
|
||||
}
|
||||
|
||||
bool EdgeStore::put_edges(StoredEdge** previous, const Edge** current, size_t limit) {
|
||||
assert(*previous != NULL, "invariant");
|
||||
assert(*current != NULL, "invariant");
|
||||
size_t depth = 1;
|
||||
while (*current != NULL && depth < limit) {
|
||||
StoredEdge* stored_edge = get((*current)->reference());
|
||||
if (stored_edge != NULL) {
|
||||
link_with_existing_chain(stored_edge, previous, depth);
|
||||
return true;
|
||||
}
|
||||
stored_edge = link_new_edge(previous, current);
|
||||
assert((*previous)->parent() != NULL, "invariant");
|
||||
*previous = stored_edge;
|
||||
*current = (*current)->parent();
|
||||
++depth;
|
||||
}
|
||||
return NULL == *current;
|
||||
}
|
||||
|
||||
// Install the immediate edge into the mark word of the leak candidate object
|
||||
StoredEdge* EdgeStore::associate_leak_context_with_candidate(const Edge* edge) {
|
||||
assert(edge != NULL, "invariant");
|
||||
assert(!contains(edge->reference()), "invariant");
|
||||
StoredEdge* const leak_context_edge = put(edge->reference());
|
||||
oop sample_object = edge->pointee();
|
||||
assert(sample_object != NULL, "invariant");
|
||||
assert(NULL == sample_object->mark(), "invariant");
|
||||
sample_object->set_mark(markOop(leak_context_edge));
|
||||
return leak_context_edge;
|
||||
}
|
||||
|
||||
/*
 * The purpose of put_chain() is to reify the edge sequence
 * discovered during heap traversal with a normalized logical copy.
 * This copy consists of two sub-sequences and a connecting link (skip edge).
 *
 * "current" can be thought of as the cursor (search) edge; it is not in the edge store.
 * "previous" is always an edge in the edge store.
 * The leak context edge is the edge adjacent to the leak candidate object, always an edge in the edge store.
 */
void EdgeStore::put_chain(const Edge* chain, size_t length) {
  assert(chain != NULL, "invariant");
  assert(chain->distance_to_root() + 1 == length, "invariant");
  StoredEdge* const leak_context_edge = associate_leak_context_with_candidate(chain);
  assert(leak_context_edge != NULL, "invariant");
  assert(leak_context_edge->parent() == NULL, "invariant");

  if (1 == length) {
    return;
  }

  const Edge* current = chain->parent();
  assert(current != NULL, "invariant");
  StoredEdge* previous = leak_context_edge;

  // a leak context is the sequence of (limited) edges reachable from the leak candidate
  if (put_edges(&previous, &current, EdgeUtils::leak_context)) {
    // complete
    assert(previous != NULL, "invariant");
    put_chain_epilogue(leak_context_edge, EdgeUtils::root(*previous));
    return;
  }

  const size_t distance_to_root = length > EdgeUtils::leak_context ? length - 1 - EdgeUtils::leak_context : length - 1;
  assert(current->distance_to_root() == distance_to_root, "invariant");

  // a skip edge is the logical link
  // connecting the leak context sequence with the root context sequence
  if (put_skip_edge(&previous, &current, distance_to_root)) {
    // complete
    assert(previous != NULL, "invariant");
    assert(previous->is_skip_edge(), "invariant");
    assert(previous->parent() != NULL, "invariant");
    put_chain_epilogue(leak_context_edge, EdgeUtils::root(*previous->parent()));
    return;
  }

  assert(current->distance_to_root() < EdgeUtils::root_context, "invariant");

  // a root context is the sequence of (limited) edges reachable from the root
  put_edges(&previous, &current, EdgeUtils::root_context);
  assert(previous != NULL, "invariant");
  put_chain_epilogue(leak_context_edge, EdgeUtils::root(*previous));
}
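put_chain() stores only a bounded picture of an arbitrarily long discovered chain: a leak context next to the candidate object, a root context next to the GC root, and a skip edge whose skip_length records how much of the middle was elided. The model below is a simplified, self-contained sketch of that shape; the leak_context and root_context limits, the Stored struct and the exact skip_length bookkeeping are assumptions for illustration, since the real values and semantics live in EdgeUtils and StoredEdge outside this excerpt.

#include <cstdio>
#include <cstddef>
#include <vector>

const size_t leak_context = 5;   // assumed: edges kept next to the leak candidate
const size_t root_context = 3;   // assumed: edges kept next to the GC root

struct Stored {
  size_t index;        // position in the discovered chain (0 = leak candidate)
  size_t skip_length;  // != 0 marks a skip edge; counts elided interior edges
};

// Reduce a discovered chain of `length` edges to leak context + skip edge +
// root context, mirroring the overall shape put_chain() builds from StoredEdges.
std::vector<Stored> compress_chain(size_t length) {
  std::vector<Stored> out;
  if (length <= leak_context + root_context) {   // short chain: keep everything
    for (size_t i = 0; i < length; ++i) out.push_back({i, 0});
    return out;
  }
  for (size_t i = 0; i < leak_context; ++i) out.push_back({i, 0});
  const size_t first_root_edge = length - root_context;
  // The last leak-context entry doubles as the skip edge: it records how many
  // interior edges were dropped, so logical depth can still be reconstructed.
  out.back().skip_length = first_root_edge - leak_context;
  for (size_t i = first_root_edge; i < length; ++i) out.push_back({i, 0});
  return out;
}

int main() {
  for (const Stored& e : compress_chain(12)) {
    std::printf("edge %zu  skip_length %zu\n", e.index, e.skip_length);
  }
}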
|
||||
void EdgeStore::put_chain_epilogue(StoredEdge* leak_context_edge, const Edge* root) const {
|
||||
assert(leak_context_edge != NULL, "invariant");
|
||||
assert(root != NULL, "invariant");
|
||||
store_gc_root_id_in_leak_context_edge(leak_context_edge, root);
|
||||
assert(leak_context_edge->distance_to_root() + 1 <= EdgeUtils::max_ref_chain_depth, "invariant");
|
||||
}
|
||||
|
||||
// To avoid another traversal to resolve the root edge id later,
|
||||
// cache it in the immediate leak context edge for fast retrieval.
|
||||
void EdgeStore::store_gc_root_id_in_leak_context_edge(StoredEdge* leak_context_edge, const Edge* root) const {
|
||||
assert(leak_context_edge != NULL, "invariant");
|
||||
assert(leak_context_edge->gc_root_id() == 0, "invariant");
|
||||
assert(root != NULL, "invariant");
|
||||
assert(root->parent() == NULL, "invariant");
|
||||
assert(root->distance_to_root() == 0, "invariant");
|
||||
const StoredEdge* const stored_root = static_cast<const StoredEdge*>(root);
|
||||
traceid root_id = stored_root->gc_root_id();
|
||||
if (root_id == 0) {
|
||||
root_id = get_id(root);
|
||||
stored_root->set_gc_root_id(root_id);
|
||||
}
|
||||
assert(root_id != 0, "invariant");
|
||||
leak_context_edge->set_gc_root_id(root_id);
|
||||
assert(leak_context_edge->gc_root_id() == stored_root->gc_root_id(), "invariant");
|
||||
}
|
||||
|
@ -25,64 +25,40 @@
|
||||
#ifndef SHARE_JFR_LEAKPROFILER_CHAINS_EDGESTORE_HPP
|
||||
#define SHARE_JFR_LEAKPROFILER_CHAINS_EDGESTORE_HPP
|
||||
|
||||
#include "jfr/utilities/jfrHashtable.hpp"
|
||||
#include "jfr/leakprofiler/chains/edge.hpp"
|
||||
#include "jfr/utilities/jfrHashtable.hpp"
|
||||
#include "memory/allocation.hpp"
|
||||
|
||||
typedef u8 traceid;
|
||||
|
||||
class RoutableEdge : public Edge {
|
||||
class StoredEdge : public Edge {
|
||||
private:
|
||||
mutable const RoutableEdge* _skip_edge;
|
||||
mutable size_t _skip_length;
|
||||
mutable bool _processed;
|
||||
mutable traceid _gc_root_id;
|
||||
size_t _skip_length;
|
||||
|
||||
public:
|
||||
RoutableEdge();
|
||||
RoutableEdge(const Edge* parent, const oop* reference);
|
||||
RoutableEdge(const Edge& edge);
|
||||
RoutableEdge(const RoutableEdge& edge);
|
||||
void operator=(const RoutableEdge& edge);
|
||||
StoredEdge();
|
||||
StoredEdge(const Edge* parent, const oop* reference);
|
||||
StoredEdge(const Edge& edge);
|
||||
StoredEdge(const StoredEdge& edge);
|
||||
void operator=(const StoredEdge& edge);
|
||||
|
||||
const RoutableEdge* skip_edge() const { return _skip_edge; }
|
||||
traceid gc_root_id() const { return _gc_root_id; }
|
||||
void set_gc_root_id(traceid root_id) const { _gc_root_id = root_id; }
|
||||
|
||||
bool is_skip_edge() const { return _skip_length != 0; }
|
||||
size_t skip_length() const { return _skip_length; }
|
||||
void set_skip_length(size_t length) { _skip_length = length; }
|
||||
|
||||
bool is_skip_edge() const { return _skip_edge != NULL; }
|
||||
bool processed() const { return _processed; }
|
||||
bool is_sentinel() const {
|
||||
return _skip_edge == NULL && _skip_length == 1;
|
||||
void set_parent(const Edge* edge) { this->_parent = edge; }
|
||||
|
||||
StoredEdge* parent() const {
|
||||
return const_cast<StoredEdge*>(static_cast<const StoredEdge*>(Edge::parent()));
|
||||
}
|
||||
|
||||
void set_skip_edge(const RoutableEdge* edge) const {
|
||||
assert(!is_skip_edge(), "invariant");
|
||||
assert(edge != this, "invariant");
|
||||
_skip_edge = edge;
|
||||
}
|
||||
|
||||
void set_skip_length(size_t length) const {
|
||||
_skip_length = length;
|
||||
}
|
||||
|
||||
void set_processed() const {
|
||||
assert(!_processed, "invariant");
|
||||
_processed = true;
|
||||
}
|
||||
|
||||
// true navigation according to physical tree representation
|
||||
const RoutableEdge* physical_parent() const {
|
||||
return static_cast<const RoutableEdge*>(parent());
|
||||
}
|
||||
|
||||
// logical navigation taking skip levels into account
|
||||
const RoutableEdge* logical_parent() const {
|
||||
return is_skip_edge() ? skip_edge() : physical_parent();
|
||||
}
|
||||
|
||||
size_t logical_distance_to_root() const;
|
||||
};
|
||||
|
||||
class EdgeStore : public CHeapObj<mtTracing> {
|
||||
typedef HashTableHost<RoutableEdge, traceid, Entry, EdgeStore> EdgeHashTable;
|
||||
typedef HashTableHost<StoredEdge, traceid, Entry, EdgeStore> EdgeHashTable;
|
||||
typedef EdgeHashTable::HashEntry EdgeEntry;
|
||||
template <typename,
|
||||
typename,
|
||||
@ -90,6 +66,9 @@ class EdgeStore : public CHeapObj<mtTracing> {
|
||||
typename,
|
||||
size_t>
|
||||
friend class HashTableHost;
|
||||
friend class EventEmitter;
|
||||
friend class ObjectSampleWriter;
|
||||
friend class ObjectSampleCheckpoint;
|
||||
private:
|
||||
static traceid _edge_id_counter;
|
||||
EdgeHashTable* _edges;
|
||||
@ -98,22 +77,31 @@ class EdgeStore : public CHeapObj<mtTracing> {
|
||||
void assign_id(EdgeEntry* entry);
|
||||
bool equals(const Edge& query, uintptr_t hash, const EdgeEntry* entry);
|
||||
|
||||
const Edge* get_edge(const Edge* edge) const;
|
||||
const Edge* put(const Edge* edge);
|
||||
StoredEdge* get(const oop* reference) const;
|
||||
StoredEdge* put(const oop* reference);
|
||||
traceid gc_root_id(const Edge* edge) const;
|
||||
|
||||
bool put_edges(StoredEdge** previous, const Edge** current, size_t length);
|
||||
bool put_skip_edge(StoredEdge** previous, const Edge** current, size_t distance_to_root);
|
||||
void put_chain_epilogue(StoredEdge* leak_context_edge, const Edge* root) const;
|
||||
|
||||
StoredEdge* associate_leak_context_with_candidate(const Edge* edge);
|
||||
void store_gc_root_id_in_leak_context_edge(StoredEdge* leak_context_edge, const Edge* root) const;
|
||||
StoredEdge* link_new_edge(StoredEdge** previous, const Edge** current);
|
||||
void link_with_existing_chain(const StoredEdge* current_stored, StoredEdge** previous, size_t previous_length);
|
||||
|
||||
template <typename T>
|
||||
void iterate(T& functor) const { _edges->iterate_value<T>(functor); }
|
||||
|
||||
DEBUG_ONLY(bool contains(const oop* reference) const;)
|
||||
|
||||
public:
|
||||
EdgeStore();
|
||||
~EdgeStore();
|
||||
|
||||
void add_chain(const Edge* chain, size_t length);
|
||||
bool is_empty() const;
|
||||
size_t number_of_entries() const;
|
||||
|
||||
traceid get_id(const Edge* edge) const;
|
||||
traceid get_root_id(const Edge* edge) const;
|
||||
|
||||
template <typename T>
|
||||
void iterate_edges(T& functor) const { _edges->iterate_value<T>(functor); }
|
||||
void put_chain(const Edge* chain, size_t length);
|
||||
};
|
||||
|
||||
#endif // SHARE_JFR_LEAKPROFILER_CHAINS_EDGESTORE_HPP
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -38,11 +38,7 @@ bool EdgeUtils::is_leak_edge(const Edge& edge) {
|
||||
return (const Edge*)edge.pointee()->mark() == &edge;
|
||||
}
|
||||
|
||||
bool EdgeUtils::is_root(const Edge& edge) {
|
||||
return edge.is_root();
|
||||
}
|
||||
|
||||
static int field_offset(const Edge& edge) {
|
||||
static int field_offset(const StoredEdge& edge) {
|
||||
assert(!edge.is_root(), "invariant");
|
||||
const oop ref_owner = edge.reference_owner();
|
||||
assert(ref_owner != NULL, "invariant");
|
||||
@ -56,7 +52,7 @@ static int field_offset(const Edge& edge) {
|
||||
return offset;
|
||||
}
|
||||
|
||||
static const InstanceKlass* field_type(const Edge& edge) {
|
||||
static const InstanceKlass* field_type(const StoredEdge& edge) {
|
||||
assert(!edge.is_root() || !EdgeUtils::is_array_element(edge), "invariant");
|
||||
return (const InstanceKlass*)edge.reference_owner_klass();
|
||||
}
|
||||
@ -138,175 +134,18 @@ const Edge* EdgeUtils::root(const Edge& edge) {
|
||||
current = parent;
|
||||
parent = current->parent();
|
||||
}
|
||||
assert(current != NULL, "invariant");
|
||||
return current;
|
||||
}
|
||||
|
||||
// The number of references associated with the leak node;
|
||||
// can be viewed as the leak node "context".
|
||||
// Used to provide leak context for a "capped/skipped" reference chain.
|
||||
static const size_t leak_context = 100;
|
||||
|
||||
// The number of references associated with the root node;
|
||||
// can be viewed as the root node "context".
|
||||
// Used to provide root context for a "capped/skipped" reference chain.
|
||||
static const size_t root_context = 100;
|
||||
|
||||
// A limit on the reference chain depth to be serialized,
|
||||
static const size_t max_ref_chain_depth = leak_context + root_context;
|
||||
|
||||
const RoutableEdge* skip_to(const RoutableEdge& edge, size_t skip_length) {
|
||||
const RoutableEdge* current = &edge;
|
||||
const RoutableEdge* parent = current->physical_parent();
|
||||
const Edge* EdgeUtils::ancestor(const Edge& edge, size_t distance) {
|
||||
const Edge* current = &edge;
|
||||
const Edge* parent = current->parent();
|
||||
size_t seek = 0;
|
||||
while (parent != NULL && seek != skip_length) {
|
||||
while (parent != NULL && seek != distance) {
|
||||
seek++;
|
||||
current = parent;
|
||||
parent = parent->physical_parent();
|
||||
parent = parent->parent();
|
||||
}
|
||||
return current;
|
||||
}
|
||||
|
||||
#ifdef ASSERT
|
||||
static void validate_skip_target(const RoutableEdge* skip_target) {
|
||||
assert(skip_target != NULL, "invariant");
|
||||
assert(skip_target->distance_to_root() + 1 == root_context, "invariant");
|
||||
assert(skip_target->is_sentinel(), "invariant");
|
||||
}
|
||||
|
||||
static void validate_new_skip_edge(const RoutableEdge* new_skip_edge, const RoutableEdge* last_skip_edge, size_t adjustment) {
|
||||
assert(new_skip_edge != NULL, "invariant");
|
||||
assert(new_skip_edge->is_skip_edge(), "invariant");
|
||||
if (last_skip_edge != NULL) {
|
||||
const RoutableEdge* const target = skip_to(*new_skip_edge->logical_parent(), adjustment);
|
||||
validate_skip_target(target->logical_parent());
|
||||
return;
|
||||
}
|
||||
assert(last_skip_edge == NULL, "invariant");
|
||||
// only one level of logical indirection
|
||||
validate_skip_target(new_skip_edge->logical_parent());
|
||||
}
|
||||
#endif // ASSERT
|
||||
|
||||
static void install_logical_route(const RoutableEdge* new_skip_edge, size_t skip_target_distance) {
|
||||
assert(new_skip_edge != NULL, "invariant");
|
||||
assert(!new_skip_edge->is_skip_edge(), "invariant");
|
||||
assert(!new_skip_edge->processed(), "invariant");
|
||||
const RoutableEdge* const skip_target = skip_to(*new_skip_edge, skip_target_distance);
|
||||
assert(skip_target != NULL, "invariant");
|
||||
new_skip_edge->set_skip_edge(skip_target);
|
||||
new_skip_edge->set_skip_length(skip_target_distance);
|
||||
assert(new_skip_edge->is_skip_edge(), "invariant");
|
||||
assert(new_skip_edge->logical_parent() == skip_target, "invariant");
|
||||
}
|
||||
|
||||
static const RoutableEdge* find_last_skip_edge(const RoutableEdge& edge, size_t& distance) {
|
||||
assert(distance == 0, "invariant");
|
||||
const RoutableEdge* current = &edge;
|
||||
while (current != NULL) {
|
||||
if (current->is_skip_edge() && current->skip_edge()->is_sentinel()) {
|
||||
return current;
|
||||
}
|
||||
current = current->physical_parent();
|
||||
++distance;
|
||||
}
|
||||
return current;
|
||||
}
|
||||
|
||||
static void collapse_overlapping_chain(const RoutableEdge& edge,
|
||||
const RoutableEdge* first_processed_edge,
|
||||
size_t first_processed_distance) {
|
||||
assert(first_processed_edge != NULL, "invariant");
|
||||
// first_processed_edge is already processed / written
|
||||
assert(first_processed_edge->processed(), "invariant");
|
||||
assert(first_processed_distance + 1 <= leak_context, "invariant");
|
||||
|
||||
// from this first processed edge, attempt to fetch the last skip edge
|
||||
size_t last_skip_edge_distance = 0;
|
||||
const RoutableEdge* const last_skip_edge = find_last_skip_edge(*first_processed_edge, last_skip_edge_distance);
|
||||
const size_t distance_discovered = first_processed_distance + last_skip_edge_distance + 1;
|
||||
|
||||
if (distance_discovered <= leak_context || (last_skip_edge == NULL && distance_discovered <= max_ref_chain_depth)) {
|
||||
// complete chain can be accommodated without modification
|
||||
return;
|
||||
}
|
||||
|
||||
// backtrack one edge from existing processed edge
|
||||
const RoutableEdge* const new_skip_edge = skip_to(edge, first_processed_distance - 1);
|
||||
assert(new_skip_edge != NULL, "invariant");
|
||||
assert(!new_skip_edge->processed(), "invariant");
|
||||
assert(new_skip_edge->parent() == first_processed_edge, "invariant");
|
||||
|
||||
size_t adjustment = 0;
|
||||
if (last_skip_edge != NULL) {
|
||||
assert(leak_context - 1 > first_processed_distance - 1, "invariant");
|
||||
adjustment = leak_context - first_processed_distance - 1;
|
||||
assert(last_skip_edge_distance + 1 > adjustment, "invariant");
|
||||
install_logical_route(new_skip_edge, last_skip_edge_distance + 1 - adjustment);
|
||||
} else {
|
||||
install_logical_route(new_skip_edge, last_skip_edge_distance + 1 - root_context);
|
||||
new_skip_edge->logical_parent()->set_skip_length(1); // sentinel
|
||||
}
|
||||
|
||||
DEBUG_ONLY(validate_new_skip_edge(new_skip_edge, last_skip_edge, adjustment);)
|
||||
}
|
||||
|
||||
static void collapse_non_overlapping_chain(const RoutableEdge& edge,
|
||||
const RoutableEdge* first_processed_edge,
|
||||
size_t first_processed_distance) {
|
||||
assert(first_processed_edge != NULL, "invariant");
|
||||
assert(!first_processed_edge->processed(), "invariant");
|
||||
// this implies that the first "processed" edge is the leak context relative "leaf"
|
||||
assert(first_processed_distance + 1 == leak_context, "invariant");
|
||||
|
||||
const size_t distance_to_root = edge.distance_to_root();
|
||||
if (distance_to_root + 1 <= max_ref_chain_depth) {
|
||||
// complete chain can be accommodated without constructing a skip edge
|
||||
return;
|
||||
}
|
||||
|
||||
install_logical_route(first_processed_edge, distance_to_root + 1 - first_processed_distance - root_context);
|
||||
first_processed_edge->logical_parent()->set_skip_length(1); // sentinel
|
||||
|
||||
DEBUG_ONLY(validate_new_skip_edge(first_processed_edge, NULL, 0);)
|
||||
}
|
||||
|
||||
static const RoutableEdge* processed_edge(const RoutableEdge& edge, size_t& distance) {
|
||||
assert(distance == 0, "invariant");
|
||||
const RoutableEdge* current = &edge;
|
||||
while (current != NULL && distance < leak_context - 1) {
|
||||
if (current->processed()) {
|
||||
return current;
|
||||
}
|
||||
current = current->physical_parent();
|
||||
++distance;
|
||||
}
|
||||
assert(distance <= leak_context - 1, "invariant");
|
||||
return current;
|
||||
}
|
||||
|
||||
/*
 * Some vocabulary:
 * -----------
 * "Context" is an interval in the chain; it is associated with an edge and signifies a number of connected edges.
 * "Processed / written" means an edge that has already been serialized.
 * "Skip edge" is an edge that contains additional information for logical routing purposes.
 * "Skip target" is an edge used as a destination for a skip edge.
 */
|
||||
void EdgeUtils::collapse_chain(const RoutableEdge& edge) {
|
||||
assert(is_leak_edge(edge), "invariant");
|
||||
|
||||
// attempt to locate an already processed edge inside current leak context (if any)
|
||||
size_t first_processed_distance = 0;
|
||||
const RoutableEdge* const first_processed_edge = processed_edge(edge, first_processed_distance);
|
||||
if (first_processed_edge == NULL) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (first_processed_edge->processed()) {
|
||||
collapse_overlapping_chain(edge, first_processed_edge, first_processed_distance);
|
||||
} else {
|
||||
collapse_non_overlapping_chain(edge, first_processed_edge, first_processed_distance);
|
||||
}
|
||||
|
||||
assert(edge.logical_distance_to_root() + 1 <= max_ref_chain_depth, "invariant");
|
||||
}
|
||||
|
@ -28,15 +28,17 @@
|
||||
#include "memory/allocation.hpp"
|
||||
|
||||
class Edge;
|
||||
class RoutableEdge;
|
||||
class Symbol;
|
||||
|
||||
class EdgeUtils : public AllStatic {
|
||||
public:
|
||||
static bool is_leak_edge(const Edge& edge);
|
||||
static const size_t leak_context = 100;
|
||||
static const size_t root_context = 100;
|
||||
static const size_t max_ref_chain_depth = leak_context + root_context;
|
||||
|
||||
static bool is_leak_edge(const Edge& edge);
|
||||
static const Edge* root(const Edge& edge);
|
||||
static bool is_root(const Edge& edge);
|
||||
static const Edge* ancestor(const Edge& edge, size_t distance);
|
||||
|
||||
static bool is_array_element(const Edge& edge);
|
||||
static int array_index(const Edge& edge);
|
||||
@ -44,8 +46,6 @@ class EdgeUtils : public AllStatic {
|
||||
|
||||
static const Symbol* field_name_symbol(const Edge& edge);
|
||||
static jshort field_modifiers(const Edge& edge);
|
||||
|
||||
static void collapse_chain(const RoutableEdge& edge);
|
||||
};
|
||||
|
||||
#endif // SHARE_JFR_LEAKPROFILER_CHAINS_EDGEUTILS_HPP
|
||||
|
@ -0,0 +1,132 @@
|
||||
/*
|
||||
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "gc/shared/collectedHeap.hpp"
|
||||
#include "jfr/leakprofiler/leakProfiler.hpp"
|
||||
#include "jfr/leakprofiler/chains/bfsClosure.hpp"
|
||||
#include "jfr/leakprofiler/chains/bitset.hpp"
|
||||
#include "jfr/leakprofiler/chains/dfsClosure.hpp"
|
||||
#include "jfr/leakprofiler/chains/edge.hpp"
|
||||
#include "jfr/leakprofiler/chains/edgeQueue.hpp"
|
||||
#include "jfr/leakprofiler/chains/edgeStore.hpp"
|
||||
#include "jfr/leakprofiler/chains/objectSampleMarker.hpp"
|
||||
#include "jfr/leakprofiler/chains/rootSetClosure.hpp"
|
||||
#include "jfr/leakprofiler/chains/edgeStore.hpp"
|
||||
#include "jfr/leakprofiler/chains/objectSampleMarker.hpp"
|
||||
#include "jfr/leakprofiler/chains/pathToGcRootsOperation.hpp"
|
||||
#include "jfr/leakprofiler/checkpoint/eventEmitter.hpp"
|
||||
#include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp"
|
||||
#include "jfr/leakprofiler/sampling/objectSample.hpp"
|
||||
#include "jfr/leakprofiler/sampling/objectSampler.hpp"
|
||||
#include "jfr/leakprofiler/utilities/granularTimer.hpp"
|
||||
#include "logging/log.hpp"
|
||||
#include "memory/universe.hpp"
|
||||
#include "oops/markOop.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "runtime/safepoint.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
|
||||
PathToGcRootsOperation::PathToGcRootsOperation(ObjectSampler* sampler, EdgeStore* edge_store, int64_t cutoff, bool emit_all) :
  _sampler(sampler), _edge_store(edge_store), _cutoff_ticks(cutoff), _emit_all(emit_all) {}

/* The EdgeQueue is backed by directly managed virtual memory.
 * We will attempt to dimension an initial reservation
 * in proportion to the size of the heap (represented by heap_region).
 * Initial memory reservation: 5% of the heap OR at least 32 MB
 * Commit ratio: 1 : 10 (subject to allocation granularities)
 */
static size_t edge_queue_memory_reservation(const MemRegion& heap_region) {
  const size_t memory_reservation_bytes = MAX2(heap_region.byte_size() / 20, 32*M);
  assert(memory_reservation_bytes >= (size_t)32*M, "invariant");
  return memory_reservation_bytes;
}

static size_t edge_queue_memory_commit_size(size_t memory_reservation_bytes) {
  const size_t memory_commit_block_size_bytes = memory_reservation_bytes / 10;
  assert(memory_commit_block_size_bytes >= (size_t)3*M, "invariant");
  return memory_commit_block_size_bytes;
}
|
||||
|
||||
static void log_edge_queue_summary(const EdgeQueue& edge_queue) {
|
||||
log_trace(jfr, system)("EdgeQueue reserved size total: " SIZE_FORMAT " [KB]", edge_queue.reserved_size() / K);
|
||||
log_trace(jfr, system)("EdgeQueue edges total: " SIZE_FORMAT, edge_queue.top());
|
||||
log_trace(jfr, system)("EdgeQueue liveset total: " SIZE_FORMAT " [KB]", edge_queue.live_set() / K);
|
||||
if (edge_queue.reserved_size() > 0) {
|
||||
log_trace(jfr, system)("EdgeQueue commit reserve ratio: %f\n",
|
||||
((double)edge_queue.live_set() / (double)edge_queue.reserved_size()));
|
||||
}
|
||||
}
|
||||
|
||||
void PathToGcRootsOperation::doit() {
|
||||
assert(SafepointSynchronize::is_at_safepoint(), "invariant");
|
||||
assert(_cutoff_ticks > 0, "invariant");
|
||||
|
||||
// The bitset used for marking is dimensioned as a function of the heap size
|
||||
const MemRegion heap_region = Universe::heap()->reserved_region();
|
||||
BitSet mark_bits(heap_region);
|
||||
|
||||
// The edge queue is dimensioned as a fraction of the heap size
|
||||
const size_t edge_queue_reservation_size = edge_queue_memory_reservation(heap_region);
|
||||
EdgeQueue edge_queue(edge_queue_reservation_size, edge_queue_memory_commit_size(edge_queue_reservation_size));
|
||||
|
||||
// The initialize() routines will attempt to reserve and allocate backing storage memory.
|
||||
// Failure to accommodate will render root chain processing impossible.
|
||||
// As a fallback on failure, just write out the existing samples, flat, without chains.
|
||||
if (!(mark_bits.initialize() && edge_queue.initialize())) {
|
||||
log_warning(jfr)("Unable to allocate memory for root chain processing");
|
||||
return;
|
||||
}
|
||||
|
||||
// Save the original markWord for the potential leak objects,
|
||||
// to be restored on function exit
|
||||
ObjectSampleMarker marker;
|
||||
if (ObjectSampleCheckpoint::mark(_sampler, marker, _emit_all) == 0) {
|
||||
// no valid samples to process
|
||||
return;
|
||||
}
|
||||
|
||||
// Necessary condition for attempting a root set iteration
|
||||
Universe::heap()->ensure_parsability(false);
|
||||
|
||||
BFSClosure bfs(&edge_queue, _edge_store, &mark_bits);
|
||||
RootSetClosure<BFSClosure> roots(&bfs);
|
||||
|
||||
GranularTimer::start(_cutoff_ticks, 1000000);
|
||||
roots.process();
|
||||
if (edge_queue.is_full()) {
|
||||
// Pathological case where roots don't fit in queue
|
||||
// Do a depth-first search, but mark roots first
|
||||
// to avoid walking sideways over roots
|
||||
DFSClosure::find_leaks_from_root_set(_edge_store, &mark_bits);
|
||||
} else {
|
||||
bfs.process();
|
||||
}
|
||||
GranularTimer::stop();
|
||||
log_edge_queue_summary(edge_queue);
|
||||
|
||||
// Emit old objects including their reference chains as events
|
||||
EventEmitter emitter(GranularTimer::start_time(), GranularTimer::end_time());
|
||||
emitter.write_events(_sampler, _edge_store, _emit_all);
|
||||
}
|
@ -0,0 +1,46 @@
|
||||
/*
|
||||
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef SHARE_JFR_LEAKPROFILER_CHAINS_PATHTOGCROOTSOPERATION_HPP
|
||||
#define SHARE_JFR_LEAKPROFILER_CHAINS_PATHTOGCROOTSOPERATION_HPP
|
||||
|
||||
#include "jfr/leakprofiler/utilities/vmOperation.hpp"
|
||||
|
||||
class EdgeStore;
|
||||
class ObjectSampler;
|
||||
|
||||
// Safepoint operation for finding paths to gc roots
|
||||
class PathToGcRootsOperation : public OldObjectVMOperation {
|
||||
private:
|
||||
ObjectSampler* _sampler;
|
||||
EdgeStore* const _edge_store;
|
||||
const int64_t _cutoff_ticks;
|
||||
const bool _emit_all;
|
||||
|
||||
public:
|
||||
PathToGcRootsOperation(ObjectSampler* sampler, EdgeStore* edge_store, int64_t cutoff, bool emit_all);
|
||||
virtual void doit();
|
||||
};
|
||||
|
||||
#endif // SHARE_JFR_LEAKPROFILER_CHAINS_PATHTOGCROOTSOPERATION_HPP
|
@ -28,12 +28,14 @@
|
||||
#include "classfile/stringTable.hpp"
|
||||
#include "classfile/systemDictionary.hpp"
|
||||
#include "gc/shared/strongRootsScope.hpp"
|
||||
#include "jfr/leakprofiler/chains/bfsClosure.hpp"
|
||||
#include "jfr/leakprofiler/chains/dfsClosure.hpp"
|
||||
#include "jfr/leakprofiler/chains/edgeQueue.hpp"
|
||||
#include "jfr/leakprofiler/chains/rootSetClosure.hpp"
|
||||
#include "jfr/leakprofiler/utilities/saveRestore.hpp"
|
||||
#include "jfr/leakprofiler/utilities/unifiedOop.hpp"
|
||||
#include "memory/universe.hpp"
|
||||
#include "oops/access.inline.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "prims/jvmtiExport.hpp"
|
||||
#include "runtime/jniHandles.inline.hpp"
|
||||
#include "runtime/synchronizer.hpp"
|
||||
@ -44,11 +46,11 @@
|
||||
#include "jvmci/jvmci.hpp"
|
||||
#endif
|
||||
|
||||
RootSetClosure::RootSetClosure(EdgeQueue* edge_queue) :
|
||||
_edge_queue(edge_queue) {
|
||||
}
|
||||
template <typename Delegate>
|
||||
RootSetClosure<Delegate>::RootSetClosure(Delegate* delegate) : _delegate(delegate) {}
|
||||
|
||||
void RootSetClosure::do_oop(oop* ref) {
|
||||
template <typename Delegate>
|
||||
void RootSetClosure<Delegate>::do_oop(oop* ref) {
|
||||
assert(ref != NULL, "invariant");
|
||||
// We discard unaligned root references because
|
||||
// our reference tagging scheme will use
|
||||
@ -62,50 +64,40 @@ void RootSetClosure::do_oop(oop* ref) {
|
||||
}
|
||||
|
||||
assert(is_aligned(ref, HeapWordSize), "invariant");
|
||||
const oop pointee = *ref;
|
||||
if (pointee != NULL) {
|
||||
closure_impl(ref, pointee);
|
||||
if (*ref != NULL) {
|
||||
_delegate->do_root(ref);
|
||||
}
|
||||
}
|
||||
|
||||
void RootSetClosure::do_oop(narrowOop* ref) {
|
||||
template <typename Delegate>
|
||||
void RootSetClosure<Delegate>::do_oop(narrowOop* ref) {
|
||||
assert(ref != NULL, "invariant");
|
||||
assert(is_aligned(ref, sizeof(narrowOop)), "invariant");
|
||||
const oop pointee = RawAccess<>::oop_load(ref);
|
||||
if (pointee != NULL) {
|
||||
closure_impl(UnifiedOop::encode(ref), pointee);
|
||||
_delegate->do_root(UnifiedOop::encode(ref));
|
||||
}
|
||||
}
|
||||
|
||||
void RootSetClosure::closure_impl(const oop* reference, const oop pointee) {
|
||||
if (!_edge_queue->is_full()) {
|
||||
_edge_queue->add(NULL, reference);
|
||||
}
|
||||
}
|
||||
class RootSetClosureMarkScope : public MarkScope {};
|
||||
|
||||
void RootSetClosure::add_to_queue(EdgeQueue* edge_queue) {
|
||||
RootSetClosure rs(edge_queue);
|
||||
process_roots(&rs);
|
||||
}
|
||||
|
||||
class RootSetClosureMarkScope : public MarkScope {
|
||||
};
|
||||
|
||||
void RootSetClosure::process_roots(OopClosure* closure) {
|
||||
SaveRestoreCLDClaimBits save_restore_cld_claim_bits;
|
||||
template <typename Delegate>
|
||||
void RootSetClosure<Delegate>::process() {
|
||||
RootSetClosureMarkScope mark_scope;
|
||||
|
||||
CLDToOopClosure cldt_closure(closure, ClassLoaderData::_claim_strong);
|
||||
CLDToOopClosure cldt_closure(this, ClassLoaderData::_claim_none);
|
||||
ClassLoaderDataGraph::always_strong_cld_do(&cldt_closure);
|
||||
CodeBlobToOopClosure blobs(closure, false);
|
||||
Threads::oops_do(closure, &blobs);
|
||||
ObjectSynchronizer::oops_do(closure);
|
||||
Universe::oops_do(closure);
|
||||
JNIHandles::oops_do(closure);
|
||||
JvmtiExport::oops_do(closure);
|
||||
SystemDictionary::oops_do(closure);
|
||||
Management::oops_do(closure);
|
||||
StringTable::oops_do(closure);
|
||||
AOTLoader::oops_do(closure);
|
||||
JVMCI_ONLY(JVMCI::oops_do(closure);)
|
||||
CodeBlobToOopClosure blobs(this, false);
|
||||
Threads::oops_do(this, &blobs);
|
||||
ObjectSynchronizer::oops_do(this);
|
||||
Universe::oops_do(this);
|
||||
JNIHandles::oops_do(this);
|
||||
JvmtiExport::oops_do(this);
|
||||
SystemDictionary::oops_do(this);
|
||||
Management::oops_do(this);
|
||||
StringTable::oops_do(this);
|
||||
AOTLoader::oops_do(this);
|
||||
JVMCI_ONLY(JVMCI::oops_do(this);)
|
||||
}
|
||||
|
||||
template class RootSetClosure<BFSClosure>;
|
||||
template class RootSetClosure<DFSClosure>;
|
||||
|
@ -26,18 +26,14 @@
|
||||
#define SHARE_JFR_LEAKPROFILER_CHAINS_ROOTSETCLOSURE_HPP
|
||||
|
||||
#include "memory/iterator.hpp"
|
||||
#include "oops/oop.hpp"
|
||||
|
||||
class EdgeQueue;
|
||||
|
||||
template <typename Delegate>
|
||||
class RootSetClosure: public BasicOopIterateClosure {
|
||||
private:
|
||||
RootSetClosure(EdgeQueue* edge_queue);
|
||||
EdgeQueue* _edge_queue;
|
||||
void closure_impl(const oop* reference, const oop pointee);
|
||||
Delegate* const _delegate;
|
||||
public:
|
||||
static void add_to_queue(EdgeQueue* edge_queue);
|
||||
static void process_roots(OopClosure* closure);
|
||||
RootSetClosure(Delegate* delegate);
|
||||
void process();
|
||||
|
||||
virtual void do_oop(oop* reference);
|
||||
virtual void do_oop(narrowOop* reference);
|
||||
|
src/hotspot/share/jfr/leakprofiler/checkpoint/eventEmitter.cpp (new file, 148 lines)
@ -0,0 +1,148 @@
|
||||
/*
|
||||
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "jfr/jfrEvents.hpp"
|
||||
#include "jfr/leakprofiler/chains/edgeStore.hpp"
|
||||
#include "jfr/leakprofiler/chains/pathToGcRootsOperation.hpp"
|
||||
#include "jfr/leakprofiler/checkpoint/eventEmitter.hpp"
|
||||
#include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp"
|
||||
#include "jfr/leakprofiler/sampling/objectSample.hpp"
|
||||
#include "jfr/leakprofiler/sampling/objectSampler.hpp"
|
||||
#include "logging/log.hpp"
|
||||
#include "memory/resourceArea.hpp"
|
||||
#include "oops/markOop.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "runtime/thread.inline.hpp"
|
||||
#include "runtime/vmThread.hpp"
|
||||
|
||||
EventEmitter::EventEmitter(const JfrTicks& start_time, const JfrTicks& end_time) :
|
||||
_start_time(start_time),
|
||||
_end_time(end_time),
|
||||
_thread(Thread::current()),
|
||||
_jfr_thread_local(_thread->jfr_thread_local()),
|
||||
_thread_id(_thread->jfr_thread_local()->thread_id()) {}
|
||||
|
||||
EventEmitter::~EventEmitter() {
|
||||
// restore / reset thread local stack trace and thread id
|
||||
_jfr_thread_local->set_thread_id(_thread_id);
|
||||
_jfr_thread_local->clear_cached_stack_trace();
|
||||
}
|
||||
|
||||
void EventEmitter::emit(ObjectSampler* sampler, int64_t cutoff_ticks, bool emit_all) {
|
||||
assert(sampler != NULL, "invariant");
|
||||
|
||||
ResourceMark rm;
|
||||
EdgeStore edge_store;
|
||||
if (cutoff_ticks <= 0) {
|
||||
// no reference chains
|
||||
JfrTicks time_stamp = JfrTicks::now();
|
||||
EventEmitter emitter(time_stamp, time_stamp);
|
||||
emitter.write_events(sampler, &edge_store, emit_all);
|
||||
return;
|
||||
}
|
||||
// events emitted with reference chains require a safepoint operation
|
||||
PathToGcRootsOperation op(sampler, &edge_store, cutoff_ticks, emit_all);
|
||||
VMThread::execute(&op);
|
||||
}
|
||||
|
||||
size_t EventEmitter::write_events(ObjectSampler* object_sampler, EdgeStore* edge_store, bool emit_all) {
|
||||
assert(_thread == Thread::current(), "invariant");
|
||||
assert(_thread->jfr_thread_local() == _jfr_thread_local, "invariant");
|
||||
assert(object_sampler != NULL, "invariant");
|
||||
assert(edge_store != NULL, "invariant");
|
||||
|
||||
const jlong last_sweep = emit_all ? max_jlong : object_sampler->last_sweep().value();
|
||||
size_t count = 0;
|
||||
|
||||
const ObjectSample* current = object_sampler->first();
|
||||
while (current != NULL) {
|
||||
ObjectSample* prev = current->prev();
|
||||
if (current->is_alive_and_older_than(last_sweep)) {
|
||||
write_event(current, edge_store);
|
||||
++count;
|
||||
}
|
||||
current = prev;
|
||||
}
|
||||
|
||||
if (count > 0) {
|
||||
// serialize associated checkpoints and potential chains
|
||||
ObjectSampleCheckpoint::write(object_sampler, edge_store, emit_all, _thread);
|
||||
}
|
||||
return count;
|
||||
}
|
||||
|
||||
static int array_size(const oop object) {
|
||||
assert(object != NULL, "invariant");
|
||||
if (object->is_array()) {
|
||||
return arrayOop(object)->length();
|
||||
}
|
||||
return min_jint;
|
||||
}
|
||||
|
||||
void EventEmitter::write_event(const ObjectSample* sample, EdgeStore* edge_store) {
|
||||
assert(sample != NULL, "invariant");
|
||||
assert(!sample->is_dead(), "invariant");
|
||||
assert(edge_store != NULL, "invariant");
|
||||
assert(_jfr_thread_local != NULL, "invariant");
|
||||
|
||||
const oop* object_addr = sample->object_addr();
|
||||
traceid gc_root_id = 0;
|
||||
const Edge* edge = NULL;
|
||||
if (SafepointSynchronize::is_at_safepoint()) {
|
||||
edge = (const Edge*)(*object_addr)->mark();
|
||||
}
|
||||
if (edge == NULL) {
|
||||
// In order to dump out a representation of the event
|
||||
// even though it was not reachable / too long to reach,
|
||||
// we need to register a top level edge for this object.
|
||||
edge = edge_store->put(object_addr);
|
||||
} else {
|
||||
gc_root_id = edge_store->gc_root_id(edge);
|
||||
}
|
||||
|
||||
assert(edge != NULL, "invariant");
|
||||
const traceid object_id = edge_store->get_id(edge);
|
||||
assert(object_id != 0, "invariant");
|
||||
|
||||
EventOldObjectSample e(UNTIMED);
|
||||
e.set_starttime(_start_time);
|
||||
e.set_endtime(_end_time);
|
||||
e.set_allocationTime(sample->allocation_time());
|
||||
e.set_lastKnownHeapUsage(sample->heap_used_at_last_gc());
|
||||
e.set_object(object_id);
|
||||
e.set_arrayElements(array_size(edge->pointee()));
|
||||
e.set_root(gc_root_id);
|
||||
|
||||
  // Temporarily assign both the stack trace id and the thread id
  // to the thread-local data structure of the emitting thread (for the duration
  // of the commit() call). This provides a means to override
  // the event generation mechanism by injecting externally provided ids.
  // At this particular location, it lets us emit an old object event
  // carrying information from where the actual sampling occurred.
  _jfr_thread_local->set_cached_stack_trace_id(sample->stack_trace_id());
  assert(sample->has_thread(), "invariant");
  _jfr_thread_local->set_thread_id(sample->thread_id());
  e.commit();
}
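The override is safe because EventEmitter captures the emitting thread's original id in its constructor and restores it in its destructor (both shown earlier in this file), so the per-sample overrides never outlive the emitter. A compressed, purely illustrative sketch of that save/override/restore pattern, outside of the real classes:

  // hypothetical illustration of the pattern EventEmitter relies on
  struct ScopedThreadIdOverride {
    JfrThreadLocal* _tl;
    traceid _saved;
    ScopedThreadIdOverride(JfrThreadLocal* tl, traceid injected) : _tl(tl), _saved(tl->thread_id()) {
      _tl->set_thread_id(injected); // events committed in this scope report the injected id
    }
    ~ScopedThreadIdOverride() {
      _tl->set_thread_id(_saved);   // restore, mirroring EventEmitter::~EventEmitter
    }
  };
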
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -22,50 +22,37 @@
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef SHARE_JFR_LEAKPROFILER_EMITEVENTOPERATION_HPP
|
||||
#define SHARE_JFR_LEAKPROFILER_EMITEVENTOPERATION_HPP
|
||||
#ifndef SHARE_JFR_LEAKPROFILER_CHECKPOINT_EVENTEMITTER_HPP
|
||||
#define SHARE_JFR_LEAKPROFILER_CHECKPOINT_EVENTEMITTER_HPP
|
||||
|
||||
#include "runtime/vmOperations.hpp"
|
||||
#include "memory/allocation.hpp"
|
||||
#include "jfr/utilities/jfrTime.hpp"
|
||||
|
||||
typedef u8 traceid;
|
||||
|
||||
class BFSClosure;
|
||||
class EdgeStore;
|
||||
class EdgeQueue;
|
||||
class JfrThreadData;
|
||||
class JfrThreadLocal;
|
||||
class ObjectSample;
|
||||
class ObjectSampler;
|
||||
class Thread;
|
||||
|
||||
class VMThread;
|
||||
|
||||
// Safepoint operation for emitting object sample events
|
||||
class EmitEventOperation : public VM_Operation {
|
||||
class EventEmitter : public CHeapObj<mtTracing> {
|
||||
friend class LeakProfiler;
|
||||
friend class PathToGcRootsOperation;
|
||||
private:
|
||||
jlong _cutoff_ticks;
|
||||
bool _emit_all;
|
||||
VMThread* _vm_thread;
|
||||
JfrThreadLocal* _vm_thread_local;
|
||||
ObjectSampler* _object_sampler;
|
||||
const JfrTicks& _start_time;
|
||||
const JfrTicks& _end_time;
|
||||
Thread* _thread;
|
||||
JfrThreadLocal* _jfr_thread_local;
|
||||
traceid _thread_id;
|
||||
|
||||
EventEmitter(const JfrTicks& start_time, const JfrTicks& end_time);
|
||||
~EventEmitter();
|
||||
|
||||
void write_event(const ObjectSample* sample, EdgeStore* edge_store);
|
||||
int write_events(EdgeStore* edge_store);
|
||||
size_t write_events(ObjectSampler* sampler, EdgeStore* store, bool emit_all);
|
||||
|
||||
public:
|
||||
EmitEventOperation(jlong cutoff_ticks, bool emit_all) :
|
||||
_cutoff_ticks(cutoff_ticks),
|
||||
_emit_all(emit_all),
|
||||
_vm_thread(NULL),
|
||||
_vm_thread_local(NULL),
|
||||
_object_sampler(NULL) {
|
||||
}
|
||||
|
||||
VMOp_Type type() const {
|
||||
return VMOp_GC_HeapInspection;
|
||||
}
|
||||
|
||||
Mode evaluation_mode() const {
|
||||
return _safepoint;
|
||||
}
|
||||
|
||||
virtual void doit();
|
||||
static void emit(ObjectSampler* sampler, int64_t cutoff_ticks, bool emit_all);
|
||||
};
|
||||
|
||||
#endif // SHARE_JFR_LEAKPROFILER_EMITEVENTOPERATION_HPP
|
||||
#endif // SHARE_JFR_LEAKPROFILER_CHECKPOINT_EVENTEMITTER_HPP
|
@ -181,21 +181,18 @@ class SampleMark {
|
||||
}
|
||||
};
|
||||
|
||||
void ObjectSampleCheckpoint::install(JfrCheckpointWriter& writer, bool class_unload, bool resume) {
|
||||
assert(class_unload ? SafepointSynchronize::is_at_safepoint() : LeakProfiler::is_suspended(), "invariant");
|
||||
|
||||
void ObjectSampleCheckpoint::install(JfrCheckpointWriter& writer, bool class_unload) {
|
||||
if (!writer.has_data()) {
|
||||
if (!class_unload) {
|
||||
LeakProfiler::resume();
|
||||
}
|
||||
assert(LeakProfiler::is_running(), "invariant");
|
||||
return;
|
||||
}
|
||||
|
||||
assert(writer.has_data(), "invariant");
|
||||
const JfrCheckpointBlobHandle h_cp = writer.checkpoint_blob();
|
||||
|
||||
const ObjectSampler* const object_sampler = LeakProfiler::object_sampler();
|
||||
// Class unload implies a safepoint.
|
||||
// Not class unload implies the object sampler is locked, because it was claimed exclusively earlier.
|
||||
// Therefore: direct access to the object sampler instance is safe.
|
||||
const ObjectSampler* const object_sampler = ObjectSampler::sampler();
|
||||
assert(object_sampler != NULL, "invariant");
|
||||
|
||||
ObjectSample* const last = const_cast<ObjectSample*>(object_sampler->last());
|
||||
@ -203,80 +200,71 @@ void ObjectSampleCheckpoint::install(JfrCheckpointWriter& writer, bool class_unl
|
||||
CheckpointInstall install(h_cp);
|
||||
|
||||
if (class_unload) {
|
||||
if (last != NULL) {
|
||||
// all samples need the class unload information
|
||||
do_samples(last, NULL, install);
|
||||
}
|
||||
assert(LeakProfiler::is_running(), "invariant");
|
||||
// all samples need class unload information
|
||||
do_samples(last, NULL, install);
|
||||
return;
|
||||
}
|
||||
|
||||
// only new samples since last resolved checkpoint
|
||||
if (last != last_resolved) {
|
||||
do_samples(last, last_resolved, install);
|
||||
if (resume) {
|
||||
const_cast<ObjectSampler*>(object_sampler)->set_last_resolved(last);
|
||||
}
|
||||
}
|
||||
assert(LeakProfiler::is_suspended(), "invariant");
|
||||
if (resume) {
|
||||
LeakProfiler::resume();
|
||||
assert(LeakProfiler::is_running(), "invariant");
|
||||
const_cast<ObjectSampler*>(object_sampler)->set_last_resolved(last);
|
||||
}
|
||||
}
|
||||
|
||||
void ObjectSampleCheckpoint::write(const EdgeStore* edge_store, bool emit_all, Thread* thread) {
|
||||
void ObjectSampleCheckpoint::write(ObjectSampler* sampler, EdgeStore* edge_store, bool emit_all, Thread* thread) {
|
||||
assert(sampler != NULL, "invariant");
|
||||
assert(edge_store != NULL, "invariant");
|
||||
assert(thread != NULL, "invariant");
|
||||
|
||||
static bool types_registered = false;
|
||||
if (!types_registered) {
|
||||
JfrSerializer::register_serializer(TYPE_OLDOBJECTROOTSYSTEM, false, true, new RootSystemType());
|
||||
JfrSerializer::register_serializer(TYPE_OLDOBJECTROOTTYPE, false, true, new RootType());
|
||||
types_registered = true;
|
||||
}
|
||||
const ObjectSampler* const object_sampler = LeakProfiler::object_sampler();
|
||||
assert(object_sampler != NULL, "invariant");
|
||||
const jlong last_sweep = emit_all ? max_jlong : object_sampler->last_sweep().value();
|
||||
ObjectSample* const last = const_cast<ObjectSample*>(object_sampler->last());
|
||||
|
||||
const jlong last_sweep = emit_all ? max_jlong : sampler->last_sweep().value();
|
||||
ObjectSample* const last = const_cast<ObjectSample*>(sampler->last());
|
||||
{
|
||||
JfrCheckpointWriter writer(false, false, thread);
|
||||
CheckpointWrite checkpoint_write(writer, last_sweep);
|
||||
do_samples(last, NULL, checkpoint_write);
|
||||
}
|
||||
|
||||
CheckpointStateReset state_reset(last_sweep);
|
||||
do_samples(last, NULL, state_reset);
|
||||
|
||||
if (!edge_store->is_empty()) {
|
||||
// java object and chain representations
|
||||
JfrCheckpointWriter writer(false, true, thread);
|
||||
ObjectSampleWriter osw(writer, edge_store);
|
||||
edge_store->iterate_edges(osw);
|
||||
edge_store->iterate(osw);
|
||||
}
|
||||
}
|
||||
|
||||
WriteObjectSampleStacktrace::WriteObjectSampleStacktrace(JfrStackTraceRepository& repo) :
|
||||
_stack_trace_repo(repo) {
|
||||
int ObjectSampleCheckpoint::mark(ObjectSampler* object_sampler, ObjectSampleMarker& marker, bool emit_all) {
|
||||
assert(object_sampler != NULL, "invariant");
|
||||
ObjectSample* const last = const_cast<ObjectSample*>(object_sampler->last());
|
||||
if (last == NULL) {
|
||||
return 0;
|
||||
}
|
||||
const jlong last_sweep = emit_all ? max_jlong : object_sampler->last_sweep().value();
|
||||
SampleMark mark(marker, last_sweep);
|
||||
do_samples(last, NULL, mark);
|
||||
return mark.count();
|
||||
}
|
||||
|
||||
WriteObjectSampleStacktrace::WriteObjectSampleStacktrace(ObjectSampler* sampler, JfrStackTraceRepository& repo) :
|
||||
_sampler(sampler), _stack_trace_repo(repo) {}
|
||||
|
||||
bool WriteObjectSampleStacktrace::process() {
|
||||
assert(SafepointSynchronize::is_at_safepoint(), "invariant");
|
||||
if (!LeakProfiler::is_running()) {
|
||||
return true;
|
||||
}
|
||||
// Suspend the LeakProfiler subsystem
|
||||
// to ensure stable samples even
|
||||
// after we return from the safepoint.
|
||||
LeakProfiler::suspend();
|
||||
assert(!LeakProfiler::is_running(), "invariant");
|
||||
assert(LeakProfiler::is_suspended(), "invariant");
|
||||
assert(LeakProfiler::is_running(), "invariant");
|
||||
assert(_sampler != NULL, "invariant");
|
||||
|
||||
const ObjectSampler* object_sampler = LeakProfiler::object_sampler();
|
||||
assert(object_sampler != NULL, "invariant");
|
||||
assert(LeakProfiler::is_suspended(), "invariant");
|
||||
|
||||
ObjectSample* const last = const_cast<ObjectSample*>(object_sampler->last());
|
||||
const ObjectSample* const last_resolved = object_sampler->last_resolved();
|
||||
ObjectSample* const last = const_cast<ObjectSample*>(_sampler->last());
|
||||
const ObjectSample* const last_resolved = _sampler->last_resolved();
|
||||
if (last == last_resolved) {
|
||||
assert(LeakProfiler::is_suspended(), "invariant");
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -294,27 +282,13 @@ bool WriteObjectSampleStacktrace::process() {
|
||||
}
|
||||
if (count == 0) {
|
||||
writer.set_context(ctx);
|
||||
assert(LeakProfiler::is_suspended(), "invariant");
|
||||
return true;
|
||||
}
|
||||
assert(count > 0, "invariant");
|
||||
writer.write_count((u4)count, count_offset);
|
||||
JfrStackTraceRepository::write_metadata(writer);
|
||||
|
||||
ObjectSampleCheckpoint::install(writer, false, false);
|
||||
assert(LeakProfiler::is_suspended(), "invariant");
|
||||
// install the stacktrace checkpoint information to the candidates
|
||||
ObjectSampleCheckpoint::install(writer, false);
|
||||
return true;
|
||||
}
|
||||
|
||||
int ObjectSampleCheckpoint::mark(ObjectSampleMarker& marker, bool emit_all) {
|
||||
const ObjectSampler* object_sampler = LeakProfiler::object_sampler();
|
||||
assert(object_sampler != NULL, "invariant");
|
||||
ObjectSample* const last = const_cast<ObjectSample*>(object_sampler->last());
|
||||
if (last == NULL) {
|
||||
return 0;
|
||||
}
|
||||
const jlong last_sweep = emit_all ? max_jlong : object_sampler->last_sweep().value();
|
||||
SampleMark mark(marker, last_sweep);
|
||||
do_samples(last, NULL, mark);
|
||||
return mark.count();
|
||||
}
|
||||
|
@ -26,25 +26,26 @@
|
||||
#define SHARE_JFR_LEAKPROFILER_CHECKPOINT_OBJECTSAMPLECHECKPOINT_HPP
|
||||
|
||||
#include "memory/allocation.hpp"
|
||||
#include "utilities/exceptions.hpp"
|
||||
|
||||
class EdgeStore;
|
||||
class JfrStackTraceRepository;
|
||||
class JfrCheckpointWriter;
|
||||
class JfrStackTraceRepository;
|
||||
class ObjectSampleMarker;
|
||||
class ObjectSampler;
|
||||
|
||||
class ObjectSampleCheckpoint : AllStatic {
|
||||
public:
|
||||
static void install(JfrCheckpointWriter& writer, bool class_unload, bool resume);
|
||||
static void write(const EdgeStore* edge_store, bool emit_all, Thread* thread);
|
||||
static int mark(ObjectSampleMarker& marker, bool emit_all);
|
||||
static void install(JfrCheckpointWriter& writer, bool class_unload);
|
||||
static void write(ObjectSampler* sampler, EdgeStore* edge_store, bool emit_all, Thread* thread);
|
||||
static int mark(ObjectSampler* sampler, ObjectSampleMarker& marker, bool emit_all);
|
||||
};
|
||||
|
||||
class WriteObjectSampleStacktrace : public StackObj {
|
||||
private:
|
||||
ObjectSampler* const _sampler;
|
||||
JfrStackTraceRepository& _stack_trace_repo;
|
||||
public:
|
||||
WriteObjectSampleStacktrace(JfrStackTraceRepository& repo);
|
||||
WriteObjectSampleStacktrace(ObjectSampler* sampler, JfrStackTraceRepository& repo);
|
||||
bool process();
|
||||
};
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -350,7 +350,7 @@ int __write_root_description_info__(JfrCheckpointWriter* writer, JfrArtifactSet*
|
||||
return 1;
|
||||
}
|
||||
|
||||
static traceid get_root_description_info_id(const Edge& edge, traceid id) {
|
||||
static traceid get_gc_root_description_info_id(const Edge& edge, traceid id) {
|
||||
assert(edge.is_root(), "invariant");
|
||||
if (EdgeUtils::is_leak_edge(edge)) {
|
||||
return 0;
|
||||
@ -518,7 +518,7 @@ static void write_root_descriptors(JfrCheckpointWriter& writer) {
|
||||
}
|
||||
}
|
||||
|
||||
static void add_old_object_sample_info(const Edge* current, traceid id) {
|
||||
static void add_old_object_sample_info(const StoredEdge* current, traceid id) {
|
||||
assert(current != NULL, "invariant");
|
||||
if (sample_infos == NULL) {
|
||||
sample_infos = new SampleInfo();
|
||||
@ -528,11 +528,11 @@ static void add_old_object_sample_info(const Edge* current, traceid id) {
|
||||
assert(oosi != NULL, "invariant");
|
||||
oosi->_id = id;
|
||||
oosi->_data._object = current->pointee();
|
||||
oosi->_data._reference_id = current->is_root() ? (traceid)0 : id;
|
||||
oosi->_data._reference_id = current->parent() == NULL ? (traceid)0 : id;
|
||||
sample_infos->store(oosi);
|
||||
}
|
||||
|
||||
static void add_reference_info(const RoutableEdge* current, traceid id, traceid parent_id) {
|
||||
static void add_reference_info(const StoredEdge* current, traceid id, traceid parent_id) {
|
||||
assert(current != NULL, "invariant");
|
||||
if (ref_infos == NULL) {
|
||||
ref_infos = new RefInfo();
|
||||
@ -544,37 +544,43 @@ static void add_reference_info(const RoutableEdge* current, traceid id, traceid
|
||||
|
||||
ri->_id = id;
|
||||
ri->_data._array_info_id = !current->is_skip_edge() ? get_array_info_id(*current, id) : 0;
|
||||
ri->_data._field_info_id = ri->_data._array_info_id == 0 && !current->is_skip_edge() ?
|
||||
get_field_info_id(*current) : (traceid)0;
|
||||
ri->_data._field_info_id = ri->_data._array_info_id == 0 && !current->is_skip_edge() ? get_field_info_id(*current) : (traceid)0;
|
||||
ri->_data._old_object_sample_id = parent_id;
|
||||
ri->_data._skip = current->skip_length();
|
||||
ref_infos->store(ri);
|
||||
}
|
||||
|
||||
static traceid add_root_info(const Edge* root, traceid id) {
|
||||
assert(root != NULL, "invariant");
|
||||
assert(root->is_root(), "invariant");
|
||||
return get_root_description_info_id(*root, id);
|
||||
static bool is_gc_root(const StoredEdge* current) {
|
||||
assert(current != NULL, "invariant");
|
||||
return current->parent() == NULL && current->gc_root_id() != 0;
|
||||
}
|
||||
|
||||
void ObjectSampleWriter::write(const RoutableEdge* edge) {
|
||||
static traceid add_gc_root_info(const StoredEdge* root, traceid id) {
|
||||
assert(root != NULL, "invariant");
|
||||
assert(is_gc_root(root), "invariant");
|
||||
return get_gc_root_description_info_id(*root, id);
|
||||
}
|
||||
|
||||
void ObjectSampleWriter::write(const StoredEdge* edge) {
|
||||
assert(edge != NULL, "invariant");
|
||||
const traceid id = _store->get_id(edge);
|
||||
add_old_object_sample_info(edge, id);
|
||||
const RoutableEdge* parent = edge->logical_parent();
|
||||
const StoredEdge* const parent = edge->parent();
|
||||
if (parent != NULL) {
|
||||
add_reference_info(edge, id, _store->get_id(parent));
|
||||
} else {
|
||||
assert(edge->is_root(), "invariant");
|
||||
add_root_info(edge, id);
|
||||
if (is_gc_root(edge)) {
|
||||
assert(edge->gc_root_id() == id, "invariant");
|
||||
add_gc_root_info(edge, id);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
ObjectSampleWriter::ObjectSampleWriter(JfrCheckpointWriter& writer, const EdgeStore* store) :
|
||||
ObjectSampleWriter::ObjectSampleWriter(JfrCheckpointWriter& writer, EdgeStore* store) :
|
||||
_writer(writer),
|
||||
_store(store) {
|
||||
assert(store != NULL, "invariant");
|
||||
assert(store->number_of_entries() > 0, "invariant");
|
||||
assert(!store->is_empty(), "invariant");
|
||||
sample_infos = NULL;
|
||||
ref_infos = NULL;
|
||||
array_infos = NULL;
|
||||
@ -590,26 +596,7 @@ ObjectSampleWriter::~ObjectSampleWriter() {
|
||||
write_root_descriptors(_writer);
|
||||
}
|
||||
|
||||
void ObjectSampleWriter::write_chain(const RoutableEdge& edge) {
|
||||
assert(EdgeUtils::is_leak_edge(edge), "invariant");
|
||||
if (edge.processed()) {
|
||||
return;
|
||||
}
|
||||
EdgeUtils::collapse_chain(edge);
|
||||
const RoutableEdge* current = &edge;
|
||||
while (current != NULL) {
|
||||
if (current->processed()) {
|
||||
return;
|
||||
}
|
||||
write(current);
|
||||
current->set_processed();
|
||||
current = current->logical_parent();
|
||||
}
|
||||
}
|
||||
|
||||
bool ObjectSampleWriter::operator()(const RoutableEdge& edge) {
|
||||
if (EdgeUtils::is_leak_edge(edge)) {
|
||||
write_chain(edge);
|
||||
}
|
||||
bool ObjectSampleWriter::operator()(StoredEdge& e) {
|
||||
write(&e);
|
||||
return true;
|
||||
}
|
||||
|
@ -30,21 +30,17 @@
|
||||
class Edge;
|
||||
class EdgeStore;
|
||||
class JfrCheckpointWriter;
|
||||
class RoutableEdge;
|
||||
class StoredEdge;
|
||||
|
||||
class ObjectSampleWriter : public StackObj {
|
||||
private:
|
||||
JfrCheckpointWriter& _writer;
|
||||
const EdgeStore* const _store;
|
||||
|
||||
void write(const RoutableEdge* edge);
|
||||
void write_chain(const RoutableEdge& edge);
|
||||
|
||||
EdgeStore* const _store;
|
||||
void write(const StoredEdge* edge);
|
||||
public:
|
||||
ObjectSampleWriter(JfrCheckpointWriter& writer, const EdgeStore* store);
|
||||
ObjectSampleWriter(JfrCheckpointWriter& writer, EdgeStore* store);
|
||||
~ObjectSampleWriter();
|
||||
|
||||
bool operator()(const RoutableEdge& edge);
|
||||
bool operator()(StoredEdge& edge);
|
||||
};
|
||||
|
||||
#endif // SHARE_JFR_LEAKPROFILER_CHECKPOINT_OBJECTSAMPLEWRITER_HPP
|
||||
|
@ -132,7 +132,7 @@ class ReferenceToRootClosure : public StackObj {
|
||||
bool ReferenceToRootClosure::do_cldg_roots() {
|
||||
assert(!complete(), "invariant");
|
||||
ReferenceLocateClosure rlc(_callback, OldObjectRoot::_class_loader_data, OldObjectRoot::_type_undetermined, NULL);
|
||||
CLDToOopClosure cldt_closure(&rlc, ClassLoaderData::_claim_strong);
|
||||
CLDToOopClosure cldt_closure(&rlc, ClassLoaderData::_claim_none);
|
||||
ClassLoaderDataGraph::always_strong_cld_do(&cldt_closure);
|
||||
return rlc.complete();
|
||||
}
|
||||
@ -435,9 +435,6 @@ class RootResolverMarkScope : public MarkScope {
|
||||
};
|
||||
|
||||
void RootResolver::resolve(RootCallback& callback) {
|
||||
|
||||
// Need to clear cld claim bit before starting
|
||||
ClassLoaderDataGraph::clear_claimed_marks();
|
||||
RootResolverMarkScope mark_scope;
|
||||
|
||||
// thread local roots
|
||||
|
@ -25,8 +25,8 @@
|
||||
#ifndef SHARE_JFR_LEAKPROFILER_CHECKPOINT_ROOTRESOLVER_HPP
|
||||
#define SHARE_JFR_LEAKPROFILER_CHECKPOINT_ROOTRESOLVER_HPP
|
||||
|
||||
#include "memory/allocation.hpp"
|
||||
#include "jfr/leakprofiler/utilities/rootType.hpp"
|
||||
#include "memory/allocation.hpp"
|
||||
#include "oops/oopsHierarchy.hpp"
|
||||
|
||||
struct RootCallbackInfo {
|
||||
|
@ -1,236 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
#include "precompiled.hpp"
|
||||
#include "gc/shared/collectedHeap.hpp"
|
||||
#include "jfr/jfrEvents.hpp"
|
||||
#include "jfr/leakprofiler/utilities/granularTimer.hpp"
|
||||
#include "jfr/leakprofiler/chains/rootSetClosure.hpp"
|
||||
#include "jfr/leakprofiler/chains/edge.hpp"
|
||||
#include "jfr/leakprofiler/chains/edgeQueue.hpp"
|
||||
#include "jfr/leakprofiler/chains/edgeStore.hpp"
|
||||
#include "jfr/leakprofiler/chains/bitset.hpp"
|
||||
#include "jfr/leakprofiler/sampling/objectSample.hpp"
|
||||
#include "jfr/leakprofiler/leakProfiler.hpp"
|
||||
#include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp"
|
||||
#include "jfr/leakprofiler/sampling/objectSampler.hpp"
|
||||
#include "jfr/leakprofiler/emitEventOperation.hpp"
|
||||
#include "jfr/leakprofiler/chains/bfsClosure.hpp"
|
||||
#include "jfr/leakprofiler/chains/dfsClosure.hpp"
|
||||
#include "jfr/leakprofiler/chains/objectSampleMarker.hpp"
|
||||
#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
|
||||
#include "jfr/support/jfrThreadId.hpp"
|
||||
#include "logging/log.hpp"
|
||||
#include "memory/resourceArea.hpp"
|
||||
#include "memory/universe.hpp"
|
||||
#include "oops/markOop.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "runtime/safepoint.hpp"
|
||||
#include "runtime/vmThread.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
|
||||
/* The EdgeQueue is backed by directly managed virtual memory.
 * We will attempt to dimension an initial reservation
 * in proportion to the size of the heap (represented by heap_region).
 * Initial memory reservation: 5% of the heap OR at least 32 Mb
 * Commit ratio: 1 : 10 (subject to allocation granularities)
 */
static size_t edge_queue_memory_reservation(const MemRegion& heap_region) {
  const size_t memory_reservation_bytes = MAX2(heap_region.byte_size() / 20, 32*M);
  assert(memory_reservation_bytes >= (size_t)32*M, "invariant");
  return memory_reservation_bytes;
}

static size_t edge_queue_memory_commit_size(size_t memory_reservation_bytes) {
  const size_t memory_commit_block_size_bytes = memory_reservation_bytes / 10;
  assert(memory_commit_block_size_bytes >= (size_t)3*M, "invariant");
  return memory_commit_block_size_bytes;
}

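To make the dimensioning rule above concrete, here is a small standalone sketch (hypothetical numbers, not part of the patch) that applies the same arithmetic to an assumed 8 GB heap: roughly 5% of the heap (about 400 MB) is reserved, and each commit block is a tenth of that (about 40 MB).

// Standalone illustration of the sizing rule above; the 8 GB heap size is an assumption.
#include <algorithm>
#include <cstdio>

int main() {
  const unsigned long long M = 1024 * 1024;
  const unsigned long long heap_bytes = 8192 * M;                            // assumed 8 GB heap
  const unsigned long long reservation = std::max(heap_bytes / 20, 32 * M);  // 5% of heap, at least 32 MB
  const unsigned long long commit_block = reservation / 10;                  // 1 : 10 commit ratio
  std::printf("reservation: %llu MB, commit block: %llu MB\n", reservation / M, commit_block / M);
  return 0;
}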
static void log_edge_queue_summary(const EdgeQueue& edge_queue) {
|
||||
log_trace(jfr, system)("EdgeQueue reserved size total: " SIZE_FORMAT " [KB]", edge_queue.reserved_size() / K);
|
||||
log_trace(jfr, system)("EdgeQueue edges total: " SIZE_FORMAT, edge_queue.top());
|
||||
log_trace(jfr, system)("EdgeQueue liveset total: " SIZE_FORMAT " [KB]", edge_queue.live_set() / K);
|
||||
if (edge_queue.reserved_size() > 0) {
|
||||
log_trace(jfr, system)("EdgeQueue commit reserve ratio: %f\n",
|
||||
((double)edge_queue.live_set() / (double)edge_queue.reserved_size()));
|
||||
}
|
||||
}
|
||||
|
||||
void EmitEventOperation::doit() {
|
||||
assert(LeakProfiler::is_running(), "invariant");
|
||||
_object_sampler = LeakProfiler::object_sampler();
|
||||
assert(_object_sampler != NULL, "invariant");
|
||||
|
||||
_vm_thread = VMThread::vm_thread();
|
||||
assert(_vm_thread == Thread::current(), "invariant");
|
||||
_vm_thread_local = _vm_thread->jfr_thread_local();
|
||||
assert(_vm_thread_local != NULL, "invariant");
|
||||
assert(_vm_thread->jfr_thread_local()->thread_id() == JFR_THREAD_ID(_vm_thread), "invariant");
|
||||
|
||||
// The VM_Operation::evaluate() which invoked doit()
|
||||
// contains a top level ResourceMark
|
||||
|
||||
// save the original markWord for the potential leak objects
|
||||
// to be restored on function exit
|
||||
ObjectSampleMarker marker;
|
||||
if (ObjectSampleCheckpoint::mark(marker, _emit_all) == 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
EdgeStore edge_store;
|
||||
|
||||
GranularTimer::start(_cutoff_ticks, 1000000);
|
||||
if (_cutoff_ticks <= 0) {
|
||||
// no chains
|
||||
write_events(&edge_store);
|
||||
return;
|
||||
}
|
||||
|
||||
assert(_cutoff_ticks > 0, "invariant");
|
||||
|
||||
// The bitset used for marking is dimensioned as a function of the heap size
|
||||
const MemRegion heap_region = Universe::heap()->reserved_region();
|
||||
BitSet mark_bits(heap_region);
|
||||
|
||||
// The edge queue is dimensioned as a fraction of the heap size
|
||||
const size_t edge_queue_reservation_size = edge_queue_memory_reservation(heap_region);
|
||||
EdgeQueue edge_queue(edge_queue_reservation_size, edge_queue_memory_commit_size(edge_queue_reservation_size));
|
||||
|
||||
// The initialize() routines will attempt to reserve and allocate backing storage memory.
|
||||
// Failure to accommodate will render root chain processing impossible.
|
||||
// As a fallback on failure, just write out the existing samples, flat, without chains.
|
||||
if (!(mark_bits.initialize() && edge_queue.initialize())) {
|
||||
log_warning(jfr)("Unable to allocate memory for root chain processing");
|
||||
write_events(&edge_store);
|
||||
return;
|
||||
}
|
||||
|
||||
// necessary condition for attempting a root set iteration
|
||||
Universe::heap()->ensure_parsability(false);
|
||||
|
||||
RootSetClosure::add_to_queue(&edge_queue);
|
||||
if (edge_queue.is_full()) {
|
||||
// Pathological case where roots don't fit in queue
|
||||
// Do a depth-first search, but mark roots first
|
||||
// to avoid walking sideways over roots
|
||||
DFSClosure::find_leaks_from_root_set(&edge_store, &mark_bits);
|
||||
} else {
|
||||
BFSClosure bfs(&edge_queue, &edge_store, &mark_bits);
|
||||
bfs.process();
|
||||
}
|
||||
GranularTimer::stop();
|
||||
write_events(&edge_store);
|
||||
log_edge_queue_summary(edge_queue);
|
||||
}
|
||||
|
||||
int EmitEventOperation::write_events(EdgeStore* edge_store) {
|
||||
assert(_object_sampler != NULL, "invariant");
|
||||
assert(edge_store != NULL, "invariant");
|
||||
assert(_vm_thread != NULL, "invariant");
|
||||
assert(_vm_thread_local != NULL, "invariant");
|
||||
assert(SafepointSynchronize::is_at_safepoint(), "invariant");
|
||||
|
||||
// save thread id in preparation for thread local trace data manipulations
|
||||
const traceid vmthread_id = _vm_thread_local->thread_id();
|
||||
assert(_vm_thread_local->thread_id() == JFR_THREAD_ID(_vm_thread), "invariant");
|
||||
|
||||
const jlong last_sweep = _emit_all ? max_jlong : _object_sampler->last_sweep().value();
|
||||
int count = 0;
|
||||
|
||||
const ObjectSample* current = _object_sampler->first();
|
||||
while (current != NULL) {
|
||||
ObjectSample* prev = current->prev();
|
||||
if (current->is_alive_and_older_than(last_sweep)) {
|
||||
write_event(current, edge_store);
|
||||
++count;
|
||||
}
|
||||
current = prev;
|
||||
}
|
||||
|
||||
// restore thread local stack trace and thread id
|
||||
_vm_thread_local->set_thread_id(vmthread_id);
|
||||
_vm_thread_local->clear_cached_stack_trace();
|
||||
assert(_vm_thread_local->thread_id() == JFR_THREAD_ID(_vm_thread), "invariant");
|
||||
|
||||
if (count > 0) {
|
||||
// serialize associated checkpoints
|
||||
ObjectSampleCheckpoint::write(edge_store, _emit_all, _vm_thread);
|
||||
}
|
||||
return count;
|
||||
}
|
||||
|
||||
static int array_size(const oop object) {
|
||||
assert(object != NULL, "invariant");
|
||||
if (object->is_array()) {
|
||||
return arrayOop(object)->length();
|
||||
}
|
||||
return min_jint;
|
||||
}
|
||||
|
||||
void EmitEventOperation::write_event(const ObjectSample* sample, EdgeStore* edge_store) {
|
||||
assert(sample != NULL, "invariant");
|
||||
assert(!sample->is_dead(), "invariant");
|
||||
assert(edge_store != NULL, "invariant");
|
||||
assert(_vm_thread_local != NULL, "invariant");
|
||||
const oop* object_addr = sample->object_addr();
|
||||
assert(*object_addr != NULL, "invariant");
|
||||
|
||||
const Edge* edge = (const Edge*)(*object_addr)->mark();
|
||||
traceid gc_root_id = 0;
|
||||
if (edge == NULL) {
|
||||
// In order to dump out a representation of the event
|
||||
// even though it was not reachable / too long to reach,
|
||||
// we need to register a top level edge for this object
|
||||
Edge e(NULL, object_addr);
|
||||
edge_store->add_chain(&e, 1);
|
||||
edge = (const Edge*)(*object_addr)->mark();
|
||||
} else {
|
||||
gc_root_id = edge_store->get_root_id(edge);
|
||||
}
|
||||
|
||||
assert(edge != NULL, "invariant");
|
||||
assert(edge->pointee() == *object_addr, "invariant");
|
||||
const traceid object_id = edge_store->get_id(edge);
|
||||
assert(object_id != 0, "invariant");
|
||||
|
||||
EventOldObjectSample e(UNTIMED);
|
||||
e.set_starttime(GranularTimer::start_time());
|
||||
e.set_endtime(GranularTimer::end_time());
|
||||
e.set_allocationTime(sample->allocation_time());
|
||||
e.set_lastKnownHeapUsage(sample->heap_used_at_last_gc());
|
||||
e.set_object(object_id);
|
||||
e.set_arrayElements(array_size(*object_addr));
|
||||
e.set_root(gc_root_id);
|
||||
|
||||
// Temporarily assigning both the stack trace id and thread id
|
||||
// onto the thread local data structure of the VMThread (for the duration
|
||||
// of the commit() call). This trick provides a means to override
|
||||
// the event generation mechanism by injecting externally provided id's.
|
||||
// Here, in particular, this allows us to emit an old object event
|
||||
// supplying information from where the actual sampling occurred.
|
||||
_vm_thread_local->set_cached_stack_trace_id(sample->stack_trace_id());
|
||||
assert(sample->has_thread(), "invariant");
|
||||
_vm_thread_local->set_thread_id(sample->thread_id());
|
||||
e.commit();
|
||||
}
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -23,25 +23,31 @@
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "jfr/leakprofiler/emitEventOperation.hpp"
|
||||
#include "jfr/leakprofiler/leakProfiler.hpp"
|
||||
#include "jfr/leakprofiler/startOperation.hpp"
|
||||
#include "jfr/leakprofiler/stopOperation.hpp"
|
||||
#include "jfr/leakprofiler/checkpoint/eventEmitter.hpp"
|
||||
#include "jfr/leakprofiler/sampling/objectSampler.hpp"
|
||||
#include "jfr/recorder/service/jfrOptionSet.hpp"
|
||||
#include "logging/log.hpp"
|
||||
#include "memory/iterator.hpp"
|
||||
#include "oops/oop.hpp"
|
||||
#include "runtime/atomic.hpp"
|
||||
#include "runtime/orderAccess.hpp"
|
||||
#include "runtime/thread.inline.hpp"
|
||||
#include "runtime/vmThread.hpp"
|
||||
#include "utilities/ostream.hpp"
|
||||
|
||||
// Only to be updated during safepoint
|
||||
ObjectSampler* LeakProfiler::_object_sampler = NULL;
|
||||
bool LeakProfiler::is_running() {
|
||||
return ObjectSampler::is_created();
|
||||
}
|
||||
|
||||
bool LeakProfiler::start(int sample_count) {
|
||||
if (is_running()) {
|
||||
return true;
|
||||
}
|
||||
|
||||
// Allows user to disable leak profiler on command line by setting queue size to zero.
|
||||
if (sample_count == 0) {
|
||||
return false;
|
||||
}
|
||||
|
||||
static volatile jbyte suspended = 0;
|
||||
bool LeakProfiler::start(jint sample_count) {
|
||||
if (UseZGC) {
|
||||
log_warning(jfr)("LeakProfiler is currently not supported in combination with ZGC");
|
||||
return false;
|
||||
@ -52,49 +58,56 @@ bool LeakProfiler::start(jint sample_count) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (_object_sampler != NULL) {
|
||||
// already started
|
||||
return true;
|
||||
assert(!is_running(), "invariant");
|
||||
assert(sample_count > 0, "invariant");
|
||||
|
||||
// schedule the safepoint operation for installing the object sampler
|
||||
StartOperation op(sample_count);
|
||||
VMThread::execute(&op);
|
||||
|
||||
if (!is_running()) {
|
||||
log_trace(jfr, system)("Object sampling could not be started because the sampler could not be allocated");
|
||||
return false;
|
||||
}
|
||||
// Allows user to disable leak profiler on command line by setting queue size to zero.
|
||||
if (sample_count > 0) {
|
||||
StartOperation op(sample_count);
|
||||
VMThread::execute(&op);
|
||||
return _object_sampler != NULL;
|
||||
}
|
||||
return false;
|
||||
assert(is_running(), "invariant");
|
||||
log_trace(jfr, system)("Object sampling started");
|
||||
return true;
|
||||
}
|
||||
|
||||
bool LeakProfiler::stop() {
|
||||
if (_object_sampler == NULL) {
|
||||
// already stopped/not started
|
||||
return true;
|
||||
if (!is_running()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// schedule the safepoint operation for uninstalling and destroying the object sampler
|
||||
StopOperation op;
|
||||
VMThread::execute(&op);
|
||||
return _object_sampler == NULL;
|
||||
|
||||
assert(!is_running(), "invariant");
|
||||
log_trace(jfr, system)("Object sampling stopped");
|
||||
return true;
|
||||
}
|
||||
|
||||
void LeakProfiler::emit_events(jlong cutoff_ticks, bool emit_all) {
|
||||
void LeakProfiler::emit_events(int64_t cutoff_ticks, bool emit_all) {
|
||||
if (!is_running()) {
|
||||
return;
|
||||
}
|
||||
EmitEventOperation op(cutoff_ticks, emit_all);
|
||||
VMThread::execute(&op);
|
||||
// exclusive access to object sampler instance
|
||||
ObjectSampler* const sampler = ObjectSampler::acquire();
|
||||
assert(sampler != NULL, "invariant");
|
||||
EventEmitter::emit(sampler, cutoff_ticks, emit_all);
|
||||
ObjectSampler::release();
|
||||
}
|
||||
|
||||
void LeakProfiler::oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
|
||||
assert(SafepointSynchronize::is_at_safepoint(),
|
||||
"Leak Profiler::oops_do(...) may only be called during safepoint");
|
||||
|
||||
if (_object_sampler != NULL) {
|
||||
_object_sampler->oops_do(is_alive, f);
|
||||
if (is_running()) {
|
||||
ObjectSampler::oops_do(is_alive, f);
|
||||
}
|
||||
}
|
||||
|
||||
void LeakProfiler::sample(HeapWord* object,
|
||||
size_t size,
|
||||
JavaThread* thread) {
|
||||
void LeakProfiler::sample(HeapWord* object, size_t size, JavaThread* thread) {
|
||||
assert(is_running(), "invariant");
|
||||
assert(thread != NULL, "invariant");
|
||||
assert(thread->thread_state() == _thread_in_vm, "invariant");
|
||||
@ -104,39 +117,5 @@ void LeakProfiler::sample(HeapWord* object,
|
||||
return;
|
||||
}
|
||||
|
||||
_object_sampler->add(object, size, thread);
|
||||
}
|
||||
|
||||
ObjectSampler* LeakProfiler::object_sampler() {
|
||||
assert(is_suspended() || SafepointSynchronize::is_at_safepoint(),
|
||||
"Leak Profiler::object_sampler() may only be called during safepoint");
|
||||
return _object_sampler;
|
||||
}
|
||||
|
||||
void LeakProfiler::set_object_sampler(ObjectSampler* object_sampler) {
|
||||
assert(SafepointSynchronize::is_at_safepoint(),
|
||||
"Leak Profiler::set_object_sampler() may only be called during safepoint");
|
||||
_object_sampler = object_sampler;
|
||||
}
|
||||
|
||||
bool LeakProfiler::is_running() {
|
||||
return _object_sampler != NULL && !suspended;
|
||||
}
|
||||
|
||||
bool LeakProfiler::is_suspended() {
|
||||
return _object_sampler != NULL && suspended;
|
||||
}
|
||||
|
||||
void LeakProfiler::resume() {
|
||||
assert(is_suspended(), "invariant");
|
||||
OrderAccess::storestore();
|
||||
Atomic::store((jbyte)0, &suspended);
|
||||
assert(is_running(), "invariant");
|
||||
}
|
||||
|
||||
void LeakProfiler::suspend() {
|
||||
assert(SafepointSynchronize::is_at_safepoint(), "invariant");
|
||||
assert(_object_sampler != NULL, "invariant");
|
||||
assert(!is_suspended(), "invariant");
|
||||
suspended = (jbyte)1; // safepoint visible
|
||||
ObjectSampler::sample(object, size, thread);
|
||||
}
|
||||
|
@ -28,36 +28,16 @@
|
||||
#include "memory/allocation.hpp"
|
||||
|
||||
class BoolObjectClosure;
|
||||
class ObjectSampler;
|
||||
class OopClosure;
|
||||
class JavaThread;
|
||||
class Thread;
|
||||
|
||||
class LeakProfiler : public AllStatic {
|
||||
friend class ClassUnloadTypeSet;
|
||||
friend class EmitEventOperation;
|
||||
friend class ObjectSampleCheckpoint;
|
||||
friend class StartOperation;
|
||||
friend class StopOperation;
|
||||
friend class TypeSet;
|
||||
friend class WriteObjectSampleStacktrace;
|
||||
|
||||
private:
|
||||
static ObjectSampler* _object_sampler;
|
||||
|
||||
static void set_object_sampler(ObjectSampler* object_sampler);
|
||||
static ObjectSampler* object_sampler();
|
||||
|
||||
static void suspend();
|
||||
static void resume();
|
||||
static bool is_suspended();
|
||||
|
||||
public:
|
||||
static bool start(jint sample_count);
|
||||
static bool start(int sample_count);
|
||||
static bool stop();
|
||||
static void emit_events(jlong cutoff_ticks, bool emit_all);
|
||||
static bool is_running();
|
||||
|
||||
static void emit_events(int64_t cutoff_ticks, bool emit_all);
|
||||
static void sample(HeapWord* object, size_t size, JavaThread* thread);
|
||||
|
||||
// Called by GC
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -21,6 +21,7 @@
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "jfr/jfrEvents.hpp"
|
||||
#include "jfr/leakprofiler/sampling/objectSample.hpp"
|
||||
@ -35,8 +36,18 @@
|
||||
#include "logging/log.hpp"
|
||||
#include "memory/universe.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "runtime/atomic.hpp"
|
||||
#include "runtime/orderAccess.hpp"
|
||||
#include "runtime/safepoint.hpp"
|
||||
#include "runtime/thread.hpp"
|
||||
|
||||
static ObjectSampler* _instance = NULL;
|
||||
|
||||
static ObjectSampler& instance() {
|
||||
assert(_instance != NULL, "invariant");
|
||||
return *_instance;
|
||||
}
|
||||
|
||||
ObjectSampler::ObjectSampler(size_t size) :
|
||||
_priority_queue(new SamplePriorityQueue(size)),
|
||||
_list(new SampleList(size)),
|
||||
@ -44,7 +55,6 @@ ObjectSampler::ObjectSampler(size_t size) :
|
||||
_total_allocated(0),
|
||||
_threshold(0),
|
||||
_size(size),
|
||||
_tryLock(0),
|
||||
_dead_samples(false) {}
|
||||
|
||||
ObjectSampler::~ObjectSampler() {
|
||||
@ -54,32 +64,110 @@ ObjectSampler::~ObjectSampler() {
|
||||
_list = NULL;
|
||||
}
|
||||
|
||||
void ObjectSampler::add(HeapWord* obj, size_t allocated, JavaThread* thread) {
|
||||
bool ObjectSampler::create(size_t size) {
|
||||
assert(SafepointSynchronize::is_at_safepoint(), "invariant");
|
||||
assert(_instance == NULL, "invariant");
|
||||
_instance = new ObjectSampler(size);
|
||||
return _instance != NULL;
|
||||
}
|
||||
|
||||
bool ObjectSampler::is_created() {
|
||||
return _instance != NULL;
|
||||
}
|
||||
|
||||
ObjectSampler* ObjectSampler::sampler() {
|
||||
assert(is_created(), "invariant");
|
||||
return _instance;
|
||||
}
|
||||
|
||||
void ObjectSampler::destroy() {
|
||||
assert(SafepointSynchronize::is_at_safepoint(), "invariant");
|
||||
if (_instance != NULL) {
|
||||
ObjectSampler* const sampler = _instance;
|
||||
_instance = NULL;
|
||||
delete sampler;
|
||||
}
|
||||
}
|
||||
|
||||
static volatile int _lock = 0;

ObjectSampler* ObjectSampler::acquire() {
  assert(is_created(), "invariant");
  while (Atomic::cmpxchg(1, &_lock, 0) == 1) {}
  return _instance;
}

void ObjectSampler::release() {
  assert(is_created(), "invariant");
  OrderAccess::fence();
  _lock = 0;
}

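For context, this is the calling pattern the new acquire()/release() pair is designed for; the lines below mirror LeakProfiler::emit_events() from the leakProfiler.cpp hunk earlier in this commit, with explanatory comments added here for illustration only.

// Sketch of the intended usage (mirrors LeakProfiler::emit_events(); comments are illustrative):
ObjectSampler* const sampler = ObjectSampler::acquire();  // spin on the CAS-based lock until exclusive
assert(sampler != NULL, "invariant");
EventEmitter::emit(sampler, cutoff_ticks, emit_all);      // safe to walk the sample list here
ObjectSampler::release();                                 // fence, then publish the unlock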
static traceid get_thread_id(JavaThread* thread) {
|
||||
assert(thread != NULL, "invariant");
|
||||
const traceid thread_id = thread->threadObj() != NULL ? thread->jfr_thread_local()->thread_id() : 0;
|
||||
if (thread->threadObj() == NULL) {
|
||||
return 0;
|
||||
}
|
||||
const JfrThreadLocal* const tl = thread->jfr_thread_local();
|
||||
assert(tl != NULL, "invariant");
|
||||
if (!tl->has_thread_checkpoint()) {
|
||||
JfrCheckpointManager::create_thread_checkpoint(thread);
|
||||
}
|
||||
assert(tl->has_thread_checkpoint(), "invariant");
|
||||
return tl->thread_id();
|
||||
}
|
||||
|
||||
// Populates the thread local stack frames, but does not add them
|
||||
// to the stacktrace repository (...yet, see stacktrace_id() below)
|
||||
//
|
||||
void ObjectSampler::fill_stacktrace(JfrStackTrace* stacktrace, JavaThread* thread) {
|
||||
assert(stacktrace != NULL, "invariant");
|
||||
assert(thread != NULL, "invariant");
|
||||
if (JfrEventSetting::has_stacktrace(EventOldObjectSample::eventId)) {
|
||||
JfrStackTraceRepository::fill_stacktrace_for(thread, stacktrace, 0);
|
||||
}
|
||||
}
|
||||
|
||||
// We were successful in acquiring the try lock and have been selected for adding a sample.
|
||||
// Go ahead with installing our previously taken stacktrace into the stacktrace repository.
|
||||
//
|
||||
traceid ObjectSampler::stacktrace_id(const JfrStackTrace* stacktrace, JavaThread* thread) {
|
||||
assert(stacktrace != NULL, "invariant");
|
||||
assert(stacktrace->hash() != 0, "invariant");
|
||||
const traceid stacktrace_id = JfrStackTraceRepository::add(stacktrace, thread);
|
||||
thread->jfr_thread_local()->set_cached_stack_trace_id(stacktrace_id, stacktrace->hash());
|
||||
return stacktrace_id;
|
||||
}
|
||||
|
||||
void ObjectSampler::sample(HeapWord* obj, size_t allocated, JavaThread* thread) {
|
||||
assert(thread != NULL, "invariant");
|
||||
assert(is_created(), "invariant");
|
||||
|
||||
const traceid thread_id = get_thread_id(thread);
|
||||
if (thread_id == 0) {
|
||||
return;
|
||||
}
|
||||
assert(thread_id != 0, "invariant");
|
||||
|
||||
if (!thread->jfr_thread_local()->has_thread_checkpoint()) {
|
||||
JfrCheckpointManager::create_thread_checkpoint(thread);
|
||||
assert(thread->jfr_thread_local()->has_thread_checkpoint(), "invariant");
|
||||
}
|
||||
const JfrThreadLocal* const tl = thread->jfr_thread_local();
|
||||
JfrStackTrace stacktrace(tl->stackframes(), tl->stackdepth());
|
||||
fill_stacktrace(&stacktrace, thread);
|
||||
|
||||
traceid stack_trace_id = 0;
|
||||
unsigned int stack_trace_hash = 0;
|
||||
if (JfrEventSetting::has_stacktrace(EventOldObjectSample::eventId)) {
|
||||
stack_trace_id = JfrStackTraceRepository::record(thread, 0, &stack_trace_hash);
|
||||
thread->jfr_thread_local()->set_cached_stack_trace_id(stack_trace_id, stack_trace_hash);
|
||||
}
|
||||
|
||||
JfrTryLock tryLock(&_tryLock);
|
||||
// try enter critical section
|
||||
JfrTryLock tryLock(&_lock);
|
||||
if (!tryLock.has_lock()) {
|
||||
log_trace(jfr, oldobject, sampling)("Skipping old object sample due to lock contention");
|
||||
return;
|
||||
}
|
||||
|
||||
instance().add(obj, allocated, thread_id, &stacktrace, thread);
|
||||
}
|
||||
|
||||
void ObjectSampler::add(HeapWord* obj, size_t allocated, traceid thread_id, JfrStackTrace* stacktrace, JavaThread* thread) {
|
||||
assert(stacktrace != NULL, "invariant");
|
||||
assert(thread_id != 0, "invariant");
|
||||
assert(thread != NULL, "invariant");
|
||||
assert(thread->jfr_thread_local()->has_thread_checkpoint(), "invariant");
|
||||
|
||||
if (_dead_samples) {
|
||||
scavenge();
|
||||
assert(!_dead_samples, "invariant");
|
||||
@ -101,13 +189,13 @@ void ObjectSampler::add(HeapWord* obj, size_t allocated, JavaThread* thread) {
|
||||
}
|
||||
|
||||
assert(sample != NULL, "invariant");
|
||||
assert(thread_id != 0, "invariant");
|
||||
sample->set_thread_id(thread_id);
|
||||
sample->set_thread_checkpoint(thread->jfr_thread_local()->thread_checkpoint());
|
||||
|
||||
if (stack_trace_id != 0) {
|
||||
sample->set_stack_trace_id(stack_trace_id);
|
||||
sample->set_stack_trace_hash(stack_trace_hash);
|
||||
const unsigned int stacktrace_hash = stacktrace->hash();
|
||||
if (stacktrace_hash != 0) {
|
||||
sample->set_stack_trace_id(stacktrace_id(stacktrace, thread));
|
||||
sample->set_stack_trace_hash(stacktrace_hash);
|
||||
}
|
||||
|
||||
sample->set_span(allocated);
|
||||
@ -118,6 +206,53 @@ void ObjectSampler::add(HeapWord* obj, size_t allocated, JavaThread* thread) {
|
||||
_priority_queue->push(sample);
|
||||
}
|
||||
|
||||
void ObjectSampler::scavenge() {
|
||||
ObjectSample* current = _list->last();
|
||||
while (current != NULL) {
|
||||
ObjectSample* next = current->next();
|
||||
if (current->is_dead()) {
|
||||
remove_dead(current);
|
||||
}
|
||||
current = next;
|
||||
}
|
||||
_dead_samples = false;
|
||||
}
|
||||
|
||||
void ObjectSampler::remove_dead(ObjectSample* sample) {
|
||||
assert(sample != NULL, "invariant");
|
||||
assert(sample->is_dead(), "invariant");
|
||||
ObjectSample* const previous = sample->prev();
|
||||
// push span on to previous
|
||||
if (previous != NULL) {
|
||||
_priority_queue->remove(previous);
|
||||
previous->add_span(sample->span());
|
||||
_priority_queue->push(previous);
|
||||
}
|
||||
_priority_queue->remove(sample);
|
||||
_list->release(sample);
|
||||
}
|
||||
|
||||
void ObjectSampler::oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
|
||||
assert(is_created(), "invariant");
|
||||
assert(SafepointSynchronize::is_at_safepoint(), "invariant");
|
||||
ObjectSampler& sampler = instance();
|
||||
ObjectSample* current = sampler._list->last();
|
||||
while (current != NULL) {
|
||||
ObjectSample* next = current->next();
|
||||
if (!current->is_dead()) {
|
||||
if (is_alive->do_object_b(current->object())) {
|
||||
// The weakly referenced object is alive, update pointer
|
||||
f->do_oop(const_cast<oop*>(current->object_addr()));
|
||||
} else {
|
||||
current->set_dead();
|
||||
sampler._dead_samples = true;
|
||||
}
|
||||
}
|
||||
current = next;
|
||||
}
|
||||
sampler._last_sweep = JfrTicks::now();
|
||||
}
|
||||
|
||||
const ObjectSample* ObjectSampler::last() const {
|
||||
return _list->last();
|
||||
}
|
||||
@ -134,50 +269,6 @@ void ObjectSampler::set_last_resolved(const ObjectSample* sample) {
|
||||
_list->set_last_resolved(sample);
|
||||
}
|
||||
|
||||
void ObjectSampler::oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
|
||||
ObjectSample* current = _list->last();
|
||||
while (current != NULL) {
|
||||
ObjectSample* next = current->next();
|
||||
if (!current->is_dead()) {
|
||||
if (is_alive->do_object_b(current->object())) {
|
||||
// The weakly referenced object is alive, update pointer
|
||||
f->do_oop(const_cast<oop*>(current->object_addr()));
|
||||
} else {
|
||||
current->set_dead();
|
||||
_dead_samples = true;
|
||||
}
|
||||
}
|
||||
current = next;
|
||||
}
|
||||
_last_sweep = JfrTicks::now();
|
||||
}
|
||||
|
||||
void ObjectSampler::remove_dead(ObjectSample* sample) {
|
||||
assert(sample != NULL, "invariant");
|
||||
assert(sample->is_dead(), "invariant");
|
||||
ObjectSample* const previous = sample->prev();
|
||||
// push span on to previous
|
||||
if (previous != NULL) {
|
||||
_priority_queue->remove(previous);
|
||||
previous->add_span(sample->span());
|
||||
_priority_queue->push(previous);
|
||||
}
|
||||
_priority_queue->remove(sample);
|
||||
_list->release(sample);
|
||||
}
|
||||
|
||||
void ObjectSampler::scavenge() {
|
||||
ObjectSample* current = _list->last();
|
||||
while (current != NULL) {
|
||||
ObjectSample* next = current->next();
|
||||
if (current->is_dead()) {
|
||||
remove_dead(current);
|
||||
}
|
||||
current = next;
|
||||
}
|
||||
_dead_samples = false;
|
||||
}
|
||||
|
||||
int ObjectSampler::item_count() const {
|
||||
return _priority_queue->count();
|
||||
}
|
||||
@ -189,7 +280,7 @@ const ObjectSample* ObjectSampler::item_at(int index) const {
|
||||
ObjectSample* ObjectSampler::item_at(int index) {
|
||||
return const_cast<ObjectSample*>(
|
||||
const_cast<const ObjectSampler*>(this)->item_at(index)
|
||||
);
|
||||
);
|
||||
}
|
||||
|
||||
const JfrTicks& ObjectSampler::last_sweep() const {
|
||||
|
@ -28,7 +28,10 @@
|
||||
#include "memory/allocation.hpp"
|
||||
#include "jfr/utilities/jfrTime.hpp"
|
||||
|
||||
typedef u8 traceid;
|
||||
|
||||
class BoolObjectClosure;
|
||||
class JfrStackTrace;
|
||||
class OopClosure;
|
||||
class ObjectSample;
|
||||
class ObjectSampler;
|
||||
@ -40,11 +43,13 @@ class Thread;
|
||||
// making sure the samples are evenly distributed as
|
||||
// new entries are added and removed.
|
||||
class ObjectSampler : public CHeapObj<mtTracing> {
|
||||
friend class EventEmitter;
|
||||
friend class JfrRecorderService;
|
||||
friend class LeakProfiler;
|
||||
friend class ObjectSampleCheckpoint;
|
||||
friend class StartOperation;
|
||||
friend class StopOperation;
|
||||
friend class EmitEventOperation;
|
||||
friend class ObjectSampleCheckpoint;
|
||||
friend class WriteObjectSampleStacktrace;
|
||||
private:
|
||||
SamplePriorityQueue* _priority_queue;
|
||||
SampleList* _list;
|
||||
@ -52,20 +57,33 @@ class ObjectSampler : public CHeapObj<mtTracing> {
|
||||
size_t _total_allocated;
|
||||
size_t _threshold;
|
||||
size_t _size;
|
||||
volatile int _tryLock;
|
||||
bool _dead_samples;
|
||||
|
||||
// Lifecycle
|
||||
explicit ObjectSampler(size_t size);
|
||||
~ObjectSampler();
|
||||
static bool create(size_t size);
|
||||
static bool is_created();
|
||||
static ObjectSampler* sampler();
|
||||
static void destroy();
|
||||
|
||||
void add(HeapWord* object, size_t size, JavaThread* thread);
|
||||
void remove_dead(ObjectSample* sample);
|
||||
// For operations that require exclusive access (non-safepoint)
|
||||
static ObjectSampler* acquire();
|
||||
static void release();
|
||||
|
||||
// Stacktrace
|
||||
static void fill_stacktrace(JfrStackTrace* stacktrace, JavaThread* thread);
|
||||
traceid stacktrace_id(const JfrStackTrace* stacktrace, JavaThread* thread);
|
||||
|
||||
// Sampling
|
||||
static void sample(HeapWord* object, size_t size, JavaThread* thread);
|
||||
void add(HeapWord* object, size_t size, traceid thread_id, JfrStackTrace* stacktrace, JavaThread* thread);
|
||||
void scavenge();
|
||||
void remove_dead(ObjectSample* sample);
|
||||
|
||||
// Called by GC
|
||||
void oops_do(BoolObjectClosure* is_alive, OopClosure* f);
|
||||
static void oops_do(BoolObjectClosure* is_alive, OopClosure* f);
|
||||
|
||||
public:
|
||||
const ObjectSample* item_at(int index) const;
|
||||
ObjectSample* item_at(int index);
|
||||
int item_count() const;
|
||||
|
@ -25,35 +25,18 @@
|
||||
#ifndef SHARE_JFR_LEAKPROFILER_STARTOPERATION_HPP
|
||||
#define SHARE_JFR_LEAKPROFILER_STARTOPERATION_HPP
|
||||
|
||||
#include "jfr/recorder/jfrRecorder.hpp"
|
||||
#include "jfr/leakprofiler/leakProfiler.hpp"
|
||||
#include "jfr/leakprofiler/sampling/objectSampler.hpp"
|
||||
#include "jfr/recorder/service/jfrOptionSet.hpp"
|
||||
#include "logging/log.hpp"
|
||||
#include "runtime/vmOperations.hpp"
|
||||
#include "jfr/leakprofiler/utilities/vmOperation.hpp"
|
||||
|
||||
// Safepoint operation for starting leak profiler object sampler
|
||||
class StartOperation : public VM_Operation {
|
||||
// Safepoint operation for creating and starting the leak profiler object sampler
|
||||
class StartOperation : public OldObjectVMOperation {
|
||||
private:
|
||||
jlong _sample_count;
|
||||
int _sample_count;
|
||||
public:
|
||||
StartOperation(jlong sample_count) :
|
||||
_sample_count(sample_count) {
|
||||
}
|
||||
|
||||
Mode evaluation_mode() const {
|
||||
return _safepoint;
|
||||
}
|
||||
|
||||
VMOp_Type type() const {
|
||||
return VMOp_GC_HeapInspection;
|
||||
}
|
||||
StartOperation(int sample_count) : _sample_count(sample_count) {}
|
||||
|
||||
virtual void doit() {
|
||||
assert(!LeakProfiler::is_running(), "invariant");
|
||||
jint queue_size = JfrOptionSet::old_object_queue_size();
|
||||
LeakProfiler::set_object_sampler(new ObjectSampler(queue_size));
|
||||
log_trace(jfr, system)( "Object sampling started");
|
||||
ObjectSampler::create(_sample_count);
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -25,31 +25,14 @@
|
||||
#ifndef SHARE_JFR_LEAKPROFILER_STOPOPERATION_HPP
|
||||
#define SHARE_JFR_LEAKPROFILER_STOPOPERATION_HPP
|
||||
|
||||
#include "jfr/leakprofiler/leakProfiler.hpp"
|
||||
#include "jfr/leakprofiler/sampling/objectSampler.hpp"
|
||||
#include "jfr/recorder/service/jfrOptionSet.hpp"
|
||||
#include "logging/log.hpp"
|
||||
#include "runtime/vmOperations.hpp"
|
||||
#include "jfr/leakprofiler/utilities/vmOperation.hpp"
|
||||
|
||||
// Safepoint operation for stopping leak profiler object sampler
|
||||
class StopOperation : public VM_Operation {
|
||||
// Safepoint operation for stopping and destroying the leak profiler object sampler
|
||||
class StopOperation : public OldObjectVMOperation {
|
||||
public:
|
||||
StopOperation() {}
|
||||
|
||||
Mode evaluation_mode() const {
|
||||
return _safepoint;
|
||||
}
|
||||
|
||||
VMOp_Type type() const {
|
||||
return VMOp_GC_HeapInspection;
|
||||
}
|
||||
|
||||
virtual void doit() {
|
||||
assert(LeakProfiler::is_running(), "invariant");
|
||||
ObjectSampler* object_sampler = LeakProfiler::object_sampler();
|
||||
delete object_sampler;
|
||||
LeakProfiler::set_object_sampler(NULL);
|
||||
log_trace(jfr, system)( "Object sampling stopped");
|
||||
ObjectSampler::destroy();
|
||||
}
|
||||
};
|
||||
|
||||
|
src/hotspot/share/jfr/leakprofiler/utilities/vmOperation.hpp (new file, 41 lines)
@ -0,0 +1,41 @@
|
||||
/*
|
||||
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef SHARE_JFR_LEAKPROFILER_UTILITIES_VMOPERATION_HPP
#define SHARE_JFR_LEAKPROFILER_UTILITIES_VMOPERATION_HPP

#include "runtime/vmOperations.hpp"

class OldObjectVMOperation : public VM_Operation {
 public:
  Mode evaluation_mode() const {
    return _safepoint;
  }

  VMOp_Type type() const {
    return VMOp_JFROldObject;
  }
};

#endif // SHARE_JFR_LEAKPROFILER_UTILITIES_VMOPERATION_HPP
|
@ -311,7 +311,7 @@ void ClassUnloadTypeSet::serialize(JfrCheckpointWriter& writer) {
|
||||
if (LeakProfiler::is_running()) {
|
||||
JfrCheckpointWriter leakp_writer(false, true, Thread::current());
|
||||
type_set.write(writer, &leakp_writer);
|
||||
ObjectSampleCheckpoint::install(leakp_writer, true, true);
|
||||
ObjectSampleCheckpoint::install(leakp_writer, true);
|
||||
return;
|
||||
}
|
||||
type_set.write(writer, NULL);
|
||||
@ -319,10 +319,10 @@ void ClassUnloadTypeSet::serialize(JfrCheckpointWriter& writer) {
|
||||
|
||||
void TypeSet::serialize(JfrCheckpointWriter& writer) {
|
||||
TypeSetSerialization type_set(false);
|
||||
if (LeakProfiler::is_suspended()) {
|
||||
if (LeakProfiler::is_running()) {
|
||||
JfrCheckpointWriter leakp_writer(false, true, Thread::current());
|
||||
type_set.write(writer, &leakp_writer);
|
||||
ObjectSampleCheckpoint::install(leakp_writer, false, true);
|
||||
ObjectSampleCheckpoint::install(leakp_writer, false);
|
||||
return;
|
||||
}
|
||||
type_set.write(writer, NULL);
|
||||
|
@ -194,9 +194,6 @@ bool JfrRecorder::on_vm_start() {
|
||||
if (!validate_recording_options(thread)) {
|
||||
return false;
|
||||
}
|
||||
if (!JfrJavaEventWriter::initialize()) {
|
||||
return false;
|
||||
}
|
||||
if (!JfrOptionSet::configure(thread)) {
|
||||
return false;
|
||||
}
|
||||
@ -246,6 +243,9 @@ bool JfrRecorder::create_components() {
|
||||
ResourceMark rm;
|
||||
HandleMark hm;
|
||||
|
||||
if (!create_java_event_writer()) {
|
||||
return false;
|
||||
}
|
||||
if (!create_jvmti_agent()) {
|
||||
return false;
|
||||
}
|
||||
@ -287,6 +287,10 @@ static JfrStringPool* _stringpool = NULL;
|
||||
static JfrOSInterface* _os_interface = NULL;
|
||||
static JfrThreadSampling* _thread_sampling = NULL;
|
||||
|
||||
bool JfrRecorder::create_java_event_writer() {
|
||||
return JfrJavaEventWriter::initialize();
|
||||
}
|
||||
|
||||
bool JfrRecorder::create_jvmti_agent() {
|
||||
return JfrOptionSet::allow_retransforms() ? JfrJvmtiAgent::create() : true;
|
||||
}
|
||||
|
@ -40,6 +40,7 @@ class JfrRecorder : public JfrCHeapObj {
|
||||
private:
|
||||
static bool create_checkpoint_manager();
|
||||
static bool create_chunk_repository();
|
||||
static bool create_java_event_writer();
|
||||
static bool create_jvmti_agent();
|
||||
static bool create_os_interface();
|
||||
static bool create_post_box();
|
||||
|
@ -24,7 +24,9 @@
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "jfr/jni/jfrJavaSupport.hpp"
|
||||
#include "jfr/leakprofiler/leakProfiler.hpp"
|
||||
#include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp"
|
||||
#include "jfr/leakprofiler/sampling/objectSampler.hpp"
|
||||
#include "jfr/recorder/jfrRecorder.hpp"
|
||||
#include "jfr/recorder/checkpoint/jfrCheckpointManager.hpp"
|
||||
#include "jfr/recorder/checkpoint/jfrMetadataEvent.hpp"
|
||||
@ -335,6 +337,7 @@ void JfrRecorderService::prepare_for_vm_error_rotation() {
|
||||
open_new_chunk(true);
|
||||
}
|
||||
_checkpoint_manager.register_service_thread(Thread::current());
|
||||
JfrMetadataEvent::lock();
|
||||
}
|
||||
|
||||
void JfrRecorderService::open_new_chunk(bool vm_error) {
|
||||
@ -398,6 +401,11 @@ static void write_stacktrace_checkpoint(JfrStackTraceRepository& stack_trace_rep
|
||||
write_stack_trace_checkpoint.process();
|
||||
}
|
||||
|
||||
static void write_object_sample_stacktrace(ObjectSampler* sampler, JfrStackTraceRepository& stack_trace_repository) {
|
||||
WriteObjectSampleStacktrace object_sample_stacktrace(sampler, stack_trace_repository);
|
||||
object_sample_stacktrace.process();
|
||||
}
|
||||
|
||||
static void write_stringpool_checkpoint(JfrStringPool& string_pool, JfrChunkWriter& chunkwriter) {
|
||||
WriteStringPool write_string_pool(string_pool);
|
||||
WriteStringPoolCheckpoint write_string_pool_checkpoint(chunkwriter, TYPE_STRING, write_string_pool);
|
||||
@ -418,8 +426,9 @@ static void write_stringpool_checkpoint_safepoint(JfrStringPool& string_pool, Jf
|
||||
// write checkpoint epoch transition list->
|
||||
// write stack trace checkpoint ->
|
||||
// write string pool checkpoint ->
|
||||
// write storage ->
|
||||
// release stream lock
|
||||
// write object sample stacktraces ->
|
||||
// write storage ->
|
||||
// release stream lock
|
||||
//
|
||||
void JfrRecorderService::pre_safepoint_write() {
|
||||
MutexLocker stream_lock(JfrStream_lock, Mutex::_no_safepoint_check_flag);
|
||||
@ -428,6 +437,13 @@ void JfrRecorderService::pre_safepoint_write() {
|
||||
_checkpoint_manager.write_epoch_transition_mspace();
|
||||
write_stacktrace_checkpoint(_stack_trace_repository, _chunkwriter, false);
|
||||
write_stringpool_checkpoint(_string_pool, _chunkwriter);
|
||||
if (LeakProfiler::is_running()) {
|
||||
// Exclusive access to the object sampler instance.
|
||||
// The sampler is released (unlocked) later in post_safepoint_write.
|
||||
ObjectSampler* const sampler = ObjectSampler::acquire();
|
||||
assert(sampler != NULL, "invariant");
|
||||
write_object_sample_stacktrace(sampler, _stack_trace_repository);
|
||||
}
|
||||
_storage.write();
|
||||
}
|
||||
|
||||
@ -436,16 +452,10 @@ void JfrRecorderService::invoke_safepoint_write() {
|
||||
VMThread::execute(&safepoint_task);
|
||||
}
|
||||
|
||||
static void write_object_sample_stacktrace(JfrStackTraceRepository& stack_trace_repository) {
|
||||
WriteObjectSampleStacktrace object_sample_stacktrace(stack_trace_repository);
|
||||
object_sample_stacktrace.process();
|
||||
}
|
||||
|
||||
//
|
||||
// safepoint write sequence
|
||||
//
|
||||
// lock stream lock ->
|
||||
// write object sample stacktraces ->
|
||||
// write stacktrace repository ->
|
||||
// write string pool ->
|
||||
// write safepoint dependent types ->
|
||||
@ -458,7 +468,6 @@ static void write_object_sample_stacktrace(JfrStackTraceRepository& stack_trace_
|
||||
void JfrRecorderService::safepoint_write() {
|
||||
assert(SafepointSynchronize::is_at_safepoint(), "invariant");
|
||||
MutexLocker stream_lock(JfrStream_lock, Mutex::_no_safepoint_check_flag);
|
||||
write_object_sample_stacktrace(_stack_trace_repository);
|
||||
write_stacktrace_checkpoint(_stack_trace_repository, _chunkwriter, true);
|
||||
write_stringpool_checkpoint_safepoint(_string_pool, _chunkwriter);
|
||||
_checkpoint_manager.write_safepoint_types();
|
||||
@ -478,13 +487,14 @@ static int64_t write_metadata_event(JfrChunkWriter& chunkwriter) {
|
||||
//
|
||||
// post-safepoint write sequence
|
||||
//
|
||||
// lock stream lock ->
|
||||
// write type set ->
|
||||
// write checkpoints ->
|
||||
// write metadata event ->
|
||||
// write chunk header ->
|
||||
// close chunk fd ->
|
||||
// release stream lock
|
||||
// write type set ->
|
||||
// release object sampler ->
|
||||
// lock stream lock ->
|
||||
// write checkpoints ->
|
||||
// write metadata event ->
|
||||
// write chunk header ->
|
||||
// close chunk fd ->
|
||||
// release stream lock
|
||||
//
|
||||
void JfrRecorderService::post_safepoint_write() {
|
||||
assert(_chunkwriter.is_valid(), "invariant");
|
||||
@ -493,6 +503,11 @@ void JfrRecorderService::post_safepoint_write() {
|
||||
// already tagged artifacts for the previous epoch. We can accomplish this concurrently
|
||||
// with threads now tagging artifacts in relation to the new, now updated, epoch and remain outside of a safepoint.
|
||||
_checkpoint_manager.write_type_set();
|
||||
if (LeakProfiler::is_running()) {
|
||||
// The object sampler instance was exclusively acquired and locked in pre_safepoint_write.
|
||||
// Note: There is a dependency on write_type_set() above, ensure the release is subsequent.
|
||||
ObjectSampler::release();
|
||||
}
|
||||
MutexLocker stream_lock(JfrStream_lock, Mutex::_no_safepoint_check_flag);
|
||||
// serialize any outstanding checkpoint memory
|
||||
_checkpoint_manager.write();
|
||||
@ -512,11 +527,9 @@ void JfrRecorderService::vm_error_rotation() {
|
||||
void JfrRecorderService::finalize_current_chunk_on_vm_error() {
|
||||
assert(_chunkwriter.is_valid(), "invariant");
|
||||
pre_safepoint_write();
|
||||
JfrMetadataEvent::lock();
|
||||
// Do not attempt safepoint dependent operations during emergency dump.
|
||||
// Optimistically write tagged artifacts.
|
||||
_checkpoint_manager.shift_epoch();
|
||||
_checkpoint_manager.write_type_set();
|
||||
// update time
|
||||
_chunkwriter.time_stamp_chunk_now();
|
||||
post_safepoint_write();
|
||||
|
@ -164,7 +164,13 @@ traceid JfrStackTraceRepository::add_trace(const JfrStackTrace& stacktrace) {
|
||||
}
|
||||
|
||||
traceid JfrStackTraceRepository::add(const JfrStackTrace& stacktrace) {
|
||||
return instance().add_trace(stacktrace);
|
||||
traceid tid = instance().add_trace(stacktrace);
|
||||
if (tid == 0) {
|
||||
stacktrace.resolve_linenos();
|
||||
tid = instance().add_trace(stacktrace);
|
||||
}
|
||||
assert(tid != 0, "invariant");
|
||||
return tid;
|
||||
}
|
||||
|
||||
traceid JfrStackTraceRepository::record(Thread* thread, int skip /* 0 */) {
|
||||
@ -187,54 +193,29 @@ traceid JfrStackTraceRepository::record(Thread* thread, int skip /* 0 */) {
|
||||
return instance().record_for((JavaThread*)thread, skip,frames, tl->stackdepth());
|
||||
}
|
||||
|
||||
traceid JfrStackTraceRepository::record(Thread* thread, int skip, unsigned int* hash) {
|
||||
assert(thread == Thread::current(), "invariant");
|
||||
JfrThreadLocal* const tl = thread->jfr_thread_local();
|
||||
assert(tl != NULL, "invariant");
|
||||
|
||||
if (tl->has_cached_stack_trace()) {
|
||||
*hash = tl->cached_stack_trace_hash();
|
||||
return tl->cached_stack_trace_id();
|
||||
}
|
||||
if (!thread->is_Java_thread() || thread->is_hidden_from_external_view()) {
|
||||
return 0;
|
||||
}
|
||||
JfrStackFrame* frames = tl->stackframes();
|
||||
if (frames == NULL) {
|
||||
// pending oom
|
||||
return 0;
|
||||
}
|
||||
assert(frames != NULL, "invariant");
|
||||
assert(tl->stackframes() == frames, "invariant");
|
||||
return instance().record_for((JavaThread*)thread, skip, frames, tl->stackdepth(), hash);
|
||||
}
|
||||
|
||||
traceid JfrStackTraceRepository::record_for(JavaThread* thread, int skip, JfrStackFrame *frames, u4 max_frames) {
|
||||
JfrStackTrace stacktrace(frames, max_frames);
|
||||
if (!stacktrace.record_safe(thread, skip)) {
|
||||
return 0;
|
||||
}
|
||||
traceid tid = add(stacktrace);
|
||||
if (tid == 0) {
|
||||
stacktrace.resolve_linenos();
|
||||
tid = add(stacktrace);
|
||||
}
|
||||
return tid;
|
||||
return stacktrace.record_safe(thread, skip) ? add(stacktrace) : 0;
|
||||
}
|
||||
|
||||
traceid JfrStackTraceRepository::record_for(JavaThread* thread, int skip, JfrStackFrame *frames, u4 max_frames, unsigned int* hash) {
|
||||
assert(hash != NULL && *hash == 0, "invariant");
|
||||
JfrStackTrace stacktrace(frames, max_frames);
|
||||
if (!stacktrace.record_safe(thread, skip, true)) {
|
||||
return 0;
|
||||
traceid JfrStackTraceRepository::add(const JfrStackTrace* stacktrace, JavaThread* thread) {
|
||||
assert(stacktrace != NULL, "invariant");
|
||||
assert(thread != NULL, "invariant");
|
||||
assert(stacktrace->hash() != 0, "invariant");
|
||||
return add(*stacktrace);
|
||||
}
|
||||
|
||||
bool JfrStackTraceRepository::fill_stacktrace_for(JavaThread* thread, JfrStackTrace* stacktrace, int skip) {
|
||||
assert(thread == Thread::current(), "invariant");
|
||||
assert(stacktrace != NULL, "invariant");
|
||||
JfrThreadLocal* const tl = thread->jfr_thread_local();
|
||||
assert(tl != NULL, "invariant");
|
||||
const unsigned int cached_stacktrace_hash = tl->cached_stack_trace_hash();
|
||||
if (cached_stacktrace_hash != 0) {
|
||||
stacktrace->set_hash(cached_stacktrace_hash);
|
||||
return true;
|
||||
}
|
||||
traceid tid = add(stacktrace);
|
||||
if (tid == 0) {
|
||||
stacktrace.resolve_linenos();
|
||||
tid = add(stacktrace);
|
||||
}
|
||||
*hash = stacktrace._hash;
|
||||
return tid;
|
||||
return stacktrace->record_safe(thread, skip, true);
|
||||
}
|
||||
|
||||
size_t JfrStackTraceRepository::write_impl(JfrChunkWriter& sw, bool clear) {
|
||||
@ -363,7 +344,7 @@ const JfrStackTraceRepository::StackTrace* JfrStackTraceRepository::resolve_entr
|
||||
return trace;
|
||||
}
|
||||
|
||||
void JfrStackFrame::resolve_lineno() {
|
||||
void JfrStackFrame::resolve_lineno() const {
|
||||
assert(_method, "no method pointer");
|
||||
assert(_line == 0, "already have linenumber");
|
||||
_line = _method->line_number_from_bci(_bci);
|
||||
@ -375,7 +356,7 @@ void JfrStackTrace::set_frame(u4 frame_pos, JfrStackFrame& frame) {
|
||||
_frames[frame_pos] = frame;
|
||||
}
|
||||
|
||||
void JfrStackTrace::resolve_linenos() {
|
||||
void JfrStackTrace::resolve_linenos() const {
|
||||
for(unsigned int i = 0; i < _nr_of_frames; i++) {
|
||||
_frames[i].resolve_lineno();
|
||||
}
|
||||
|
@ -36,9 +36,9 @@ class Method;
|
||||
|
||||
class JfrStackFrame {
|
||||
private:
|
||||
const Method* _method;
|
||||
mutable const Method* _method;
|
||||
traceid _methodid;
|
||||
int _line;
|
||||
mutable int _line;
|
||||
int _bci;
|
||||
u1 _type;
|
||||
|
||||
@ -58,7 +58,7 @@ class JfrStackFrame {
|
||||
bool equals(const JfrStackFrame& rhs) const;
|
||||
void write(JfrChunkWriter& cw) const;
|
||||
void write(JfrCheckpointWriter& cpw) const;
|
||||
void resolve_lineno();
|
||||
void resolve_lineno() const;
|
||||
};
|
||||
|
||||
class JfrStackTrace : public StackObj {
|
||||
@ -70,7 +70,7 @@ class JfrStackTrace : public StackObj {
|
||||
unsigned int _hash;
|
||||
const u4 _max_frames;
|
||||
bool _reached_root;
|
||||
bool _lineno;
|
||||
mutable bool _lineno;
|
||||
|
||||
public:
|
||||
JfrStackTrace(JfrStackFrame* frames, u4 max_frames) : _frames(frames),
|
||||
@ -82,9 +82,10 @@ class JfrStackTrace : public StackObj {
|
||||
_lineno(false) {}
|
||||
bool record_thread(JavaThread& thread, frame& frame);
|
||||
bool record_safe(JavaThread* thread, int skip, bool leakp = false);
|
||||
void resolve_linenos();
|
||||
void resolve_linenos() const;
|
||||
void set_nr_of_frames(u4 nr_of_frames) { _nr_of_frames = nr_of_frames; }
|
||||
void set_hash(unsigned int hash) { _hash = hash; }
|
||||
unsigned int hash() const { return _hash; }
|
||||
void set_frame(u4 frame_pos, JfrStackFrame& frame);
|
||||
void set_reached_root(bool reached_root) { _reached_root = reached_root; }
|
||||
bool full_stacktrace() const { return _reached_root; }
|
||||
@ -128,23 +129,26 @@ class JfrStackTraceRepository : public JfrCHeapObj {
|
||||
traceid _next_id;
|
||||
u4 _entries;
|
||||
|
||||
size_t write_impl(JfrChunkWriter& cw, bool clear);
|
||||
traceid record_for(JavaThread* thread, int skip, JfrStackFrame* frames, u4 max_frames);
|
||||
traceid record_for(JavaThread* thread, int skip, JfrStackFrame* frames, u4 max_frames, unsigned int* hash);
|
||||
traceid add_trace(const JfrStackTrace& stacktrace);
|
||||
const StackTrace* resolve_entry(unsigned int hash, traceid id) const;
|
||||
static traceid add(const JfrStackTrace* stacktrace, JavaThread* thread);
|
||||
traceid record_for(JavaThread* thread, int skip, JfrStackFrame* frames, u4 max_frames);
|
||||
|
||||
size_t write_impl(JfrChunkWriter& cw, bool clear);
|
||||
const StackTrace* resolve_entry(unsigned int hash, traceid id) const;
|
||||
static void write_metadata(JfrCheckpointWriter& cpw);
|
||||
|
||||
static bool fill_stacktrace_for(JavaThread* thread, JfrStackTrace* stacktrace, int skip);
|
||||
|
||||
JfrStackTraceRepository();
|
||||
static JfrStackTraceRepository& instance();
|
||||
public:
|
||||
static JfrStackTraceRepository* create();
|
||||
bool initialize();
|
||||
static void destroy();
|
||||
|
||||
static JfrStackTraceRepository& instance();
|
||||
|
||||
public:
|
||||
static traceid add(const JfrStackTrace& stacktrace);
|
||||
static traceid record(Thread* thread, int skip = 0);
|
||||
static traceid record(Thread* thread, int skip, unsigned int* hash);
|
||||
traceid write(JfrCheckpointWriter& cpw, traceid id, unsigned int hash);
|
||||
size_t write(JfrChunkWriter& cw, bool clear);
|
||||
size_t clear();
|
||||
|
@ -48,10 +48,12 @@ void jfr_clear_stacktrace(Thread* t);
|
||||
|
||||
template <typename Event>
|
||||
class JfrConditionalFlush {
|
||||
protected:
|
||||
bool _enabled;
|
||||
public:
|
||||
typedef JfrBuffer Type;
|
||||
JfrConditionalFlush(Thread* t) {
|
||||
if (jfr_is_event_enabled(Event::eventId)) {
|
||||
JfrConditionalFlush(Thread* t) : _enabled(jfr_is_event_enabled(Event::eventId)) {
|
||||
if (_enabled) {
|
||||
jfr_conditional_flush(Event::eventId, sizeof(Event), t);
|
||||
}
|
||||
}
|
||||
@ -63,7 +65,7 @@ class JfrConditionalFlushWithStacktrace : public JfrConditionalFlush<Event> {
|
||||
bool _owner;
|
||||
public:
|
||||
JfrConditionalFlushWithStacktrace(Thread* t) : JfrConditionalFlush<Event>(t), _t(t), _owner(false) {
|
||||
if (Event::has_stacktrace() && jfr_has_stacktrace_enabled(Event::eventId)) {
|
||||
if (this->_enabled && Event::has_stacktrace() && jfr_has_stacktrace_enabled(Event::eventId)) {
|
||||
_owner = jfr_save_stacktrace(t);
|
||||
}
|
||||
}
|
||||
|
@ -150,9 +150,7 @@ JfrBuffer* JfrThreadLocal::install_java_buffer() const {
|
||||
|
||||
JfrStackFrame* JfrThreadLocal::install_stackframes() const {
|
||||
assert(_stackframes == NULL, "invariant");
|
||||
_stackdepth = (u4)JfrOptionSet::stackdepth();
|
||||
guarantee(_stackdepth > 0, "Stackdepth must be > 0");
|
||||
_stackframes = NEW_C_HEAP_ARRAY(JfrStackFrame, _stackdepth, mtTracing);
|
||||
_stackframes = NEW_C_HEAP_ARRAY(JfrStackFrame, stackdepth(), mtTracing);
|
||||
return _stackframes;
|
||||
}
|
||||
|
||||
@ -163,3 +161,7 @@ ByteSize JfrThreadLocal::trace_id_offset() {
|
||||
ByteSize JfrThreadLocal::java_event_writer_offset() {
|
||||
return in_ByteSize(offset_of(JfrThreadLocal, _java_event_writer));
|
||||
}
|
||||
|
||||
u4 JfrThreadLocal::stackdepth() const {
|
||||
return _stackdepth != 0 ? _stackdepth : (u4)JfrOptionSet::stackdepth();
|
||||
}
|
||||
|
@ -113,9 +113,7 @@ class JfrThreadLocal {
|
||||
_stackframes = frames;
|
||||
}
|
||||
|
||||
u4 stackdepth() const {
|
||||
return _stackdepth;
|
||||
}
|
||||
u4 stackdepth() const;
|
||||
|
||||
void set_stackdepth(u4 depth) {
|
||||
_stackdepth = depth;
|
||||
|
@ -135,8 +135,7 @@ static bool setup_event_writer_offsets(TRAPS) {
|
||||
bool JfrJavaEventWriter::initialize() {
|
||||
static bool initialized = false;
|
||||
if (!initialized) {
|
||||
Thread* thread = Thread::current();
|
||||
initialized = setup_event_writer_offsets(thread);
|
||||
initialized = setup_event_writer_offsets(Thread::current());
|
||||
}
|
||||
return initialized;
|
||||
}
|
||||
@ -155,6 +154,7 @@ jboolean JfrJavaEventWriter::flush(jobject writer, jint used, jint requested, Ja
|
||||
// large enough to accommodate the "requested size".
|
||||
const bool is_valid = buffer->free_size() >= (size_t)(used + requested);
|
||||
u1* const new_current_position = is_valid ? buffer->pos() + used : buffer->pos();
|
||||
assert(start_pos_offset != invalid_offset, "invariant");
|
||||
w->long_field_put(start_pos_offset, (jlong)buffer->pos());
|
||||
w->long_field_put(current_pos_offset, (jlong)new_current_position);
|
||||
// only update java writer if underlying memory changed
|
||||
|
@ -33,13 +33,14 @@ class Thread;
|
||||
|
||||
class JfrJavaEventWriter : AllStatic {
|
||||
friend class JfrCheckpointThreadClosure;
|
||||
friend class JfrJavaEventWriterNotifyOperation;
|
||||
friend class JfrJavaEventWriterNotificationClosure;
|
||||
friend class JfrJavaEventWriterNotifyOperation;
|
||||
friend class JfrRecorder;
|
||||
private:
|
||||
static bool initialize();
|
||||
static void notify(JavaThread* jt);
|
||||
|
||||
public:
|
||||
static bool initialize();
|
||||
static void notify();
|
||||
static jobject event_writer(Thread* t);
|
||||
static jobject new_event_writer(TRAPS);
|
||||
|
@ -170,7 +170,6 @@ void PhaseCFG::implicit_null_check(Block* block, Node *proj, Node *val, int allo
|
||||
case Op_LoadI:
|
||||
case Op_LoadL:
|
||||
case Op_LoadP:
|
||||
case Op_LoadBarrierSlowReg:
|
||||
case Op_LoadN:
|
||||
case Op_LoadS:
|
||||
case Op_LoadKlass:
|
||||
|
@ -2494,9 +2494,11 @@ uint IdealLoopTree::est_loop_clone_sz(uint factor) const {
|
||||
}
|
||||
}
|
||||
}
|
||||
// Add data (x1.5) and control (x1.0) count to estimate iff both are > 0.
|
||||
// Add data and control count (x2.0) to estimate iff both are > 0. This is
|
||||
// a rather pessimistic estimate for the most part, in particular for some
|
||||
// complex loops, but still not enough to capture all loops.
|
||||
if (ctrl_edge_out_cnt > 0 && data_edge_out_cnt > 0) {
|
||||
estimate += ctrl_edge_out_cnt + data_edge_out_cnt + data_edge_out_cnt / 2;
|
||||
estimate += 2 * (ctrl_edge_out_cnt + data_edge_out_cnt);
|
||||
}
|
||||
|
||||
return estimate;
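As a worked example with hypothetical counts: for 4 control and 6 data edges leaving the loop, the previous formula added 4 + 6 + 6/2 = 13 to the estimate, while the new one adds 2 * (4 + 6) = 20, so the new weighting is deliberately more pessimistic.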
|
||||
@ -4292,7 +4294,6 @@ void PhaseIdealLoop::build_loop_late_post_work(Node *n, bool pinned) {
|
||||
case Op_LoadL:
|
||||
case Op_LoadS:
|
||||
case Op_LoadP:
|
||||
case Op_LoadBarrierSlowReg:
|
||||
case Op_LoadN:
|
||||
case Op_LoadRange:
|
||||
case Op_LoadD_unaligned:
|
||||
|
@ -675,6 +675,7 @@ public:
|
||||
DEFINE_CLASS_ID(EncodeNarrowPtr, Type, 6)
|
||||
DEFINE_CLASS_ID(EncodeP, EncodeNarrowPtr, 0)
|
||||
DEFINE_CLASS_ID(EncodePKlass, EncodeNarrowPtr, 1)
|
||||
DEFINE_CLASS_ID(LoadBarrierSlowReg, Type, 7)
|
||||
|
||||
DEFINE_CLASS_ID(Proj, Node, 3)
|
||||
DEFINE_CLASS_ID(CatchProj, Proj, 0)
|
||||
@ -688,7 +689,6 @@ public:
|
||||
DEFINE_CLASS_ID(Mem, Node, 4)
|
||||
DEFINE_CLASS_ID(Load, Mem, 0)
|
||||
DEFINE_CLASS_ID(LoadVector, Load, 0)
|
||||
DEFINE_CLASS_ID(LoadBarrierSlowReg, Load, 1)
|
||||
DEFINE_CLASS_ID(Store, Mem, 1)
|
||||
DEFINE_CLASS_ID(StoreVector, Store, 0)
|
||||
DEFINE_CLASS_ID(LoadStore, Mem, 2)
|
||||
|
@ -297,7 +297,6 @@ void VectorNode::vector_operands(Node* n, uint* start, uint* end) {
|
||||
case Op_LoadI: case Op_LoadL:
|
||||
case Op_LoadF: case Op_LoadD:
|
||||
case Op_LoadP: case Op_LoadN:
|
||||
case Op_LoadBarrierSlowReg:
|
||||
*start = 0;
|
||||
*end = 0; // no vector operands
|
||||
break;
|
||||
|
@ -35,6 +35,7 @@
|
||||
#include "code/scopeDesc.hpp"
|
||||
#include "gc/shared/collectedHeap.hpp"
|
||||
#include "gc/shared/gcLocker.hpp"
|
||||
#include "gc/shared/oopStorage.hpp"
|
||||
#include "gc/shared/strongRootsScope.hpp"
|
||||
#include "gc/shared/workgroup.hpp"
|
||||
#include "interpreter/interpreter.hpp"
|
||||
@ -643,6 +644,12 @@ public:
|
||||
}
|
||||
}
|
||||
|
||||
if (_subtasks.try_claim_task(SafepointSynchronize::SAFEPOINT_CLEANUP_REQUEST_OOPSTORAGE_CLEANUP)) {
|
||||
// Don't bother reporting event or time for this very short operation.
|
||||
// To have any utility we'd also want to report whether needed.
|
||||
OopStorage::trigger_cleanup_if_needed();
|
||||
}
|
||||
|
||||
_subtasks.all_tasks_completed(_num_workers);
|
||||
}
|
||||
};
|
||||
|
@ -77,6 +77,7 @@ class SafepointSynchronize : AllStatic {
|
||||
SAFEPOINT_CLEANUP_STRING_TABLE_REHASH,
|
||||
SAFEPOINT_CLEANUP_CLD_PURGE,
|
||||
SAFEPOINT_CLEANUP_SYSTEM_DICTIONARY_RESIZE,
|
||||
SAFEPOINT_CLEANUP_REQUEST_OOPSTORAGE_CLEANUP,
|
||||
// Leave this one last.
|
||||
SAFEPOINT_CLEANUP_NUM_TASKS
|
||||
};
|
||||
|
@ -83,27 +83,9 @@ void ServiceThread::initialize() {
}
}

static bool needs_oopstorage_cleanup(OopStorage* const* storages,
bool* needs_cleanup,
size_t size) {
bool any_needs_cleanup = false;
static void cleanup_oopstorages(OopStorage* const* storages, size_t size) {
for (size_t i = 0; i < size; ++i) {
assert(!needs_cleanup[i], "precondition");
if (storages[i]->needs_delete_empty_blocks()) {
needs_cleanup[i] = true;
any_needs_cleanup = true;
}
}
return any_needs_cleanup;
}

static void cleanup_oopstorages(OopStorage* const* storages,
const bool* needs_cleanup,
size_t size) {
for (size_t i = 0; i < size; ++i) {
if (needs_cleanup[i]) {
storages[i]->delete_empty_blocks();
}
storages[i]->delete_empty_blocks();
}
}
@ -126,7 +108,6 @@ void ServiceThread::service_thread_entry(JavaThread* jt, TRAPS) {
bool resolved_method_table_work = false;
bool protection_domain_table_work = false;
bool oopstorage_work = false;
bool oopstorages_cleanup[oopstorage_count] = {}; // Zero (false) initialize.
JvmtiDeferredEvent jvmti_event;
{
// Need state transition ThreadBlockInVM so that this thread
@ -152,10 +133,7 @@ void ServiceThread::service_thread_entry(JavaThread* jt, TRAPS) {
(symboltable_work = SymbolTable::has_work()) |
(resolved_method_table_work = ResolvedMethodTable::has_work()) |
(protection_domain_table_work = SystemDictionary::pd_cache_table()->has_work()) |
(oopstorage_work = needs_oopstorage_cleanup(oopstorages,
oopstorages_cleanup,
oopstorage_count)))
(oopstorage_work = OopStorage::has_cleanup_work_and_reset()))
== 0) {
// Wait until notified that there is some work to do.
ml.wait();
@ -199,7 +177,7 @@ void ServiceThread::service_thread_entry(JavaThread* jt, TRAPS) {
}

if (oopstorage_work) {
cleanup_oopstorages(oopstorages, oopstorages_cleanup, oopstorage_count);
cleanup_oopstorages(oopstorages, oopstorage_count);
}
}
}
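Taken together, the ServiceThread hunks above drop the per-storage needs_cleanup bookkeeping: the thread now wakes on a single OopStorage::has_cleanup_work_and_reset() check and then calls delete_empty_blocks() on every storage. The Java sketch below shows that simplified shape with hypothetical types; it is not the HotSpot implementation.

import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;

// Minimal sketch of the simplified service-thread flow: one global
// "cleanup requested" flag instead of a per-storage boolean array, and an
// unconditional sweep over all storages when the flag was set.
class ServiceThreadCleanupSketch {
    interface Storage { void deleteEmptyBlocks(); }

    private static final AtomicBoolean CLEANUP_REQUESTED = new AtomicBoolean();

    static void requestCleanup() {            // plays the role of trigger_cleanup_if_needed()
        CLEANUP_REQUESTED.set(true);
    }

    static boolean hasCleanupWorkAndReset() { // plays the role of has_cleanup_work_and_reset()
        return CLEANUP_REQUESTED.getAndSet(false);
    }

    static void serviceIteration(List<Storage> storages) {
        if (hasCleanupWorkAndReset()) {
            for (Storage s : storages) {
                s.deleteEmptyBlocks();        // no per-storage flag check anymore
            }
        }
    }
}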
@ -128,6 +128,7 @@
template(ScavengeMonitors) \
template(PrintMetadata) \
template(GTestExecuteAtSafepoint) \
template(JFROldObject) \

class VM_Operation: public CHeapObj<mtInternal> {
public:
@ -880,7 +880,7 @@ public:
return "High: Switches the VM into Java debug mode.";
}
static const JavaPermission permission() {
JavaPermission p = { "java.lang.management.ManagementPermission", "monitor", NULL };
JavaPermission p = { "java.lang.management.ManagementPermission", "control", NULL };
return p;
}
static int num_arguments() { return 0; }
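The permission required by this diagnostic command is tightened from "monitor" to "control". Purely as an illustration (not taken from the patch), this is the kind of check a caller would have to satisfy when a security manager is installed.

import java.lang.management.ManagementPermission;

// Illustration only: with a SecurityManager installed, invoking the command
// now requires ManagementPermission("control") rather than ("monitor").
public class DebugdPermissionCheckDemo {
    static void checkControl() {
        SecurityManager sm = System.getSecurityManager();
        if (sm != null) {
            sm.checkPermission(new ManagementPermission("control"));
        }
    }

    public static void main(String[] args) {
        checkControl(); // trivially passes when no SecurityManager is set
        System.out.println("control permission check passed");
    }
}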
@ -67,7 +67,7 @@ questions.
the optional part of the <em>JMX Remote API</em>
are not included in the <em>Java SE Platform</em>
but are available from the <em>JMX Remote API
<a href="http://java.sun.com/products/JavaManagement/download.html">
<a href="https://www.oracle.com/technetwork/java/javasebusiness/downloads/java-archive-downloads-java-plat-419418.html">
Reference Implementation</a></em>.</p>
@ -197,6 +197,26 @@ public class Checksum {
usage);
}

// =============== ATTENTION! Use with care ==================
// According to https://tools.ietf.org/html/rfc3961#section-6.1,
// An unkeyed checksum should only be used "in limited circumstances
// where the lack of a key does not provide a window for an attack,
// preferably as part of an encrypted message".
public boolean verifyAnyChecksum(byte[] data, EncryptionKey key,
int usage)
throws KdcErrException, KrbCryptoException {
CksumType cksumEngine = CksumType.getInstance(cksumType);
if (!cksumEngine.isSafe()) {
return cksumEngine.verifyChecksum(data, checksum);
} else {
return cksumEngine.verifyKeyedChecksum(data,
data.length,
key.getBytes(),
checksum,
usage);
}
}

/*
public Checksum(byte[] data) throws KdcErrException, KrbCryptoException {
this(Checksum.CKSUMTYPE_DEFAULT, data);
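verifyAnyChecksum() above dispatches on CksumType.isSafe(): unkeyed ("unsafe") checksum types fall back to a plain digest comparison, while keyed types still receive the key and key usage. Below is a stripped-down sketch of that dispatch using hypothetical types, not the JDK classes.

import java.security.MessageDigest;

// Stripped-down sketch of the keyed/unkeyed dispatch above. The interface and
// class names are hypothetical; only the shape of the decision matches.
class AnyChecksumSketch {
    interface Engine {
        boolean isKeyed();
        boolean verifyUnkeyed(byte[] data, byte[] checksum) throws Exception;
        boolean verifyKeyed(byte[] data, byte[] key, byte[] checksum, int usage) throws Exception;
    }

    static boolean verifyAny(Engine e, byte[] data, byte[] key, byte[] checksum, int usage)
            throws Exception {
        return e.isKeyed() ? e.verifyKeyed(data, key, checksum, usage)
                           : e.verifyUnkeyed(data, checksum);
    }

    // An unkeyed engine in the spirit of RSA-MD5: digest the data and compare.
    static final Engine MD5_LIKE = new Engine() {
        public boolean isKeyed() { return false; }
        public boolean verifyUnkeyed(byte[] data, byte[] checksum) throws Exception {
            byte[] calculated = MessageDigest.getInstance("MD5").digest(data);
            return MessageDigest.isEqual(calculated, checksum);
        }
        public boolean verifyKeyed(byte[] data, byte[] key, byte[] checksum, int usage) {
            throw new UnsupportedOperationException("unkeyed checksum type");
        }
    };
}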
@ -158,8 +158,10 @@ abstract class KrbKdcRep {
Checksum repCksum = new Checksum(
new DerInputStream(
pa.getValue()).getDerValue());
// The checksum is inside encKDCRepPart so we don't
// care if it's keyed or not.
repPaReqEncPaRepValid =
repCksum.verifyKeyedChecksum(
repCksum.verifyAnyChecksum(
req.asn1Encode(), replyKey,
KeyUsage.KU_AS_REQ);
} catch (Exception e) {
@ -156,6 +156,11 @@ public abstract class CksumType {
public abstract byte[] calculateKeyedChecksum(byte[] data, int size,
byte[] key, int usage) throws KrbCryptoException;

public boolean verifyChecksum(byte[] data, byte[] checksum)
throws KrbCryptoException {
throw new UnsupportedOperationException("Not supported");
}

public abstract boolean verifyKeyedChecksum(byte[] data, int size,
byte[] key, byte[] checksum, int usage) throws KrbCryptoException;
@ -101,4 +101,14 @@ public final class RsaMd5CksumType extends CksumType {
return false;
}

@Override
public boolean verifyChecksum(byte[] data, byte[] checksum)
throws KrbCryptoException {
try {
byte[] calculated = MessageDigest.getInstance("MD5").digest(data);
return CksumType.isChecksumEqual(calculated, checksum);
} catch (Exception e) {
return false;
}
}
}
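RsaMd5CksumType above overrides the new CksumType.verifyChecksum() default (which throws UnsupportedOperationException) with an actual MD5 digest-and-compare. A tiny standalone demo of that same step, outside the Kerberos classes:

import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;

// Standalone demo of the unkeyed verification step used above: recompute the
// MD5 digest of the data and compare it with the transmitted checksum.
public class Md5VerifyDemo {
    static boolean verify(byte[] data, byte[] checksum) {
        try {
            byte[] calculated = MessageDigest.getInstance("MD5").digest(data);
            return MessageDigest.isEqual(calculated, checksum);
        } catch (Exception e) {
            return false;
        }
    }

    public static void main(String[] args) throws Exception {
        byte[] data = "example".getBytes(StandardCharsets.UTF_8);
        byte[] checksum = MessageDigest.getInstance("MD5").digest(data);
        System.out.println(verify(data, checksum)); // true
    }
}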
@ -775,9 +775,9 @@ public class JavacParser implements Parser {
public JCExpression unannotatedType(boolean allowVar) {
JCExpression result = term(TYPE);
Name restrictedTypeName;
Name restrictedTypeName = restrictedTypeName(result, !allowVar);

if (!allowVar && (restrictedTypeName = restrictedTypeName(result, true)) != null) {
if (restrictedTypeName != null && (!allowVar || restrictedTypeName != names.var)) {
syntaxError(result.pos, Errors.RestrictedTypeNotAllowedHere(restrictedTypeName));
}
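The parser hunk above now always looks up the restricted type name and then reports an error unless `var` is explicitly allowed, so that `yield` as a type is rejected even where `var` would be fine. The decision reduces to a small boolean condition; here is a hedged sketch of just that logic, with placeholder strings instead of javac's interned Name objects.

// Sketch of the error decision above, isolated from the parser. The names
// "var" and "yield" are placeholders for the interned javac names.
public class RestrictedTypeNameDemo {
    static final String VAR = "var";
    static final String YIELD = "yield";

    // restrictedTypeName is null when the parsed type is not restricted.
    static boolean reportError(String restrictedTypeName, boolean allowVar) {
        return restrictedTypeName != null
                && (!allowVar || !restrictedTypeName.equals(VAR));
    }

    public static void main(String[] args) {
        System.out.println(reportError(null, true));   // false: ordinary type
        System.out.println(reportError(VAR, true));    // false: var is allowed here
        System.out.println(reportError(VAR, false));   // true:  var not allowed here
        System.out.println(reportError(YIELD, true));  // true:  yield never allowed as a type
    }
}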
@ -72,3 +72,11 @@ serviceability/sa/TestPrintMdo.java 8216181 generic-all
serviceability/sa/TestRevPtrsForInvokeDynamic.java 8216181 generic-all
serviceability/sa/TestType.java 8216181 generic-all
serviceability/sa/TestUniverse.java 8216181 generic-all

compiler/intrinsics/sha/sanity/TestSHA256MultiBlockIntrinsics.java 8167430 generic-all
compiler/intrinsics/sha/sanity/TestSHA1Intrinsics.java 8167430 generic-all
compiler/intrinsics/sha/sanity/TestSHA512Intrinsics.java 8167430 generic-all
compiler/intrinsics/sha/sanity/TestSHA256Intrinsics.java 8167430 generic-all
compiler/intrinsics/sha/sanity/TestSHA1MultiBlockIntrinsics.java 8167430 generic-all
compiler/intrinsics/sha/sanity/TestSHA512MultiBlockIntrinsics.java 8167430 generic-all
@ -810,7 +810,7 @@ jint JNICALL heapReferenceCallback(
}
break;

case JVMTI_REFERENCE_ARRAY_ELEMENT:
case JVMTI_HEAP_REFERENCE_ARRAY_ELEMENT:
case JVMTI_HEAP_REFERENCE_JNI_GLOBAL:
case JVMTI_HEAP_REFERENCE_SYSTEM_CLASS:
case JVMTI_HEAP_REFERENCE_MONITOR:
@ -23,7 +23,7 @@
/*
* @test
* @bug 8223305
* @bug 8223305 8226522
* @summary Verify correct warnings w.r.t. yield
* @compile/ref=WarnWrongYieldTest.out -source ${jdk.version} -XDrawDiagnostics -XDshould-stop.at=ATTR WarnWrongYieldTest.java
*/
@ -159,4 +159,12 @@ public class WarnWrongYieldTest {
//OK - yield is a variable:
yield[0] = 5;
}

private void lambda() {
SAM s = (yield y) -> {};
}

interface SAM {
public void m(yield o);
}
}
@ -11,8 +11,11 @@ WarnWrongYieldTest.java:113:9: compiler.warn.invalid.yield
WarnWrongYieldTest.java:118:9: compiler.warn.invalid.yield
WarnWrongYieldTest.java:123:22: compiler.warn.invalid.yield
WarnWrongYieldTest.java:152:24: compiler.warn.invalid.yield
WarnWrongYieldTest.java:164:18: compiler.warn.restricted.type.not.allowed.preview: yield, 13
WarnWrongYieldTest.java:168:23: compiler.warn.restricted.type.not.allowed.preview: yield, 13
WarnWrongYieldTest.java:34:28: compiler.warn.illegal.ref.to.restricted.type: yield
WarnWrongYieldTest.java:45:5: compiler.warn.illegal.ref.to.restricted.type: yield
WarnWrongYieldTest.java:168:23: compiler.warn.illegal.ref.to.restricted.type: yield
WarnWrongYieldTest.java:72:9: compiler.warn.illegal.ref.to.restricted.type: yield
WarnWrongYieldTest.java:75:9: compiler.warn.illegal.ref.to.restricted.type: yield
WarnWrongYieldTest.java:75:24: compiler.warn.illegal.ref.to.restricted.type: yield
@ -22,4 +25,5 @@ WarnWrongYieldTest.java:81:30: compiler.warn.illegal.ref.to.restricted.type: yie
WarnWrongYieldTest.java:84:27: compiler.warn.illegal.ref.to.restricted.type: yield
WarnWrongYieldTest.java:84:43: compiler.warn.illegal.ref.to.restricted.type: yield
WarnWrongYieldTest.java:153:24: compiler.warn.illegal.ref.to.restricted.type: yield
24 warnings
WarnWrongYieldTest.java:164:18: compiler.warn.illegal.ref.to.restricted.type: yield
28 warnings
@ -23,7 +23,7 @@
/*
* @test
* @bug 8223305
* @bug 8223305 8226522
* @summary Ensure proper errors are returned for yields.
* @compile/fail/ref=WrongYieldTest.out --enable-preview -source ${jdk.version} -XDrawDiagnostics -XDshould-stop.at=ATTR WrongYieldTest.java
*/
@ -222,4 +222,12 @@ public class WrongYieldTest {
//OK - yield is a variable:
yield[0] = 5;
}

private void lambda() {
SAM s = (yield y) -> {};
}

interface SAM {
public void m(Object o);
}
}
@ -1,11 +1,13 @@
WrongYieldTest.java:39:11: compiler.err.restricted.type.not.allowed: yield, 13
WrongYieldTest.java:45:5: compiler.err.restricted.type.not.allowed.here: yield
WrongYieldTest.java:123:15: compiler.err.restricted.type.not.allowed.here: yield
WrongYieldTest.java:136:9: compiler.err.invalid.yield
WrongYieldTest.java:146:9: compiler.err.invalid.yield
WrongYieldTest.java:151:9: compiler.err.invalid.yield
WrongYieldTest.java:161:9: compiler.err.invalid.yield
WrongYieldTest.java:166:22: compiler.err.invalid.yield
WrongYieldTest.java:215:24: compiler.err.invalid.yield
WrongYieldTest.java:227:18: compiler.err.restricted.type.not.allowed.here: yield
WrongYieldTest.java:34:24: compiler.err.illegal.ref.to.restricted.type: yield
WrongYieldTest.java:95:9: compiler.err.no.switch.expression
WrongYieldTest.java:95:15: compiler.err.cant.resolve.location: kindname.variable, y1, , , (compiler.misc.location: kindname.class, t.WrongYieldTest, null)
@ -26,4 +28,4 @@ WrongYieldTest.java:202:9: compiler.err.no.switch.expression
WrongYieldTest.java:216:24: compiler.err.illegal.ref.to.restricted.type: yield
- compiler.note.preview.filename: WrongYieldTest.java
- compiler.note.preview.recompile
26 errors
28 errors