8263332: JFR: Dump recording from a recording stream

Reviewed-by: mgronlun
Erik Gahlin 2021-06-02 17:25:42 +00:00
parent b7ac705dd6
commit 1ae934e09d
14 changed files with 757 additions and 93 deletions

View File

@ -37,13 +37,16 @@ static const u2 JFR_VERSION_MINOR = 1;
// strictly monotone
static jlong nanos_now() {
static jlong last = 0;
// We use javaTimeMillis so this can be correlated with
// external timestamps.
const jlong now = os::javaTimeMillis() * JfrTimeConverter::NANOS_PER_MILLISEC;
jlong seconds;
jlong nanos;
// Use same clock source as Instant.now() to ensure
// that Recording::getStopTime() returns an Instant that
// is in sync.
os::javaTimeSystemUTC(seconds, nanos);
const jlong now = seconds * 1000000000 + nanos;
if (now > last) {
last = now;
} else {
++last;
}
return last;
}
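For reference, the same strictly-monotone clamp can be sketched in plain Java (illustrative only, not part of the patch; it assumes java.time.Instant, the clock source the comment above refers to):

import java.time.Instant;

final class MonotonicNanos {
    private static long last; // guarded by the class lock, mirrors the static 'last' above

    static synchronized long nanosNow() {
        Instant now = Instant.now(); // same clock source as Recording::getStopTime()
        long nanos = now.getEpochSecond() * 1_000_000_000L + now.getNano();
        // Never return a value that is not strictly greater than the previous one.
        last = (nanos > last) ? nanos : last + 1;
        return last;
    }
}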
@ -124,7 +127,6 @@ int64_t JfrChunk::start_ticks() const {
}
int64_t JfrChunk::start_nanos() const {
assert(_start_nanos != 0, "invariant");
return _start_nanos;
}
@ -144,14 +146,14 @@ void JfrChunk::update_start_ticks() {
void JfrChunk::update_start_nanos() {
const jlong now = nanos_now();
assert(now > _start_nanos, "invariant");
assert(now > _last_update_nanos, "invariant");
assert(now >= _start_nanos, "invariant");
assert(now >= _last_update_nanos, "invariant");
_start_nanos = _last_update_nanos = now;
}
void JfrChunk::update_current_nanos() {
const jlong now = nanos_now();
assert(now > _last_update_nanos, "invariant");
assert(now >= _last_update_nanos, "invariant");
_last_update_nanos = now;
}

View File

@ -254,7 +254,7 @@ int64_t JfrChunkWriter::last_checkpoint_offset() const {
int64_t JfrChunkWriter::current_chunk_start_nanos() const {
assert(_chunk != NULL, "invariant");
return this->is_valid() ? _chunk->start_nanos() : invalid_time;
return _chunk->start_nanos();
}
void JfrChunkWriter::set_last_checkpoint_offset(int64_t offset) {

View File

@ -26,6 +26,7 @@
package jdk.jfr.consumer;
import java.io.IOException;
import java.nio.file.Path;
import java.security.AccessControlContext;
import java.security.AccessController;
import java.time.Duration;
@ -33,6 +34,7 @@ import java.time.Instant;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.function.Consumer;
import jdk.jfr.Configuration;
@ -40,6 +42,7 @@ import jdk.jfr.Event;
import jdk.jfr.EventSettings;
import jdk.jfr.EventType;
import jdk.jfr.Recording;
import jdk.jfr.RecordingState;
import jdk.jfr.internal.PlatformRecording;
import jdk.jfr.internal.PrivateAccess;
import jdk.jfr.internal.SecuritySupport;
@ -402,6 +405,41 @@ public final class RecordingStream implements AutoCloseable, EventStream {
directoryStream.startAsync(startNanos);
}
/**
* Writes recording data to a file.
* <p>
* The recording stream must be started, but not closed.
* <p>
* It's highly recommended that a max age or max size is set before
* starting the stream. Otherwise, the dump may not contain any events.
*
* @param destination the location where recording data is written, not
* {@code null}
*
* @throws IOException if the recording data can't be copied to the specified
* location, or if the stream is closed or not started.
*
* @throws SecurityException if a security manager exists and the caller doesn't
* have {@code FilePermission} to write to the destination path
*
* @see RecordingStream#setMaxAge(Duration)
* @see RecordingStream#setMaxSize(long)
*/
public void dump(Path destination) throws IOException {
Objects.requireNonNull(destination);
Object recorder = PrivateAccess.getInstance().getPlatformRecorder();
synchronized (recorder) {
RecordingState state = recording.getState();
if (state == RecordingState.CLOSED) {
throw new IOException("Recording stream has been closed, no content to write");
}
if (state == RecordingState.NEW) {
throw new IOException("Recording stream has not been started, no content to write");
}
recording.dump(destination);
}
}
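For context, a minimal usage sketch of the new dump(Path) method (the event class and file name are illustrative, not part of this change):

import java.nio.file.Path;
import java.time.Duration;
import jdk.jfr.Event;
import jdk.jfr.Name;
import jdk.jfr.consumer.RecordingStream;

public class DumpExample {
    @Name("example.Hello")
    static class HelloEvent extends Event {
    }

    public static void main(String... args) throws Exception {
        try (RecordingStream rs = new RecordingStream()) {
            rs.setMaxAge(Duration.ofMinutes(5)); // recommended before start, see javadoc above
            rs.onEvent("example.Hello", System.out::println);
            rs.startAsync();
            new HelloEvent().commit();           // make sure there is at least one event
            rs.dump(Path.of("snapshot.jfr"));    // the method added in this change
        }
    }
}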
@Override
public void awaitTermination(Duration timeout) throws InterruptedException {
directoryStream.awaitTermination(timeout);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -31,6 +31,7 @@ import static jdk.jfr.internal.LogTag.JFR_SYSTEM;
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.time.Instant;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
@ -62,6 +63,7 @@ public final class MetadataRepository {
private boolean staleMetadata = true;
private boolean unregistered;
private long lastUnloaded = -1;
private Instant outputChange;
public MetadataRepository() {
initializeJVMEventTypes();
@ -263,11 +265,13 @@ public final class MetadataRepository {
// Lock around setOutput ensures that other threads don't
// emit events after setOutput and unregister the event class, before a call
// to storeDescriptorInJVM
synchronized void setOutput(String filename) {
synchronized Instant setOutput(String filename) {
if (staleMetadata) {
storeDescriptorInJVM();
}
awaitUniqueTimestamp();
jvm.setOutput(filename);
long nanos = jvm.getChunkStartNanos();
if (filename != null) {
RepositoryFiles.notifyNewFile();
}
@ -278,6 +282,29 @@ public final class MetadataRepository {
}
unregistered = false;
}
return Utils.epochNanosToInstant(nanos);
}
// Each chunk needs a unique start timestamp and
// if the clock resolution is low, two chunks may
// get the same timestamp.
private void awaitUniqueTimestamp() {
if (outputChange == null) {
outputChange = Instant.now();
return;
}
while (true) {
Instant time = Instant.now();
if (!time.equals(outputChange)) {
outputChange = time;
return;
}
try {
Thread.sleep(0, 100);
} catch (InterruptedException iex) {
// ignore
}
}
}
private void unregisterUnloaded() {

View File

@ -222,14 +222,7 @@ public final class PlatformRecorder {
synchronized long start(PlatformRecording recording) {
// State can only be NEW or DELAYED because of previous checks
ZonedDateTime zdtNow = ZonedDateTime.now();
Instant now = zdtNow.toInstant();
recording.setStartTime(now);
recording.updateTimer();
Duration duration = recording.getDuration();
if (duration != null) {
recording.setStopTime(now.plus(duration));
}
Instant startTime = null;
boolean toDisk = recording.isToDisk();
boolean beginPhysical = true;
long streamInterval = recording.getStreamIntervalMillis();
@ -246,7 +239,7 @@ public final class PlatformRecorder {
if (beginPhysical) {
RepositoryChunk newChunk = null;
if (toDisk) {
newChunk = repository.newChunk(zdtNow);
newChunk = repository.newChunk();
if (EventLog.shouldLog()) {
EventLog.start();
}
@ -257,25 +250,34 @@ public final class PlatformRecorder {
currentChunk = newChunk;
jvm.beginRecording();
startNanos = jvm.getChunkStartNanos();
startTime = Utils.epochNanosToInstant(startNanos);
if (currentChunk != null) {
currentChunk.setStartTime(startTime);
}
recording.setState(RecordingState.RUNNING);
updateSettings();
recording.setStartTime(startTime);
writeMetaEvents();
} else {
RepositoryChunk newChunk = null;
if (toDisk) {
newChunk = repository.newChunk(zdtNow);
newChunk = repository.newChunk();
if (EventLog.shouldLog()) {
EventLog.start();
}
RequestEngine.doChunkEnd();
MetadataRepository.getInstance().setOutput(newChunk.getFile().toString());
startNanos = jvm.getChunkStartNanos();
String p = newChunk.getFile().toString();
startTime = MetadataRepository.getInstance().setOutput(p);
newChunk.setStartTime(startTime);
}
startNanos = jvm.getChunkStartNanos();
startTime = Utils.epochNanosToInstant(startNanos);
recording.setStartTime(startTime);
recording.setState(RecordingState.RUNNING);
updateSettings();
writeMetaEvents();
if (currentChunk != null) {
finishChunk(currentChunk, now, recording);
finishChunk(currentChunk, startTime, recording);
}
currentChunk = newChunk;
}
@ -283,12 +285,17 @@ public final class PlatformRecorder {
RequestEngine.setFlushInterval(streamInterval);
}
RequestEngine.doChunkBegin();
Duration duration = recording.getDuration();
if (duration != null) {
recording.setStopTime(startTime.plus(duration));
}
recording.updateTimer();
return startNanos;
}
synchronized void stop(PlatformRecording recording) {
RecordingState state = recording.getState();
Instant stopTime;
if (Utils.isAfter(state, RecordingState.RUNNING)) {
throw new IllegalStateException("Can't stop an already stopped recording.");
@ -296,8 +303,6 @@ public final class PlatformRecorder {
if (Utils.isBefore(state, RecordingState.RUNNING)) {
throw new IllegalStateException("Recording must be started before it can be stopped.");
}
ZonedDateTime zdtNow = ZonedDateTime.now();
Instant now = zdtNow.toInstant();
boolean toDisk = false;
boolean endPhysical = true;
long streamInterval = Long.MAX_VALUE;
@ -317,33 +322,37 @@ public final class PlatformRecorder {
if (endPhysical) {
RequestEngine.doChunkEnd();
if (recording.isToDisk()) {
if (currentChunk != null) {
if (inShutdown) {
jvm.markChunkFinal();
}
MetadataRepository.getInstance().setOutput(null);
finishChunk(currentChunk, now, null);
currentChunk = null;
if (inShutdown) {
jvm.markChunkFinal();
}
stopTime = MetadataRepository.getInstance().setOutput(null);
finishChunk(currentChunk, stopTime, null);
currentChunk = null;
} else {
// last memory
dumpMemoryToDestination(recording);
stopTime = dumpMemoryToDestination(recording);
}
jvm.endRecording();
recording.setStopTime(stopTime);
disableEvents();
} else {
RepositoryChunk newChunk = null;
RequestEngine.doChunkEnd();
updateSettingsButIgnoreRecording(recording);
String path = null;
if (toDisk) {
newChunk = repository.newChunk(zdtNow);
MetadataRepository.getInstance().setOutput(newChunk.getFile().toString());
} else {
MetadataRepository.getInstance().setOutput(null);
newChunk = repository.newChunk();
path = newChunk.getFile().toString();
}
stopTime = MetadataRepository.getInstance().setOutput(path);
if (toDisk) {
newChunk.setStartTime(stopTime);
}
recording.setStopTime(stopTime);
writeMetaEvents();
if (currentChunk != null) {
finishChunk(currentChunk, now, null);
finishChunk(currentChunk, stopTime, null);
}
currentChunk = newChunk;
RequestEngine.doChunkBegin();
@ -360,12 +369,14 @@ public final class PlatformRecorder {
}
}
private void dumpMemoryToDestination(PlatformRecording recording) {
private Instant dumpMemoryToDestination(PlatformRecording recording) {
WriteableUserPath dest = recording.getDestination();
if (dest != null) {
MetadataRepository.getInstance().setOutput(dest.getRealPathText());
Instant t = MetadataRepository.getInstance().setOutput(dest.getRealPathText());
recording.clearDestination();
return t;
}
return Instant.now();
}
private void disableEvents() {
MetadataRepository.getInstance().disableEvents();
@ -389,13 +400,14 @@ public final class PlatformRecorder {
synchronized void rotateDisk() {
ZonedDateTime now = ZonedDateTime.now();
RepositoryChunk newChunk = repository.newChunk(now);
RepositoryChunk newChunk = repository.newChunk();
RequestEngine.doChunkEnd();
MetadataRepository.getInstance().setOutput(newChunk.getFile().toString());
String path = newChunk.getFile().toString();
Instant timestamp = MetadataRepository.getInstance().setOutput(path);
newChunk.setStartTime(timestamp);
writeMetaEvents();
if (currentChunk != null) {
finishChunk(currentChunk, now.toInstant(), null);
finishChunk(currentChunk, timestamp, null);
}
currentChunk = newChunk;
RequestEngine.doChunkBegin();

View File

@ -166,7 +166,6 @@ public final class PlatformRecording implements AutoCloseable {
recorder.stop(this);
String endText = reason == null ? "" : ". Reason \"" + reason + "\".";
Logger.log(LogTag.JFR, LogLevel.INFO, "Stopped recording \"" + getName() + "\" (" + getId() + ")" + endText);
this.stopTime = Instant.now();
newState = getState();
}
WriteableUserPath dest = getDestination();
@ -317,10 +316,10 @@ public final class PlatformRecording implements AutoCloseable {
}
RecordingState state = getState();
if (state == RecordingState.CLOSED) {
throw new IOException("Recording \"" + name + "\" (id=" + id + ") has been closed, no contents to write");
throw new IOException("Recording \"" + name + "\" (id=" + id + ") has been closed, no content to write");
}
if (state == RecordingState.DELAYED || state == RecordingState.NEW) {
throw new IOException("Recording \"" + name + "\" (id=" + id + ") has not started, no contents to write");
throw new IOException("Recording \"" + name + "\" (id=" + id + ") has not started, no content to write");
}
if (state == RecordingState.STOPPED) {
PlatformRecording clone = recorder.newTemporaryRecording();

View File

@ -80,7 +80,8 @@ public final class Repository {
}
}
synchronized RepositoryChunk newChunk(ZonedDateTime timestamp) {
synchronized RepositoryChunk newChunk() {
ZonedDateTime timestamp = ZonedDateTime.now();
try {
if (!SecuritySupport.existDirectory(repository)) {
this.repository = createRepository(baseLocation);
@ -93,7 +94,7 @@ public final class Repository {
chunkFilename = ChunkFilename.newPriviliged(repository.toPath());
}
String filename = chunkFilename.next(timestamp.toLocalDateTime());
return new RepositoryChunk(new SafePath(filename), timestamp.toInstant());
return new RepositoryChunk(new SafePath(filename));
} catch (Exception e) {
String errorMsg = String.format("Could not create chunk in repository %s, %s: %s", repository, e.getClass(), e.getMessage());
Logger.log(LogTag.JFR, LogLevel.ERROR, errorMsg);

View File

@ -43,15 +43,14 @@ final class RepositoryChunk {
};
private final SafePath chunkFile;
private final Instant startTime;
private final RandomAccessFile unFinishedRAF;
private Instant endTime = null; // unfinished
private Instant startTime;
private int refCount = 0;
private long size;
RepositoryChunk(SafePath path, Instant startTime) throws Exception {
this.startTime = startTime;
RepositoryChunk(SafePath path) throws Exception {
this.chunkFile = path;
this.unFinishedRAF = SecuritySupport.createRandomAccessFile(chunkFile);
}
@ -77,6 +76,10 @@ final class RepositoryChunk {
return startTime;
}
public void setStartTime(Instant timestamp) {
this.startTime = timestamp;
}
public Instant getEndTime() {
return endTime;
}

View File

@ -841,9 +841,7 @@ public final class Utils {
}
public static Instant epochNanosToInstant(long epochNanos) {
long epochSeconds = epochNanos / 1_000_000_000L;
long nanoAdjustment = epochNanos - 1_000_000_000L * epochSeconds;
return Instant.ofEpochSecond(epochSeconds, nanoAdjustment);
return Instant.ofEpochSecond(0, epochNanos);
}
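The one-liner works because Instant.ofEpochSecond(long, long) normalizes a nanosecond adjustment larger than one second, so the manual split is redundant; a quick check (illustrative):

import java.time.Instant;

public class EpochNanosCheck {
    public static void main(String... args) {
        long epochNanos = 1_622_649_942_123_456_789L; // arbitrary sample timestamp
        long seconds = epochNanos / 1_000_000_000L;
        long adjustment = epochNanos - 1_000_000_000L * seconds;
        Instant split = Instant.ofEpochSecond(seconds, adjustment);
        Instant direct = Instant.ofEpochSecond(0, epochNanos);
        System.out.println(split.equals(direct)); // prints true
    }
}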
public static long timeToNanos(Instant timestamp) {

View File

@ -28,18 +28,24 @@ import java.io.Closeable;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;
import java.time.Duration;
import java.time.Instant;
import java.time.LocalDateTime;
import java.time.OffsetDateTime;
import java.time.ZoneOffset;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Deque;
import java.util.Iterator;
import java.util.List;
import java.util.Objects;
import java.util.Queue;
import jdk.jfr.internal.management.ChunkFilename;
import jdk.jfr.internal.management.ManagementSupport;
@ -49,12 +55,48 @@ final class DiskRepository implements Closeable {
static final class DiskChunk {
final Path path;
final long startTimeNanos;
final DiskRepository repository;
int referenceCount;
Instant endTime;
long size;
long endTimeNanos;
DiskChunk(Path path, long startNanos) {
DiskChunk(DiskRepository repository, Path path, long startNanos) {
this.repository = repository;
this.path = path;
this.startTimeNanos = startNanos;
this.referenceCount = 1;
}
public void acquire() {
referenceCount++;
}
public void release() {
referenceCount--;
if (referenceCount == 0) {
destroy();
}
if (referenceCount < 0) {
throw new InternalError("Reference count below zero");
}
}
private void destroy() {
try {
Files.delete(path);
} catch (IOException e) {
// Schedule for deletion later.
this.repository.deadChunks.add(this);
}
}
public boolean isDead() {
return referenceCount == 0;
}
public Path path() {
return path;
}
}
@ -77,8 +119,9 @@ final class DiskRepository implements Closeable {
static final int HEADER_SIZE = 68;
static final int HEADER_FILE_DURATION = 40;
private final Deque<DiskChunk> activeChunks = new ArrayDeque<>();
private final Deque<DiskChunk> chunks = new ArrayDeque<>();
private final Deque<DiskChunk> deadChunks = new ArrayDeque<>();
private final Deque<FileDump> fileDumps = new ArrayDeque<>();
private final boolean deleteDirectory;
private final ByteBuffer buffer = ByteBuffer.allocate(256);
private final Path directory;
@ -300,6 +343,7 @@ final class DiskRepository implements Closeable {
currentChunk.size = Files.size(currentChunk.path);
long durationNanos = buffer.getLong(HEADER_FILE_DURATION);
long endTimeNanos = currentChunk.startTimeNanos + durationNanos;
currentChunk.endTimeNanos = endTimeNanos;
currentChunk.endTime = ManagementSupport.epochNanosToInstant(endTimeNanos);
}
raf.seek(position);
@ -330,7 +374,7 @@ final class DiskRepository implements Closeable {
ZoneOffset z = OffsetDateTime.now().getOffset();
LocalDateTime d = LocalDateTime.ofEpochSecond(epochSecond, nanoOfSecond, z);
String filename = chunkFilename.next(d);
return new DiskChunk(Paths.get(filename), nanos);
return new DiskChunk(this, Paths.get(filename), nanos);
}
@Override
@ -339,7 +383,10 @@ final class DiskRepository implements Closeable {
if (raf != null) {
raf.close();
}
deadChunks.addAll(activeChunks);
for (FileDump dump: fileDumps) {
dump.close();
}
deadChunks.addAll(chunks);
if (currentChunk != null) {
deadChunks.add(currentChunk);
}
@ -368,20 +415,21 @@ final class DiskRepository implements Closeable {
return;
}
int count = 0;
while (size > maxSize && activeChunks.size() > 1) {
while (size > maxSize && chunks.size() > 1) {
removeOldestChunk();
count++;
}
cleanUpDeadChunk(count + 10);
}
private void trimToAge(Instant oldest) {
if (maxAge == null) {
return;
}
int count = 0;
while (activeChunks.size() > 1) {
DiskChunk oldestChunk = activeChunks.getLast();
while (chunks.size() > 1) {
DiskChunk oldestChunk = chunks.getLast();
if (oldestChunk.endTime.isAfter(oldest)) {
return;
}
@ -391,33 +439,35 @@ final class DiskRepository implements Closeable {
cleanUpDeadChunk(count + 10);
}
private void removeOldestChunk() {
DiskChunk chunk = chunks.poll();
chunk.release();
size -= chunk.size;
}
public synchronized void onChunkComplete(long endTimeNanos) {
int count = 0;
while (!activeChunks.isEmpty()) {
DiskChunk oldestChunk = activeChunks.peek();
while (!chunks.isEmpty()) {
DiskChunk oldestChunk = chunks.peek();
if (oldestChunk.startTimeNanos < endTimeNanos) {
removeOldestChunk();
count++;
} else {
break;
}
}
cleanUpDeadChunk(count + 10);
}
private void addChunk(DiskChunk chunk) {
if (maxAge != null) {
trimToAge(chunk.endTime.minus(maxAge));
}
activeChunks.push(chunk);
chunks.push(chunk);
size += chunk.size;
trimToSize();
}
private void removeOldestChunk() {
DiskChunk chunk = activeChunks.poll();
deadChunks.add(chunk);
size -= chunk.size;
for (FileDump fd : fileDumps) {
fd.add(chunk);
}
fileDumps.removeIf(FileDump::isComplete);
}
private void cleanUpDeadChunk(int maxCount) {
@ -425,13 +475,13 @@ final class DiskRepository implements Closeable {
Iterator<DiskChunk> iterator = deadChunks.iterator();
while (iterator.hasNext()) {
DiskChunk chunk = iterator.next();
count++;
try {
Files.delete(chunk.path);
iterator.remove();
} catch (IOException e) {
// ignore
}
count++;
if (count == maxCount) {
return;
}
@ -447,4 +497,15 @@ final class DiskRepository implements Closeable {
}
}
}
public synchronized FileDump newDump(long endTime) {
FileDump fd = new FileDump(endTime);
for (DiskChunk dc : chunks) {
fd.add(dc);
}
if (!fd.isComplete()) {
fileDumps.add(fd);
}
return fd;
}
}

View File

@ -0,0 +1,100 @@
/*
* Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package jdk.management.jfr;
import java.io.IOException;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import java.util.ArrayDeque;
import java.util.Queue;
import jdk.management.jfr.DiskRepository.DiskChunk;
final class FileDump {
private final Queue<DiskChunk> chunks = new ArrayDeque<>();
private final long stopTimeMillis;
private boolean complete;
FileDump(long stopTimeMillis) {
this.stopTimeMillis = stopTimeMillis;
}
public synchronized void add(DiskChunk dc) {
if (isComplete()) {
return;
}
dc.acquire();
chunks.add(dc);
long endMillis = dc.endTimeNanos / 1_000_000;
if (endMillis >= stopTimeMillis) {
setComplete();
}
}
public synchronized boolean isComplete() {
return complete;
}
public synchronized void setComplete() {
complete = true;
this.notifyAll();
}
public synchronized void close() {
for (DiskChunk dc : chunks) {
dc.release();
}
chunks.clear();
complete = true;
}
private DiskChunk oldestChunk() throws InterruptedException {
while (true) {
synchronized (this) {
if (!chunks.isEmpty()) {
return chunks.poll();
}
if (complete) {
return null;
}
this.wait();
}
}
}
public void write(Path path) throws IOException, InterruptedException {
try (FileChannel out = FileChannel.open(path, StandardOpenOption.CREATE_NEW, StandardOpenOption.WRITE)) {
DiskChunk chunk = null;
while ((chunk = oldestChunk()) != null) {
try (FileChannel in = FileChannel.open(chunk.path(), StandardOpenOption.READ)) {
in.transferTo(0, in.size(), out);
}
}
} finally {
close();
}
}
}

View File

@ -27,9 +27,11 @@ package jdk.management.jfr;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.channels.FileChannel;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;
import java.security.AccessControlContext;
import java.security.AccessController;
import java.time.Duration;
@ -40,6 +42,7 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.Future;
import java.util.function.Consumer;
import java.security.AccessControlException;
import javax.management.JMX;
@ -50,11 +53,14 @@ import jdk.jfr.Configuration;
import jdk.jfr.EventSettings;
import jdk.jfr.EventType;
import jdk.jfr.Recording;
import jdk.jfr.RecordingState;
import jdk.jfr.consumer.EventStream;
import jdk.jfr.consumer.MetadataEvent;
import jdk.jfr.consumer.RecordedEvent;
import jdk.jfr.consumer.RecordingStream;
import jdk.jfr.internal.management.EventSettingsModifier;
import jdk.jfr.internal.management.ManagementSupport;
import jdk.management.jfr.DiskRepository.DiskChunk;
import jdk.jfr.internal.management.EventByteStream;
/**
@ -146,9 +152,11 @@ public final class RemoteRecordingStream implements EventStream {
final AccessControlContext accessControllerContext;
final DiskRepository repository;
final Instant creationTime;
final Object lock = new Object();
volatile Instant startTime;
volatile Instant endTime;
volatile boolean closed;
private boolean started; // always guarded by lock
/**
* Creates an event stream that operates against a {@link MBeanServerConnection}
@ -463,10 +471,12 @@ public final class RemoteRecordingStream implements EventStream {
@Override
public void close() {
if (closed) {
return;
synchronized (lock) { // ensure one closer
if (closed) {
return;
}
closed = true;
}
closed = true;
ManagementSupport.setOnChunkCompleteHandler(stream, null);
stream.close();
try {
@ -510,33 +520,118 @@ public final class RemoteRecordingStream implements EventStream {
@Override
public void start() {
try {
synchronized (lock) { // ensure one starter
ensureStartable();
try {
mbean.startRecording(recordingId);
} catch (IllegalStateException ise) {
throw ise;
try {
mbean.startRecording(recordingId);
} catch (IllegalStateException ise) {
throw ise;
}
startDownload();
} catch (Exception e) {
ManagementSupport.logDebug(e.getMessage());
close();
return;
}
startDownload();
} catch (Exception e) {
ManagementSupport.logDebug(e.getMessage());
close();
return;
stream.start();
started = true;
}
stream.start();
}
@Override
public void startAsync() {
stream.startAsync();
synchronized (lock) { // ensure one starter
ensureStartable();
stream.startAsync();
try {
mbean.startRecording(recordingId);
startDownload();
} catch (Exception e) {
ManagementSupport.logDebug(e.getMessage());
close();
}
started = true;
}
}
private void ensureStartable() {
if (closed) {
throw new IllegalStateException("Event stream is closed");
}
if (started) {
throw new IllegalStateException("Event stream can only be started once");
}
}
/**
* Writes recording data to a file.
* <p>
* The recording stream must be started, but not closed.
* <p>
* It's highly recommended that a max age or max size is set before
* starting the stream. Otherwise, the dump may not contain any events.
*
* @param destination the location where recording data is written, not
* {@code null}
*
* @throws IOException if the recording data can't be copied to the specified
* location, or if the stream is closed or not started.
*
* @throws SecurityException if a security manager exists and the caller doesn't
* have {@code FilePermission} to write to the destination path
*
* @see RemoteRecordingStream#setMaxAge(Duration)
* @see RemoteRecordingStream#setMaxSize(long)
*/
public void dump(Path destination) throws IOException {
Objects.requireNonNull(destination);
long id = -1;
try {
mbean.startRecording(recordingId);
startDownload();
FileDump fileDump;
synchronized (lock) { // ensure running state while preparing dump
if (closed) {
throw new IOException("Recording stream has been closed, no content to write");
}
if (!started) {
throw new IOException("Recording stream has not been started, no content to write");
}
// Take repository lock to prevent new data to be flushed
// client-side after clone has been created on the server.
synchronized (repository) {
id = mbean.cloneRecording(recordingId, true);
RecordingInfo ri = getRecordingInfo(mbean.getRecordings(), id);
fileDump = repository.newDump(ri.getStopTime());
}
}
// Write outside lock
fileDump.write(destination);
} catch (IOException ioe) {
throw ioe;
} catch (Exception e) {
ManagementSupport.logDebug(e.getMessage());
close();
} finally {
if (id != -1) {
try {
mbean.closeRecording(id);
} catch (Exception e) {
ManagementSupport.logDebug(e.getMessage());
close();
}
}
}
}
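For context, a minimal usage sketch of the new RemoteRecordingStream::dump(Path) against the local platform MBean server (file name and sleep duration are illustrative):

import java.lang.management.ManagementFactory;
import java.nio.file.Path;
import java.time.Duration;
import jdk.management.jfr.RemoteRecordingStream;

public class RemoteDumpExample {
    public static void main(String... args) throws Exception {
        var conn = ManagementFactory.getPlatformMBeanServer();
        try (var rs = new RemoteRecordingStream(conn)) {
            rs.setMaxAge(Duration.ofMinutes(5)); // bound the client-side repository
            rs.startAsync();
            Thread.sleep(1000);                  // let at least one chunk arrive
            rs.dump(Path.of("remote-snapshot.jfr"));
        }
    }
}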
private RecordingInfo getRecordingInfo(List<RecordingInfo> infos, long id) throws IOException {
for (RecordingInfo info : infos) {
if (info.getId() == id) {
return info;
}
}
throw new IOException("Unable to find id of dumped recording");
}
@Override
public void awaitTermination(Duration timeout) throws InterruptedException {
stream.awaitTermination(timeout);

View File

@ -0,0 +1,162 @@
/*
* Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package jdk.jfr.api.consumer.recordingstream;
import java.io.IOException;
import java.nio.file.Path;
import java.time.Duration;
import java.util.concurrent.Callable;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Function;
import jdk.jfr.Event;
import jdk.jfr.Name;
import jdk.jfr.consumer.RecordedEvent;
import jdk.jfr.consumer.RecordingFile;
import jdk.jfr.consumer.RecordingStream;
/**
* @test
* @summary Tests RecordingStream::dump(Path)
* @key jfr
* @requires vm.hasJFR
* @library /test/lib
* @run main/othervm jdk.jfr.api.consumer.recordingstream.TestDump
*/
public class TestDump {
@Name("DumpTest")
static class DumpEvent extends Event {
}
public static void main(String... args) throws Exception {
testUnstarted();
testClosed();
testOneDump();
testMultipleDumps();
testEventAfterDump();
}
private static void testUnstarted() throws Exception {
Path path = Path.of("recording.jfr");
var rs = new RecordingStream();
rs.setMaxAge(Duration.ofHours(1));
try {
rs.dump(path);
throw new Exception("Should not be able to dump unstarted recording");
} catch (IOException ise) {
// OK, expected
}
}
private static void testClosed() throws Exception {
Path path = Path.of("recording.jfr");
var rs = new RecordingStream();
rs.setMaxAge(Duration.ofHours(1));
rs.startAsync();
rs.close();
try {
rs.dump(path);
throw new Exception("Should not be able to dump closed recording");
} catch (IOException ise) {
// OK, expected
}
}
private static void testMultipleDumps() throws Exception {
CountDownLatch latch = new CountDownLatch(1);
try (var rs = new RecordingStream()) {
rs.setMaxAge(Duration.ofHours(1));
rs.onEvent(e -> {
latch.countDown();
});
rs.startAsync();
while (latch.getCount() > 0) {
DumpEvent e = new DumpEvent();
e.commit();
latch.await(10, TimeUnit.MILLISECONDS);
}
latch.await(); // Await first event
AtomicInteger counter = new AtomicInteger();
Callable<Boolean> f = () -> {
try {
int id = counter.incrementAndGet();
Path p = Path.of("multiple-" + id + ".jfr");
rs.dump(p);
return !RecordingFile.readAllEvents(p).isEmpty();
} catch (IOException ioe) {
ioe.printStackTrace();
return false;
}
};
var service = Executors.newFixedThreadPool(3);
var f1 = service.submit(f);
var f2 = service.submit(f);
var f3 = service.submit(f);
if (!f1.get() && !f2.get() && !f3.get()) {
throw new Exception("Failed to dump multiple recordings simultaneously");
}
service.shutdown();
}
}
private static void testOneDump() throws Exception {
CountDownLatch latch = new CountDownLatch(1);
try (var rs = new RecordingStream()) {
rs.setMaxSize(5_000_000);
rs.onEvent(e -> {
latch.countDown();
});
rs.startAsync();
while (latch.getCount() > 0) {
DumpEvent e = new DumpEvent();
e.commit();
latch.await(10, TimeUnit.MILLISECONDS);
}
Path p = Path.of("one-dump.jfr");
rs.dump(p);
if (RecordingFile.readAllEvents(p).isEmpty()) {
throw new Exception("No events in dump");
}
}
}
private static void testEventAfterDump() throws Exception {
CountDownLatch latch = new CountDownLatch(1);
try (var rs = new RecordingStream()) {
rs.setMaxAge(Duration.ofHours(1));
rs.onEvent(e -> {
latch.countDown();
});
rs.startAsync();
Path p = Path.of("after-dump.jfr");
rs.dump(p);
DumpEvent e = new DumpEvent();
e.commit();
latch.await();
}
}
}

View File

@ -0,0 +1,166 @@
/*
* Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package jdk.jfr.jmx.streaming;
import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.nio.file.Path;
import java.time.Duration;
import java.util.concurrent.Callable;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Function;
import jdk.jfr.Event;
import jdk.jfr.Name;
import jdk.jfr.consumer.RecordedEvent;
import jdk.jfr.consumer.RecordingFile;
import jdk.jfr.consumer.RecordingStream;
import jdk.management.jfr.RemoteRecordingStream;
/**
* @test
* @summary Tests RemoteRecordingStream::dump(Path)
* @key jfr
* @requires vm.hasJFR
* @library /test/lib
* @run main/othervm jdk.jfr.jmx.streaming.TestRemoteDump
*/
public class TestRemoteDump {
@Name("RemoteDumpTest")
static class DumpEvent extends Event {
}
public static void main(String... args) throws Exception {
testUnstarted();
testClosed();
testOneDump();
testMultipleDumps();
testEventAfterDump();
}
private static void testUnstarted() throws Exception {
Path path = Path.of("recording.jfr");
var rs = new RecordingStream();
rs.setMaxAge(Duration.ofHours(1));
try {
rs.dump(path);
throw new Exception("Should not be able to dump unstarted recording");
} catch (IOException ise) {
// OK, expected
}
}
private static void testClosed() throws Exception {
Path path = Path.of("recording.jfr");
var rs = new RecordingStream();
rs.setMaxAge(Duration.ofHours(1));
rs.startAsync();
rs.close();
try {
rs.dump(path);
throw new Exception("Should not be able to dump closed recording");
} catch (IOException ise) {
// OK, expected
}
}
private static void testMultipleDumps() throws Exception {
CountDownLatch latch = new CountDownLatch(1);
var conn = ManagementFactory.getPlatformMBeanServer();
try (var rs = new RemoteRecordingStream(conn)) {
rs.setMaxAge(Duration.ofHours(1));
rs.onEvent(e -> {
latch.countDown();
});
rs.startAsync();
while (latch.getCount() > 0) {
DumpEvent e = new DumpEvent();
e.commit();
latch.await(10, TimeUnit.MILLISECONDS);
}
latch.await(); // Await first event
AtomicInteger counter = new AtomicInteger();
Callable<Boolean> f = () -> {
try {
int id = counter.incrementAndGet();
Path p = Path.of("multiple-" + id + ".jfr");
rs.dump(p);
return !RecordingFile.readAllEvents(p).isEmpty();
} catch (IOException ioe) {
ioe.printStackTrace();
return false;
}
};
var service = Executors.newFixedThreadPool(3);
var f1 = service.submit(f);
var f2 = service.submit(f);
var f3 = service.submit(f);
if (!f1.get() && !f2.get() && !f3.get()) {
throw new Exception("Failed to dump multiple recordings simultaneously");
}
service.shutdown();
}
}
private static void testOneDump() throws Exception {
CountDownLatch latch = new CountDownLatch(1);
var conn = ManagementFactory.getPlatformMBeanServer();
try (var rs = new RemoteRecordingStream(conn)) {
rs.setMaxSize(5_000_000);
rs.onEvent(e -> {
latch.countDown();
});
rs.startAsync();
while (latch.getCount() > 0) {
DumpEvent e = new DumpEvent();
e.commit();
latch.await(10, TimeUnit.MILLISECONDS);
}
Path p = Path.of("one-dump.jfr");
rs.dump(p);
if (RecordingFile.readAllEvents(p).isEmpty()) {
throw new Exception("No events in dump");
}
}
}
private static void testEventAfterDump() throws Exception {
CountDownLatch latch = new CountDownLatch(1);
try (var rs = new RecordingStream()) {
rs.setMaxAge(Duration.ofHours(1));
rs.onEvent(e -> {
latch.countDown();
});
rs.startAsync();
Path p = Path.of("after-dump.jfr");
rs.dump(p);
DumpEvent e = new DumpEvent();
e.commit();
latch.await();
}
}
}