8224974: Implement JEP 352

Non-Volatile Mapped Byte Buffers

Reviewed-by: alanb, kvn, bpb, gromero, darcy, shade, bulasevich, dchuyko
This commit is contained in:
Andrew Dinn 2019-08-20 10:11:53 +01:00
parent db359f11b5
commit 047b8bfeb7
53 changed files with 1400 additions and 69 deletions

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -64,6 +64,7 @@ BOOT_MODULES += \
jdk.management.jfr \
jdk.management.agent \
jdk.net \
jdk.nio.mapmode \
jdk.sctp \
jdk.unsupported \
#

View File

@ -2185,17 +2185,21 @@ int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf)
//=============================================================================
const bool Matcher::match_rule_supported(int opcode) {
switch (opcode) {
default:
break;
}
if (!has_match_rule(opcode)) {
if (!has_match_rule(opcode))
return false;
bool ret_value = true;
switch (opcode) {
case Op_CacheWB:
case Op_CacheWBPreSync:
case Op_CacheWBPostSync:
if (!VM_Version::supports_data_cache_line_flush()) {
ret_value = false;
}
break;
}
return true; // Per default match rules are supported.
return ret_value; // Per default match rules are supported.
}
const bool Matcher::match_rule_supported_vector(int opcode, int vlen) {
@ -7769,6 +7773,47 @@ instruct storeD_volatile(vRegD src, /* sync_memory*/indirect mem)
// ---------------- end of volatile loads and stores ----------------
// Write back (to the point of persistence) the cache line containing
// $addr. Matches the CacheWB ideal node; only emitted when the CPU
// reports data-cache-line flush support (see the predicate below).
instruct cacheWB(indirect addr)
%{
predicate(VM_Version::supports_data_cache_line_flush());
match(CacheWB addr);
ins_cost(100);
format %{"cache wb $addr" %}
ins_encode %{
// the matcher is expected to hand us a plain base register with
// no index and no displacement (see the asserts)
assert($addr->index_position() < 0, "should be");
assert($addr$$disp == 0, "should be");
__ cache_wb(Address($addr$$base$$Register, 0));
%}
ins_pipe(pipe_slow); // XXX
%}
// Ordering barrier emitted before a run of cache-line writebacks.
// Matches the CacheWBPreSync ideal node; cache_wbsync(true) decides
// whether any instruction is actually needed on this architecture.
instruct cacheWBPreSync()
%{
predicate(VM_Version::supports_data_cache_line_flush());
match(CacheWBPreSync);
ins_cost(100);
format %{"cache wb presync" %}
ins_encode %{
__ cache_wbsync(true);
%}
ins_pipe(pipe_slow); // XXX
%}
// Ordering barrier emitted after a run of cache-line writebacks.
// Matches the CacheWBPostSync ideal node; cache_wbsync(false) emits
// the architecture-specific post-writeback barrier.
instruct cacheWBPostSync()
%{
predicate(VM_Version::supports_data_cache_line_flush());
match(CacheWBPostSync);
ins_cost(100);
format %{"cache wb postsync" %}
ins_encode %{
__ cache_wbsync(false);
%}
ins_pipe(pipe_slow); // XXX
%}
// ============================================================================
// BSWAP Instructions

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved.
* Copyright (c) 2014, 2019, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1058,12 +1058,13 @@ public:
// op1 CRn CRm op2
// IC IVAU 3 7 5 1
// DC CVAC 3 7 10 1
// DC CVAP 3 7 12 1
// DC CVAU 3 7 11 1
// DC CIVAC 3 7 14 1
// DC ZVA 3 7 4 1
// So only deal with the CRm field.
enum icache_maintenance {IVAU = 0b0101};
enum dcache_maintenance {CVAC = 0b1010, CVAU = 0b1011, CIVAC = 0b1110, ZVA = 0b100};
enum dcache_maintenance {CVAC = 0b1010, CVAP = 0b1100, CVAU = 0b1011, CIVAC = 0b1110, ZVA = 0b100};
void dc(dcache_maintenance cm, Register Rt) {
sys(0b011, 0b0111, cm, 0b001, Rt);

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, Red Hat Inc. All rights reserved.
* Copyright (c) 2015, 2019, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

View File

@ -5833,3 +5833,25 @@ void MacroAssembler::get_thread(Register dst) {
pop(saved_regs, sp);
}
// Write back the data cache line containing the address in `line` so
// that pending stores reach (non-volatile) memory. `line` must be a
// base-register-only address (no index, zero offset).
void MacroAssembler::cache_wb(Address line) {
assert(line.getMode() == Address::base_plus_offset, "mode should be base_plus_offset");
assert(line.index() == noreg, "index should be noreg");
assert(line.offset() == 0, "offset should be 0");
// would like to assert this
// assert(line._ext.shift == 0, "shift should be zero");
if (VM_Version::supports_dcpop()) {
// writeback using clear virtual address to point of persistence
dc(Assembler::CVAP, line.base());
} else {
// no need to generate anything as Unsafe.writebackMemory should
// never invoke this stub
}
}
// Ordering barrier paired with cache_wb: `is_pre` selects the barrier
// emitted before (true) or after (false) a run of line writebacks.
// The pre-sync case deliberately emits nothing.
void MacroAssembler::cache_wbsync(bool is_pre) {
// we only need a barrier post sync
if (!is_pre) {
membar(Assembler::AnyAny);
}
}

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved.
* Copyright (c) 2014, 2019, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1344,6 +1344,9 @@ public:
spill(tmp1, true, dst_offset+8);
}
}
void cache_wb(Address line);
void cache_wbsync(bool is_pre);
};
#ifdef ASSERT

View File

@ -2350,6 +2350,44 @@ class StubGenerator: public StubCodeGenerator {
return start;
}
// Generate the StubRoutines::_data_cache_writeback stub: takes the
// address of a cache line in c_rarg0 and writes that line back to
// memory via MacroAssembler::cache_wb. Returns the stub entry point.
address generate_data_cache_writeback() {
const Register line = c_rarg0; // address of line to write back
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", "_data_cache_writeback");
address start = __ pc();
__ enter();
__ cache_wb(Address(line, 0));
__ leave();
__ ret(lr);
return start;
}
// Generate the StubRoutines::_data_cache_writeback_sync stub: takes a
// pre/post flag in c_rarg0 and emits the required ordering barrier.
// Returns the stub entry point.
address generate_data_cache_writeback_sync() {
const Register is_pre = c_rarg0; // pre or post sync
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", "_data_cache_writeback_sync");
// pre wbsync is a no-op
// post wbsync emits a memory barrier (see MacroAssembler::cache_wbsync)
Label skip;
address start = __ pc();
__ enter();
__ cbnz(is_pre, skip);
__ cache_wbsync(false);
__ bind(skip);
__ leave();
__ ret(lr);
return start;
}
void generate_arraycopy_stubs() {
address entry;
address entry_jbyte_arraycopy;
@ -5739,6 +5777,10 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_ghash_processBlocks = generate_ghash_processBlocks();
}
// data cache line writeback
StubRoutines::_data_cache_writeback = generate_data_cache_writeback();
StubRoutines::_data_cache_writeback_sync = generate_data_cache_writeback_sync();
if (UseAESIntrinsics) {
StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock();
StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock();

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, Red Hat Inc. All rights reserved.
* Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2019, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,6 +28,7 @@
#include "asm/macroAssembler.inline.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "utilities/macros.hpp"
#include "vm_version_aarch64.hpp"
@ -67,6 +68,7 @@ int VM_Version::_model2;
int VM_Version::_variant;
int VM_Version::_revision;
int VM_Version::_stepping;
bool VM_Version::_dcpop;
VM_Version::PsrInfo VM_Version::_psr_info = { 0, };
static BufferBlob* stub_blob;
@ -167,7 +169,8 @@ void VM_Version::get_processor_features() {
int cpu_lines = 0;
if (FILE *f = fopen("/proc/cpuinfo", "r")) {
char buf[128], *p;
// need a large buffer as the flags line may include lots of text
char buf[1024], *p;
while (fgets(buf, sizeof (buf), f) != NULL) {
if ((p = strchr(buf, ':')) != NULL) {
long v = strtol(p+1, NULL, 0);
@ -181,12 +184,25 @@ void VM_Version::get_processor_features() {
_model = v;
} else if (strncmp(buf, "CPU revision", sizeof "CPU revision" - 1) == 0) {
_revision = v;
} else if (strncmp(buf, "flags", sizeof("flags") - 1) == 0) {
if (strstr(p+1, "dcpop")) {
_dcpop = true;
}
}
}
}
fclose(f);
}
if (os::supports_map_sync()) {
// if dcpop is available publish data cache line flush size via
// generic field, otherwise let it default to zero thereby
// disabling writeback
if (_dcpop) {
_data_cache_line_flush_size = dcache_line;
}
}
// Enable vendor specific features
// Ampere eMAG

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* Copyright (c) 2014, 2019, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -40,7 +40,7 @@ protected:
static int _variant;
static int _revision;
static int _stepping;
static bool _dcpop;
struct PsrInfo {
uint32_t dczid_el0;
uint32_t ctr_el0;
@ -106,6 +106,7 @@ public:
static int cpu_model2() { return _model2; }
static int cpu_variant() { return _variant; }
static int cpu_revision() { return _revision; }
static bool supports_dcpop() { return _dcpop; }
static ByteSize dczid_el0_offset() { return byte_offset_of(PsrInfo, dczid_el0); }
static ByteSize ctr_el0_offset() { return byte_offset_of(PsrInfo, ctr_el0); }
static bool is_zva_enabled() {

View File

@ -2274,6 +2274,14 @@ void Assembler::mfence() {
emit_int8((unsigned char)0xF0);
}
// Emit sfence instruction (store fence): orders all preceding stores
// before any later stores become globally visible. Encoding: 0F AE F8.
void Assembler::sfence() {
NOT_LP64(assert(VM_Version::supports_sse2(), "unsupported");)
emit_int8(0x0F);
emit_int8((unsigned char)0xAE);
emit_int8((unsigned char)0xF8);
}
void Assembler::mov(Register dst, Register src) {
LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
}
@ -8617,12 +8625,45 @@ void Assembler::cdqq() {
}
// Emit clflush: write back and invalidate (evict) the cache line
// containing adr. Serializing with respect to other memory traffic.
// Encoding: 0F AE /7.
void Assembler::clflush(Address adr) {
assert(VM_Version::supports_clflush(), "should do");
prefix(adr);
emit_int8(0x0F);
emit_int8((unsigned char)0xAE);
// extended opcode byte is 7 == rdi
emit_operand(rdi, adr);
}
// Emit clflushopt: like clflush (write back and evict) but weakly
// ordered, so callers must fence around it. Encoding: 66 0F AE /7.
void Assembler::clflushopt(Address adr) {
assert(VM_Version::supports_clflushopt(), "should do!");
// adr should be base reg only with no index or offset
assert(adr.index() == noreg, "index should be noreg");
assert(adr.scale() == Address::no_scale, "scale should be no_scale");
assert(adr.disp() == 0, "displacement should be 0");
// instruction prefix is 0x66
emit_int8(0x66);
prefix(adr);
// opcode family is 0x0f 0xAE
emit_int8(0x0F);
emit_int8((unsigned char)0xAE);
// extended opcode byte is 7 == rdi
emit_operand(rdi, adr);
}
// Emit clwb: write the cache line back WITHOUT evicting it. Weakly
// ordered, so callers must fence around it. Encoding: 66 0F AE /6.
void Assembler::clwb(Address adr) {
assert(VM_Version::supports_clwb(), "should do!");
// adr should be base reg only with no index or offset
assert(adr.index() == noreg, "index should be noreg");
assert(adr.scale() == Address::no_scale, "scale should be no_scale");
assert(adr.disp() == 0, "displacement should be 0");
// instruction prefix is 0x66
emit_int8(0x66);
prefix(adr);
// opcode family is 0x0f 0xAE
emit_int8(0x0F);
emit_int8((unsigned char)0xAE);
// extended opcode byte is 6 == rsi
emit_operand(rsi, adr);
}
void Assembler::cmovq(Condition cc, Register dst, Register src) {
int encode = prefixq_and_encode(dst->encoding(), src->encoding());
emit_int8(0x0F);

View File

@ -1028,6 +1028,8 @@ private:
void cld();
void clflush(Address adr);
void clflushopt(Address adr);
void clwb(Address adr);
void cmovl(Condition cc, Register dst, Register src);
void cmovl(Condition cc, Register dst, Address src);
@ -1404,6 +1406,7 @@ private:
}
void mfence();
void sfence();
// Moves

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -9905,6 +9905,47 @@ void MacroAssembler::byte_array_inflate(Register src, Register dst, Register len
bind(done);
}
#ifdef _LP64
// Write back the cache line containing `line` using the best available
// instruction: clwb > clflushopt > clflush (see comments below).
void MacroAssembler::cache_wb(Address line)
{
// 64 bit cpus always support clflush
assert(VM_Version::supports_clflush(), "clflush should be available");
bool optimized = VM_Version::supports_clflushopt();
bool no_evict = VM_Version::supports_clwb();
// prefer clwb (writeback without evict) otherwise
// prefer clflushopt (potentially parallel writeback with evict)
// otherwise fallback on clflush (serial writeback with evict)
if (optimized) {
if (no_evict) {
clwb(line);
} else {
clflushopt(line);
}
} else {
// no need for fence when using CLFLUSH
clflush(line);
}
}
// Ordering barrier paired with cache_wb: only the post-writeback case
// needs a fence, and only when the weakly-ordered clflushopt/clwb
// instructions may have been used.
void MacroAssembler::cache_wbsync(bool is_pre)
{
assert(VM_Version::supports_clflush(), "clflush should be available");
bool optimized = VM_Version::supports_clflushopt();
bool no_evict = VM_Version::supports_clwb();
// pick the correct implementation
if (!is_pre && (optimized || no_evict)) {
// need an sfence for post flush when using clflushopt or clwb
// otherwise no need for any synchronization
sfence();
}
}
#endif // _LP64
Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) {
switch (cond) {
// Note some conditions are synonyms for others

View File

@ -1801,6 +1801,10 @@ public:
void byte_array_inflate(Register src, Register dst, Register len,
XMMRegister tmp1, Register tmp2);
#ifdef _LP64
void cache_wb(Address line);
void cache_wbsync(bool is_pre);
#endif // _LP64
};
/**

View File

@ -2909,6 +2909,45 @@ class StubGenerator: public StubCodeGenerator {
return start;
}
// Generate the StubRoutines::_data_cache_writeback stub: takes the
// address of a cache line in c_rarg0 and writes that line back to
// memory via MacroAssembler::cache_wb. Returns the stub entry point.
address generate_data_cache_writeback() {
const Register src = c_rarg0; // source address
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", "_data_cache_writeback");
address start = __ pc();
__ enter();
__ cache_wb(Address(src, 0));
__ leave();
__ ret(0);
return start;
}
// Generate the StubRoutines::_data_cache_writeback_sync stub: takes a
// pre/post flag in c_rarg0 and emits the required ordering barrier.
// Returns the stub entry point.
address generate_data_cache_writeback_sync() {
const Register is_pre = c_rarg0; // pre or post sync
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", "_data_cache_writeback_sync");
// pre wbsync is a no-op
// post wbsync translates to an sfence
Label skip;
address start = __ pc();
__ enter();
__ cmpl(is_pre, 0);
__ jcc(Assembler::notEqual, skip);
__ cache_wbsync(false);
__ bind(skip);
__ leave();
__ ret(0);
return start;
}
void generate_arraycopy_stubs() {
address entry;
address entry_jbyte_arraycopy;
@ -5998,6 +6037,10 @@ address generate_avx_ghash_processBlocks() {
// support for verify_oop (must happen after universe_init)
StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop();
// data cache line writeback
StubRoutines::_data_cache_writeback = generate_data_cache_writeback();
StubRoutines::_data_cache_writeback_sync = generate_data_cache_writeback_sync();
// arraycopy stubs used by compilers
generate_arraycopy_stubs();

View File

@ -35,6 +35,7 @@
#include "utilities/virtualizationSupport.hpp"
#include "vm_version_x86.hpp"
#include OS_HEADER_INLINE(os)
int VM_Version::_cpu;
int VM_Version::_model;
@ -608,6 +609,16 @@ void VM_Version::get_processor_features() {
guarantee(_cpuid_info.std_cpuid1_ebx.bits.clflush_size == 8, "such clflush size is not supported");
#endif
#ifdef _LP64
// assigning this field effectively enables Unsafe.writebackMemory()
// by initing UnsafeConstant.DATA_CACHE_LINE_FLUSH_SIZE to non-zero
// that is only implemented on x86_64 and only if the OS plays ball
if (os::supports_map_sync()) {
// publish data cache line flush size to generic field, otherwise
// let it default to zero thereby disabling writeback
_data_cache_line_flush_size = _cpuid_info.std_cpuid1_ebx.bits.clflush_size * 8;
}
#endif
// If the OS doesn't support SSE, we can't use this feature even if the HW does
if (!os::supports_sse())
_features &= ~(CPU_SSE|CPU_SSE2|CPU_SSE3|CPU_SSSE3|CPU_SSE4A|CPU_SSE4_1|CPU_SSE4_2);

View File

@ -25,6 +25,7 @@
#ifndef CPU_X86_VM_VERSION_X86_HPP
#define CPU_X86_VM_VERSION_X86_HPP
#include "memory/universe.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/vm_version.hpp"
@ -218,7 +219,10 @@ class VM_Version : public Abstract_VM_Version {
avx512dq : 1,
: 1,
adx : 1,
: 6,
: 3,
clflushopt : 1,
clwb : 1,
: 1,
avx512pf : 1,
avx512er : 1,
avx512cd : 1,
@ -338,7 +342,11 @@ protected:
#define CPU_VAES ((uint64_t)UCONST64(0x8000000000)) // Vector AES instructions
#define CPU_VNNI ((uint64_t)UCONST64(0x10000000000)) // Vector Neural Network Instructions
enum Extended_Family {
#define CPU_FLUSH ((uint64_t)UCONST64(0x20000000000)) // flush instruction
#define CPU_FLUSHOPT ((uint64_t)UCONST64(0x40000000000)) // flushopt instruction
#define CPU_CLWB ((uint64_t)UCONST64(0x80000000000)) // clwb instruction
enum Extended_Family {
// AMD
CPU_FAMILY_AMD_11H = 0x11,
// ZX
@ -495,6 +503,14 @@ protected:
result |= CPU_CX8;
if (_cpuid_info.std_cpuid1_edx.bits.cmov != 0)
result |= CPU_CMOV;
if (_cpuid_info.std_cpuid1_edx.bits.clflush != 0)
result |= CPU_FLUSH;
#ifdef _LP64
// clflush should always be available on x86_64
// if not we are in real trouble because we rely on it
// to flush the code cache.
assert ((result & CPU_FLUSH) != 0, "clflush should be available");
#endif
if (_cpuid_info.std_cpuid1_edx.bits.fxsr != 0 || (is_amd_family() &&
_cpuid_info.ext_cpuid1_edx.bits.fxsr != 0))
result |= CPU_FXSR;
@ -575,6 +591,8 @@ protected:
result |= CPU_SHA;
if (_cpuid_info.std_cpuid1_ecx.bits.fma != 0)
result |= CPU_FMA;
if (_cpuid_info.sef_cpuid7_ebx.bits.clflushopt != 0)
result |= CPU_FLUSHOPT;
// AMD|Hygon features.
if (is_amd_family()) {
@ -594,6 +612,9 @@ protected:
if (_cpuid_info.ext_cpuid1_ecx.bits.misalignsse != 0) {
result |= CPU_3DNOW_PREFETCH;
}
if (_cpuid_info.sef_cpuid7_ebx.bits.clwb != 0) {
result |= CPU_CLWB;
}
}
// ZX features.
@ -941,6 +962,44 @@ public:
return LP64_ONLY(true) NOT_LP64(false); // not implemented on x86_32
}
// there are several insns to force cache line sync to memory which
// we can use to ensure mapped non-volatile memory is up to date with
// pending in-cache changes.
//
// 64 bit cpus always support clflush which writes back and evicts
// on 32 bit cpus support is recorded via a feature flag
//
// clflushopt is optional and acts like clflush except it does
// not synchronize with other memory ops. it needs a preceding
// and trailing StoreStore fence
//
// clwb is an optional, intel-specific instruction which
// writes back without evicting the line. it also does not
// synchronize with other memory ops. so, it also needs a preceding
// and trailing StoreStore fence.
#ifdef _LP64
// On x86_64 clflush is assumed present; the assert tolerates calls
// made during Universe::init, before CPU features are populated.
static bool supports_clflush() {
// clflush should always be available on x86_64
// if not we are in real trouble because we rely on it
// to flush the code cache.
// Unfortunately, Assembler::clflush is currently called as part
// of generation of the code cache flush routine. This happens
// under Universe::init before the processor features are set
// up. Assembler::flush calls this routine to check that clflush
// is allowed. So, we give the caller a free pass if Universe init
// is still in progress.
assert ((!Universe::is_fully_initialized() || (_features & CPU_FLUSH) != 0), "clflush should be available");
return true;
}
static bool supports_clflushopt() { return ((_features & CPU_FLUSHOPT) != 0); }
static bool supports_clwb() { return ((_features & CPU_CLWB) != 0); }
#else
static bool supports_clflush() { return ((_features & CPU_FLUSH) != 0); }
static bool supports_clflushopt() { return false; }
static bool supports_clwb() { return false; }
#endif // _LP64
// support functions for virtualization detection
private:
static void check_virt_cpuid(uint32_t idx, uint32_t *regs);

View File

@ -1478,6 +1478,13 @@ const bool Matcher::match_rule_supported(int opcode) {
ret_value = false;
break;
#endif
case Op_CacheWB:
case Op_CacheWBPreSync:
case Op_CacheWBPostSync:
if (!VM_Version::supports_data_cache_line_flush()) {
ret_value = false;
}
break;
}
return ret_value; // Per default match rules are supported.

View File

@ -6565,6 +6565,47 @@ instruct storeSSD(stackSlotD dst, regD src)
ins_pipe(pipe_slow); // XXX
%}
// Write back (to memory) the cache line containing $addr. Matches the
// CacheWB ideal node; only emitted when the CPU reports data-cache-line
// flush support (see the predicate below).
instruct cacheWB(indirect addr)
%{
predicate(VM_Version::supports_data_cache_line_flush());
match(CacheWB addr);
ins_cost(100);
format %{"cache wb $addr" %}
ins_encode %{
// the matcher is expected to hand us a plain base register with
// no index and no displacement (see the asserts)
assert($addr->index_position() < 0, "should be");
assert($addr$$disp == 0, "should be");
__ cache_wb(Address($addr$$base$$Register, 0));
%}
ins_pipe(pipe_slow); // XXX
%}
// Ordering barrier emitted before a run of cache-line writebacks.
// Matches the CacheWBPreSync ideal node; cache_wbsync(true) decides
// whether any instruction is actually needed.
instruct cacheWBPreSync()
%{
predicate(VM_Version::supports_data_cache_line_flush());
match(CacheWBPreSync);
ins_cost(100);
format %{"cache wb presync" %}
ins_encode %{
__ cache_wbsync(true);
%}
ins_pipe(pipe_slow); // XXX
%}
// Ordering barrier emitted after a run of cache-line writebacks.
// Matches the CacheWBPostSync ideal node; cache_wbsync(false) emits
// the post-writeback fence when one is required.
instruct cacheWBPostSync()
%{
predicate(VM_Version::supports_data_cache_line_flush());
match(CacheWBPostSync);
ins_cost(100);
format %{"cache wb postsync" %}
ins_encode %{
__ cache_wbsync(false);
%}
ins_pipe(pipe_slow); // XXX
%}
//----------BSWAP Instructions-------------------------------------------------
instruct bytes_reverse_int(rRegI dst) %{
match(Set dst (ReverseBytesI dst));

View File

@ -4338,3 +4338,7 @@ int os::compare_file_modified_times(const char* file1, const char* file2) {
time_t t2 = get_mtime(file2);
return t1 - t2;
}
// MAP_SYNC-style persistent-memory mapping is not available on this
// platform, so mapped writeback support stays disabled here.
bool os::supports_map_sync() {
return false;
}

View File

@ -3803,6 +3803,10 @@ int os::get_core_path(char* buffer, size_t bufferSize) {
return n;
}
// MAP_SYNC-style persistent-memory mapping is not available on this
// platform, so mapped writeback support stays disabled here.
bool os::supports_map_sync() {
return false;
}
#ifndef PRODUCT
void TestReserveMemorySpecial_test() {
// No tests available for this platform

View File

@ -6185,6 +6185,10 @@ int os::compare_file_modified_times(const char* file1, const char* file2) {
return diff;
}
// This platform can map files with synchronous writeback semantics
// (MAP_SYNC), so mapped non-volatile memory support is enabled.
bool os::supports_map_sync() {
return true;
}
/////////////// Unit tests ///////////////
#ifndef PRODUCT

View File

@ -5380,6 +5380,10 @@ int os::get_core_path(char* buffer, size_t bufferSize) {
return strlen(buffer);
}
// MAP_SYNC-style persistent-memory mapping is not available on this
// platform, so mapped writeback support stays disabled here.
bool os::supports_map_sync() {
return false;
}
#ifndef PRODUCT
void TestReserveMemorySpecial_test() {
// No tests available for this platform

View File

@ -5803,3 +5803,7 @@ void os::win32::initialize_thread_ptr_offset() {
os::os_exception_wrapper((java_call_t)call_wrapper_dummy,
NULL, NULL, NULL, NULL);
}
// Windows has no MAP_SYNC equivalent wired up here, so mapped
// writeback support stays disabled on this platform.
bool os::supports_map_sync() {
return false;
}

View File

@ -3518,6 +3518,12 @@ int MatchNode::needs_ideal_memory_edge(FormDict &globals) const {
int cnt = sizeof(needs_ideal_memory_list)/sizeof(char*);
if( strcmp(_opType,"PrefetchAllocation")==0 )
return 1;
if( strcmp(_opType,"CacheWB")==0 )
return 1;
if( strcmp(_opType,"CacheWBPreSync")==0 )
return 1;
if( strcmp(_opType,"CacheWBPostSync")==0 )
return 1;
if( _lChild ) {
const char *opType = _lChild->_opType;
for( int i=0; i<cnt; i++ )

View File

@ -64,6 +64,7 @@
#include "runtime/safepointVerifiers.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/vframe.inline.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/align.hpp"
#include "utilities/preserveException.hpp"
#include "utilities/utf8.hpp"
@ -4034,6 +4035,7 @@ private:
int _page_size;
bool _big_endian;
bool _use_unaligned_access;
int _data_cache_line_flush_size;
public:
UnsafeConstantsFixup() {
// round up values for all static final fields
@ -4041,6 +4043,7 @@ public:
_page_size = os::vm_page_size();
_big_endian = LITTLE_ENDIAN_ONLY(false) BIG_ENDIAN_ONLY(true);
_use_unaligned_access = UseUnalignedAccesses;
_data_cache_line_flush_size = (int)VM_Version::data_cache_line_flush_size();
}
void do_field(fieldDescriptor* fd) {
@ -4057,6 +4060,8 @@ public:
mirror->bool_field_put(fd->offset(), _big_endian);
} else if (fd->name() == vmSymbols::use_unaligned_access_name()) {
mirror->bool_field_put(fd->offset(), _use_unaligned_access);
} else if (fd->name() == vmSymbols::data_cache_line_flush_size_name()) {
mirror->int_field_put(fd->offset(), _data_cache_line_flush_size);
} else {
assert(false, "unexpected UnsafeConstants field");
}

View File

@ -452,6 +452,7 @@
template(page_size_name, "PAGE_SIZE") \
template(big_endian_name, "BIG_ENDIAN") \
template(use_unaligned_access_name, "UNALIGNED_ACCESS") \
template(data_cache_line_flush_size_name, "DATA_CACHE_LINE_FLUSH_SIZE") \
\
/* name symbols needed by intrinsics */ \
VM_INTRINSICS_DO(VM_INTRINSIC_IGNORE, VM_SYMBOL_IGNORE, template, VM_SYMBOL_IGNORE, VM_ALIAS_IGNORE) \
@ -480,6 +481,7 @@
template(long_int_signature, "(J)I") \
template(long_long_signature, "(J)J") \
template(long_double_signature, "(J)D") \
template(long_void_signature, "(J)V") \
template(byte_signature, "B") \
template(char_signature, "C") \
template(double_signature, "D") \
@ -1093,6 +1095,12 @@
do_class(jdk_internal_misc_Unsafe, "jdk/internal/misc/Unsafe") \
do_class(sun_misc_Unsafe, "sun/misc/Unsafe") \
\
do_intrinsic(_writeback0, jdk_internal_misc_Unsafe, writeback0_name, long_void_signature , F_RN) \
do_name( writeback0_name, "writeback0") \
do_intrinsic(_writebackPreSync0, jdk_internal_misc_Unsafe, writebackPreSync0_name, void_method_signature , F_RN) \
do_name( writebackPreSync0_name, "writebackPreSync0") \
do_intrinsic(_writebackPostSync0, jdk_internal_misc_Unsafe, writebackPostSync0_name, void_method_signature , F_RN) \
do_name( writebackPostSync0_name, "writebackPostSync0") \
do_intrinsic(_allocateInstance, jdk_internal_misc_Unsafe, allocateInstance_name, allocateInstance_signature, F_RN) \
do_name( allocateInstance_name, "allocateInstance") \
do_signature(allocateInstance_signature, "(Ljava/lang/Class;)Ljava/lang/Object;") \

View File

@ -453,6 +453,15 @@ bool C2Compiler::is_intrinsic_supported(const methodHandle& method, bool is_virt
case vmIntrinsics::_minD:
if (!Matcher::match_rule_supported(Op_MinD)) return false;
break;
case vmIntrinsics::_writeback0:
if (!Matcher::match_rule_supported(Op_CacheWB)) return false;
break;
case vmIntrinsics::_writebackPreSync0:
if (!Matcher::match_rule_supported(Op_CacheWBPreSync)) return false;
break;
case vmIntrinsics::_writebackPostSync0:
if (!Matcher::match_rule_supported(Op_CacheWBPostSync)) return false;
break;
case vmIntrinsics::_hashCode:
case vmIntrinsics::_identityHashCode:
case vmIntrinsics::_getClass:

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -51,6 +51,9 @@ macro(ReverseBytesL)
macro(ReverseBytesUS)
macro(ReverseBytesS)
macro(CProj)
macro(CacheWB)
macro(CacheWBPreSync)
macro(CacheWBPostSync)
macro(CallDynamicJava)
macro(CallJava)
macro(CallLeaf)

View File

@ -253,6 +253,8 @@ class LibraryCallKit : public GraphKit {
static bool klass_needs_init_guard(Node* kls);
bool inline_unsafe_allocate();
bool inline_unsafe_newArray(bool uninitialized);
bool inline_unsafe_writeback0();
bool inline_unsafe_writebackSync0(bool is_pre);
bool inline_unsafe_copyMemory();
bool inline_native_currentThread();
@ -755,6 +757,9 @@ bool LibraryCallKit::try_to_inline(int predicate) {
#endif
case vmIntrinsics::_currentTimeMillis: return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeMillis), "currentTimeMillis");
case vmIntrinsics::_nanoTime: return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeNanos), "nanoTime");
case vmIntrinsics::_writeback0: return inline_unsafe_writeback0();
case vmIntrinsics::_writebackPreSync0: return inline_unsafe_writebackSync0(true);
case vmIntrinsics::_writebackPostSync0: return inline_unsafe_writebackSync0(false);
case vmIntrinsics::_allocateInstance: return inline_unsafe_allocate();
case vmIntrinsics::_copyMemory: return inline_unsafe_copyMemory();
case vmIntrinsics::_getLength: return inline_native_getLength();
@ -2848,6 +2853,55 @@ bool LibraryCallKit::klass_needs_init_guard(Node* kls) {
return !ik->is_initialized();
}
//----------------------------inline_unsafe_writeback0-------------------------
// public native void Unsafe.writeback0(long address)
// Intrinsify Unsafe.writeback0 as a CacheWB ideal node on raw memory.
// Returns false (leaving the native call in place) when the backend
// has no match rule for CacheWB.
bool LibraryCallKit::inline_unsafe_writeback0() {
if (!Matcher::has_match_rule(Op_CacheWB)) {
return false;
}
#ifndef PRODUCT
// a backend that matches CacheWB must also match both sync nodes
assert(Matcher::has_match_rule(Op_CacheWBPreSync), "found match rule for CacheWB but not CacheWBPreSync");
assert(Matcher::has_match_rule(Op_CacheWBPostSync), "found match rule for CacheWB but not CacheWBPostSync");
ciSignature* sig = callee()->signature();
assert(sig->type_at(0)->basic_type() == T_LONG, "Unsafe_writeback0 address is long!");
#endif
null_check_receiver(); // null-check, then ignore
// convert the long argument to a raw pointer before wiring it in
Node *addr = argument(1);
addr = new CastX2PNode(addr);
addr = _gvn.transform(addr);
// thread the writeback through raw memory so it orders with raw stores
Node *flush = new CacheWBNode(control(), memory(TypeRawPtr::BOTTOM), addr);
flush = _gvn.transform(flush);
set_memory(flush, TypeRawPtr::BOTTOM);
return true;
}
//----------------------------inline_unsafe_writebackSync0---------------------
// public native void Unsafe.writebackPreSync0()
// public native void Unsafe.writebackPostSync0()
// Intrinsify the pre/post writeback sync entry points as CacheWBPreSync
// or CacheWBPostSync ideal nodes on raw memory.
bool LibraryCallKit::inline_unsafe_writebackSync0(bool is_pre) {
if (is_pre && !Matcher::has_match_rule(Op_CacheWBPreSync)) {
return false;
}
if (!is_pre && !Matcher::has_match_rule(Op_CacheWBPostSync)) {
return false;
}
#ifndef PRODUCT
// a backend that matches a sync node must also match CacheWB itself
assert(Matcher::has_match_rule(Op_CacheWB),
(is_pre ? "found match rule for CacheWBPreSync but not CacheWB"
: "found match rule for CacheWBPostSync but not CacheWB"));
#endif
null_check_receiver(); // null-check, then ignore
// thread the sync node through raw memory so it orders with raw stores
Node *sync;
if (is_pre) {
sync = new CacheWBPreSyncNode(control(), memory(TypeRawPtr::BOTTOM));
} else {
sync = new CacheWBPostSyncNode(control(), memory(TypeRawPtr::BOTTOM));
}
sync = _gvn.transform(sync);
set_memory(sync, TypeRawPtr::BOTTOM);
return true;
}
//----------------------------inline_unsafe_allocate---------------------------
// public native Object Unsafe.allocateInstance(Class<?> cls);
bool LibraryCallKit::inline_unsafe_allocate() {

View File

@ -1642,6 +1642,42 @@ class MergeMemStream : public StackObj {
}
};
// cachewb node for guaranteeing writeback of the cache line at a
// given address to (non-volatile) RAM
class CacheWBNode : public Node {
public:
// inputs: control, memory, address of the line to write back
CacheWBNode(Node *ctrl, Node *mem, Node *addr) : Node(ctrl, mem, addr) {}
virtual int Opcode() const;
virtual uint ideal_reg() const { return NotAMachineReg; }
// only the address input (edge 2) participates in matching
virtual uint match_edge(uint idx) const { return (idx == 2); }
virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
virtual const Type *bottom_type() const { return Type::MEMORY; }
};
// cachewb pre sync node for ensuring that writebacks are serialised
// relative to preceding stores
class CacheWBPreSyncNode : public Node {
public:
// inputs: control and memory only — there is no address operand
CacheWBPreSyncNode(Node *ctrl, Node *mem) : Node(ctrl, mem) {}
virtual int Opcode() const;
virtual uint ideal_reg() const { return NotAMachineReg; }
// no input participates in matching
virtual uint match_edge(uint idx) const { return false; }
virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
virtual const Type *bottom_type() const { return Type::MEMORY; }
};
// cachewb post sync node for ensuring that writebacks are serialised
// relative to following stores
class CacheWBPostSyncNode : public Node {
public:
// inputs: control and memory only — there is no address operand
CacheWBPostSyncNode(Node *ctrl, Node *mem) : Node(ctrl, mem) {}
virtual int Opcode() const;
virtual uint ideal_reg() const { return NotAMachineReg; }
// no input participates in matching
virtual uint match_edge(uint idx) const { return false; }
virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
virtual const Type *bottom_type() const { return Type::MEMORY; }
};
//------------------------------Prefetch---------------------------------------
// Allocation prefetch which may fault, TLAB size have to be adjusted.

View File

@ -44,6 +44,7 @@
#include "runtime/jniHandles.inline.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/reflection.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/thread.hpp"
#include "runtime/threadSMR.hpp"
#include "runtime/vm_version.hpp"
@ -445,6 +446,46 @@ UNSAFE_LEAF(void, Unsafe_CopySwapMemory0(JNIEnv *env, jobject unsafe, jobject sr
}
} UNSAFE_END
// Native fallback for Unsafe.writeback0(long): write back the cache
// line containing `line` via the generated data_cache_writeback stub.
UNSAFE_LEAF (void, Unsafe_WriteBack0(JNIEnv *env, jobject unsafe, jlong line)) {
assert(VM_Version::supports_data_cache_line_flush(), "should not get here");
#ifdef ASSERT
if (TraceMemoryWriteback) {
tty->print_cr("Unsafe: writeback 0x%p", addr_from_java(line));
}
#endif
assert(StubRoutines::data_cache_writeback() != NULL, "sanity");
(StubRoutines::DataCacheWriteback_stub())(addr_from_java(line));
} UNSAFE_END
static void doWriteBackSync0(bool is_pre)
{
assert(StubRoutines::data_cache_writeback_sync() != NULL, "sanity");
(StubRoutines::DataCacheWritebackSync_stub())(is_pre);
}
// Native entry point for jdk.internal.misc.Unsafe.writebackPreSync0:
// barrier serialising subsequent cache line writebacks relative to
// preceding memory writes.
UNSAFE_LEAF (void, Unsafe_WriteBackPreSync0(JNIEnv *env, jobject unsafe)) {
  assert(VM_Version::supports_data_cache_line_flush(), "should not get here");
#ifdef ASSERT
  // debug-build tracing, gated on the develop flag TraceMemoryWriteback
  if (TraceMemoryWriteback) {
    tty->print_cr("Unsafe: writeback pre-sync");
  }
#endif
  doWriteBackSync0(true);
} UNSAFE_END
// Native entry point for jdk.internal.misc.Unsafe.writebackPostSync0:
// barrier serialising preceding cache line writebacks relative to
// subsequent memory writes.
UNSAFE_LEAF (void, Unsafe_WriteBackPostSync0(JNIEnv *env, jobject unsafe)) {
  assert(VM_Version::supports_data_cache_line_flush(), "should not get here");
#ifdef ASSERT
  // debug-build tracing, gated on the develop flag TraceMemoryWriteback.
  // Fixed: previously printed "pre-sync" (copy/paste from
  // Unsafe_WriteBackPreSync0) although this is the post-sync barrier.
  if (TraceMemoryWriteback) {
    tty->print_cr("Unsafe: writeback post-sync");
  }
#endif
  doWriteBackSync0(false);
} UNSAFE_END
////// Random queries
static jlong find_field_offset(jclass clazz, jstring name, TRAPS) {
@ -1073,6 +1114,9 @@ static JNINativeMethod jdk_internal_misc_Unsafe_methods[] = {
{CC "copyMemory0", CC "(" OBJ "J" OBJ "JJ)V", FN_PTR(Unsafe_CopyMemory0)},
{CC "copySwapMemory0", CC "(" OBJ "J" OBJ "JJJ)V", FN_PTR(Unsafe_CopySwapMemory0)},
{CC "writeback0", CC "(" "J" ")V", FN_PTR(Unsafe_WriteBack0)},
{CC "writebackPreSync0", CC "()V", FN_PTR(Unsafe_WriteBackPreSync0)},
{CC "writebackPostSync0", CC "()V", FN_PTR(Unsafe_WriteBackPostSync0)},
{CC "setMemory0", CC "(" OBJ "JJB)V", FN_PTR(Unsafe_SetMemory0)},
{CC "defineAnonymousClass0", CC "(" DAC_Args ")" CLS, FN_PTR(Unsafe_DefineAnonymousClass0)},

View File

@ -2440,6 +2440,9 @@ const size_t minimumSymbolTableSize = 1024;
diagnostic(bool, UseSwitchProfiling, true, \
"leverage profiling for table/lookup switch") \
\
develop(bool, TraceMemoryWriteback, false, \
"Trace memory writeback operations") \
\
JFR_ONLY(product(bool, FlightRecorder, false, \
"(Deprecated) Enable Flight Recorder")) \
\

View File

@ -823,6 +823,9 @@ class os: AllStatic {
static char** split_path(const char* path, int* n);
// support for mapping non-volatile memory using MAP_SYNC
static bool supports_map_sync();
// Extensions
#include "runtime/os_ext.hpp"

View File

@ -113,6 +113,9 @@ address StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = CAST_FROM_FN_PTR
address StubRoutines::_zero_aligned_words = CAST_FROM_FN_PTR(address, Copy::zero_to_words);
address StubRoutines::_data_cache_writeback = NULL;
address StubRoutines::_data_cache_writeback_sync = NULL;
address StubRoutines::_checkcast_arraycopy = NULL;
address StubRoutines::_checkcast_arraycopy_uninit = NULL;
address StubRoutines::_unsafe_arraycopy = NULL;

View File

@ -191,6 +191,10 @@ class StubRoutines: AllStatic {
static address _arrayof_jlong_disjoint_arraycopy;
static address _arrayof_oop_disjoint_arraycopy, _arrayof_oop_disjoint_arraycopy_uninit;
// cache line writeback
static address _data_cache_writeback;
static address _data_cache_writeback_sync;
// these are recommended but optional:
static address _checkcast_arraycopy, _checkcast_arraycopy_uninit;
static address _unsafe_arraycopy;
@ -357,6 +361,14 @@ class StubRoutines: AllStatic {
static address arrayof_oop_disjoint_arraycopy(bool dest_uninitialized = false) {
return dest_uninitialized ? _arrayof_oop_disjoint_arraycopy_uninit : _arrayof_oop_disjoint_arraycopy;
}
static address data_cache_writeback() { return _data_cache_writeback; }
static address data_cache_writeback_sync() { return _data_cache_writeback_sync; }
typedef void (*DataCacheWritebackStub)(void *);
static DataCacheWritebackStub DataCacheWriteback_stub() { return CAST_TO_FN_PTR(DataCacheWritebackStub, _data_cache_writeback); }
typedef void (*DataCacheWritebackSyncStub)(bool);
static DataCacheWritebackSyncStub DataCacheWritebackSync_stub() { return CAST_TO_FN_PTR(DataCacheWritebackSyncStub, _data_cache_writeback_sync); }
static address checkcast_arraycopy(bool dest_uninitialized = false) {
return dest_uninitialized ? _checkcast_arraycopy_uninit : _checkcast_arraycopy;
}

View File

@ -42,6 +42,7 @@ bool Abstract_VM_Version::_supports_atomic_getadd4 = false;
bool Abstract_VM_Version::_supports_atomic_getadd8 = false;
unsigned int Abstract_VM_Version::_logical_processors_per_package = 1U;
unsigned int Abstract_VM_Version::_L1_data_cache_line_size = 0;
unsigned int Abstract_VM_Version::_data_cache_line_flush_size = 0;
VirtualizationType Abstract_VM_Version::_detected_virtualization = NoDetectedVirtualization;

View File

@ -67,6 +67,7 @@ class Abstract_VM_Version: AllStatic {
static int _vm_security_version;
static int _vm_patch_version;
static int _vm_build_number;
static unsigned int _data_cache_line_flush_size;
static VirtualizationType _detected_virtualization;
@ -155,6 +156,18 @@ class Abstract_VM_Version: AllStatic {
return _L1_data_cache_line_size;
}
// the size in bytes of a data cache line flushed by a flush
// operation which should be a power of two or zero if cache line
// writeback is not supported by the current os_cpu combination
static unsigned int data_cache_line_flush_size() {
return _data_cache_line_flush_size;
}
// returns true if and only if cache line writeback is supported
static bool supports_data_cache_line_flush() {
return _data_cache_line_flush_size != 0;
}
// ARCH specific policy for the BiasedLocking
static bool use_biased_locking() { return true; }

View File

@ -168,15 +168,16 @@ class Direct$Type$Buffer$RW$$BO$
//
protected Direct$Type$Buffer$RW$(int cap, long addr,
FileDescriptor fd,
Runnable unmapper)
Runnable unmapper,
boolean isSync)
{
#if[rw]
super(-1, 0, cap, cap, fd);
super(-1, 0, cap, cap, fd, isSync);
address = addr;
cleaner = Cleaner.create(this, unmapper);
att = null;
#else[rw]
super(cap, addr, fd, unmapper);
super(cap, addr, fd, unmapper, isSync);
this.isReadOnly = true;
#end[rw]
}

View File

@ -78,18 +78,33 @@ public abstract class MappedByteBuffer
// operations if valid; null if the buffer is not mapped.
private final FileDescriptor fd;
// A flag true if this buffer is mapped against non-volatile
// memory using one of the extended FileChannel.MapMode modes,
// MapMode.READ_ONLY_SYNC or MapMode.READ_WRITE_SYNC and false if
// it is mapped using any of the other modes. This flag only
// determines the behavior of force operations.
private final boolean isSync;
// This should only be invoked by the DirectByteBuffer constructors
//
MappedByteBuffer(int mark, int pos, int lim, int cap, // package-private
FileDescriptor fd)
{
FileDescriptor fd, boolean isSync) {
super(mark, pos, lim, cap);
this.fd = fd;
this.isSync = isSync;
}
MappedByteBuffer(int mark, int pos, int lim, int cap, // package-private
boolean isSync) {
super(mark, pos, lim, cap);
this.fd = null;
this.isSync = isSync;
}
MappedByteBuffer(int mark, int pos, int lim, int cap) { // package-private
super(mark, pos, lim, cap);
this.fd = null;
this.isSync = false;
}
// Returns the distance (in bytes) of the buffer start from the
@ -146,6 +161,23 @@ public abstract class MappedByteBuffer
return address & ~(pageSize - 1);
}
/**
* Tells whether this buffer was mapped against a non-volatile
* memory device by passing one of the sync map modes {@link
* jdk.nio.mapmode.ExtendedMapMode#READ_ONLY_SYNC
* ExtendedMapMode#READ_ONLY_SYNC} or {@link
* jdk.nio.mapmode.ExtendedMapMode#READ_WRITE_SYNC
* ExtendedMapMode#READ_WRITE_SYNC} in the call to {@link
* java.nio.channels.FileChannel#map FileChannel.map} or was
* mapped by passing one of the other map modes.
*
* @return true if the file was mapped using one of the sync map
* modes, otherwise false.
*/
private boolean isSync() {
return isSync;
}
/**
* Tells whether or not this buffer's content is resident in physical
* memory.
@ -168,6 +200,10 @@ public abstract class MappedByteBuffer
if (fd == null) {
return true;
}
// a sync mapped buffer is always loaded
if (isSync()) {
return true;
}
if ((address == 0) || (capacity() == 0))
return true;
long offset = mappingOffset();
@ -192,6 +228,10 @@ public abstract class MappedByteBuffer
if (fd == null) {
return this;
}
// no need to load a sync mapped buffer
if (isSync()) {
return this;
}
if ((address == 0) || (capacity() == 0))
return this;
long offset = mappingOffset();
@ -247,6 +287,9 @@ public abstract class MappedByteBuffer
if (fd == null) {
return this;
}
if (isSync) {
return force(0, limit());
}
if ((address != 0) && (capacity() != 0)) {
long offset = mappingOffset();
force0(fd, mappingAddress(offset), mappingLength(offset));
@ -303,8 +346,14 @@ public abstract class MappedByteBuffer
if ((address != 0) && (limit() != 0)) {
// check inputs
Objects.checkFromIndexSize(index, length, limit());
long offset = mappingOffset(index);
force0(fd, mappingAddress(offset, index), mappingLength(offset, length));
if (isSync) {
// simply force writeback of associated cache lines
Unsafe.getUnsafe().writebackMemory(address + index, length);
} else {
// force writeback via file descriptor
long offset = mappingOffset(index);
force0(fd, mappingAddress(offset, index), mappingLength(offset, length));
}
}
return this;
}

View File

@ -0,0 +1,63 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package jdk.internal.misc;
import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.MethodType;
import java.nio.channels.FileChannel.MapMode;
/**
 * JDK-specific map modes implemented in java.base.
 *
 * <p>Defines the {@code READ_ONLY_SYNC} and {@code READ_WRITE_SYNC}
 * map mode instances used to request a sync mapping of a file on a
 * non-volatile memory device. The public API equivalents are exposed
 * by module jdk.nio.mapmode, which aliases these same instances so
 * that java.base code (e.g. sun.nio.ch.FileChannelImpl) can compare
 * against them directly.
 */
public class ExtendedMapMode {

    // MapMode's constructor is not accessible from this package, so it
    // is captured reflectively with a private lookup into MapMode and
    // used below to mint the new named modes.
    static final MethodHandle MAP_MODE_CONSTRUCTOR;

    static {
        try {
            var lookup = MethodHandles.privateLookupIn(MapMode.class, MethodHandles.lookup());
            var methodType = MethodType.methodType(void.class, String.class);
            MAP_MODE_CONSTRUCTOR = lookup.findConstructor(MapMode.class, methodType);
        } catch (Exception e) {
            // Failure here is a JDK build/configuration error, not a
            // recoverable user condition.
            throw new InternalError(e);
        }
    }

    /** Read-only sync mapping of a file on non-volatile memory. */
    public static final MapMode READ_ONLY_SYNC = newMapMode("READ_ONLY_SYNC");

    /** Read-write sync mapping of a file on non-volatile memory. */
    public static final MapMode READ_WRITE_SYNC = newMapMode("READ_WRITE_SYNC");

    // Invoke the captured MapMode constructor to create a named mode.
    private static MapMode newMapMode(String name) {
        try {
            return (MapMode) MAP_MODE_CONSTRUCTOR.invoke(name);
        } catch (Throwable e) {
            throw new InternalError(e);
        }
    }

    // Static holder class; never instantiated.
    private ExtendedMapMode() { }
}

View File

@ -921,6 +921,101 @@ public final class Unsafe {
checkPointer(null, address);
}
/**
* Ensure writeback of a specified virtual memory address range
* from cache to physical memory. All bytes in the address range
* are guaranteed to have been written back to physical memory on
* return from this call i.e. subsequently executed store
* instructions are guaranteed not to be visible before the
* writeback is completed.
*
* @param address
* the lowest byte address that must be guaranteed written
* back to memory. bytes at lower addresses may also be
* written back.
*
* @param length
* the length in bytes of the region starting at address
* that must be guaranteed written back to memory.
*
* @throws RuntimeException if memory writeback is not supported
* on the current hardware or if the arguments are invalid.
* (<em>Note:</em> after optimization, invalid inputs may
* go undetected, which will lead to unpredictable
* behavior)
*
* @since 14
*/
public void writebackMemory(long address, long length) {
    checkWritebackEnabled();
    checkWritebackMemory(address, length);

    // Barrier ordering writebacks after preceding stores.
    writebackPreSync0();

    // Flush every cache line overlapping [address, address + length),
    // starting from the enclosing line boundary. The flush size is a
    // constant for the current hardware, so it is read once.
    long flushSize = dataCacheLineFlushSize();
    long limit = address + length;
    for (long line = dataCacheLineAlignDown(address); line < limit; line += flushSize) {
        writeback0(line);
    }

    // Barrier ordering writebacks before subsequent stores.
    writebackPostSync0();
}
/**
* Validate the arguments to writebackMemory
*
* @throws RuntimeException if the arguments are invalid
* (<em>Note:</em> after optimization, invalid inputs may
* go undetected, which will lead to unpredictable
* behavior)
*/
private void checkWritebackMemory(long address, long length) {
checkNativeAddress(address);
checkSize(length);
}
/**
* Validate that the current hardware supports memory writeback.
* (<em>Note:</em> this is a belt and braces check. Clients are
* expected to test whether writeback is enabled by calling
* {@link #isWritebackEnabled isWritebackEnabled} and avoid
* calling method {@link #writebackMemory writebackMemory} if it is disabled).
*
*
* @throws RuntimeException if memory writeback is not supported
*/
private void checkWritebackEnabled() {
if (!isWritebackEnabled()) {
throw new RuntimeException("writebackMemory not enabled!");
}
}
/**
* force writeback of an individual cache line.
*
* @param address
* the start address of the cache line to be written back
*/
@HotSpotIntrinsicCandidate
private native void writeback0(long address);
/**
* Serialize writeback operations relative to preceding memory writes.
*/
@HotSpotIntrinsicCandidate
private native void writebackPreSync0();
/**
* Serialize writeback operations relative to following memory writes.
*/
@HotSpotIntrinsicCandidate
private native void writebackPostSync0();
/// random queries
/**
@ -1175,6 +1270,27 @@ public final class Unsafe {
*/
public int pageSize() { return PAGE_SIZE; }
/**
* Reports the size in bytes of a data cache line written back by
* the hardware cache line flush operation available to the JVM or
* 0 if data cache line flushing is not enabled.
*/
public int dataCacheLineFlushSize() { return DATA_CACHE_LINE_FLUSH_SIZE; }
/**
* Rounds down address to a data cache line boundary as
* determined by {@link #dataCacheLineFlushSize}
* @return the rounded down address
*/
public long dataCacheLineAlignDown(long address) {
return (address & ~(DATA_CACHE_LINE_FLUSH_SIZE - 1));
}
/**
* Returns true if data cache line writeback is enabled, i.e. the
* data cache line flush size reported by the JVM is non-zero.
*/
public static boolean isWritebackEnabled() { return DATA_CACHE_LINE_FLUSH_SIZE != 0; }
/// random trusted operations from JNI:
/**

View File

@ -1,4 +1,5 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -94,10 +95,28 @@ final class UnsafeConstants {
static final boolean UNALIGNED_ACCESS;
/**
* The size of an L1 data cache line which will be either a power
* of two or zero.
*
* <p>A non-zero value indicates that writeback to memory is
* enabled for the current processor. The value defines the
* natural alignment and size of any data cache line committed to
* memory by a single writeback operation. If data cache line
* writeback is not enabled for the current hardware the field
* will have value 0.
*
* @implNote
* The actual value for this field is injected by the JVM.
*/
static final int DATA_CACHE_LINE_FLUSH_SIZE;
static {
ADDRESS_SIZE0 = 0;
PAGE_SIZE = 0;
BIG_ENDIAN = false;
UNALIGNED_ACCESS = false;
DATA_CACHE_LINE_FLUSH_SIZE = 0;
}
}

View File

@ -192,6 +192,7 @@ module java.base {
jdk.compiler,
jdk.jfr,
jdk.jshell,
jdk.nio.mapmode,
jdk.scripting.nashorn,
jdk.scripting.nashorn.shell,
jdk.unsupported,

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -46,6 +46,8 @@ import java.nio.channels.WritableByteChannel;
import jdk.internal.access.JavaIOFileDescriptorAccess;
import jdk.internal.access.JavaNioAccess;
import jdk.internal.access.SharedSecrets;
import jdk.internal.misc.ExtendedMapMode;
import jdk.internal.misc.Unsafe;
import jdk.internal.ref.Cleaner;
import jdk.internal.ref.CleanerFactory;
@ -860,20 +862,15 @@ public class FileChannelImpl
// -- Memory-mapped buffers --
private static class Unmapper
private static abstract class Unmapper
implements Runnable
{
// may be required to close file
private static final NativeDispatcher nd = new FileDispatcherImpl();
// keep track of mapped buffer usage
static volatile int count;
static volatile long totalSize;
static volatile long totalCapacity;
private volatile long address;
private final long size;
private final int cap;
protected final long size;
protected final int cap;
private final FileDescriptor fd;
private Unmapper(long address, long size, int cap,
@ -884,12 +881,6 @@ public class FileChannelImpl
this.size = size;
this.cap = cap;
this.fd = fd;
synchronized (Unmapper.class) {
count++;
totalSize += size;
totalCapacity += cap;
}
}
public void run() {
@ -907,7 +898,63 @@ public class FileChannelImpl
}
}
synchronized (Unmapper.class) {
decrementStats();
}
protected abstract void incrementStats();
protected abstract void decrementStats();
}
private static class DefaultUnmapper extends Unmapper {
// keep track of non-sync mapped buffer usage
static volatile int count;
static volatile long totalSize;
static volatile long totalCapacity;
public DefaultUnmapper(long address, long size, int cap,
FileDescriptor fd) {
super(address, size, cap, fd);
incrementStats();
}
protected void incrementStats() {
synchronized (DefaultUnmapper.class) {
count++;
totalSize += size;
totalCapacity += cap;
}
}
protected void decrementStats() {
synchronized (DefaultUnmapper.class) {
count--;
totalSize -= size;
totalCapacity -= cap;
}
}
}
private static class SyncUnmapper extends Unmapper {
// keep track of mapped buffer usage
static volatile int count;
static volatile long totalSize;
static volatile long totalCapacity;
public SyncUnmapper(long address, long size, int cap,
FileDescriptor fd) {
super(address, size, cap, fd);
incrementStats();
}
protected void incrementStats() {
synchronized (SyncUnmapper.class) {
count++;
totalSize += size;
totalCapacity += cap;
}
}
protected void decrementStats() {
synchronized (SyncUnmapper.class) {
count--;
totalSize -= size;
totalCapacity -= cap;
@ -941,18 +988,30 @@ public class FileChannelImpl
throw new IllegalArgumentException("Size exceeds Integer.MAX_VALUE");
int imode;
boolean isSync = false;
if (mode == MapMode.READ_ONLY)
imode = MAP_RO;
else if (mode == MapMode.READ_WRITE)
imode = MAP_RW;
else if (mode == MapMode.PRIVATE)
imode = MAP_PV;
else
else if (mode == ExtendedMapMode.READ_ONLY_SYNC) {
imode = MAP_RO;
isSync = true;
} else if (mode == ExtendedMapMode.READ_WRITE_SYNC) {
imode = MAP_RW;
isSync = true;
} else {
throw new UnsupportedOperationException();
if ((mode != MapMode.READ_ONLY) && !writable)
}
if ((mode != MapMode.READ_ONLY) && mode != ExtendedMapMode.READ_ONLY_SYNC && !writable)
throw new NonWritableChannelException();
if (!readable)
throw new NonReadableChannelException();
// reject SYNC request if writeback is not enabled for this platform
if (isSync && !Unsafe.isWritebackEnabled()) {
throw new UnsupportedOperationException();
}
long addr = -1;
int ti = -1;
@ -990,9 +1049,9 @@ public class FileChannelImpl
// a valid file descriptor is not required
FileDescriptor dummy = new FileDescriptor();
if ((!writable) || (imode == MAP_RO))
return Util.newMappedByteBufferR(0, 0, dummy, null);
return Util.newMappedByteBufferR(0, 0, dummy, null, isSync);
else
return Util.newMappedByteBuffer(0, 0, dummy, null);
return Util.newMappedByteBuffer(0, 0, dummy, null, isSync);
}
pagePosition = (int)(position % allocationGranularity);
@ -1000,7 +1059,7 @@ public class FileChannelImpl
mapSize = size + pagePosition;
try {
// If map0 did not throw an exception, the address is valid
addr = map0(imode, mapPosition, mapSize);
addr = map0(imode, mapPosition, mapSize, isSync);
} catch (OutOfMemoryError x) {
// An OutOfMemoryError may indicate that we've exhausted
// memory so force gc and re-attempt map
@ -1011,7 +1070,7 @@ public class FileChannelImpl
Thread.currentThread().interrupt();
}
try {
addr = map0(imode, mapPosition, mapSize);
addr = map0(imode, mapPosition, mapSize, isSync);
} catch (OutOfMemoryError y) {
// After a second OOME, fail
throw new IOException("Map failed", y);
@ -1032,17 +1091,21 @@ public class FileChannelImpl
assert (IOStatus.checkAll(addr));
assert (addr % allocationGranularity == 0);
int isize = (int)size;
Unmapper um = new Unmapper(addr, mapSize, isize, mfd);
Unmapper um = (isSync
? new SyncUnmapper(addr, mapSize, isize, mfd)
: new DefaultUnmapper(addr, mapSize, isize, mfd));
if ((!writable) || (imode == MAP_RO)) {
return Util.newMappedByteBufferR(isize,
addr + pagePosition,
mfd,
um);
um,
isSync);
} else {
return Util.newMappedByteBuffer(isize,
addr + pagePosition,
mfd,
um);
um,
isSync);
}
} finally {
threads.remove(ti);
@ -1062,15 +1125,40 @@ public class FileChannelImpl
}
@Override
public long getCount() {
return Unmapper.count;
return DefaultUnmapper.count;
}
@Override
public long getTotalCapacity() {
return Unmapper.totalCapacity;
return DefaultUnmapper.totalCapacity;
}
@Override
public long getMemoryUsed() {
return Unmapper.totalSize;
return DefaultUnmapper.totalSize;
}
};
}
/**
* Invoked by sun.management.ManagementFactoryHelper to create the management
* interface for sync mapped buffers.
*/
public static JavaNioAccess.BufferPool getSyncMappedBufferPool() {
return new JavaNioAccess.BufferPool() {
@Override
public String getName() {
return "mapped - 'non-volatile memory'";
}
@Override
public long getCount() {
return SyncUnmapper.count;
}
@Override
public long getTotalCapacity() {
return SyncUnmapper.totalCapacity;
}
@Override
public long getMemoryUsed() {
return SyncUnmapper.totalSize;
}
};
}
@ -1196,7 +1284,7 @@ public class FileChannelImpl
// -- Native methods --
// Creates a new mapping
private native long map0(int prot, long position, long length)
private native long map0(int prot, long position, long length, boolean isSync)
throws IOException;
// Removes an existing mapping

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -415,7 +415,8 @@ public class Util {
new Class<?>[] { int.class,
long.class,
FileDescriptor.class,
Runnable.class });
Runnable.class,
boolean.class });
ctor.setAccessible(true);
directByteBufferConstructor = ctor;
} catch (ClassNotFoundException |
@ -430,7 +431,8 @@ public class Util {
static MappedByteBuffer newMappedByteBuffer(int size, long addr,
FileDescriptor fd,
Runnable unmapper)
Runnable unmapper,
boolean isSync)
{
MappedByteBuffer dbb;
if (directByteBufferConstructor == null)
@ -440,7 +442,8 @@ public class Util {
new Object[] { size,
addr,
fd,
unmapper });
unmapper,
isSync});
} catch (InstantiationException |
IllegalAccessException |
InvocationTargetException e) {
@ -460,7 +463,8 @@ public class Util {
new Class<?>[] { int.class,
long.class,
FileDescriptor.class,
Runnable.class });
Runnable.class,
boolean.class });
ctor.setAccessible(true);
directByteBufferRConstructor = ctor;
} catch (ClassNotFoundException |
@ -475,7 +479,8 @@ public class Util {
static MappedByteBuffer newMappedByteBufferR(int size, long addr,
FileDescriptor fd,
Runnable unmapper)
Runnable unmapper,
boolean isSync)
{
MappedByteBuffer dbb;
if (directByteBufferRConstructor == null)
@ -485,7 +490,8 @@ public class Util {
new Object[] { size,
addr,
fd,
unmapper });
unmapper,
isSync});
} catch (InstantiationException |
IllegalAccessException |
InvocationTargetException e) {

View File

@ -48,6 +48,7 @@
#include "nio_util.h"
#include "sun_nio_ch_FileChannelImpl.h"
#include "java_lang_Integer.h"
#include <assert.h>
static jfieldID chan_fd; /* jobject 'fd' in sun.nio.ch.FileChannelImpl */
@ -73,7 +74,7 @@ handle(JNIEnv *env, jlong rv, char *msg)
JNIEXPORT jlong JNICALL
Java_sun_nio_ch_FileChannelImpl_map0(JNIEnv *env, jobject this,
jint prot, jlong off, jlong len)
jint prot, jlong off, jlong len, jboolean map_sync)
{
void *mapAddress = 0;
jobject fdo = (*env)->GetObjectField(env, this, chan_fd);
@ -81,6 +82,9 @@ Java_sun_nio_ch_FileChannelImpl_map0(JNIEnv *env, jobject this,
int protections = 0;
int flags = 0;
// should never be called with map_sync and prot == PRIVATE
assert((prot != sun_nio_ch_FileChannelImpl_MAP_PV) || !map_sync);
if (prot == sun_nio_ch_FileChannelImpl_MAP_RO) {
protections = PROT_READ;
flags = MAP_SHARED;
@ -92,6 +96,33 @@ Java_sun_nio_ch_FileChannelImpl_map0(JNIEnv *env, jobject this,
flags = MAP_PRIVATE;
}
// if MAP_SYNC and MAP_SHARED_VALIDATE are not defined then it is
// best to define them here. This ensures the code compiles on old
// OS releases which do not provide the relevant headers. If run
// on the same machine then it will work if the kernel contains
// the necessary support otherwise mmap should fail with an
// invalid argument error
#ifndef MAP_SYNC
#define MAP_SYNC 0x80000
#endif
#ifndef MAP_SHARED_VALIDATE
#define MAP_SHARED_VALIDATE 0x03
#endif
if (map_sync) {
// ensure
// 1) this is Linux on AArch64 or x86_64
// 2) the mmap APIs are available/ at compile time
#if !defined(LINUX) || ! (defined(aarch64) || (defined(amd64) && defined(_LP64)))
// TODO - implement for solaris/AIX/BSD/WINDOWS and for 32 bit
JNU_ThrowInternalError(env, "should never call map on platform where MAP_SYNC is unimplemented");
return IOS_THROWN;
#else
flags |= MAP_SYNC | MAP_SHARED_VALIDATE;
#endif
}
mapAddress = mmap64(
0, /* Let OS decide location */
len, /* Number of bytes to map */
@ -101,6 +132,11 @@ Java_sun_nio_ch_FileChannelImpl_map0(JNIEnv *env, jobject this,
off); /* Offset into file */
if (mapAddress == MAP_FAILED) {
if (map_sync && errno == ENOTSUP) {
JNU_ThrowIOExceptionWithLastError(env, "map with mode MAP_SYNC unsupported");
return IOS_THROWN;
}
if (errno == ENOMEM) {
JNU_ThrowOutOfMemoryError(env, "Map failed");
return IOS_THROWN;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -60,7 +60,7 @@ Java_sun_nio_ch_FileChannelImpl_initIDs(JNIEnv *env, jclass clazz)
JNIEXPORT jlong JNICALL
Java_sun_nio_ch_FileChannelImpl_map0(JNIEnv *env, jobject this,
jint prot, jlong off, jlong len)
jint prot, jlong off, jlong len, jboolean map_sync)
{
void *mapAddress = 0;
jint lowOffset = (jint)off;
@ -87,6 +87,11 @@ Java_sun_nio_ch_FileChannelImpl_map0(JNIEnv *env, jobject this,
mapAccess = FILE_MAP_COPY;
}
if (map_sync) {
JNU_ThrowInternalError(env, "should never call map on platform where MAP_SYNC is unimplemented");
return IOS_THROWN;
}
mapping = CreateFileMapping(
fileHandle, /* Handle of file */
NULL, /* Not inheritable */

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -345,6 +345,8 @@ public class ManagementFactoryHelper {
.getDirectBufferPool()));
bufferPools.add(createBufferPoolMXBean(sun.nio.ch.FileChannelImpl
.getMappedBufferPool()));
bufferPools.add(createBufferPoolMXBean(sun.nio.ch.FileChannelImpl
.getSyncMappedBufferPool()));
}
return bufferPools;
}

View File

@ -409,6 +409,10 @@ public class CheckGraalIntrinsics extends GraalTest {
"java/lang/Math.max(FF)F",
"java/lang/Math.min(DD)D",
"java/lang/Math.min(FF)F");
add(toBeInvestigated,
"jdk/internal/misc/Unsafe.writeback0(J)V",
"jdk/internal/misc/Unsafe.writebackPostSync0()V",
"jdk/internal/misc/Unsafe.writebackPreSync0()V");
}
if (!config.inlineNotify()) {

View File

@ -0,0 +1,70 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package jdk.nio.mapmode;
import java.nio.channels.FileChannel.MapMode;
/**
 * JDK-specific map modes.
 *
 * <p>These modes alias the instances defined internally in java.base
 * (jdk.internal.misc.ExtendedMapMode) so that
 * {@code FileChannelImpl.map} recognizes them by identity.
 *
 * @since 14
 * @see java.nio.channels.FileChannel#map
 */
public class ExtendedMapMode {
    private ExtendedMapMode() { }

    /**
     * File mapping mode for a read-only mapping of a file backed by
     * non-volatile RAM.
     *
     * <p> The {@linkplain FileChannel#map map} method throws
     * {@linkplain UnsupportedOperationException} when this map mode
     * is used on an implementation that does not support it.
     *
     * @implNote On Linux, the {@code MAP_SYNC} and {@code
     * MAP_SHARED_VALIDATE} flags are specified to {@code mmap} when
     * mapping the file into memory.
     */
    public static final MapMode READ_ONLY_SYNC = jdk.internal.misc.ExtendedMapMode.READ_ONLY_SYNC;

    /**
     * File mapping mode for a read-write mapping of a file backed by
     * non-volatile RAM. {@linkplain MappedByteBuffer#force force}
     * operations on a buffer created with this mode will be performed
     * using cache line writeback rather than proceeding via a file
     * device flush.
     *
     * <p> The {@linkplain FileChannel#map map} method throws
     * {@linkplain UnsupportedOperationException} when this map mode
     * is used on an implementation that does not support it.
     *
     * @implNote On Linux, the {@code MAP_SYNC} and {@code
     * MAP_SHARED_VALIDATE} flags are specified to {@code mmap} when
     * mapping the file into memory.
     */
    public static final MapMode READ_WRITE_SYNC = jdk.internal.misc.ExtendedMapMode.READ_WRITE_SYNC;
}

View File

@ -0,0 +1,35 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/**
* Defines JDK-specific file mapping modes.
*
* @moduleGraph
* @since 14
*/
module jdk.nio.mapmode {
    // Single public package holding ExtendedMapMode (READ_ONLY_SYNC / READ_WRITE_SYNC)
    exports jdk.nio.mapmode;
}

View File

@ -0,0 +1,81 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/* @test
* @summary Test failure paths for MAP_SYNC FileChannel.map of non-DAX files
* @modules java.base/jdk.internal.misc
* @run main MapSyncFail true
* @run main MapSyncFail false
*/
import java.io.*;
import java.nio.*;
import java.util.*;
import java.nio.channels.*;
import jdk.nio.mapmode.*;
import jdk.internal.misc.Unsafe;
public class MapSyncFail {

    public static final int K = 1024;

    /**
     * Attempts a MAP_SYNC {@code FileChannel.map} of a temp file in a
     * directory assumed not to be a DAX file system and verifies that
     * the expected exception is thrown: {@code IOException} when cache
     * writeback is enabled for the current os/cpu combination (the mmap
     * is attempted and fails), otherwise
     * {@code UnsupportedOperationException} (the mmap is not attempted).
     *
     * @param args exactly one argument: "true" for a read-write sync
     *        mapping, "false" for a read-only sync mapping
     * @throws Exception if the argument is missing/invalid or the map
     *         call does not fail in the expected way
     */
    public static void main(String[] args) throws Exception {
        // reject anything other than a literal "true" or "false";
        // Boolean.valueOf alone would silently treat any other string
        // as false, contradicting the error message below
        if (args.length != 1
            || !(args[0].equals("true") || args[0].equals("false"))) {
            throw new Exception("Expected true or false as argument");
        }
        boolean is_rw = Boolean.parseBoolean(args[0]);
        FileChannel.MapMode mode = (is_rw
                                    ? ExtendedMapMode.READ_WRITE_SYNC
                                    : ExtendedMapMode.READ_ONLY_SYNC);
        // it is assumed that /tmp is not a DAX file system
        File file = File.createTempFile("MapSyncFail", null);
        file.deleteOnExit();
        long filesize = (8 * K);
        try (RandomAccessFile raf = new RandomAccessFile(file, "rw")) {
            raf.setLength(filesize);
            // the channel is closed when raf is closed by try-with-resources
            FileChannel fc = raf.getChannel();
            MappedByteBuffer mbb = fc.map(mode, 0, filesize);
        } catch (IOException ioe) {
            // when writeback is enabled for the current os/cpu
            // combination the underlying mmap should be attempted and
            // the map call should fail with IOException
            if (!Unsafe.isWritebackEnabled()) {
                throw new Exception("IOException not expected");
            }
            System.out.println("caught " + ioe);
            ioe.printStackTrace();
            return;
        } catch (UnsupportedOperationException uoe) {
            // when writeback is not enabled for the current os/cpu
            // combination the mmap should not be attempted and the
            // map call should fail with UnsupportedOperationException
            if (Unsafe.isWritebackEnabled()) {
                throw new Exception("UnsupportedOperationException not expected");
            }
            System.out.println("caught " + uoe);
            uoe.printStackTrace();
            return;
        }
        throw new Exception("expected IOException or UnsupportedOperationException");
    }
}

View File

@ -0,0 +1,154 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* This test is manually run because it requires an NVRAM device to be
* mapped as DAX file system or, at least, to be simulated by a
* volatile RAM mapped file system. Also, on AArch64 it requires an
* ARMV8.2 CPU which implements the dc CVAP instruction (CPU feature
* dcpop) and an OS that makes it available from user space.
*
* If the test runs on such a host without throwing an exception then
* that confirms that NVRAM-backed byte buffers can be allocated,
* updated and forced via cache line writeback.
*/
/*
* How to run this test:
*
* Ideally this test should be run on a x86_64/amd64 or aarch64 host
* fitted with an NVRAM memory device. The NVRAM should appear as
* /dev/pmem0 or some equivalent DAX file device. The file device
* should be mounted at /mnt/pmem with a directory tmp created
* directly under that mount point with a+rwx access.
*
* It is possible to run the test on x86_64 using a volatile RAM
* backed device to simulate NVRAM, even though this does not provide
* any guarantee of persistence of data across program runs. For the
* latter case the following instructions explain how to set up the
* simulated NVRAM device.
*
* https://developers.redhat.com/blog/2016/12/05/configuring-and-using-persistent-memory-rhel-7-3/
* https://nvdimm.wiki.kernel.org/
* TL;DR: add "memmap=1G!4G" to /etc/default/grub,
* then grub2-mkconfig -o /boot/grub2/grub.cfg and reboot
*
* ndctl create-namespace * -f -e namespace0.0 -m memory -M mem
* mkdir /mnt/pmem
* mkfs.xfs -f /dev/pmem0; mount -o dax /dev/pmem0 /mnt/pmem/
* mkdir /mnt/pmem/test; chmod a+rwx /mnt/pmem/test
*
* Now run the test program
*
* java PmemTest
*
* or
*
* make test TEST=jdk/java/nio/MappedByteBuffer/PmemTest.java
*/
/* @test
* @summary Testing NVRAM mapped byte buffer support
* @run main/manual PmemTest
* @requires (os.family == "linux")
* @requires ((os.arch == "x86_64")|(os.arch == "amd64")|(os.arch == "aarch64"))
*/
import java.io.File;
import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import java.util.EnumSet;
import java.util.List;
import jdk.nio.mapmode.ExtendedMapMode;
import java.lang.management.ManagementFactory;
import java.lang.management.BufferPoolMXBean;
public class PmemTest {

    public static final int K = 1024;
    public static final int NUM_KBS = 16;

    /**
     * Maps a NUM_KBS * K byte region of a file on an (assumed) DAX
     * file system with READ_WRITE_SYNC, repeatedly increments and
     * forces 1K windows of the buffer, and dumps the mapped buffer
     * pool MXBean stats before and after the update loop.
     */
    public static void main(String[] args) throws Exception {

        System.out.println("test");

        String dir = "/tmp"; // mapSync should fail
        dir = "/mnt/pmem/test"; // mapSync should work, since fs mount is -o dax

        Path path = new File(dir, "pmemtest").toPath();

        // try-with-resources ensures the channel is closed on all
        // paths (the original leaked it); the mapping created below
        // remains valid after the channel is closed
        try (FileChannel fileChannel = (FileChannel) Files
                .newByteChannel(path, EnumSet.of(
                        StandardOpenOption.READ,
                        StandardOpenOption.WRITE,
                        StandardOpenOption.CREATE))) {

            MappedByteBuffer mappedByteBuffer = fileChannel.map(ExtendedMapMode.READ_WRITE_SYNC, 0, NUM_KBS * K);

            dumpBufferPoolBeans();

            // for (int loops = 0; loops < 1000; loops++) {
            for (int loops = 0; loops < 100; loops++) {
                // cycle the 1K window being updated through the mapped region
                int base = K * (loops % NUM_KBS);
                for (int i = 0; i < K; i++) {
                    for (int j = 0; j < K; j++) {
                        testBuffer(mappedByteBuffer, base, (i << 3) + j);
                        commitBuffer(mappedByteBuffer, base);
                    }
                }
            }

            dumpBufferPoolBeans();
        }
    }

    /**
     * Increments 8 consecutive bytes of the buffer starting at offset
     * start (mod K) within the 1K window at base, wrapping inside the
     * window.
     */
    public static void testBuffer(MappedByteBuffer mappedByteBuffer, int base, int start) {
        for (int k = 0; k < 8; k++) {
            int idx = (start + k) % K;
            byte z = mappedByteBuffer.get(base + idx);
            z++;
            mappedByteBuffer.put(base + idx, z);
        }
    }

    /**
     * Forces the 1K window of the buffer at base to its backing
     * storage; for a MAP_SYNC mapping this is expected to proceed via
     * cache line writeback (see the test header comment).
     */
    public static void commitBuffer(MappedByteBuffer mappedByteBuffer, int base)
    {
        mappedByteBuffer.force(base, K);
    }

    // Print name/count/totalCapacity/memoryUsed for every platform
    // buffer pool MXBean (includes the mapped buffer pools).
    public static void dumpBufferPoolBeans()
    {
        List<BufferPoolMXBean> beansList = ManagementFactory.getPlatformMXBeans(BufferPoolMXBean.class);
        for (BufferPoolMXBean bean : beansList) {
            System.out.println("BufferPoolMXBean {" +
                               "\n\tname: " + bean.getName() +
                               "\n\tcount: " + bean.getCount() +
                               "\n\ttotalCapacity: " + bean.getTotalCapacity() +
                               "\n\tmemoryUsed: " + bean.getMemoryUsed() +
                               "\n}");
        }
    }
}