Phil Race 2018-06-12 15:14:22 -07:00
commit aeed1525ab
415 changed files with 31263 additions and 568 deletions

@@ -34,6 +34,9 @@ _INITSUPPORT_GMK := 1
ifeq ($(HAS_SPEC),)
# COMMA is defined in spec.gmk, but that is not included yet
COMMA := ,
# Include the corresponding closed file, if present.
ifneq ($(CUSTOM_MAKE_DIR), )
-include $(CUSTOM_MAKE_DIR)/InitSupport.gmk
@@ -531,8 +534,6 @@ endif # HAS_SPEC
define ParseLogOption
ifneq ($$(findstring $1, $$(LOG)),)
override $2 := true
# COMMA is defined in spec.gmk, but that is not included yet
COMMA := ,
# First try to remove ",<option>" if it exists, otherwise just remove "<option>"
LOG_STRIPPED := $$(subst $1,, $$(subst $$(COMMA)$$(strip $1),, $$(LOG)))
# We might have ended up with a leading comma. Remove it. Need override
@@ -550,8 +551,6 @@ define ParseLogValue
# Make words out of the comma-separated list and find the one with opt=val
value := $$(strip $$(subst $$(strip $1)=,, $$(filter $$(strip $1)=%, $$(subst $$(COMMA), , $$(LOG)))))
override $2 := $$(value)
# COMMA is defined in spec.gmk, but that is not included yet
COMMA := ,
# First try to remove ",<option>" if it exists, otherwise just remove "<option>"
LOG_STRIPPED := $$(subst $$(strip $1)=$$(value),, \
$$(subst $$(COMMA)$$(strip $1)=$$(value),, $$(LOG)))
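Note: the two hunks above deduplicate the COMMA definition by hoisting it next to the HAS_SPEC check. For readers not fluent in the nested $(subst ...) calls, here is a minimal C++ mirror (illustration only, names are mine) of what the LOG_STRIPPED computation does; like make's subst, the helper removes every occurrence:

    #include <string>

    // Mirror of: first try to remove ",<option>" if it exists, otherwise just
    // remove "<option>", then drop the leading comma left behind when the
    // option was the first list element.
    static std::string strip_option(std::string log, const std::string& opt) {
      auto drop_all = [&log](const std::string& needle) {
        for (size_t p = 0; (p = log.find(needle)) != std::string::npos; ) {
          log.erase(p, needle.size());
        }
      };
      drop_all("," + opt);  // ",<option>" if present
      drop_all(opt);        // otherwise just "<option>"
      if (!log.empty() && log[0] == ',') {
        log.erase(0, 1);    // we might have ended up with a leading comma
      }
      return log;
    }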

@@ -25,7 +25,7 @@
# All valid JVM features, regardless of platform
VALID_JVM_FEATURES="compiler1 compiler2 zero minimal dtrace jvmti jvmci \
graal vm-structs jni-check services management cmsgc g1gc parallelgc serialgc nmt cds \
graal vm-structs jni-check services management cmsgc epsilongc g1gc parallelgc serialgc zgc nmt cds \
static-build link-time-opt aot jfr"
# Deprecated JVM features (these are ignored, but with a warning)
@@ -328,6 +328,19 @@ AC_DEFUN_ONCE([HOTSPOT_SETUP_JVM_FEATURES],
fi
fi
# Only enable ZGC on Linux x86_64
AC_MSG_CHECKING([if zgc should be built])
if HOTSPOT_CHECK_JVM_FEATURE(zgc); then
if test "x$OPENJDK_TARGET_OS" = "xlinux" && test "x$OPENJDK_TARGET_CPU" = "xx86_64"; then
AC_MSG_RESULT([yes])
else
DISABLED_JVM_FEATURES="$DISABLED_JVM_FEATURES zgc"
AC_MSG_RESULT([no, platform not supported])
fi
else
AC_MSG_RESULT([no])
fi
# Turn on additional features based on other parts of configure
if test "x$INCLUDE_DTRACE" = "xtrue"; then
JVM_FEATURES="$JVM_FEATURES dtrace"
@@ -410,7 +423,7 @@ AC_DEFUN_ONCE([HOTSPOT_SETUP_JVM_FEATURES],
fi
# All variants but minimal (and custom) get these features
NON_MINIMAL_FEATURES="$NON_MINIMAL_FEATURES cmsgc g1gc parallelgc serialgc jni-check jvmti management nmt services vm-structs"
NON_MINIMAL_FEATURES="$NON_MINIMAL_FEATURES cmsgc g1gc parallelgc serialgc epsilongc jni-check jvmti management nmt services vm-structs"
if test "x$ENABLE_CDS" = "xtrue"; then
NON_MINIMAL_FEATURES="$NON_MINIMAL_FEATURES cds"
fi

@@ -858,7 +858,12 @@ ifeq ($(DEBUG_LEVEL), fastdebug)
else ifneq ($(DEBUG_LEVEL), release)
DEBUG_PART := -$(DEBUG_LEVEL)
endif
JDK_BUNDLE_NAME := jdk-$(BASE_NAME)_bin$(DEBUG_PART).tar.gz
ifeq ($(OPENJDK_TARGET_OS), windows)
JDK_BUNDLE_EXTENSION := zip
else
JDK_BUNDLE_EXTENSION := tar.gz
endif
JDK_BUNDLE_NAME := jdk-$(BASE_NAME)_bin$(DEBUG_PART).$(JDK_BUNDLE_EXTENSION)
JDK_SYMBOLS_BUNDLE_NAME := jdk-$(BASE_NAME)_bin$(DEBUG_PART)-symbols.tar.gz
TEST_DEMOS_BUNDLE_NAME := jdk-$(BASE_NAME)_bin-tests-demos$(DEBUG_PART).tar.gz
TEST_BUNDLE_NAME := jdk-$(BASE_NAME)_bin-tests$(DEBUG_PART).tar.gz

@@ -272,13 +272,14 @@ var getJibProfilesCommon = function (input, data) {
*/
common.main_profile_artifacts = function (o) {
var jdk_subdir = (o.jdk_subdir != null ? o.jdk_subdir : "jdk-" + data.version);
var jdk_suffix = (o.jdk_suffix != null ? o.jdk_suffix : "tar.gz");
var pf = o.platform
return {
artifacts: {
jdk: {
local: "bundles/\\(jdk.*bin.tar.gz\\)",
local: "bundles/\\(jdk.*bin." + jdk_suffix + "\\)",
remote: [
"bundles/" + pf + "/jdk-" + data.version + "_" + pf + "_bin.tar.gz",
"bundles/" + pf + "/jdk-" + data.version + "_" + pf + "_bin." + jdk_suffix,
"bundles/" + pf + "/\\1"
],
subdir: jdk_subdir,
@@ -320,13 +321,14 @@ var getJibProfilesCommon = function (input, data) {
*/
common.debug_profile_artifacts = function (o) {
var jdk_subdir = "jdk-" + data.version + "/fastdebug";
var jdk_suffix = (o.jdk_suffix != null ? o.jdk_suffix : "tar.gz");
var pf = o.platform
return {
artifacts: {
jdk: {
local: "bundles/\\(jdk.*bin-debug.tar.gz\\)",
local: "bundles/\\(jdk.*bin-debug." + jdk_suffix + "\\)",
remote: [
"bundles/" + pf + "/jdk-" + data.version + "_" + pf + "_bin-debug.tar.gz",
"bundles/" + pf + "/jdk-" + data.version + "_" + pf + "_bin-debug." + jdk_suffix,
"bundles/" + pf + "/\\1"
],
subdir: jdk_subdir,
@@ -590,9 +592,11 @@ var getJibProfilesProfiles = function (input, common, data) {
},
"windows-x64": {
platform: "windows-x64",
jdk_suffix: "zip",
},
"windows-x86": {
platform: "windows-x86",
jdk_suffix: "zip",
},
"linux-aarch64": {
platform: "linux-aarch64",
@@ -690,6 +694,14 @@ var getJibProfilesProfiles = function (input, common, data) {
profiles[openName].artifacts["jdk"].remote));
});
// Enable ZGC in linux-x64-open builds
[ "linux-x64-open" ].forEach(function (name) {
var configureArgs = { configure_args: [ "--with-jvm-features=zgc" ] };
var debugName = name + common.debug_suffix;
profiles[name] = concatObjects(profiles[name], configureArgs);
profiles[debugName] = concatObjects(profiles[debugName], configureArgs);
});
// Profiles used to run tests. Used in JPRT and Mach 5.
var testOnlyProfiles = {
"run-test-jprt": {

@@ -1997,8 +1997,7 @@ JDWP "Java(tm) Debug Wire Protocol"
)
)
(Command Stop=10
"Stops the thread with an asynchronous exception, as if done by "
"java.lang.Thread.stop "
"Stops the thread with an asynchronous exception. "
(Out
(threadObject thread "The thread object ID. ")
(object throwable "Asynchronous exception. This object must "

@@ -155,6 +155,16 @@ ifneq ($(call check-jvm-feature, serialgc), true)
JVM_EXCLUDE_FILES += psMarkSweep.cpp psMarkSweepDecorator.cpp
endif
ifneq ($(call check-jvm-feature, epsilongc), true)
JVM_CFLAGS_FEATURES += -DINCLUDE_EPSILONGC=0
JVM_EXCLUDE_PATTERNS += gc/epsilon
endif
ifneq ($(call check-jvm-feature, zgc), true)
JVM_CFLAGS_FEATURES += -DINCLUDE_ZGC=0
JVM_EXCLUDE_PATTERNS += gc/z
endif
ifneq ($(call check-jvm-feature, jfr), true)
JVM_CFLAGS_FEATURES += -DINCLUDE_JFR=0
JVM_EXCLUDE_PATTERNS += jfr
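Note: each disabled feature both defines INCLUDE_<FEATURE>=0 and excludes the matching source tree. On the C++ side the define is consumed with the usual HotSpot guard pattern, as the stubGenerator_x86_64.cpp hunk further down does. A standalone sketch of that pattern (in HotSpot itself, utilities/macros.hpp does the defaulting; the function name here is mine):

    // Default the flag to 1 unless the build passes -DINCLUDE_ZGC=0
    #ifndef INCLUDE_ZGC
    #define INCLUDE_ZGC 1
    #endif

    #if INCLUDE_ZGC
    // ZGC-only includes and declarations would go here
    #endif

    void zgc_guard_example() {
    #if INCLUDE_ZGC
      // Compiled only when the zgc feature is enabled at build time;
      // with -DINCLUDE_ZGC=0 this block disappears entirely.
    #endif
    }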

@@ -347,7 +347,8 @@ else ifeq ($(OPENJDK_TARGET_OS), aix)
EXCLUDE_FILES := $(LIBJLI_EXCLUDE_FILES), \
EXTRA_FILES := $(LIBJLI_EXTRA_FILES), \
OPTIMIZATION := HIGH, \
CFLAGS := $(STATIC_LIBRARY_FLAGS) $(LIBJLI_CFLAGS_JDKLIB) $(LIBJLI_CFLAGS), \
CFLAGS := $(STATIC_LIBRARY_FLAGS) $(LIBJLI_CFLAGS_JDKLIB) $(LIBJLI_CFLAGS) \
$(addprefix -I, $(LIBJLI_SRC_DIRS)), \
ARFLAGS := $(ARFLAGS), \
OBJECT_DIR := $(SUPPORT_OUTPUTDIR)/native/$(MODULE)/libjli_static))

@@ -73,19 +73,20 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) {
}
if (_index->is_cpu_register()) {
__ mov(r22, _index->as_register());
__ mov(rscratch1, _index->as_register());
} else {
__ mov(r22, _index->as_jint());
__ mov(rscratch1, _index->as_jint());
}
Runtime1::StubID stub_id;
if (_throw_index_out_of_bounds_exception) {
stub_id = Runtime1::throw_index_exception_id;
} else {
assert(_array != NULL, "sanity");
__ mov(r23, _array->as_pointer_register());
__ mov(rscratch2, _array->as_pointer_register());
stub_id = Runtime1::throw_range_check_failed_id;
}
__ far_call(RuntimeAddress(Runtime1::entry_for(stub_id)), NULL, rscratch2);
__ lea(lr, RuntimeAddress(Runtime1::entry_for(stub_id)));
__ blr(lr);
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
debug_only(__ should_not_reach_here());

@@ -323,7 +323,7 @@ void Runtime1::initialize_pd() {
// target: the entry point of the method that creates and posts the exception oop
// has_argument: true if the exception needs arguments (passed in r22 and r23)
// has_argument: true if the exception needs arguments (passed in rscratch1 and rscratch2)
OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
// make a frame and preserve the caller's caller-save registers
@@ -332,7 +332,9 @@ OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address targe
if (!has_argument) {
call_offset = __ call_RT(noreg, noreg, target);
} else {
call_offset = __ call_RT(noreg, noreg, target, r22, r23);
__ mov(c_rarg1, rscratch1);
__ mov(c_rarg2, rscratch2);
call_offset = __ call_RT(noreg, noreg, target);
}
OopMapSet* oop_maps = new OopMapSet();
oop_maps->add_gc_map(call_offset, oop_map);

@@ -56,6 +56,15 @@ void BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators,
}
break;
}
case T_BOOLEAN: __ load_unsigned_byte (dst, src); break;
case T_BYTE: __ load_signed_byte (dst, src); break;
case T_CHAR: __ load_unsigned_short(dst, src); break;
case T_SHORT: __ load_signed_short (dst, src); break;
case T_INT: __ ldrw (dst, src); break;
case T_LONG: __ ldr (dst, src); break;
case T_ADDRESS: __ ldr (dst, src); break;
case T_FLOAT: __ ldrs (v0, src); break;
case T_DOUBLE: __ ldrd (v0, src); break;
default: Unimplemented();
}
}
@@ -84,6 +93,18 @@ void BarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators
}
break;
}
case T_BOOLEAN:
__ andw(val, val, 0x1); // boolean is true if LSB is 1
__ strb(val, dst);
break;
case T_BYTE: __ strb(val, dst); break;
case T_CHAR: __ strh(val, dst); break;
case T_SHORT: __ strh(val, dst); break;
case T_INT: __ strw(val, dst); break;
case T_LONG: __ str (val, dst); break;
case T_ADDRESS: __ str (val, dst); break;
case T_FLOAT: __ strs(v0, dst); break;
case T_DOUBLE: __ strd(v0, dst); break;
default: Unimplemented();
}
}

@@ -268,9 +268,6 @@ void InterpreterMacroAssembler::get_method_counters(Register method,
void InterpreterMacroAssembler::load_resolved_reference_at_index(
Register result, Register index, Register tmp) {
assert_different_registers(result, index);
// convert from field index to resolved_references() index and from
// word index to byte offset. Since this is a java object, it can be compressed
lslw(index, index, LogBytesPerHeapOop);
get_constant_pool(result);
// load pointer for resolved_references[] objArray
@@ -278,8 +275,8 @@
ldr(result, Address(result, ConstantPoolCache::resolved_references_offset_in_bytes()));
resolve_oop_handle(result, tmp);
// Add in the index
add(result, result, index);
load_heap_oop(result, Address(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
add(index, index, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
load_heap_oop(result, Address(result, index, Address::uxtw(LogBytesPerHeapOop)));
}
void InterpreterMacroAssembler::load_resolved_klass_at_offset(
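Note: this rewrite (and the matching TemplateTable hunks below) folds the array base offset into the element index instead of materializing an intermediate address with lea, so the final access is expressed as a single base-plus-scaled-index Address, the form the access API takes. The two sequences reach the same byte whenever the base offset is a multiple of the element size; a self-contained check with stand-in constants:

    #include <cassert>
    #include <cstdint>

    int main() {
      const unsigned s = 2;           // LogBytesPerHeapOop with compressed oops
      const uintptr_t base_off = 16;  // stand-in for arrayOopDesc::base_offset_in_bytes(T_OBJECT)
      const uintptr_t array = 0x1000, index = 5;

      // Old sequence: lea(tmp, array + (index << s)), then load [tmp + base_off]
      const uintptr_t old_addr = (array + (index << s)) + base_off;

      // New sequence: index += base_off >> s, then load [array + (index << s)]
      const uintptr_t new_addr = array + ((index + (base_off >> s)) << s);

      assert(old_addr == new_addr);   // exact because base_off % (1u << s) == 0
      return 0;
    }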

@@ -2113,7 +2113,6 @@ void MacroAssembler::verify_heapbase(const char* msg) {
#endif
void MacroAssembler::resolve_jobject(Register value, Register thread, Register tmp) {
BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
Label done, not_weak;
cbz(value, done); // Use NULL as-is.
@@ -2121,15 +2120,15 @@ void MacroAssembler::resolve_jobject(Register value, Register thread, Register t
tbz(r0, 0, not_weak); // Test for jweak tag.
// Resolve jweak.
bs->load_at(this, IN_ROOT | ON_PHANTOM_OOP_REF, T_OBJECT,
value, Address(value, -JNIHandles::weak_tag_value), tmp, thread);
access_load_at(T_OBJECT, IN_ROOT | ON_PHANTOM_OOP_REF, value,
Address(value, -JNIHandles::weak_tag_value), tmp, thread);
verify_oop(value);
b(done);
bind(not_weak);
// Resolve (untagged) jobject.
bs->load_at(this, IN_CONCURRENT_ROOT, T_OBJECT,
value, Address(value, 0), tmp, thread);
access_load_at(T_OBJECT, IN_CONCURRENT_ROOT, value, Address(value, 0), tmp,
thread);
verify_oop(value);
bind(done);
}
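Note: the Address(value, -JNIHandles::weak_tag_value) operand works because jweak references are distinguished from ordinary jobjects by a low-order tag bit, which the tbz(r0, 0, not_weak) test above inspects. A sketch of the scheme (that weak_tag_value is 1 is an assumption stated here, not something visible in the hunk):

    #include <cstdint>

    const uintptr_t weak_tag_value = 1;  // assumed low-bit tag on jweak handles

    bool is_jweak(uintptr_t handle)      { return (handle & weak_tag_value) != 0; }
    uintptr_t untagged(uintptr_t handle) { return handle - weak_tag_value; }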
@@ -3664,9 +3663,8 @@ void MacroAssembler::load_klass(Register dst, Register src) {
// ((OopHandle)result).resolve();
void MacroAssembler::resolve_oop_handle(Register result, Register tmp) {
// OopHandle::resolve is an indirection.
BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
bs->load_at(this, IN_CONCURRENT_ROOT, T_OBJECT,
result, Address(result, 0), tmp, rthread);
access_load_at(T_OBJECT, IN_CONCURRENT_ROOT,
result, Address(result, 0), tmp, noreg);
}
void MacroAssembler::load_mirror(Register dst, Register method, Register tmp) {

@@ -141,7 +141,7 @@ void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
__ verify_oop(method_temp);
__ load_heap_oop(method_temp, Address(method_temp, NONZERO(java_lang_invoke_MemberName::method_offset_in_bytes())), temp2);
__ verify_oop(method_temp);
__ ldr(method_temp, Address(method_temp, NONZERO(java_lang_invoke_ResolvedMethodName::vmtarget_offset_in_bytes())));
__ access_load_at(T_ADDRESS, IN_HEAP, method_temp, Address(method_temp, NONZERO(java_lang_invoke_ResolvedMethodName::vmtarget_offset_in_bytes())), noreg, noreg);
if (VerifyMethodHandles && !for_compiler_entry) {
// make sure recv is already on stack
@@ -340,7 +340,7 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
verify_ref_kind(_masm, JVM_REF_invokeSpecial, member_reg, temp3);
}
__ load_heap_oop(rmethod, member_vmtarget);
__ ldr(rmethod, vmtarget_method);
__ access_load_at(T_ADDRESS, IN_HEAP, rmethod, vmtarget_method, noreg, noreg);
break;
case vmIntrinsics::_linkToStatic:
@@ -348,7 +348,7 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
verify_ref_kind(_masm, JVM_REF_invokeStatic, member_reg, temp3);
}
__ load_heap_oop(rmethod, member_vmtarget);
__ ldr(rmethod, vmtarget_method);
__ access_load_at(T_ADDRESS, IN_HEAP, rmethod, vmtarget_method, noreg, noreg);
break;
case vmIntrinsics::_linkToVirtual:
@@ -362,7 +362,7 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
// pick out the vtable index from the MemberName, and then we can discard it:
Register temp2_index = temp2;
__ ldr(temp2_index, member_vmindex);
__ access_load_at(T_ADDRESS, IN_HEAP, temp2_index, member_vmindex, noreg, noreg);
if (VerifyMethodHandles) {
Label L_index_ok;
@@ -394,7 +394,7 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
__ verify_klass_ptr(temp3_intf);
Register rindex = rmethod;
__ ldr(rindex, member_vmindex);
__ access_load_at(T_ADDRESS, IN_HEAP, rindex, member_vmindex, noreg, noreg);
if (VerifyMethodHandles) {
Label L;
__ cmpw(rindex, 0U);

@@ -760,8 +760,8 @@ void TemplateTable::iaload()
// r0: array
// r1: index
index_check(r0, r1); // leaves index in r1, kills rscratch1
__ lea(r1, Address(r0, r1, Address::uxtw(2)));
__ ldrw(r0, Address(r1, arrayOopDesc::base_offset_in_bytes(T_INT)));
__ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_INT) >> 2);
__ access_load_at(T_INT, IN_HEAP | IN_HEAP_ARRAY, r0, Address(r0, r1, Address::uxtw(2)), noreg, noreg);
}
void TemplateTable::laload()
@@ -772,8 +772,8 @@
// r0: array
// r1: index
index_check(r0, r1); // leaves index in r1, kills rscratch1
__ lea(r1, Address(r0, r1, Address::uxtw(3)));
__ ldr(r0, Address(r1, arrayOopDesc::base_offset_in_bytes(T_LONG)));
__ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_LONG) >> 3);
__ access_load_at(T_LONG, IN_HEAP | IN_HEAP_ARRAY, r0, Address(r0, r1, Address::uxtw(3)), noreg, noreg);
}
void TemplateTable::faload()
@@ -784,8 +784,8 @@
// r0: array
// r1: index
index_check(r0, r1); // leaves index in r1, kills rscratch1
__ lea(r1, Address(r0, r1, Address::uxtw(2)));
__ ldrs(v0, Address(r1, arrayOopDesc::base_offset_in_bytes(T_FLOAT)));
__ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_FLOAT) >> 2);
__ access_load_at(T_FLOAT, IN_HEAP | IN_HEAP_ARRAY, r0, Address(r0, r1, Address::uxtw(2)), noreg, noreg);
}
void TemplateTable::daload()
@@ -796,8 +796,8 @@
// r0: array
// r1: index
index_check(r0, r1); // leaves index in r1, kills rscratch1
__ lea(r1, Address(r0, r1, Address::uxtw(3)));
__ ldrd(v0, Address(r1, arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
__ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_DOUBLE) >> 3);
__ access_load_at(T_DOUBLE, IN_HEAP | IN_HEAP_ARRAY, r0, Address(r0, r1, Address::uxtw(3)), noreg, noreg);
}
void TemplateTable::aaload()
@@ -808,10 +808,9 @@
// r0: array
// r1: index
index_check(r0, r1); // leaves index in r1, kills rscratch1
int s = (UseCompressedOops ? 2 : 3);
__ lea(r1, Address(r0, r1, Address::uxtw(s)));
__ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
do_oop_load(_masm,
Address(r1, arrayOopDesc::base_offset_in_bytes(T_OBJECT)),
Address(r0, r1, Address::uxtw(LogBytesPerHeapOop)),
r0,
IN_HEAP_ARRAY);
}
@@ -824,8 +823,8 @@
// r0: array
// r1: index
index_check(r0, r1); // leaves index in r1, kills rscratch1
__ lea(r1, Address(r0, r1, Address::uxtw(0)));
__ load_signed_byte(r0, Address(r1, arrayOopDesc::base_offset_in_bytes(T_BYTE)));
__ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_BYTE) >> 0);
__ access_load_at(T_BYTE, IN_HEAP | IN_HEAP_ARRAY, r0, Address(r0, r1, Address::uxtw(0)), noreg, noreg);
}
void TemplateTable::caload()
@@ -836,8 +835,8 @@
// r0: array
// r1: index
index_check(r0, r1); // leaves index in r1, kills rscratch1
__ lea(r1, Address(r0, r1, Address::uxtw(1)));
__ load_unsigned_short(r0, Address(r1, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
__ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_CHAR) >> 1);
__ access_load_at(T_CHAR, IN_HEAP | IN_HEAP_ARRAY, r0, Address(r0, r1, Address::uxtw(1)), noreg, noreg);
}
// iload followed by caload frequent pair
@@ -853,8 +852,8 @@
// r0: array
// r1: index
index_check(r0, r1); // leaves index in r1, kills rscratch1
__ lea(r1, Address(r0, r1, Address::uxtw(1)));
__ load_unsigned_short(r0, Address(r1, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
__ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_CHAR) >> 1);
__ access_load_at(T_CHAR, IN_HEAP | IN_HEAP_ARRAY, r0, Address(r0, r1, Address::uxtw(1)), noreg, noreg);
}
void TemplateTable::saload()
@@ -865,8 +864,8 @@
// r0: array
// r1: index
index_check(r0, r1); // leaves index in r1, kills rscratch1
__ lea(r1, Address(r0, r1, Address::uxtw(1)));
__ load_signed_short(r0, Address(r1, arrayOopDesc::base_offset_in_bytes(T_SHORT)));
__ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_SHORT) >> 1);
__ access_load_at(T_SHORT, IN_HEAP | IN_HEAP_ARRAY, r0, Address(r0, r1, Address::uxtw(1)), noreg, noreg);
}
void TemplateTable::iload(int n)
@@ -1059,9 +1058,8 @@
// r1: index
// r3: array
index_check(r3, r1); // prefer index in r1
__ lea(rscratch1, Address(r3, r1, Address::uxtw(2)));
__ strw(r0, Address(rscratch1,
arrayOopDesc::base_offset_in_bytes(T_INT)));
__ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_INT) >> 2);
__ access_store_at(T_INT, IN_HEAP | IN_HEAP_ARRAY, Address(r3, r1, Address::uxtw(2)), r0, noreg, noreg);
}
void TemplateTable::lastore() {
@@ -1072,9 +1070,8 @@
// r1: index
// r3: array
index_check(r3, r1); // prefer index in r1
__ lea(rscratch1, Address(r3, r1, Address::uxtw(3)));
__ str(r0, Address(rscratch1,
arrayOopDesc::base_offset_in_bytes(T_LONG)));
__ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_LONG) >> 3);
__ access_store_at(T_LONG, IN_HEAP | IN_HEAP_ARRAY, Address(r3, r1, Address::uxtw(3)), r0, noreg, noreg);
}
void TemplateTable::fastore() {
@@ -1085,9 +1082,8 @@
// r1: index
// r3: array
index_check(r3, r1); // prefer index in r1
__ lea(rscratch1, Address(r3, r1, Address::uxtw(2)));
__ strs(v0, Address(rscratch1,
arrayOopDesc::base_offset_in_bytes(T_FLOAT)));
__ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_FLOAT) >> 2);
__ access_store_at(T_FLOAT, IN_HEAP | IN_HEAP_ARRAY, Address(r3, r1, Address::uxtw(2)), noreg /* ftos */, noreg, noreg);
}
void TemplateTable::dastore() {
@@ -1098,9 +1094,8 @@
// r1: index
// r3: array
index_check(r3, r1); // prefer index in r1
__ lea(rscratch1, Address(r3, r1, Address::uxtw(3)));
__ strd(v0, Address(rscratch1,
arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
__ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_DOUBLE) >> 3);
__ access_store_at(T_DOUBLE, IN_HEAP | IN_HEAP_ARRAY, Address(r3, r1, Address::uxtw(3)), noreg /* dtos */, noreg, noreg);
}
void TemplateTable::aastore() {
@@ -1111,10 +1106,10 @@
__ ldr(r2, at_tos_p1()); // index
__ ldr(r3, at_tos_p2()); // array
Address element_address(r4, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
Address element_address(r3, r4, Address::uxtw(LogBytesPerHeapOop));
index_check(r3, r2); // kills r1
__ lea(r4, Address(r3, r2, Address::uxtw(UseCompressedOops? 2 : 3)));
__ add(r4, r2, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
// do array store check - check for NULL value first
__ cbz(r0, is_null);
@@ -1176,9 +1171,8 @@
__ andw(r0, r0, 1); // if it is a T_BOOLEAN array, mask the stored value to 0/1
__ bind(L_skip);
__ lea(rscratch1, Address(r3, r1, Address::uxtw(0)));
__ strb(r0, Address(rscratch1,
arrayOopDesc::base_offset_in_bytes(T_BYTE)));
__ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_BYTE) >> 0);
__ access_store_at(T_BYTE, IN_HEAP | IN_HEAP_ARRAY, Address(r3, r1, Address::uxtw(0)), r0, noreg, noreg);
}
void TemplateTable::castore()
@@ -1190,9 +1184,8 @@
// r1: index
// r3: array
index_check(r3, r1); // prefer index in r1
__ lea(rscratch1, Address(r3, r1, Address::uxtw(1)));
__ strh(r0, Address(rscratch1,
arrayOopDesc::base_offset_in_bytes(T_CHAR)));
__ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_CHAR) >> 1);
__ access_store_at(T_CHAR, IN_HEAP | IN_HEAP_ARRAY, Address(r3, r1, Address::uxtw(1)), r0, noreg, noreg);
}
void TemplateTable::sastore()
@@ -2513,7 +2506,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr
if (is_static) rc = may_not_rewrite;
// btos
__ load_signed_byte(r0, field);
__ access_load_at(T_BYTE, IN_HEAP, r0, field, noreg, noreg);
__ push(btos);
// Rewrite bytecode to be faster
if (rc == may_rewrite) {
@@ -2526,7 +2519,7 @@
__ br(Assembler::NE, notBool);
// ztos (same code as btos)
__ ldrsb(r0, field);
__ access_load_at(T_BOOLEAN, IN_HEAP, r0, field, noreg, noreg);
__ push(ztos);
// Rewrite bytecode to be faster
if (rc == may_rewrite) {
@@ -2550,7 +2543,7 @@
__ cmp(flags, itos);
__ br(Assembler::NE, notInt);
// itos
__ ldrw(r0, field);
__ access_load_at(T_INT, IN_HEAP, r0, field, noreg, noreg);
__ push(itos);
// Rewrite bytecode to be faster
if (rc == may_rewrite) {
@@ -2562,7 +2555,7 @@
__ cmp(flags, ctos);
__ br(Assembler::NE, notChar);
// ctos
__ load_unsigned_short(r0, field);
__ access_load_at(T_CHAR, IN_HEAP, r0, field, noreg, noreg);
__ push(ctos);
// Rewrite bytecode to be faster
if (rc == may_rewrite) {
@@ -2574,7 +2567,7 @@
__ cmp(flags, stos);
__ br(Assembler::NE, notShort);
// stos
__ load_signed_short(r0, field);
__ access_load_at(T_SHORT, IN_HEAP, r0, field, noreg, noreg);
__ push(stos);
// Rewrite bytecode to be faster
if (rc == may_rewrite) {
@@ -2586,7 +2579,7 @@
__ cmp(flags, ltos);
__ br(Assembler::NE, notLong);
// ltos
__ ldr(r0, field);
__ access_load_at(T_LONG, IN_HEAP, r0, field, noreg, noreg);
__ push(ltos);
// Rewrite bytecode to be faster
if (rc == may_rewrite) {
@@ -2598,7 +2591,7 @@
__ cmp(flags, ftos);
__ br(Assembler::NE, notFloat);
// ftos
__ ldrs(v0, field);
__ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, field, noreg, noreg);
__ push(ftos);
// Rewrite bytecode to be faster
if (rc == may_rewrite) {
@@ -2612,7 +2605,7 @@
__ br(Assembler::NE, notDouble);
#endif
// dtos
__ ldrd(v0, field);
__ access_load_at(T_DOUBLE, IN_HEAP, noreg /* dtos */, field, noreg, noreg);
__ push(dtos);
// Rewrite bytecode to be faster
if (rc == may_rewrite) {
@@ -2750,7 +2743,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr
{
__ pop(btos);
if (!is_static) pop_and_check_object(obj);
__ strb(r0, field);
__ access_store_at(T_BYTE, IN_HEAP, field, r0, noreg, noreg);
if (rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_bputfield, bc, r1, true, byte_no);
}
@@ -2765,8 +2758,7 @@
{
__ pop(ztos);
if (!is_static) pop_and_check_object(obj);
__ andw(r0, r0, 0x1);
__ strb(r0, field);
__ access_store_at(T_BOOLEAN, IN_HEAP, field, r0, noreg, noreg);
if (rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_zputfield, bc, r1, true, byte_no);
}
@@ -2797,7 +2789,7 @@
{
__ pop(itos);
if (!is_static) pop_and_check_object(obj);
__ strw(r0, field);
__ access_store_at(T_INT, IN_HEAP, field, r0, noreg, noreg);
if (rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_iputfield, bc, r1, true, byte_no);
}
@@ -2812,7 +2804,7 @@
{
__ pop(ctos);
if (!is_static) pop_and_check_object(obj);
__ strh(r0, field);
__ access_store_at(T_CHAR, IN_HEAP, field, r0, noreg, noreg);
if (rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_cputfield, bc, r1, true, byte_no);
}
@@ -2827,7 +2819,7 @@
{
__ pop(stos);
if (!is_static) pop_and_check_object(obj);
__ strh(r0, field);
__ access_store_at(T_SHORT, IN_HEAP, field, r0, noreg, noreg);
if (rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_sputfield, bc, r1, true, byte_no);
}
@@ -2842,7 +2834,7 @@
{
__ pop(ltos);
if (!is_static) pop_and_check_object(obj);
__ str(r0, field);
__ access_store_at(T_LONG, IN_HEAP, field, r0, noreg, noreg);
if (rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_lputfield, bc, r1, true, byte_no);
}
@@ -2857,7 +2849,7 @@
{
__ pop(ftos);
if (!is_static) pop_and_check_object(obj);
__ strs(v0, field);
__ access_store_at(T_FLOAT, IN_HEAP, field, noreg /* ftos */, noreg, noreg);
if (rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_fputfield, bc, r1, true, byte_no);
}
@@ -2874,7 +2866,7 @@
{
__ pop(dtos);
if (!is_static) pop_and_check_object(obj);
__ strd(v0, field);
__ access_store_at(T_DOUBLE, IN_HEAP, field, noreg /* dtos */, noreg, noreg);
if (rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_dputfield, bc, r1, true, byte_no);
}
@@ -3005,27 +2997,28 @@ void TemplateTable::fast_storefield(TosState state)
do_oop_store(_masm, field, r0, IN_HEAP);
break;
case Bytecodes::_fast_lputfield:
__ str(r0, field);
__ access_store_at(T_LONG, IN_HEAP, field, r0, noreg, noreg);
break;
case Bytecodes::_fast_iputfield:
__ strw(r0, field);
__ access_store_at(T_INT, IN_HEAP, field, r0, noreg, noreg);
break;
case Bytecodes::_fast_zputfield:
__ andw(r0, r0, 0x1); // boolean is true if LSB is 1
// fall through to bputfield
__ access_store_at(T_BOOLEAN, IN_HEAP, field, r0, noreg, noreg);
break;
case Bytecodes::_fast_bputfield:
__ strb(r0, field);
__ access_store_at(T_BYTE, IN_HEAP, field, r0, noreg, noreg);
break;
case Bytecodes::_fast_sputfield:
// fall through
__ access_store_at(T_SHORT, IN_HEAP, field, r0, noreg, noreg);
break;
case Bytecodes::_fast_cputfield:
__ strh(r0, field);
__ access_store_at(T_CHAR, IN_HEAP, field, r0, noreg, noreg);
break;
case Bytecodes::_fast_fputfield:
__ strs(v0, field);
__ access_store_at(T_FLOAT, IN_HEAP, field, noreg /* ftos */, noreg, noreg);
break;
case Bytecodes::_fast_dputfield:
__ strd(v0, field);
__ access_store_at(T_DOUBLE, IN_HEAP, field, noreg /* dtos */, noreg, noreg);
break;
default:
ShouldNotReachHere();
@@ -3098,25 +3091,25 @@ void TemplateTable::fast_accessfield(TosState state)
__ verify_oop(r0);
break;
case Bytecodes::_fast_lgetfield:
__ ldr(r0, field);
__ access_load_at(T_LONG, IN_HEAP, r0, field, noreg, noreg);
break;
case Bytecodes::_fast_igetfield:
__ ldrw(r0, field);
__ access_load_at(T_INT, IN_HEAP, r0, field, noreg, noreg);
break;
case Bytecodes::_fast_bgetfield:
__ load_signed_byte(r0, field);
__ access_load_at(T_BYTE, IN_HEAP, r0, field, noreg, noreg);
break;
case Bytecodes::_fast_sgetfield:
__ load_signed_short(r0, field);
__ access_load_at(T_SHORT, IN_HEAP, r0, field, noreg, noreg);
break;
case Bytecodes::_fast_cgetfield:
__ load_unsigned_short(r0, field);
__ access_load_at(T_CHAR, IN_HEAP, r0, field, noreg, noreg);
break;
case Bytecodes::_fast_fgetfield:
__ ldrs(v0, field);
__ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, field, noreg, noreg);
break;
case Bytecodes::_fast_dgetfield:
__ ldrd(v0, field);
__ access_load_at(T_DOUBLE, IN_HEAP, noreg /* dtos */, field, noreg, noreg);
break;
default:
ShouldNotReachHere();
@@ -3161,14 +3154,14 @@ void TemplateTable::fast_xaccess(TosState state)
__ null_check(r0);
switch (state) {
case itos:
__ ldrw(r0, Address(r0, r1, Address::lsl(0)));
__ access_load_at(T_INT, IN_HEAP, r0, Address(r0, r1, Address::lsl(0)), noreg, noreg);
break;
case atos:
do_oop_load(_masm, Address(r0, r1, Address::lsl(0)), r0, IN_HEAP);
__ verify_oop(r0);
break;
case ftos:
__ ldrs(v0, Address(r0, r1, Address::lsl(0)));
__ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, Address(r0, r1, Address::lsl(0)), noreg, noreg);
break;
default:
ShouldNotReachHere();
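Note: the common thread through the MethodHandles and TemplateTable hunks above is that raw ldr/ldrw/str instructions are replaced with access_load_at/access_store_at, which route through the active collector's BarrierSetAssembler so that a collector such as ZGC can interpose a load barrier. A conceptual C++ sketch of that dispatch (types and names here are illustrative, not the real HotSpot signatures):

    #include <cstdint>

    struct BarrierSetAssembler {
      // Plain load: what collectors without load barriers emit
      virtual int64_t load_at(const int64_t* src) const { return *src; }
      virtual ~BarrierSetAssembler() = default;
    };

    struct ZBarrierSetAssemblerSketch : BarrierSetAssembler {
      int64_t load_at(const int64_t* src) const override {
        int64_t v = *src;
        // The real code tests v against a thread-local bad mask and calls
        // into the runtime to heal the reference when any bad bits are set.
        return v;
      }
    };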

@@ -1367,9 +1367,12 @@ void MacroAssembler::incr_allocated_bytes(RegisterOrConstant size_in_bytes, Regi
// Bump total bytes allocated by this thread
Label done;
ldr(tmp, Address(Rthread, in_bytes(JavaThread::allocated_bytes_offset())));
// Borrow the Rthread for alloc counter
Register Ralloc = Rthread;
add(Ralloc, Ralloc, in_bytes(JavaThread::allocated_bytes_offset()));
ldr(tmp, Address(Ralloc));
adds(tmp, tmp, size_in_bytes);
str(tmp, Address(Rthread, in_bytes(JavaThread::allocated_bytes_offset())), cc);
str(tmp, Address(Ralloc), cc);
b(done, cc);
// Increment the high word and store single-copy atomically (that is an unlikely scenario on typical embedded systems as it means >4GB has been allocated)
@@ -1387,14 +1390,17 @@
}
push(RegisterSet(low, high));
ldrd(low, Address(Rthread, in_bytes(JavaThread::allocated_bytes_offset())));
ldrd(low, Address(Ralloc));
adds(low, low, size_in_bytes);
adc(high, high, 0);
strd(low, Address(Rthread, in_bytes(JavaThread::allocated_bytes_offset())));
strd(low, Address(Ralloc));
pop(RegisterSet(low, high));
bind(done);
// Unborrow the Rthread
sub(Rthread, Ralloc, in_bytes(JavaThread::allocated_bytes_offset()));
#endif // AARCH64
}
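Note: rather than reserving yet another scratch register, the hunk above temporarily "borrows" Rthread itself: it advances it to the allocated_bytes field, uses it as the counter address, and the final sub (the "Unborrow the Rthread" step) restores the thread pointer. The same arithmetic with stand-in values:

    #include <cassert>
    #include <cstdint>

    int main() {
      const uintptr_t alloc_off = 128;  // stand-in for allocated_bytes_offset()
      uintptr_t rthread = 0x2000;       // pretend JavaThread*

      uintptr_t ralloc = rthread + alloc_off;  // add(Ralloc, Ralloc, offset); Ralloc aliases Rthread
      // ... ralloc now addresses the counter directly ...
      rthread = ralloc - alloc_off;            // sub(Rthread, Ralloc, offset) unborrows it
      assert(rthread == 0x2000);
      return 0;
    }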

@@ -1346,7 +1346,11 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
__ decode_heap_oop(dest->as_register());
}
#endif
__ verify_oop(dest->as_register());
// Load barrier has not yet been applied, so ZGC can't verify the oop here
if (!UseZGC) {
__ verify_oop(dest->as_register());
}
} else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) {
#ifdef _LP64
if (UseCompressedClassPointers) {

@@ -0,0 +1,458 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "gc/z/zBarrier.inline.hpp"
#include "gc/z/zBarrierSet.hpp"
#include "gc/z/zBarrierSetAssembler.hpp"
#include "gc/z/zBarrierSetRuntime.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/z/c1/zBarrierSetC1.hpp"
#endif // COMPILER1
#undef __
#define __ masm->
#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif
static void call_vm(MacroAssembler* masm,
address entry_point,
Register arg0,
Register arg1) {
// Setup arguments
if (arg1 == c_rarg0) {
if (arg0 == c_rarg1) {
__ xchgptr(c_rarg1, c_rarg0);
} else {
__ movptr(c_rarg1, arg1);
__ movptr(c_rarg0, arg0);
}
} else {
if (arg0 != c_rarg0) {
__ movptr(c_rarg0, arg0);
}
if (arg1 != c_rarg1) {
__ movptr(c_rarg1, arg1);
}
}
// Call VM
__ MacroAssembler::call_VM_leaf_base(entry_point, 2);
}
void ZBarrierSetAssembler::load_at(MacroAssembler* masm,
DecoratorSet decorators,
BasicType type,
Register dst,
Address src,
Register tmp1,
Register tmp_thread) {
if (!ZBarrierSet::barrier_needed(decorators, type)) {
// Barrier not needed
BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
return;
}
BLOCK_COMMENT("ZBarrierSetAssembler::load_at {");
// Allocate scratch register
Register scratch = tmp1;
if (tmp1 == noreg) {
scratch = r12;
__ push(scratch);
}
assert_different_registers(dst, scratch);
Label done;
//
// Fast Path
//
// Load address
__ lea(scratch, src);
// Load oop at address
__ movptr(dst, Address(scratch, 0));
// Test address bad mask
__ testptr(dst, address_bad_mask_from_thread(r15_thread));
__ jcc(Assembler::zero, done);
//
// Slow path
//
// Save registers
__ push(rax);
__ push(rcx);
__ push(rdx);
__ push(rdi);
__ push(rsi);
__ push(r8);
__ push(r9);
__ push(r10);
__ push(r11);
// We may end up here from generate_native_wrapper, then the method may have
// floats as arguments, and we must spill them before calling the VM runtime
// leaf. From the interpreter all floats are passed on the stack.
assert(Argument::n_float_register_parameters_j == 8, "Assumption");
const int xmm_size = wordSize * 2;
const int xmm_spill_size = xmm_size * Argument::n_float_register_parameters_j;
__ subptr(rsp, xmm_spill_size);
__ movdqu(Address(rsp, xmm_size * 7), xmm7);
__ movdqu(Address(rsp, xmm_size * 6), xmm6);
__ movdqu(Address(rsp, xmm_size * 5), xmm5);
__ movdqu(Address(rsp, xmm_size * 4), xmm4);
__ movdqu(Address(rsp, xmm_size * 3), xmm3);
__ movdqu(Address(rsp, xmm_size * 2), xmm2);
__ movdqu(Address(rsp, xmm_size * 1), xmm1);
__ movdqu(Address(rsp, xmm_size * 0), xmm0);
// Call VM
call_vm(masm, ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), dst, scratch);
// Restore registers
__ movdqu(xmm0, Address(rsp, xmm_size * 0));
__ movdqu(xmm1, Address(rsp, xmm_size * 1));
__ movdqu(xmm2, Address(rsp, xmm_size * 2));
__ movdqu(xmm3, Address(rsp, xmm_size * 3));
__ movdqu(xmm4, Address(rsp, xmm_size * 4));
__ movdqu(xmm5, Address(rsp, xmm_size * 5));
__ movdqu(xmm6, Address(rsp, xmm_size * 6));
__ movdqu(xmm7, Address(rsp, xmm_size * 7));
__ addptr(rsp, xmm_spill_size);
__ pop(r11);
__ pop(r10);
__ pop(r9);
__ pop(r8);
__ pop(rsi);
__ pop(rdi);
__ pop(rdx);
__ pop(rcx);
if (dst == rax) {
__ addptr(rsp, wordSize);
} else {
__ movptr(dst, rax);
__ pop(rax);
}
__ bind(done);
// Restore scratch register
if (tmp1 == noreg) {
__ pop(scratch);
}
BLOCK_COMMENT("} ZBarrierSetAssembler::load_at");
}
#ifdef ASSERT
void ZBarrierSetAssembler::store_at(MacroAssembler* masm,
DecoratorSet decorators,
BasicType type,
Address dst,
Register src,
Register tmp1,
Register tmp2) {
BLOCK_COMMENT("ZBarrierSetAssembler::store_at {");
// Verify oop store
if (type == T_OBJECT || type == T_ARRAY) {
// Note that src could be noreg, which means we
// are storing null and can skip verification.
if (src != noreg) {
Label done;
__ testptr(src, address_bad_mask_from_thread(r15_thread));
__ jcc(Assembler::zero, done);
__ stop("Verify oop store failed");
__ should_not_reach_here();
__ bind(done);
}
}
// Store value
BarrierSetAssembler::store_at(masm, decorators, type, dst, src, tmp1, tmp2);
BLOCK_COMMENT("} ZBarrierSetAssembler::store_at");
}
#endif // ASSERT
void ZBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm,
DecoratorSet decorators,
BasicType type,
Register src,
Register dst,
Register count) {
if (!ZBarrierSet::barrier_needed(decorators, type)) {
// Barrier not needed
return;
}
BLOCK_COMMENT("ZBarrierSetAssembler::arraycopy_prologue {");
// Save registers
__ pusha();
// Call VM
call_vm(masm, ZBarrierSetRuntime::load_barrier_on_oop_array_addr(), src, count);
// Restore registers
__ popa();
BLOCK_COMMENT("} ZBarrierSetAssembler::arraycopy_prologue");
}
void ZBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm,
Register jni_env,
Register obj,
Register tmp,
Label& slowpath) {
BLOCK_COMMENT("ZBarrierSetAssembler::try_resolve_jobject_in_native {");
// Resolve jobject
BarrierSetAssembler::try_resolve_jobject_in_native(masm, jni_env, obj, tmp, slowpath);
// Test address bad mask
__ testptr(obj, address_bad_mask_from_jni_env(jni_env));
__ jcc(Assembler::notZero, slowpath);
BLOCK_COMMENT("} ZBarrierSetAssembler::try_resolve_jobject_in_native");
}
#ifdef COMPILER1
#undef __
#define __ ce->masm()->
void ZBarrierSetAssembler::generate_c1_load_barrier_test(LIR_Assembler* ce,
LIR_Opr ref) const {
__ testptr(ref->as_register(), address_bad_mask_from_thread(r15_thread));
}
void ZBarrierSetAssembler::generate_c1_load_barrier_stub(LIR_Assembler* ce,
ZLoadBarrierStubC1* stub) const {
// Stub entry
__ bind(*stub->entry());
Register ref = stub->ref()->as_register();
Register ref_addr = noreg;
if (stub->ref_addr()->is_register()) {
// Address already in register
ref_addr = stub->ref_addr()->as_pointer_register();
} else {
// Load address into tmp register
ce->leal(stub->ref_addr(), stub->tmp(), stub->patch_code(), stub->patch_info());
ref_addr = stub->tmp()->as_pointer_register();
}
assert_different_registers(ref, ref_addr, noreg);
// Save rax unless it is the result register
if (ref != rax) {
__ push(rax);
}
// Setup arguments and call runtime stub
__ subptr(rsp, 2 * BytesPerWord);
ce->store_parameter(ref_addr, 1);
ce->store_parameter(ref, 0);
__ call(RuntimeAddress(stub->runtime_stub()));
__ addptr(rsp, 2 * BytesPerWord);
// Verify result
__ verify_oop(rax, "Bad oop");
// Restore rax unless it is the result register
if (ref != rax) {
__ movptr(ref, rax);
__ pop(rax);
}
// Stub exit
__ jmp(*stub->continuation());
}
#undef __
#define __ sasm->
void ZBarrierSetAssembler::generate_c1_load_barrier_runtime_stub(StubAssembler* sasm,
DecoratorSet decorators) const {
// Enter and save registers
__ enter();
__ save_live_registers_no_oop_map(true /* save_fpu_registers */);
// Setup arguments
__ load_parameter(1, c_rarg1);
__ load_parameter(0, c_rarg0);
// Call VM
__ call_VM_leaf(ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), c_rarg0, c_rarg1);
// Restore registers and return
__ restore_live_registers_except_rax(true /* restore_fpu_registers */);
__ leave();
__ ret(0);
}
#endif // COMPILER1
#undef __
#define __ cgen->assembler()->
// Generates a register specific stub for calling
// ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded() or
// ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded().
//
// The raddr register serves as both input and output for this stub. When the stub is
// called the raddr register contains the object field address (oop*) where the bad oop
// was loaded from, which caused the slow path to be taken. On return from the stub the
// raddr register contains the good/healed oop returned from
// ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded() or
// ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded().
static address generate_load_barrier_stub(StubCodeGenerator* cgen, Register raddr, DecoratorSet decorators) {
// Don't generate stub for invalid registers
if (raddr == rsp || raddr == r12 || raddr == r15) {
return NULL;
}
// Create stub name
char name[64];
const bool weak = (decorators & ON_WEAK_OOP_REF) != 0;
os::snprintf(name, sizeof(name), "load_barrier%s_stub_%s", weak ? "_weak" : "", raddr->name());
__ align(CodeEntryAlignment);
StubCodeMark mark(cgen, "StubRoutines", os::strdup(name, mtCode));
address start = __ pc();
// Save live registers
if (raddr != rax) {
__ push(rax);
}
if (raddr != rcx) {
__ push(rcx);
}
if (raddr != rdx) {
__ push(rdx);
}
if (raddr != rsi) {
__ push(rsi);
}
if (raddr != rdi) {
__ push(rdi);
}
if (raddr != r8) {
__ push(r8);
}
if (raddr != r9) {
__ push(r9);
}
if (raddr != r10) {
__ push(r10);
}
if (raddr != r11) {
__ push(r11);
}
// Setup arguments
if (c_rarg1 != raddr) {
__ movq(c_rarg1, raddr);
}
__ movq(c_rarg0, Address(raddr, 0));
// Call barrier function
__ call_VM_leaf(ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), c_rarg0, c_rarg1);
// Move result returned in rax to raddr, if needed
if (raddr != rax) {
__ movq(raddr, rax);
}
// Restore saved registers
if (raddr != r11) {
__ pop(r11);
}
if (raddr != r10) {
__ pop(r10);
}
if (raddr != r9) {
__ pop(r9);
}
if (raddr != r8) {
__ pop(r8);
}
if (raddr != rdi) {
__ pop(rdi);
}
if (raddr != rsi) {
__ pop(rsi);
}
if (raddr != rdx) {
__ pop(rdx);
}
if (raddr != rcx) {
__ pop(rcx);
}
if (raddr != rax) {
__ pop(rax);
}
__ ret(0);
return start;
}
#undef __
void ZBarrierSetAssembler::barrier_stubs_init() {
// Load barrier stubs
int stub_code_size = 256 * 16; // Rough estimate of code size
ResourceMark rm;
BufferBlob* bb = BufferBlob::create("zgc_load_barrier_stubs", stub_code_size);
CodeBuffer buf(bb);
StubCodeGenerator cgen(&buf);
Register rr = as_Register(0);
for (int i = 0; i < RegisterImpl::number_of_registers; i++) {
_load_barrier_slow_stub[i] = generate_load_barrier_stub(&cgen, rr, ON_STRONG_OOP_REF);
_load_barrier_weak_slow_stub[i] = generate_load_barrier_stub(&cgen, rr, ON_WEAK_OOP_REF);
rr = rr->successor();
}
}
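Note on the argument shuffle at the top of this new file: when arg1 already sits in c_rarg0 and arg0 in c_rarg1, moving either value first would overwrite the other's input, so call_vm realizes the two-element cycle with a single xchgptr. The same hazard in miniature:

    #include <cassert>
    #include <utility>

    int main() {
      int c_rarg0 = 42;  // currently holds what must end up in c_rarg1
      int c_rarg1 = 7;   // currently holds what must end up in c_rarg0
      // Two plain moves in either order would clobber one value first;
      // the exchange performs the parallel move, like xchgptr above.
      std::swap(c_rarg0, c_rarg1);
      assert(c_rarg0 == 7 && c_rarg1 == 42);
      return 0;
    }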

@@ -0,0 +1,92 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef CPU_X86_GC_Z_ZBARRIERSETASSEMBLER_X86_HPP
#define CPU_X86_GC_Z_ZBARRIERSETASSEMBLER_X86_HPP
#ifdef COMPILER1
class LIR_Assembler;
class LIR_OprDesc;
typedef LIR_OprDesc* LIR_Opr;
class StubAssembler;
class ZLoadBarrierStubC1;
#endif // COMPILER1
class ZBarrierSetAssembler : public ZBarrierSetAssemblerBase {
address _load_barrier_slow_stub[RegisterImpl::number_of_registers];
address _load_barrier_weak_slow_stub[RegisterImpl::number_of_registers];
public:
ZBarrierSetAssembler() :
_load_barrier_slow_stub(),
_load_barrier_weak_slow_stub() {}
address load_barrier_slow_stub(Register reg) { return _load_barrier_slow_stub[reg->encoding()]; }
address load_barrier_weak_slow_stub(Register reg) { return _load_barrier_weak_slow_stub[reg->encoding()]; }
virtual void load_at(MacroAssembler* masm,
DecoratorSet decorators,
BasicType type,
Register dst,
Address src,
Register tmp1,
Register tmp_thread);
#ifdef ASSERT
virtual void store_at(MacroAssembler* masm,
DecoratorSet decorators,
BasicType type,
Address dst,
Register src,
Register tmp1,
Register tmp2);
#endif // ASSERT
virtual void arraycopy_prologue(MacroAssembler* masm,
DecoratorSet decorators,
BasicType type,
Register src,
Register dst,
Register count);
virtual void try_resolve_jobject_in_native(MacroAssembler* masm,
Register jni_env,
Register obj,
Register tmp,
Label& slowpath);
#ifdef COMPILER1
void generate_c1_load_barrier_test(LIR_Assembler* ce,
LIR_Opr ref) const;
void generate_c1_load_barrier_stub(LIR_Assembler* ce,
ZLoadBarrierStubC1* stub) const;
void generate_c1_load_barrier_runtime_stub(StubAssembler* sasm,
DecoratorSet decorators) const;
#endif // COMPILER1
virtual void barrier_stubs_init();
};
#endif // CPU_X86_GC_Z_ZBARRIERSETASSEMBLER_X86_HPP
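Note: the header declares one stub address per general register, so generated code can branch to a slow-path stub specialized for whichever register holds the bad reference; lookup is by register encoding. A minimal sketch of the table shape (the register count is my assumption for x86_64; the slots for rsp, r12 and r15 stay NULL because generate_load_barrier_stub in the .cpp file above refuses those registers):

    #include <cstddef>

    static const int number_of_registers = 16;  // assumed x86_64 GPR count
    static void* load_barrier_slow_stub[number_of_registers];

    static void* stub_for_encoding(int enc) {
      return load_barrier_slow_stub[enc];
    }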

@@ -44,6 +44,9 @@
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
#if INCLUDE_ZGC
#include "gc/z/zThreadLocalData.hpp"
#endif
// Declaration and definition of StubGenerator (no .hpp file).
// For a more detailed description of the stub routine structure
@@ -1026,6 +1029,15 @@ class StubGenerator: public StubCodeGenerator {
// make sure object is 'reasonable'
__ testptr(rax, rax);
__ jcc(Assembler::zero, exit); // if obj is NULL it is OK
#if INCLUDE_ZGC
if (UseZGC) {
// Check if metadata bits indicate a bad oop
__ testptr(rax, Address(r15_thread, ZThreadLocalData::address_bad_mask_offset()));
__ jcc(Assembler::notZero, error);
}
#endif
// Check if the oop is in the right area of memory
__ movptr(c_rarg2, rax);
__ movptr(c_rarg3, (intptr_t) Universe::verify_oop_mask());

@@ -1067,6 +1067,138 @@ reg_class vectorz_reg(XMM0, XMM0b, XMM0c, XMM0d, XMM0e, XMM0f, XMM0g, XMM
#endif
);
reg_class xmm0_reg(XMM0, XMM0b, XMM0c, XMM0d);
reg_class ymm0_reg(XMM0, XMM0b, XMM0c, XMM0d, XMM0e, XMM0f, XMM0g, XMM0h);
reg_class zmm0_reg(XMM0, XMM0b, XMM0c, XMM0d, XMM0e, XMM0f, XMM0g, XMM0h, XMM0i, XMM0j, XMM0k, XMM0l, XMM0m, XMM0n, XMM0o, XMM0p);
reg_class xmm1_reg(XMM1, XMM1b, XMM1c, XMM1d);
reg_class ymm1_reg(XMM1, XMM1b, XMM1c, XMM1d, XMM1e, XMM1f, XMM1g, XMM1h);
reg_class zmm1_reg(XMM1, XMM1b, XMM1c, XMM1d, XMM1e, XMM1f, XMM1g, XMM1h, XMM1i, XMM1j, XMM1k, XMM1l, XMM1m, XMM1n, XMM1o, XMM1p);
reg_class xmm2_reg(XMM2, XMM2b, XMM2c, XMM2d);
reg_class ymm2_reg(XMM2, XMM2b, XMM2c, XMM2d, XMM2e, XMM2f, XMM2g, XMM2h);
reg_class zmm2_reg(XMM2, XMM2b, XMM2c, XMM2d, XMM2e, XMM2f, XMM2g, XMM2h, XMM2i, XMM2j, XMM2k, XMM2l, XMM2m, XMM2n, XMM2o, XMM2p);
reg_class xmm3_reg(XMM3, XMM3b, XMM3c, XMM3d);
reg_class ymm3_reg(XMM3, XMM3b, XMM3c, XMM3d, XMM3e, XMM3f, XMM3g, XMM3h);
reg_class zmm3_reg(XMM3, XMM3b, XMM3c, XMM3d, XMM3e, XMM3f, XMM3g, XMM3h, XMM3i, XMM3j, XMM3k, XMM3l, XMM3m, XMM3n, XMM3o, XMM3p);
reg_class xmm4_reg(XMM4, XMM4b, XMM4c, XMM4d);
reg_class ymm4_reg(XMM4, XMM4b, XMM4c, XMM4d, XMM4e, XMM4f, XMM4g, XMM4h);
reg_class zmm4_reg(XMM4, XMM4b, XMM4c, XMM4d, XMM4e, XMM4f, XMM4g, XMM4h, XMM4i, XMM4j, XMM4k, XMM4l, XMM4m, XMM4n, XMM4o, XMM4p);
reg_class xmm5_reg(XMM5, XMM5b, XMM5c, XMM5d);
reg_class ymm5_reg(XMM5, XMM5b, XMM5c, XMM5d, XMM5e, XMM5f, XMM5g, XMM5h);
reg_class zmm5_reg(XMM5, XMM5b, XMM5c, XMM5d, XMM5e, XMM5f, XMM5g, XMM5h, XMM5i, XMM5j, XMM5k, XMM5l, XMM5m, XMM5n, XMM5o, XMM5p);
reg_class xmm6_reg(XMM6, XMM6b, XMM6c, XMM6d);
reg_class ymm6_reg(XMM6, XMM6b, XMM6c, XMM6d, XMM6e, XMM6f, XMM6g, XMM6h);
reg_class zmm6_reg(XMM6, XMM6b, XMM6c, XMM6d, XMM6e, XMM6f, XMM6g, XMM6h, XMM6i, XMM6j, XMM6k, XMM6l, XMM6m, XMM6n, XMM6o, XMM6p);
reg_class xmm7_reg(XMM7, XMM7b, XMM7c, XMM7d);
reg_class ymm7_reg(XMM7, XMM7b, XMM7c, XMM7d, XMM7e, XMM7f, XMM7g, XMM7h);
reg_class zmm7_reg(XMM7, XMM7b, XMM7c, XMM7d, XMM7e, XMM7f, XMM7g, XMM7h, XMM7i, XMM7j, XMM7k, XMM7l, XMM7m, XMM7n, XMM7o, XMM7p);
#ifdef _LP64
reg_class xmm8_reg(XMM8, XMM8b, XMM8c, XMM8d);
reg_class ymm8_reg(XMM8, XMM8b, XMM8c, XMM8d, XMM8e, XMM8f, XMM8g, XMM8h);
reg_class zmm8_reg(XMM8, XMM8b, XMM8c, XMM8d, XMM8e, XMM8f, XMM8g, XMM8h, XMM8i, XMM8j, XMM8k, XMM8l, XMM8m, XMM8n, XMM8o, XMM8p);
reg_class xmm9_reg(XMM9, XMM9b, XMM9c, XMM9d);
reg_class ymm9_reg(XMM9, XMM9b, XMM9c, XMM9d, XMM9e, XMM9f, XMM9g, XMM9h);
reg_class zmm9_reg(XMM9, XMM9b, XMM9c, XMM9d, XMM9e, XMM9f, XMM9g, XMM9h, XMM9i, XMM9j, XMM9k, XMM9l, XMM9m, XMM9n, XMM9o, XMM9p);
reg_class xmm10_reg(XMM10, XMM10b, XMM10c, XMM10d);
reg_class ymm10_reg(XMM10, XMM10b, XMM10c, XMM10d, XMM10e, XMM10f, XMM10g, XMM10h);
reg_class zmm10_reg(XMM10, XMM10b, XMM10c, XMM10d, XMM10e, XMM10f, XMM10g, XMM10h, XMM10i, XMM10j, XMM10k, XMM10l, XMM10m, XMM10n, XMM10o, XMM10p);
reg_class xmm11_reg(XMM11, XMM11b, XMM11c, XMM11d);
reg_class ymm11_reg(XMM11, XMM11b, XMM11c, XMM11d, XMM11e, XMM11f, XMM11g, XMM11h);
reg_class zmm11_reg(XMM11, XMM11b, XMM11c, XMM11d, XMM11e, XMM11f, XMM11g, XMM11h, XMM11i, XMM11j, XMM11k, XMM11l, XMM11m, XMM11n, XMM11o, XMM11p);
reg_class xmm12_reg(XMM12, XMM12b, XMM12c, XMM12d);
reg_class ymm12_reg(XMM12, XMM12b, XMM12c, XMM12d, XMM12e, XMM12f, XMM12g, XMM12h);
reg_class zmm12_reg(XMM12, XMM12b, XMM12c, XMM12d, XMM12e, XMM12f, XMM12g, XMM12h, XMM12i, XMM12j, XMM12k, XMM12l, XMM12m, XMM12n, XMM12o, XMM12p);
reg_class xmm13_reg(XMM13, XMM13b, XMM13c, XMM13d);
reg_class ymm13_reg(XMM13, XMM13b, XMM13c, XMM13d, XMM13e, XMM13f, XMM13g, XMM13h);
reg_class zmm13_reg(XMM13, XMM13b, XMM13c, XMM13d, XMM13e, XMM13f, XMM13g, XMM13h, XMM13i, XMM13j, XMM13k, XMM13l, XMM13m, XMM13n, XMM13o, XMM13p);
reg_class xmm14_reg(XMM14, XMM14b, XMM14c, XMM14d);
reg_class ymm14_reg(XMM14, XMM14b, XMM14c, XMM14d, XMM14e, XMM14f, XMM14g, XMM14h);
reg_class zmm14_reg(XMM14, XMM14b, XMM14c, XMM14d, XMM14e, XMM14f, XMM14g, XMM14h, XMM14i, XMM14j, XMM14k, XMM14l, XMM14m, XMM14n, XMM14o, XMM14p);
reg_class xmm15_reg(XMM15, XMM15b, XMM15c, XMM15d);
reg_class ymm15_reg(XMM15, XMM15b, XMM15c, XMM15d, XMM15e, XMM15f, XMM15g, XMM15h);
reg_class zmm15_reg(XMM15, XMM15b, XMM15c, XMM15d, XMM15e, XMM15f, XMM15g, XMM15h, XMM15i, XMM15j, XMM15k, XMM15l, XMM15m, XMM15n, XMM15o, XMM15p);
reg_class xmm16_reg(XMM16, XMM16b, XMM16c, XMM16d);
reg_class ymm16_reg(XMM16, XMM16b, XMM16c, XMM16d, XMM16e, XMM16f, XMM16g, XMM16h);
reg_class zmm16_reg(XMM16, XMM16b, XMM16c, XMM16d, XMM16e, XMM16f, XMM16g, XMM16h, XMM16i, XMM16j, XMM16k, XMM16l, XMM16m, XMM16n, XMM16o, XMM16p);
reg_class xmm17_reg(XMM17, XMM17b, XMM17c, XMM17d);
reg_class ymm17_reg(XMM17, XMM17b, XMM17c, XMM17d, XMM17e, XMM17f, XMM17g, XMM17h);
reg_class zmm17_reg(XMM17, XMM17b, XMM17c, XMM17d, XMM17e, XMM17f, XMM17g, XMM17h, XMM17i, XMM17j, XMM17k, XMM17l, XMM17m, XMM17n, XMM17o, XMM17p);
reg_class xmm18_reg(XMM18, XMM18b, XMM18c, XMM18d);
reg_class ymm18_reg(XMM18, XMM18b, XMM18c, XMM18d, XMM18e, XMM18f, XMM18g, XMM18h);
reg_class zmm18_reg(XMM18, XMM18b, XMM18c, XMM18d, XMM18e, XMM18f, XMM18g, XMM18h, XMM18i, XMM18j, XMM18k, XMM18l, XMM18m, XMM18n, XMM18o, XMM18p);
reg_class xmm19_reg(XMM19, XMM19b, XMM19c, XMM19d);
reg_class ymm19_reg(XMM19, XMM19b, XMM19c, XMM19d, XMM19e, XMM19f, XMM19g, XMM19h);
reg_class zmm19_reg(XMM19, XMM19b, XMM19c, XMM19d, XMM19e, XMM19f, XMM19g, XMM19h, XMM19i, XMM19j, XMM19k, XMM19l, XMM19m, XMM19n, XMM19o, XMM19p);
reg_class xmm20_reg(XMM20, XMM20b, XMM20c, XMM20d);
reg_class ymm20_reg(XMM20, XMM20b, XMM20c, XMM20d, XMM20e, XMM20f, XMM20g, XMM20h);
reg_class zmm20_reg(XMM20, XMM20b, XMM20c, XMM20d, XMM20e, XMM20f, XMM20g, XMM20h, XMM20i, XMM20j, XMM20k, XMM20l, XMM20m, XMM20n, XMM20o, XMM20p);
reg_class xmm21_reg(XMM21, XMM21b, XMM21c, XMM21d);
reg_class ymm21_reg(XMM21, XMM21b, XMM21c, XMM21d, XMM21e, XMM21f, XMM21g, XMM21h);
reg_class zmm21_reg(XMM21, XMM21b, XMM21c, XMM21d, XMM21e, XMM21f, XMM21g, XMM21h, XMM21i, XMM21j, XMM21k, XMM21l, XMM21m, XMM21n, XMM21o, XMM21p);
reg_class xmm22_reg(XMM22, XMM22b, XMM22c, XMM22d);
reg_class ymm22_reg(XMM22, XMM22b, XMM22c, XMM22d, XMM22e, XMM22f, XMM22g, XMM22h);
reg_class zmm22_reg(XMM22, XMM22b, XMM22c, XMM22d, XMM22e, XMM22f, XMM22g, XMM22h, XMM22i, XMM22j, XMM22k, XMM22l, XMM22m, XMM22n, XMM22o, XMM22p);
reg_class xmm23_reg(XMM23, XMM23b, XMM23c, XMM23d);
reg_class ymm23_reg(XMM23, XMM23b, XMM23c, XMM23d, XMM23e, XMM23f, XMM23g, XMM23h);
reg_class zmm23_reg(XMM23, XMM23b, XMM23c, XMM23d, XMM23e, XMM23f, XMM23g, XMM23h, XMM23i, XMM23j, XMM23k, XMM23l, XMM23m, XMM23n, XMM23o, XMM23p);
reg_class xmm24_reg(XMM24, XMM24b, XMM24c, XMM24d);
reg_class ymm24_reg(XMM24, XMM24b, XMM24c, XMM24d, XMM24e, XMM24f, XMM24g, XMM24h);
reg_class zmm24_reg(XMM24, XMM24b, XMM24c, XMM24d, XMM24e, XMM24f, XMM24g, XMM24h, XMM24i, XMM24j, XMM24k, XMM24l, XMM24m, XMM24n, XMM24o, XMM24p);
reg_class xmm25_reg(XMM25, XMM25b, XMM25c, XMM25d);
reg_class ymm25_reg(XMM25, XMM25b, XMM25c, XMM25d, XMM25e, XMM25f, XMM25g, XMM25h);
reg_class zmm25_reg(XMM25, XMM25b, XMM25c, XMM25d, XMM25e, XMM25f, XMM25g, XMM25h, XMM25i, XMM25j, XMM25k, XMM25l, XMM25m, XMM25n, XMM25o, XMM25p);
reg_class xmm26_reg(XMM26, XMM26b, XMM26c, XMM26d);
reg_class ymm26_reg(XMM26, XMM26b, XMM26c, XMM26d, XMM26e, XMM26f, XMM26g, XMM26h);
reg_class zmm26_reg(XMM26, XMM26b, XMM26c, XMM26d, XMM26e, XMM26f, XMM26g, XMM26h, XMM26i, XMM26j, XMM26k, XMM26l, XMM26m, XMM26n, XMM26o, XMM26p);
reg_class xmm27_reg(XMM27, XMM27b, XMM27c, XMM27d);
reg_class ymm27_reg(XMM27, XMM27b, XMM27c, XMM27d, XMM27e, XMM27f, XMM27g, XMM27h);
reg_class zmm27_reg(XMM27, XMM27b, XMM27c, XMM27d, XMM27e, XMM27f, XMM27g, XMM27h, XMM27i, XMM27j, XMM27k, XMM27l, XMM27m, XMM27n, XMM27o, XMM27p);
reg_class xmm28_reg(XMM28, XMM28b, XMM28c, XMM28d);
reg_class ymm28_reg(XMM28, XMM28b, XMM28c, XMM28d, XMM28e, XMM28f, XMM28g, XMM28h);
reg_class zmm28_reg(XMM28, XMM28b, XMM28c, XMM28d, XMM28e, XMM28f, XMM28g, XMM28h, XMM28i, XMM28j, XMM28k, XMM28l, XMM28m, XMM28n, XMM28o, XMM28p);
reg_class xmm29_reg(XMM29, XMM29b, XMM29c, XMM29d);
reg_class ymm29_reg(XMM29, XMM29b, XMM29c, XMM29d, XMM29e, XMM29f, XMM29g, XMM29h);
reg_class zmm29_reg(XMM29, XMM29b, XMM29c, XMM29d, XMM29e, XMM29f, XMM29g, XMM29h, XMM29i, XMM29j, XMM29k, XMM29l, XMM29m, XMM29n, XMM29o, XMM29p);
reg_class xmm30_reg(XMM30, XMM30b, XMM30c, XMM30d);
reg_class ymm30_reg(XMM30, XMM30b, XMM30c, XMM30d, XMM30e, XMM30f, XMM30g, XMM30h);
reg_class zmm30_reg(XMM30, XMM30b, XMM30c, XMM30d, XMM30e, XMM30f, XMM30g, XMM30h, XMM30i, XMM30j, XMM30k, XMM30l, XMM30m, XMM30n, XMM30o, XMM30p);
reg_class xmm31_reg(XMM31, XMM31b, XMM31c, XMM31d);
reg_class ymm31_reg(XMM31, XMM31b, XMM31c, XMM31d, XMM31e, XMM31f, XMM31g, XMM31h);
reg_class zmm31_reg(XMM31, XMM31b, XMM31c, XMM31d, XMM31e, XMM31f, XMM31g, XMM31h, XMM31i, XMM31j, XMM31k, XMM31l, XMM31m, XMM31n, XMM31o, XMM31p);
#endif
%}

@@ -538,6 +538,12 @@ reg_class int_rdi_reg(RDI);
%}
source_hpp %{
#if INCLUDE_ZGC
#include "gc/z/zBarrierSetAssembler.hpp"
#endif
%}
//----------SOURCE BLOCK-------------------------------------------------------
// This is a block of C++ code which provides values, functions, and
// definitions necessary in the rest of the architecture description
@@ -4221,6 +4227,135 @@ operand cmpOpUCF2() %{
%}
%}
// Operands for bound floating pointer register arguments
operand rxmm0() %{
constraint(ALLOC_IN_RC(xmm0_reg)); match(VecX);
predicate((UseSSE > 0) && (UseAVX <= 2)); format%{%} interface(REG_INTER);
%}
operand rxmm1() %{
constraint(ALLOC_IN_RC(xmm1_reg)); match(VecX);
predicate((UseSSE > 0) && (UseAVX <= 2)); format%{%} interface(REG_INTER);
%}
operand rxmm2() %{
constraint(ALLOC_IN_RC(xmm2_reg)); match(VecX);
predicate((UseSSE > 0) && (UseAVX <= 2)); format%{%} interface(REG_INTER);
%}
operand rxmm3() %{
constraint(ALLOC_IN_RC(xmm3_reg)); match(VecX);
predicate((UseSSE > 0) && (UseAVX <= 2)); format%{%} interface(REG_INTER);
%}
operand rxmm4() %{
constraint(ALLOC_IN_RC(xmm4_reg)); match(VecX);
predicate((UseSSE > 0) && (UseAVX <= 2)); format%{%} interface(REG_INTER);
%}
operand rxmm5() %{
constraint(ALLOC_IN_RC(xmm5_reg)); match(VecX);
predicate((UseSSE > 0) && (UseAVX <= 2)); format%{%} interface(REG_INTER);
%}
operand rxmm6() %{
constraint(ALLOC_IN_RC(xmm6_reg)); match(VecX);
predicate((UseSSE > 0) && (UseAVX <= 2)); format%{%} interface(REG_INTER);
%}
operand rxmm7() %{
constraint(ALLOC_IN_RC(xmm7_reg)); match(VecX);
predicate((UseSSE > 0) && (UseAVX <= 2)); format%{%} interface(REG_INTER);
%}
operand rxmm8() %{
constraint(ALLOC_IN_RC(xmm8_reg)); match(VecX);
predicate((UseSSE > 0) && (UseAVX <= 2)); format%{%} interface(REG_INTER);
%}
operand rxmm9() %{
constraint(ALLOC_IN_RC(xmm9_reg)); match(VecX);
predicate((UseSSE > 0) && (UseAVX <= 2)); format%{%} interface(REG_INTER);
%}
operand rxmm10() %{
constraint(ALLOC_IN_RC(xmm10_reg)); match(VecX);
predicate((UseSSE > 0) && (UseAVX <= 2)); format%{%} interface(REG_INTER);
%}
operand rxmm11() %{
constraint(ALLOC_IN_RC(xmm11_reg)); match(VecX);
predicate((UseSSE > 0) && (UseAVX <= 2)); format%{%} interface(REG_INTER);
%}
operand rxmm12() %{
constraint(ALLOC_IN_RC(xmm12_reg)); match(VecX);
predicate((UseSSE > 0) && (UseAVX <= 2)); format%{%} interface(REG_INTER);
%}
operand rxmm13() %{
constraint(ALLOC_IN_RC(xmm13_reg)); match(VecX);
predicate((UseSSE > 0) && (UseAVX <= 2)); format%{%} interface(REG_INTER);
%}
operand rxmm14() %{
constraint(ALLOC_IN_RC(xmm14_reg)); match(VecX);
predicate((UseSSE > 0) && (UseAVX <= 2)); format%{%} interface(REG_INTER);
%}
operand rxmm15() %{
constraint(ALLOC_IN_RC(xmm15_reg)); match(VecX);
predicate((UseSSE > 0) && (UseAVX <= 2)); format%{%} interface(REG_INTER);
%}
operand rxmm16() %{
constraint(ALLOC_IN_RC(xmm16_reg)); match(VecX);
predicate(UseAVX == 3); format%{%} interface(REG_INTER);
%}
operand rxmm17() %{
constraint(ALLOC_IN_RC(xmm17_reg)); match(VecX);
predicate(UseAVX == 3); format%{%} interface(REG_INTER);
%}
operand rxmm18() %{
constraint(ALLOC_IN_RC(xmm18_reg)); match(VecX);
predicate(UseAVX == 3); format%{%} interface(REG_INTER);
%}
operand rxmm19() %{
constraint(ALLOC_IN_RC(xmm19_reg)); match(VecX);
predicate(UseAVX == 3); format%{%} interface(REG_INTER);
%}
operand rxmm20() %{
constraint(ALLOC_IN_RC(xmm20_reg)); match(VecX);
predicate(UseAVX == 3); format%{%} interface(REG_INTER);
%}
operand rxmm21() %{
constraint(ALLOC_IN_RC(xmm21_reg)); match(VecX);
predicate(UseAVX == 3); format%{%} interface(REG_INTER);
%}
operand rxmm22() %{
constraint(ALLOC_IN_RC(xmm22_reg)); match(VecX);
predicate(UseAVX == 3); format%{%} interface(REG_INTER);
%}
operand rxmm23() %{
constraint(ALLOC_IN_RC(xmm23_reg)); match(VecX);
predicate(UseAVX == 3); format%{%} interface(REG_INTER);
%}
operand rxmm24() %{
constraint(ALLOC_IN_RC(xmm24_reg)); match(VecX);
predicate(UseAVX == 3); format%{%} interface(REG_INTER);
%}
operand rxmm25() %{
constraint(ALLOC_IN_RC(xmm25_reg)); match(VecX);
predicate(UseAVX == 3); format%{%} interface(REG_INTER);
%}
operand rxmm26() %{
constraint(ALLOC_IN_RC(xmm26_reg)); match(VecX);
predicate(UseAVX == 3); format%{%} interface(REG_INTER);
%}
operand rxmm27() %{
constraint(ALLOC_IN_RC(xmm27_reg)); match(VecX);
predicate(UseAVX == 3); format%{%} interface(REG_INTER);
%}
operand rxmm28() %{
constraint(ALLOC_IN_RC(xmm28_reg)); match(VecX);
predicate(UseAVX == 3); format%{%} interface(REG_INTER);
%}
operand rxmm29() %{
constraint(ALLOC_IN_RC(xmm29_reg)); match(VecX);
predicate(UseAVX == 3); format%{%} interface(REG_INTER);
%}
operand rxmm30() %{
constraint(ALLOC_IN_RC(xmm30_reg)); match(VecX);
predicate(UseAVX == 3); format%{%} interface(REG_INTER);
%}
operand rxmm31() %{
constraint(ALLOC_IN_RC(xmm31_reg)); match(VecX);
predicate(UseAVX == 3); format%{%} interface(REG_INTER);
%}
//----------OPERAND CLASSES----------------------------------------------------
// Operand Classes are groups of operands that are used to simplify
@ -11547,6 +11682,16 @@ instruct testL_reg_mem(rFlagsReg cr, rRegL src, memory mem, immL0 zero)
ins_pipe(ialu_cr_reg_mem);
%}
instruct testL_reg_mem2(rFlagsReg cr, rRegP src, memory mem, immL0 zero)
%{
match(Set cr (CmpL (AndL (CastP2X src) (LoadL mem)) zero));
format %{ "testq $src, $mem" %}
opcode(0x85);
ins_encode(REX_reg_mem_wide(src, mem), OpcP, reg_mem(src, mem));
ins_pipe(ialu_cr_reg_mem);
%}
// Manifest a CmpL result in an integer register. Very painful.
// This is the test to avoid.
instruct cmpL3_reg_reg(rRegI dst, rRegL src1, rRegL src2, rFlagsReg flags)
@ -12320,6 +12465,223 @@ instruct RethrowException()
ins_pipe(pipe_jmp);
%}
//
// Execute ZGC load barrier (strong) slow path
//
// When running without XMM regs
instruct loadBarrierSlowRegNoVec(rRegP dst, memory mem, rFlagsReg cr) %{
match(Set dst (LoadBarrierSlowReg mem));
predicate(MaxVectorSize < 16);
effect(DEF dst, KILL cr);
format %{"LoadBarrierSlowRegNoVec $dst, $mem" %}
ins_encode %{
#if INCLUDE_ZGC
Register d = $dst$$Register;
ZBarrierSetAssembler* bs = (ZBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
assert(d != r12, "Can't be R12!");
assert(d != r15, "Can't be R15!");
assert(d != rsp, "Can't be RSP!");
__ lea(d, $mem$$Address);
__ call(RuntimeAddress(bs->load_barrier_slow_stub(d)));
#else
ShouldNotReachHere();
#endif
%}
ins_pipe(pipe_slow);
%}
// For XMM and YMM enabled processors
instruct loadBarrierSlowRegXmmAndYmm(rRegP dst, memory mem, rFlagsReg cr,
rxmm0 x0, rxmm1 x1, rxmm2 x2,rxmm3 x3,
rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15) %{
match(Set dst (LoadBarrierSlowReg mem));
predicate((UseSSE > 0) && (UseAVX <= 2) && (MaxVectorSize >= 16));
effect(DEF dst, KILL cr,
KILL x0, KILL x1, KILL x2, KILL x3,
KILL x4, KILL x5, KILL x6, KILL x7,
KILL x8, KILL x9, KILL x10, KILL x11,
KILL x12, KILL x13, KILL x14, KILL x15);
format %{"LoadBarrierSlowRegXmm $dst, $mem" %}
ins_encode %{
#if INCLUDE_ZGC
Register d = $dst$$Register;
ZBarrierSetAssembler* bs = (ZBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
assert(d != r12, "Can't be R12!");
assert(d != r15, "Can't be R15!");
assert(d != rsp, "Can't be RSP!");
__ lea(d, $mem$$Address);
__ call(RuntimeAddress(bs->load_barrier_slow_stub(d)));
#else
ShouldNotReachHere();
#endif
%}
ins_pipe(pipe_slow);
%}
// For ZMM enabled processors
instruct loadBarrierSlowRegZmm(rRegP dst, memory mem, rFlagsReg cr,
rxmm0 x0, rxmm1 x1, rxmm2 x2,rxmm3 x3,
rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15,
rxmm16 x16, rxmm17 x17, rxmm18 x18, rxmm19 x19,
rxmm20 x20, rxmm21 x21, rxmm22 x22, rxmm23 x23,
rxmm24 x24, rxmm25 x25, rxmm26 x26, rxmm27 x27,
rxmm28 x28, rxmm29 x29, rxmm30 x30, rxmm31 x31) %{
match(Set dst (LoadBarrierSlowReg mem));
predicate((UseAVX == 3) && (MaxVectorSize >= 16));
effect(DEF dst, KILL cr,
KILL x0, KILL x1, KILL x2, KILL x3,
KILL x4, KILL x5, KILL x6, KILL x7,
KILL x8, KILL x9, KILL x10, KILL x11,
KILL x12, KILL x13, KILL x14, KILL x15,
KILL x16, KILL x17, KILL x18, KILL x19,
KILL x20, KILL x21, KILL x22, KILL x23,
KILL x24, KILL x25, KILL x26, KILL x27,
KILL x28, KILL x29, KILL x30, KILL x31);
format %{"LoadBarrierSlowRegZmm $dst, $mem" %}
ins_encode %{
#if INCLUDE_ZGC
Register d = $dst$$Register;
ZBarrierSetAssembler* bs = (ZBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
assert(d != r12, "Can't be R12!");
assert(d != r15, "Can't be R15!");
assert(d != rsp, "Can't be RSP!");
__ lea(d, $mem$$Address);
__ call(RuntimeAddress(bs->load_barrier_slow_stub(d)));
#else
ShouldNotReachHere();
#endif
%}
ins_pipe(pipe_slow);
%}
//
// Execute ZGC load barrier (weak) slow path
//
// When running without XMM regs
instruct loadBarrierWeakSlowRegNoVec(rRegP dst, memory mem, rFlagsReg cr) %{
match(Set dst (LoadBarrierWeakSlowReg mem));
predicate(MaxVectorSize < 16);
effect(DEF dst, KILL cr);
format %{"LoadBarrierSlowRegNoVec $dst, $mem" %}
ins_encode %{
#if INCLUDE_ZGC
Register d = $dst$$Register;
ZBarrierSetAssembler* bs = (ZBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
assert(d != r12, "Can't be R12!");
assert(d != r15, "Can't be R15!");
assert(d != rsp, "Can't be RSP!");
__ lea(d, $mem$$Address);
__ call(RuntimeAddress(bs->load_barrier_weak_slow_stub(d)));
#else
ShouldNotReachHere();
#endif
%}
ins_pipe(pipe_slow);
%}
// For XMM and YMM enabled processors
instruct loadBarrierWeakSlowRegXmmAndYmm(rRegP dst, memory mem, rFlagsReg cr,
rxmm0 x0, rxmm1 x1, rxmm2 x2,rxmm3 x3,
rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15) %{
match(Set dst (LoadBarrierWeakSlowReg mem));
predicate((UseSSE > 0) && (UseAVX <= 2) && (MaxVectorSize >= 16));
effect(DEF dst, KILL cr,
KILL x0, KILL x1, KILL x2, KILL x3,
KILL x4, KILL x5, KILL x6, KILL x7,
KILL x8, KILL x9, KILL x10, KILL x11,
KILL x12, KILL x13, KILL x14, KILL x15);
format %{"LoadBarrierWeakSlowRegXmm $dst, $mem" %}
ins_encode %{
#if INCLUDE_ZGC
Register d = $dst$$Register;
ZBarrierSetAssembler* bs = (ZBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
assert(d != r12, "Can't be R12!");
assert(d != r15, "Can't be R15!");
assert(d != rsp, "Can't be RSP!");
__ lea(d,$mem$$Address);
__ call(RuntimeAddress(bs->load_barrier_weak_slow_stub(d)));
#else
ShouldNotReachHere();
#endif
%}
ins_pipe(pipe_slow);
%}
// For ZMM enabled processors
instruct loadBarrierWeakSlowRegZmm(rRegP dst, memory mem, rFlagsReg cr,
rxmm0 x0, rxmm1 x1, rxmm2 x2,rxmm3 x3,
rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15,
rxmm16 x16, rxmm17 x17, rxmm18 x18, rxmm19 x19,
rxmm20 x20, rxmm21 x21, rxmm22 x22, rxmm23 x23,
rxmm24 x24, rxmm25 x25, rxmm26 x26, rxmm27 x27,
rxmm28 x28, rxmm29 x29, rxmm30 x30, rxmm31 x31) %{
match(Set dst (LoadBarrierWeakSlowReg mem));
predicate((UseAVX == 3) && (MaxVectorSize >= 16));
effect(DEF dst, KILL cr,
KILL x0, KILL x1, KILL x2, KILL x3,
KILL x4, KILL x5, KILL x6, KILL x7,
KILL x8, KILL x9, KILL x10, KILL x11,
KILL x12, KILL x13, KILL x14, KILL x15,
KILL x16, KILL x17, KILL x18, KILL x19,
KILL x20, KILL x21, KILL x22, KILL x23,
KILL x24, KILL x25, KILL x26, KILL x27,
KILL x28, KILL x29, KILL x30, KILL x31);
format %{"LoadBarrierWeakSlowRegZmm $dst, $mem" %}
ins_encode %{
#if INCLUDE_ZGC
Register d = $dst$$Register;
ZBarrierSetAssembler* bs = (ZBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
assert(d != r12, "Can't be R12!");
assert(d != r15, "Can't be R15!");
assert(d != rsp, "Can't be RSP!");
__ lea(d,$mem$$Address);
__ call(RuntimeAddress(bs->load_barrier_weak_slow_stub(d)));
#else
ShouldNotReachHere();
#endif
%}
ins_pipe(pipe_slow);
%}
// ============================================================================
// This name is KNOWN by the ADLC and cannot be changed.

@ -899,8 +899,12 @@ bool os::create_thread(Thread* thread, ThreadType thr_type,
// guard pages might not fit on the tiny stack created.
int ret = pthread_attr_setstacksize(&attr, stack_size);
if (ret != 0) {
log_warning(os, thread)("The thread stack size specified is invalid: " SIZE_FORMAT "k",
log_warning(os, thread)("The %sthread stack size specified is invalid: " SIZE_FORMAT "k",
(thr_type == compiler_thread) ? "compiler " : ((thr_type == java_thread) ? "" : "VM "),
stack_size / K);
thread->set_osthread(NULL);
delete osthread;
return false;
}
// Save some cycles and a page by disabling OS guard pages where we have our own

@ -1988,6 +1988,8 @@ void os::print_os_info(outputStream* st) {
os::Linux::print_full_memory_info(st);
os::Linux::print_proc_sys_info(st);
os::Linux::print_container_info(st);
}
@ -2120,6 +2122,24 @@ void os::Linux::print_libversion_info(outputStream* st) {
st->cr();
}
void os::Linux::print_proc_sys_info(outputStream* st) {
st->cr();
st->print_cr("/proc/sys/kernel/threads-max (system-wide limit on the number of threads):");
_print_ascii_file("/proc/sys/kernel/threads-max", st);
st->cr();
st->cr();
st->print_cr("/proc/sys/vm/max_map_count (maximum number of memory map areas a process may have):");
_print_ascii_file("/proc/sys/vm/max_map_count", st);
st->cr();
st->cr();
st->print_cr("/proc/sys/kernel/pid_max (system-wide limit on number of process identifiers):");
_print_ascii_file("/proc/sys/kernel/pid_max", st);
st->cr();
st->cr();
}
void os::Linux::print_full_memory_info(outputStream* st) {
st->print("\n/proc/meminfo:\n");
_print_ascii_file("/proc/meminfo", st);
@ -3106,7 +3126,10 @@ static address get_stack_commited_bottom(address bottom, size_t size) {
bool os::committed_in_range(address start, size_t size, address& committed_start, size_t& committed_size) {
int mincore_return_value;
const size_t stripe = 1024; // query this many pages each time
unsigned char vec[stripe];
unsigned char vec[stripe + 1];
// set a guard
vec[stripe] = 'X';
const size_t page_sz = os::vm_page_size();
size_t pages = size / page_sz;
@ -3118,7 +3141,9 @@ bool os::committed_in_range(address start, size_t size, address& committed_start
int loops = (pages + stripe - 1) / stripe;
int committed_pages = 0;
address loop_base = start;
for (int index = 0; index < loops; index ++) {
bool found_range = false;
for (int index = 0; index < loops && !found_range; index ++) {
assert(pages > 0, "Nothing to do");
int pages_to_query = (pages >= stripe) ? stripe : pages;
pages -= pages_to_query;
@ -3133,12 +3158,14 @@ bool os::committed_in_range(address start, size_t size, address& committed_start
return false;
}
assert(vec[stripe] == 'X', "overflow guard");
assert(mincore_return_value == 0, "Range must be valid");
// Process this stripe
for (int vecIdx = 0; vecIdx < pages_to_query; vecIdx ++) {
if ((vec[vecIdx] & 0x01) == 0) { // not committed
// End of current contiguous region
if (committed_start != NULL) {
found_range = true;
break;
}
} else { // committed

@ -113,6 +113,7 @@ class Linux {
static void print_container_info(outputStream* st);
static void print_distro_info(outputStream* st);
static void print_libversion_info(outputStream* st);
static void print_proc_sys_info(outputStream* st);
public:
static bool _stack_is_executable;

@ -0,0 +1,31 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef OS_CPU_LINUX_X86_ZADDRESS_LINUX_X86_INLINE_HPP
#define OS_CPU_LINUX_X86_ZADDRESS_LINUX_X86_INLINE_HPP
inline uintptr_t ZAddress::address(uintptr_t value) {
return value;
}
#endif // OS_CPU_LINUX_X86_ZADDRESS_LINUX_X86_INLINE_HPP

@ -0,0 +1,360 @@
/*
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/z/zArray.inline.hpp"
#include "gc/z/zBackingFile_linux_x86.hpp"
#include "gc/z/zBackingPath_linux_x86.hpp"
#include "gc/z/zErrno.hpp"
#include "gc/z/zLargePages.inline.hpp"
#include "logging/log.hpp"
#include "runtime/init.hpp"
#include "runtime/os.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/statfs.h>
#include <sys/types.h>
#include <unistd.h>
// Filesystem names
#define ZFILESYSTEM_TMPFS "tmpfs"
#define ZFILESYSTEM_HUGETLBFS "hugetlbfs"
// Sysfs file for transparent huge page on tmpfs
#define ZFILENAME_SHMEM_ENABLED "/sys/kernel/mm/transparent_hugepage/shmem_enabled"
// Default mount points
#define ZMOUNTPOINT_TMPFS "/dev/shm"
#define ZMOUNTPOINT_HUGETLBFS "/hugepages"
// Java heap filename
#define ZFILENAME_HEAP "java_heap"
// Support for building on older Linux systems
#ifndef __NR_memfd_create
#define __NR_memfd_create 319
#endif
#ifndef MFD_CLOEXEC
#define MFD_CLOEXEC 0x0001U
#endif
#ifndef MFD_HUGETLB
#define MFD_HUGETLB 0x0004U
#endif
#ifndef O_CLOEXEC
#define O_CLOEXEC 02000000
#endif
#ifndef O_TMPFILE
#define O_TMPFILE (020000000 | O_DIRECTORY)
#endif
// Filesystem types, see statfs(2)
#ifndef TMPFS_MAGIC
#define TMPFS_MAGIC 0x01021994
#endif
#ifndef HUGETLBFS_MAGIC
#define HUGETLBFS_MAGIC 0x958458f6
#endif
static int z_memfd_create(const char *name, unsigned int flags) {
return syscall(__NR_memfd_create, name, flags);
}
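// Minimal sketch of a runtime capability probe (hypothetical helper, unused
// below; assumes <errno.h> is available via the includes above): the #defines
// above only make this file compile on pre-3.17 kernels, while whether
// memfd_create(2) actually works is decided at runtime by the kernel.
static bool z_memfd_create_supported() {
  const int fd = z_memfd_create("probe", MFD_CLOEXEC);
  if (fd != -1) {
    close(fd);              // Syscall exists and the descriptor is real
    return true;
  }
  return errno != ENOSYS;   // ENOSYS: kernel predates memfd_create
}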
ZBackingFile::ZBackingFile() :
_fd(-1),
_filesystem(0),
_initialized(false) {
// Create backing file
_fd = create_fd(ZFILENAME_HEAP);
if (_fd == -1) {
return;
}
// Get filesystem type
struct statfs statfs_buf;
if (fstatfs(_fd, &statfs_buf) == -1) {
ZErrno err;
log_error(gc, init)("Failed to determine filesystem type for backing file (%s)", err.to_string());
return;
}
_filesystem = statfs_buf.f_type;
// Make sure we're on a supported filesystem
if (!is_tmpfs() && !is_hugetlbfs()) {
log_error(gc, init)("Backing file must be located on a %s or a %s filesystem", ZFILESYSTEM_TMPFS, ZFILESYSTEM_HUGETLBFS);
return;
}
// Make sure the filesystem type matches requested large page type
if (ZLargePages::is_transparent() && !is_tmpfs()) {
log_error(gc, init)("-XX:+UseTransparentHugePages can only be enable when using a %s filesystem", ZFILESYSTEM_TMPFS);
return;
}
if (ZLargePages::is_transparent() && !tmpfs_supports_transparent_huge_pages()) {
log_error(gc, init)("-XX:+UseTransparentHugePages on a %s filesystem not supported by kernel", ZFILESYSTEM_TMPFS);
return;
}
if (ZLargePages::is_explicit() && !is_hugetlbfs()) {
log_error(gc, init)("-XX:+UseLargePages (without -XX:+UseTransparentHugePages) can only be enabled when using a %s filesystem", ZFILESYSTEM_HUGETLBFS);
return;
}
if (!ZLargePages::is_explicit() && is_hugetlbfs()) {
log_error(gc, init)("-XX:+UseLargePages must be enabled when using a %s filesystem", ZFILESYSTEM_HUGETLBFS);
return;
}
// Successfully initialized
_initialized = true;
}
int ZBackingFile::create_mem_fd(const char* name) const {
// Create file name
char filename[PATH_MAX];
snprintf(filename, sizeof(filename), "%s%s", name, ZLargePages::is_explicit() ? ".hugetlb" : "");
// Create file
const int extra_flags = ZLargePages::is_explicit() ? MFD_HUGETLB : 0;
const int fd = z_memfd_create(filename, MFD_CLOEXEC | extra_flags);
if (fd == -1) {
ZErrno err;
log_debug(gc, init)("Failed to create memfd file (%s)",
((UseLargePages && err == EINVAL) ? "Hugepages not supported" : err.to_string()));
return -1;
}
log_debug(gc, init)("Heap backed by file /memfd:%s", filename);
return fd;
}
int ZBackingFile::create_file_fd(const char* name) const {
const char* const filesystem = ZLargePages::is_explicit() ? ZFILESYSTEM_HUGETLBFS : ZFILESYSTEM_TMPFS;
const char* const mountpoint = ZLargePages::is_explicit() ? ZMOUNTPOINT_HUGETLBFS : ZMOUNTPOINT_TMPFS;
// Find mountpoint
ZBackingPath path(filesystem, mountpoint);
if (path.get() == NULL) {
log_error(gc, init)("Use -XX:ZPath to specify the path to a %s filesystem", filesystem);
return -1;
}
// Try to create an anonymous file using the O_TMPFILE flag. Note that this
// flag requires kernel >= 3.11. If this fails we fall back to open/unlink.
const int fd_anon = open(path.get(), O_TMPFILE|O_EXCL|O_RDWR|O_CLOEXEC, S_IRUSR|S_IWUSR);
if (fd_anon == -1) {
ZErrno err;
log_debug(gc, init)("Failed to create anonymouns file in %s (%s)", path.get(),
(err == EINVAL ? "Not supported" : err.to_string()));
} else {
// Get inode number for anonymous file
struct stat stat_buf;
if (fstat(fd_anon, &stat_buf) == -1) {
ZErrno err;
log_error(gc, init)("Failed to determine inode number for anonymous file (%s)", err.to_string());
return -1;
}
log_debug(gc, init)("Heap backed by file %s/#" UINT64_FORMAT, path.get(), (uint64_t)stat_buf.st_ino);
return fd_anon;
}
log_debug(gc, init)("Falling back to open/unlink");
// Create file name
char filename[PATH_MAX];
snprintf(filename, sizeof(filename), "%s/%s.%d", path.get(), name, os::current_process_id());
// Create file
const int fd = open(filename, O_CREAT|O_EXCL|O_RDWR|O_CLOEXEC, S_IRUSR|S_IWUSR);
if (fd == -1) {
ZErrno err;
log_error(gc, init)("Failed to create file %s (%s)", filename, err.to_string());
return -1;
}
// Unlink file
if (unlink(filename) == -1) {
ZErrno err;
log_error(gc, init)("Failed to unlink file %s (%s)", filename, err.to_string());
return -1;
}
log_debug(gc, init)("Heap backed by file %s", filename);
return fd;
}
int ZBackingFile::create_fd(const char* name) const {
if (ZPath == NULL) {
// If the path is not explicitly specified, then we first try to create a memfd file
// instead of looking for a tmpfd/hugetlbfs mount point. Note that memfd_create() might
// not be supported at all (requires kernel >= 3.17), or it might not support large
// pages (requires kernel >= 4.14). If memfd_create() fails, then we try to create a
// file on an accessible tmpfs or hugetlbfs mount point.
const int fd = create_mem_fd(name);
if (fd != -1) {
return fd;
}
log_debug(gc, init)("Falling back to searching for an accessible moint point");
}
return create_file_fd(name);
}
bool ZBackingFile::is_initialized() const {
return _initialized;
}
int ZBackingFile::fd() const {
return _fd;
}
bool ZBackingFile::is_tmpfs() const {
return _filesystem == TMPFS_MAGIC;
}
bool ZBackingFile::is_hugetlbfs() const {
return _filesystem == HUGETLBFS_MAGIC;
}
bool ZBackingFile::tmpfs_supports_transparent_huge_pages() const {
// If the shmem_enabled file exists and is readable then we
// know the kernel supports transparent huge pages for tmpfs.
return access(ZFILENAME_SHMEM_ENABLED, R_OK) == 0;
}
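// For reference (typical contents, not required by the check above): on
// kernels >= 4.8 the shmem_enabled file usually reads something like
//
//   always within_size advise [never] deny force
//
// Only the file's existence is used as the capability signal here; the
// selected policy is irrelevant because the mapping is madvise()'d with
// MADV_HUGEPAGE explicitly (see advise_view in the physical memory backing).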
bool ZBackingFile::try_split_and_expand_tmpfs(size_t offset, size_t length, size_t alignment) const {
// Try first smaller part.
const size_t offset0 = offset;
const size_t length0 = align_up(length / 2, alignment);
if (!try_expand_tmpfs(offset0, length0, alignment)) {
return false;
}
// Try second smaller part.
const size_t offset1 = offset0 + length0;
const size_t length1 = length - length0;
if (!try_expand_tmpfs(offset1, length1, alignment)) {
return false;
}
return true;
}
bool ZBackingFile::try_expand_tmpfs(size_t offset, size_t length, size_t alignment) const {
assert(length > 0, "Invalid length");
assert(is_aligned(length, alignment), "Invalid length");
ZErrno err = posix_fallocate(_fd, offset, length);
if (err == EINTR && length > alignment) {
// Calling posix_fallocate() with a large length can take a long
// time to complete. When running profilers, such as VTune, this
// syscall will be constantly interrupted by signals. Expanding
// the file in smaller steps avoids this problem.
return try_split_and_expand_tmpfs(offset, length, alignment);
}
if (err) {
log_error(gc)("Failed to allocate backing file (%s)", err.to_string());
return false;
}
return true;
}
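// Worked example (hypothetical interruption pattern): expanding 512M with a
// 2M alignment where the first two posix_fallocate() calls are interrupted
// recurses as
//
//   try_expand_tmpfs(0,    512M)      -> EINTR, split
//     try_expand_tmpfs(0,    256M)    -> EINTR, split
//       try_expand_tmpfs(0,    128M)  -> ok
//       try_expand_tmpfs(128M, 128M)  -> ok
//     try_expand_tmpfs(256M, 256M)    -> ok
//
// Each retry covers a shorter interval, so an individual syscall is less
// likely to be interrupted by a signal before completing.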
bool ZBackingFile::expand_tmpfs(size_t offset, size_t length) const {
assert(is_tmpfs(), "Wrong filesystem");
return try_expand_tmpfs(offset, length, os::vm_page_size());
}
bool ZBackingFile::expand_hugetlbfs(size_t offset, size_t length) const {
assert(is_hugetlbfs(), "Wrong filesystem");
// Prior to kernel 4.3, hugetlbfs did not support posix_fallocate().
// Instead of posix_fallocate() we can use a well-known workaround,
// which involves truncating the file to the requested size and then trying
// to map it to verify that there are enough huge pages available to
// back it.
while (ftruncate(_fd, offset + length) == -1) {
ZErrno err;
if (err != EINTR) {
log_error(gc)("Failed to truncate backing file (%s)", err.to_string());
return false;
}
}
// If we fail mapping during initialization, i.e. when we are pre-mapping
// the heap, then we wait and retry a few times before giving up. Otherwise
// there is a risk that running JVMs back-to-back will fail, since there
// is a delay between process termination and the huge pages owned by that
// process being returned to the huge page pool and made available for new
// allocations.
void* addr = MAP_FAILED;
const int max_attempts = 3;
for (int attempt = 1; attempt <= max_attempts; attempt++) {
addr = mmap(0, length, PROT_READ|PROT_WRITE, MAP_SHARED, _fd, offset);
if (addr != MAP_FAILED || is_init_completed()) {
// Mapping was successful or initialization phase has completed
break;
}
ZErrno err;
log_debug(gc)("Failed to map backing file (%s), attempt %d of %d",
err.to_string(), attempt, max_attempts);
// Wait and retry in one second, in the hope that
// huge pages will be available by then.
sleep(1);
}
if (addr == MAP_FAILED) {
// Not enough huge pages left
ZErrno err;
log_error(gc)("Failed to map backing file (%s)", err.to_string());
return false;
}
// Successful mapping, unmap again. From now on the pages we mapped
// will be reserved for this file.
if (munmap(addr, length) == -1) {
ZErrno err;
log_error(gc)("Failed to unmap backing file (%s)", err.to_string());
return false;
}
return true;
}
bool ZBackingFile::expand(size_t offset, size_t length) const {
return is_hugetlbfs() ? expand_hugetlbfs(offset, length) : expand_tmpfs(offset, length);
}

@ -0,0 +1,58 @@
/*
* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef OS_CPU_LINUX_X86_ZBACKINGFILE_LINUX_X86_HPP
#define OS_CPU_LINUX_X86_ZBACKINGFILE_LINUX_X86_HPP
#include "memory/allocation.hpp"
class ZBackingFile {
private:
int _fd;
uint64_t _filesystem;
bool _initialized;
int create_mem_fd(const char* name) const;
int create_file_fd(const char* name) const;
int create_fd(const char* name) const;
bool is_tmpfs() const;
bool is_hugetlbfs() const;
bool tmpfs_supports_transparent_huge_pages() const;
bool try_split_and_expand_tmpfs(size_t offset, size_t length, size_t alignment) const;
bool try_expand_tmpfs(size_t offset, size_t length, size_t alignment) const;
bool expand_tmpfs(size_t offset, size_t length) const;
bool expand_hugetlbfs(size_t offset, size_t length) const;
public:
ZBackingFile();
bool is_initialized() const;
int fd() const;
bool expand(size_t offset, size_t length) const;
};
#endif // OS_CPU_LINUX_X86_ZBACKINGFILE_LINUX_X86_HPP

@ -0,0 +1,141 @@
/*
* Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/z/zArray.inline.hpp"
#include "gc/z/zBackingPath_linux_x86.hpp"
#include "gc/z/zErrno.hpp"
#include "logging/log.hpp"
#include <stdio.h>
#include <unistd.h>
// Mount information, see proc(5) for more details.
#define PROC_SELF_MOUNTINFO "/proc/self/mountinfo"
ZBackingPath::ZBackingPath(const char* filesystem, const char* preferred_path) {
if (ZPath != NULL) {
// Use specified path
_path = strdup(ZPath);
} else {
// Find suitable path
_path = find_mountpoint(filesystem, preferred_path);
}
}
ZBackingPath::~ZBackingPath() {
free(_path);
_path = NULL;
}
char* ZBackingPath::get_mountpoint(const char* line, const char* filesystem) const {
char* line_mountpoint = NULL;
char* line_filesystem = NULL;
// Parse line and return a newly allocated string containing the mountpoint if
// the line contains a matching filesystem and the mountpoint is accessible by
// the current user.
if (sscanf(line, "%*u %*u %*u:%*u %*s %ms %*[^-]- %ms", &line_mountpoint, &line_filesystem) != 2 ||
strcmp(line_filesystem, filesystem) != 0 ||
access(line_mountpoint, R_OK|W_OK|X_OK) != 0) {
// Not a matching or accessible filesystem
free(line_mountpoint);
line_mountpoint = NULL;
}
free(line_filesystem);
return line_mountpoint;
}
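// Worked example (hypothetical input line): given a proc(5) mountinfo entry
//
//   24 0 0:20 / /dev/shm rw,nosuid,nodev shared:18 - tmpfs tmpfs rw
//
// the format string above skips the mount ID, parent ID, major:minor pair
// and root, captures "/dev/shm" with the first %ms, consumes everything up
// to the "-" separator with %*[^-], and captures the filesystem type "tmpfs"
// with the second %ms. %m is the POSIX.1-2008 allocation modifier, which is
// why both matches must eventually be passed to free().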
void ZBackingPath::get_mountpoints(ZArray<char*>* mountpoints, const char* filesystem) const {
FILE* fd = fopen(PROC_SELF_MOUNTINFO, "r");
if (fd == NULL) {
ZErrno err;
log_error(gc, init)("Failed to open %s: %s", PROC_SELF_MOUNTINFO, err.to_string());
return;
}
char* line = NULL;
size_t length = 0;
while (getline(&line, &length, fd) != -1) {
char* const mountpoint = get_mountpoint(line, filesystem);
if (mountpoint != NULL) {
mountpoints->add(mountpoint);
}
}
free(line);
fclose(fd);
}
void ZBackingPath::free_mountpoints(ZArray<char*>* mountpoints) const {
ZArrayIterator<char*> iter(mountpoints);
for (char* mountpoint; iter.next(&mountpoint);) {
free(mountpoint);
}
mountpoints->clear();
}
char* ZBackingPath::find_mountpoint(const char* filesystem, const char* preferred_mountpoint) const {
char* path = NULL;
ZArray<char*> mountpoints;
get_mountpoints(&mountpoints, filesystem);
if (mountpoints.size() == 0) {
// No filesystem found
log_error(gc, init)("Failed to find an accessible %s filesystem", filesystem);
} else if (mountpoints.size() == 1) {
// One filesystem found
path = strdup(mountpoints.at(0));
} else if (mountpoints.size() > 1) {
// More than one filesystem found
ZArrayIterator<char*> iter(&mountpoints);
for (char* mountpoint; iter.next(&mountpoint);) {
if (!strcmp(mountpoint, preferred_mountpoint)) {
// Preferred mount point found
path = strdup(mountpoint);
break;
}
}
if (path == NULL) {
// Preferred mount point not found
log_error(gc, init)("More than one %s filesystem found:", filesystem);
ZArrayIterator<char*> iter2(&mountpoints);
for (char* mountpoint; iter2.next(&mountpoint);) {
log_error(gc, init)(" %s", mountpoint);
}
}
}
free_mountpoints(&mountpoints);
return path;
}
const char* ZBackingPath::get() const {
return _path;
}

@ -0,0 +1,46 @@
/*
* Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef OS_CPU_LINUX_X86_ZBACKINGPATH_LINUX_X86_HPP
#define OS_CPU_LINUX_X86_ZBACKINGPATH_LINUX_X86_HPP
#include "gc/z/zArray.hpp"
#include "memory/allocation.hpp"
class ZBackingPath : public StackObj {
private:
char* _path;
char* get_mountpoint(const char* line, const char* filesystem) const;
void get_mountpoints(ZArray<char*>* mountpoints, const char* filesystem) const;
void free_mountpoints(ZArray<char*>* mountpoints) const;
char* find_mountpoint(const char* filesystem, const char* preferred_mountpoint) const;
public:
ZBackingPath(const char* filesystem, const char* preferred_path);
~ZBackingPath();
const char* get() const;
};
#endif // OS_CPU_LINUX_X86_ZBACKINGPATH_LINUX_X86_HPP

@ -0,0 +1,33 @@
/*
* Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/z/zGlobals.hpp"
uintptr_t ZAddressReservedStart() {
return ZAddressMetadataMarked0;
}
uintptr_t ZAddressReservedEnd() {
return ZAddressMetadataRemapped + ZAddressOffsetMax;
}

@ -0,0 +1,88 @@
/*
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef OS_CPU_LINUX_X86_ZGLOBALS_LINUX_X86_HPP
#define OS_CPU_LINUX_X86_ZGLOBALS_LINUX_X86_HPP
//
// Page Allocation Tiers
// ---------------------
//
// Page Type Page Size Object Size Limit Object Alignment
// ------------------------------------------------------------------
// Small 2M <= 256K <MinObjAlignmentInBytes>
// Medium 32M <= 4M 4K
// Large X*M > 4M 2M
// ------------------------------------------------------------------
//
//
// Address Space & Pointer Layout
// ------------------------------
//
// +--------------------------------+ 0x00007FFFFFFFFFFF (127TB)
// . .
// . .
// . .
// +--------------------------------+ 0x0000140000000000 (20TB)
// | Remapped View |
// +--------------------------------+ 0x0000100000000000 (16TB)
// | (Reserved, but unused) |
// +--------------------------------+ 0x00000c0000000000 (12TB)
// | Marked1 View |
// +--------------------------------+ 0x0000080000000000 (8TB)
// | Marked0 View |
// +--------------------------------+ 0x0000040000000000 (4TB)
// . .
// +--------------------------------+ 0x0000000000000000
//
//
// 6 4 4 4 4 4 0
// 3 7 6 5 2 1 0
// +-------------------+-+----+-----------------------------------------------+
// |00000000 00000000 0|0|1111|11 11111111 11111111 11111111 11111111 11111111|
// +-------------------+-+----+-----------------------------------------------+
// | | | |
// | | | * 41-0 Object Offset (42-bits, 4TB address space)
// | | |
// | | * 45-42 Metadata Bits (4-bits) 0001 = Marked0 (Address view 4-8TB)
// | | 0010 = Marked1 (Address view 8-12TB)
// | | 0100 = Remapped (Address view 16-20TB)
// | | 1000 = Finalizable (Address view N/A)
// | |
// | * 46-46 Unused (1-bit, always zero)
// |
// * 63-47 Fixed (17-bits, always zero)
//
const size_t ZPlatformPageSizeSmallShift = 21; // 2M
const size_t ZPlatformAddressOffsetBits = 42; // 4TB
const uintptr_t ZPlatformAddressMetadataShift = ZPlatformAddressOffsetBits;
const uintptr_t ZPlatformAddressSpaceStart = (uintptr_t)1 << ZPlatformAddressOffsetBits;
const uintptr_t ZPlatformAddressSpaceSize = ((uintptr_t)1 << ZPlatformAddressOffsetBits) * 4;
const size_t ZPlatformCacheLineSize = 64;
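// Worked example (values follow from the diagram above): for heap offset 0x80
// the three views differ only in the metadata nibble at bits 45-42:
//
//   marked0  = (0x1ull << 42) | 0x80 = 0x0000040000000080  //  4TB view
//   marked1  = (0x2ull << 42) | 0x80 = 0x0000080000000080  //  8TB view
//   remapped = (0x4ull << 42) | 0x80 = 0x0000100000000080  // 16TB view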
#endif // OS_CPU_LINUX_X86_ZGLOBALS_LINUX_X86_HPP

@ -0,0 +1,38 @@
/*
* Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/z/zLargePages.hpp"
#include "runtime/globals.hpp"
void ZLargePages::initialize_platform() {
if (UseLargePages) {
if (UseTransparentHugePages) {
_state = Transparent;
} else {
_state = Explicit;
}
} else {
_state = Disabled;
}
}

@ -0,0 +1,83 @@
/*
* Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "gc/z/zErrno.hpp"
#include "gc/z/zCPU.hpp"
#include "gc/z/zNUMA.hpp"
#include "runtime/os.hpp"
#include "utilities/debug.hpp"
#include <unistd.h>
#include <sys/syscall.h>
#ifndef MPOL_F_NODE
#define MPOL_F_NODE (1<<0) /* return next IL mode instead of node mask */
#endif
#ifndef MPOL_F_ADDR
#define MPOL_F_ADDR (1<<1) /* look up vma using address */
#endif
static int z_get_mempolicy(uint32_t* mode, const unsigned long *nmask, unsigned long maxnode, uintptr_t addr, int flags) {
return syscall(__NR_get_mempolicy, mode, nmask, maxnode, addr, flags);
}
void ZNUMA::initialize_platform() {
_enabled = UseNUMA;
}
uint32_t ZNUMA::count() {
if (!_enabled) {
// NUMA support not enabled
return 1;
}
return os::Linux::numa_max_node() + 1;
}
uint32_t ZNUMA::id() {
if (!_enabled) {
// NUMA support not enabled
return 0;
}
return os::Linux::get_node_by_cpu(ZCPU::id());
}
uint32_t ZNUMA::memory_id(uintptr_t addr) {
if (!_enabled) {
// NUMA support not enabled, assume everything belongs to node zero
return 0;
}
uint32_t id = (uint32_t)-1;
if (z_get_mempolicy(&id, NULL, 0, addr, MPOL_F_NODE | MPOL_F_ADDR) == -1) {
ZErrno err;
fatal("Failed to get NUMA id for memory at " PTR_FORMAT " (%s)", addr, err.to_string());
}
assert(id < count(), "Invalid NUMA id");
return id;
}

@ -0,0 +1,237 @@
/*
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zBackingFile_linux_x86.hpp"
#include "gc/z/zErrno.hpp"
#include "gc/z/zLargePages.inline.hpp"
#include "gc/z/zMemory.hpp"
#include "gc/z/zNUMA.hpp"
#include "gc/z/zPhysicalMemory.inline.hpp"
#include "gc/z/zPhysicalMemoryBacking_linux_x86.hpp"
#include "logging/log.hpp"
#include "runtime/os.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include <stdio.h>
#include <sys/mman.h>
#include <sys/types.h>
// Support for building on older Linux systems
#ifndef MADV_HUGEPAGE
#define MADV_HUGEPAGE 14
#endif
// Proc file entry for max map mount
#define ZFILENAME_PROC_MAX_MAP_COUNT "/proc/sys/vm/max_map_count"
ZPhysicalMemoryBacking::ZPhysicalMemoryBacking(size_t max_capacity, size_t granule_size) :
_manager(),
_file(),
_granule_size(granule_size) {
// Check and warn if max map count seems too low
check_max_map_count(max_capacity, granule_size);
}
void ZPhysicalMemoryBacking::check_max_map_count(size_t max_capacity, size_t granule_size) const {
const char* const filename = ZFILENAME_PROC_MAX_MAP_COUNT;
FILE* const file = fopen(filename, "r");
if (file == NULL) {
// Failed to open file, skip check
log_debug(gc)("Failed to open %s", filename);
return;
}
size_t actual_max_map_count = 0;
const int result = fscanf(file, SIZE_FORMAT, &actual_max_map_count);
fclose(file);
if (result != 1) {
// Failed to read file, skip check
log_debug(gc)("Failed to read %s", filename);
return;
}
// The required max map count is impossible to calculate exactly since subsystems
// other than ZGC are also creating memory mappings, and we have no control over that.
// However, ZGC tends to create the most mappings and dominate the total count.
// In the worst cases, ZGC will map each granule three times, i.e. once per heap view.
// We speculate that we need another 20% to allow for non-ZGC subsystems to map memory.
const size_t required_max_map_count = (max_capacity / granule_size) * 3 * 1.2;
if (actual_max_map_count < required_max_map_count) {
log_warning(gc)("The system limit on number of memory mappings "
"per process might be too low for the given");
log_warning(gc)("Java heap size (" SIZE_FORMAT "M). Please "
"adjust %s to allow for at least", max_capacity / M, filename);
log_warning(gc)(SIZE_FORMAT " mappings (current limit is " SIZE_FORMAT "). "
"Continuing execution with the current limit could",
required_max_map_count, actual_max_map_count);
log_warning(gc)("lead to a fatal error down the line, due to failed "
"attempts to map memory.");
}
}
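// Worked example (hypothetical sizing, assuming 2M granules): a 16G max heap
// gives (16G / 2M) * 3 * 1.2 = 8192 * 3.6 = 29491 required mappings, well
// under the common vm.max_map_count default of 65530; a 64G heap needs
// 117964 and would trigger the warning above.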
bool ZPhysicalMemoryBacking::is_initialized() const {
return _file.is_initialized();
}
bool ZPhysicalMemoryBacking::expand(size_t from, size_t to) {
const size_t size = to - from;
// Expand
if (!_file.expand(from, size)) {
return false;
}
// Add expanded space to free list
_manager.free(from, size);
return true;
}
ZPhysicalMemory ZPhysicalMemoryBacking::alloc(size_t size) {
assert(is_aligned(size, _granule_size), "Invalid size");
ZPhysicalMemory pmem;
// Allocate segments
for (size_t allocated = 0; allocated < size; allocated += _granule_size) {
const uintptr_t start = _manager.alloc_from_front(_granule_size);
assert(start != UINTPTR_MAX, "Allocation should never fail");
pmem.add_segment(ZPhysicalMemorySegment(start, _granule_size));
}
return pmem;
}
void ZPhysicalMemoryBacking::free(ZPhysicalMemory pmem) {
const size_t nsegments = pmem.nsegments();
// Free segments
for (size_t i = 0; i < nsegments; i++) {
const ZPhysicalMemorySegment segment = pmem.segment(i);
_manager.free(segment.start(), segment.size());
}
}
void ZPhysicalMemoryBacking::map_failed(ZErrno err) const {
if (err == ENOMEM) {
fatal("Failed to map memory. Please check the system limit on number of "
"memory mappings allowed per process (see %s)", ZFILENAME_PROC_MAX_MAP_COUNT);
} else {
fatal("Failed to map memory (%s)", err.to_string());
}
}
void ZPhysicalMemoryBacking::advise_view(uintptr_t addr, size_t size) const {
if (madvise((void*)addr, size, MADV_HUGEPAGE) == -1) {
ZErrno err;
log_error(gc)("Failed to advise use of transparent huge pages (%s)", err.to_string());
}
}
void ZPhysicalMemoryBacking::pretouch_view(uintptr_t addr, size_t size) const {
const size_t page_size = ZLargePages::is_explicit() ? os::large_page_size() : os::vm_page_size();
os::pretouch_memory((void*)addr, (void*)(addr + size), page_size);
}
void ZPhysicalMemoryBacking::map_view(ZPhysicalMemory pmem, uintptr_t addr, bool pretouch) const {
const size_t nsegments = pmem.nsegments();
// Map segments
for (size_t i = 0; i < nsegments; i++) {
const ZPhysicalMemorySegment segment = pmem.segment(i);
const size_t size = segment.size();
const void* const res = mmap((void*)addr, size, PROT_READ|PROT_WRITE, MAP_FIXED|MAP_SHARED, _file.fd(), segment.start());
if (res == MAP_FAILED) {
ZErrno err;
map_failed(err);
}
// Advise on use of transparent huge pages before touching it
if (ZLargePages::is_transparent()) {
advise_view(addr, size);
}
// NUMA interleave memory before touching it
ZNUMA::memory_interleave(addr, size);
if (pretouch) {
pretouch_view(addr, size);
}
addr += size;
}
}
void ZPhysicalMemoryBacking::unmap_view(ZPhysicalMemory pmem, uintptr_t addr) const {
// Note that we must keep the address space reservation intact and just detach
// the backing memory. For this reason we map a new anonymous, non-accessible
// and non-reserved page over the mapping instead of actually unmapping.
const size_t size = pmem.size();
const void* const res = mmap((void*)addr, size, PROT_NONE, MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);
if (res == MAP_FAILED) {
ZErrno err;
map_failed(err);
}
}
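// Sketch of the idiom above in isolation (assumes <sys/mman.h>; error
// handling elided):
//
//   void* r = mmap(NULL, sz, PROT_NONE,
//                  MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);   // reserve
//   mmap(r, sz, PROT_READ|PROT_WRITE, MAP_FIXED|MAP_SHARED, fd, 0);   // attach
//   mmap(r, sz, PROT_NONE,
//        MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);   // detach
//
// The MAP_FIXED overmap atomically replaces the file-backed pages while the
// virtual address range itself stays reserved for later re-use.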
uintptr_t ZPhysicalMemoryBacking::nmt_address(uintptr_t offset) const {
// From an NMT point of view we treat the first heap mapping (marked0) as committed
return ZAddress::marked0(offset);
}
void ZPhysicalMemoryBacking::map(ZPhysicalMemory pmem, uintptr_t offset) const {
if (ZUnmapBadViews) {
// Only map the good view, for debugging only
map_view(pmem, ZAddress::good(offset), AlwaysPreTouch);
} else {
// Map all views
map_view(pmem, ZAddress::marked0(offset), AlwaysPreTouch);
map_view(pmem, ZAddress::marked1(offset), AlwaysPreTouch);
map_view(pmem, ZAddress::remapped(offset), AlwaysPreTouch);
}
}
void ZPhysicalMemoryBacking::unmap(ZPhysicalMemory pmem, uintptr_t offset) const {
if (ZUnmapBadViews) {
// Only unmap the good view, for debugging only
unmap_view(pmem, ZAddress::good(offset));
} else {
// Unmap all views
unmap_view(pmem, ZAddress::marked0(offset));
unmap_view(pmem, ZAddress::marked1(offset));
unmap_view(pmem, ZAddress::remapped(offset));
}
}
void ZPhysicalMemoryBacking::flip(ZPhysicalMemory pmem, uintptr_t offset) const {
assert(ZUnmapBadViews, "Should be enabled");
const uintptr_t addr_good = ZAddress::good(offset);
const uintptr_t addr_bad = ZAddress::is_marked(ZAddressGoodMask) ? ZAddress::remapped(offset) : ZAddress::marked(offset);
// Map/Unmap views
map_view(pmem, addr_good, false /* pretouch */);
unmap_view(pmem, addr_bad);
}

@ -0,0 +1,63 @@
/*
* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef OS_CPU_LINUX_X86_ZPHYSICALMEMORYBACKING_LINUX_X86_HPP
#define OS_CPU_LINUX_X86_ZPHYSICALMEMORYBACKING_LINUX_X86_HPP
#include "gc/z/zBackingFile_linux_x86.hpp"
#include "gc/z/zMemory.hpp"
class ZErrno;
class ZPhysicalMemory;
class ZPhysicalMemoryBacking {
private:
ZMemoryManager _manager;
ZBackingFile _file;
const size_t _granule_size;
void check_max_map_count(size_t max_capacity, size_t granule_size) const;
void map_failed(ZErrno err) const;
void advise_view(uintptr_t addr, size_t size) const;
void pretouch_view(uintptr_t addr, size_t size) const;
void map_view(ZPhysicalMemory pmem, uintptr_t addr, bool pretouch) const;
void unmap_view(ZPhysicalMemory pmem, uintptr_t addr) const;
public:
ZPhysicalMemoryBacking(size_t max_capacity, size_t granule_size);
bool is_initialized() const;
bool expand(size_t from, size_t to);
ZPhysicalMemory alloc(size_t size);
void free(ZPhysicalMemory pmem);
uintptr_t nmt_address(uintptr_t offset) const;
void map(ZPhysicalMemory pmem, uintptr_t offset) const;
void unmap(ZPhysicalMemory pmem, uintptr_t offset) const;
void flip(ZPhysicalMemory pmem, uintptr_t offset) const;
};
#endif // OS_CPU_LINUX_X86_ZPHYSICALMEMORYBACKING_LINUX_X86_HPP

@ -0,0 +1,41 @@
/*
* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/z/zVirtualMemory.hpp"
#include "logging/log.hpp"
#include <sys/mman.h>
#include <sys/types.h>
bool ZVirtualMemoryManager::reserve(uintptr_t start, size_t size) {
// Reserve address space
const uintptr_t actual_start = (uintptr_t)mmap((void*)start, size, PROT_NONE,
MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);
if (actual_start != start) {
log_error(gc)("Failed to reserve address space for Java heap");
return false;
}
return true;
}

@ -2282,6 +2282,9 @@ bool OperandForm::is_bound_register() const {
if (strcmp(name, "RegD") == 0) size = 2;
if (strcmp(name, "RegL") == 0) size = 2;
if (strcmp(name, "RegN") == 0) size = 1;
if (strcmp(name, "VecX") == 0) size = 4;
if (strcmp(name, "VecY") == 0) size = 8;
if (strcmp(name, "VecZ") == 0) size = 16;
if (strcmp(name, "RegP") == 0) size = globalAD->get_preproc_def("_LP64") ? 2 : 1;
if (size == 0) {
return false;
@ -3509,6 +3512,7 @@ int MatchNode::needs_ideal_memory_edge(FormDict &globals) const {
"ClearArray",
"GetAndSetB", "GetAndSetS", "GetAndAddI", "GetAndSetI", "GetAndSetP",
"GetAndAddB", "GetAndAddS", "GetAndAddL", "GetAndSetL", "GetAndSetN",
"LoadBarrierSlowReg", "LoadBarrierWeakSlowReg"
};
int cnt = sizeof(needs_ideal_memory_list)/sizeof(char*);
if( strcmp(_opType,"PrefetchAllocation")==0 )

@ -337,6 +337,7 @@ public:
class CodeBuffer: public StackObj {
friend class CodeSection;
friend class StubCodeGenerator;
private:
// CodeBuffers must be allocated on the stack except for a single

@ -756,6 +756,9 @@ bool vmIntrinsics::is_disabled_by_flags(vmIntrinsics::ID id) {
#endif // COMPILER1
#ifdef COMPILER2
case vmIntrinsics::_clone:
#if INCLUDE_ZGC
if (UseZGC) return true;
#endif
case vmIntrinsics::_copyOf:
case vmIntrinsics::_copyOfRange:
// These intrinsics use both the objectcopy and the arraycopy

@ -1609,6 +1609,7 @@ void CodeCache::print() {
}
void CodeCache::print_summary(outputStream* st, bool detailed) {
int full_count = 0;
FOR_ALL_HEAPS(heap_iterator) {
CodeHeap* heap = (*heap_iterator);
size_t total = (heap->high_boundary() - heap->low_boundary());
@ -1627,6 +1628,8 @@ void CodeCache::print_summary(outputStream* st, bool detailed) {
p2i(heap->low_boundary()),
p2i(heap->high()),
p2i(heap->high_boundary()));
full_count += get_codemem_full_count(heap->code_blob_type());
}
}
@ -1638,6 +1641,10 @@ void CodeCache::print_summary(outputStream* st, bool detailed) {
"enabled" : Arguments::mode() == Arguments::_int ?
"disabled (interpreter mode)" :
"disabled (not enough contiguous free space left)");
st->print_cr(" stopped_count=%d, restarted_count=%d",
CompileBroker::get_total_compiler_stopped_count(),
CompileBroker::get_total_compiler_restarted_count());
st->print_cr(" full_count=%d", full_count);
}
}

@ -170,21 +170,23 @@ elapsedTimer CompileBroker::_t_standard_compilation;
elapsedTimer CompileBroker::_t_invalidated_compilation;
elapsedTimer CompileBroker::_t_bailedout_compilation;
int CompileBroker::_total_bailout_count = 0;
int CompileBroker::_total_invalidated_count = 0;
int CompileBroker::_total_compile_count = 0;
int CompileBroker::_total_osr_compile_count = 0;
int CompileBroker::_total_standard_compile_count = 0;
int CompileBroker::_total_compiler_stopped_count = 0;
int CompileBroker::_total_compiler_restarted_count = 0;
int CompileBroker::_sum_osr_bytes_compiled = 0;
int CompileBroker::_sum_standard_bytes_compiled = 0;
int CompileBroker::_sum_nmethod_size = 0;
int CompileBroker::_sum_nmethod_code_size = 0;
long CompileBroker::_peak_compilation_time = 0;
long CompileBroker::_peak_compilation_time = 0;
CompileQueue* CompileBroker::_c2_compile_queue = NULL;
CompileQueue* CompileBroker::_c1_compile_queue = NULL;
CompileQueue* CompileBroker::_c2_compile_queue = NULL;
CompileQueue* CompileBroker::_c1_compile_queue = NULL;

@ -219,6 +219,8 @@ class CompileBroker: AllStatic {
static int _total_native_compile_count;
static int _total_osr_compile_count;
static int _total_standard_compile_count;
static int _total_compiler_stopped_count;
static int _total_compiler_restarted_count;
static int _sum_osr_bytes_compiled;
static int _sum_standard_bytes_compiled;
static int _sum_nmethod_size;
@ -338,7 +340,15 @@ public:
static bool set_should_compile_new_jobs(jint new_state) {
// Return success if the current caller set it
jint old = Atomic::cmpxchg(new_state, &_should_compile_new_jobs, 1-new_state);
bool success = (old == (1-new_state));
if (success) {
if (new_state == run_compilation) {
_total_compiler_restarted_count++;
} else {
_total_compiler_stopped_count++;
}
}
return success;
}
static void disable_compilation_forever() {
@ -393,18 +403,20 @@ public:
static CompileLog* get_log(CompilerThread* ct);
static int get_total_compile_count() { return _total_compile_count; }
static int get_total_bailout_count() { return _total_bailout_count; }
static int get_total_invalidated_count() { return _total_invalidated_count; }
static int get_total_native_compile_count() { return _total_native_compile_count; }
static int get_total_osr_compile_count() { return _total_osr_compile_count; }
static int get_total_standard_compile_count() { return _total_standard_compile_count; }
static int get_total_compiler_stopped_count() { return _total_compiler_stopped_count; }
static int get_total_compiler_restarted_count() { return _total_compiler_restarted_count; }
static int get_sum_osr_bytes_compiled() { return _sum_osr_bytes_compiled; }
static int get_sum_standard_bytes_compiled() { return _sum_standard_bytes_compiled; }
static int get_sum_nmethod_size() { return _sum_nmethod_size; }
static int get_sum_nmethod_code_size() { return _sum_nmethod_code_size; }
static long get_peak_compilation_time() { return _peak_compilation_time; }
static long get_total_compilation_time() { return _t_total_compilation.milliseconds(); }
// Log that compilation profiling is skipped because metaspace is full.
static void log_metaspace_failure();
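
The new stopped/restarted counters hang off the compare-and-swap in set_should_compile_new_jobs() above: only the caller whose CAS succeeds bumps a counter, so each stop or restart transition is counted exactly once even under contention. A hedged sketch of the same pattern, using std::atomic in place of HotSpot's Atomic::cmpxchg:

#include <atomic>

static std::atomic<int> g_state{1};            // 1 = accepting jobs, 0 = stopped
static std::atomic<int> g_stopped_count{0};
static std::atomic<int> g_restarted_count{0};

bool set_state(int new_state) {
  int expected = 1 - new_state;
  bool success = g_state.compare_exchange_strong(expected, new_state);
  if (success) {
    // Only the winning caller counts the transition.
    if (new_state == 1) {
      g_restarted_count.fetch_add(1);
    } else {
      g_stopped_count.fetch_add(1);
    }
  }
  return success;
}

int main() {
  return (set_state(0) && !set_state(0) && set_state(1)) ? 0 : 1;
}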

@ -66,7 +66,8 @@ NOT_PRODUCT(cflags(TraceOptoOutput, bool, TraceOptoOutput, TraceOptoOutput))
cflags(VectorizeDebug, uintx, 0, VectorizeDebug) \
cflags(CloneMapDebug, bool, false, CloneMapDebug) \
cflags(IGVPrintLevel, intx, PrintIdealGraphLevel, IGVPrintLevel) \
cflags(MaxNodeLimit, intx, MaxNodeLimit, MaxNodeLimit) \
ZGC_ONLY(cflags(ZOptimizeLoadBarriers, bool, ZOptimizeLoadBarriers, ZOptimizeLoadBarriers))
#else
#define compilerdirectives_c2_flags(cflags)
#endif

@ -155,6 +155,7 @@ class decode_env {
CodeStrings _strings;
outputStream* _output;
address _start, _end;
ptrdiff_t _offset;
char _option_buf[512];
char _print_raw;
@ -191,7 +192,8 @@ class decode_env {
void print_address(address value);
public:
decode_env(CodeBlob* code, outputStream* output,
CodeStrings c = CodeStrings(), ptrdiff_t offset = 0);
address decode_instructions(address start, address end);
@ -221,13 +223,15 @@ class decode_env {
const char* options() { return _option_buf; }
};
decode_env::decode_env(CodeBlob* code, outputStream* output, CodeStrings c,
ptrdiff_t offset) {
memset(this, 0, sizeof(*this)); // Beware, this zeroes bits of fields.
_output = output ? output : tty;
_code = code;
if (code != NULL && code->is_nmethod())
_nm = (nmethod*) code;
_strings.copy(c);
_offset = offset;
// by default, output pc but not bytes:
_print_pc = true;
@ -354,7 +358,7 @@ void decode_env::print_insn_labels() {
if (cb != NULL) {
cb->print_block_comment(st, p);
}
_strings.print_block_comment(st, (intptr_t)(p - _start + _offset));
if (_print_pc) {
st->print(" " PTR_FORMAT ": ", p2i(p));
}
@ -507,10 +511,11 @@ void Disassembler::decode(CodeBlob* cb, outputStream* st) {
env.decode_instructions(cb->code_begin(), cb->code_end());
}
void Disassembler::decode(address start, address end, outputStream* st, CodeStrings c,
ptrdiff_t offset) {
ttyLocker ttyl;
if (!load_library()) return;
decode_env env(CodeCache::find_blob_unsafe(start), st, c, offset);
env.decode_instructions(start, end);
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -75,7 +75,8 @@ class Disassembler {
}
static void decode(CodeBlob *cb, outputStream* st = NULL);
static void decode(nmethod* nm, outputStream* st = NULL);
static void decode(address begin, address end, outputStream* st = NULL,
CodeStrings c = CodeStrings(), ptrdiff_t offset = 0);
};
#endif // SHARE_VM_COMPILER_DISASSEMBLER_HPP

@ -380,8 +380,12 @@ void OopMapSet::all_do(const frame *fr, const RegisterMap *reg_map,
continue;
}
#ifdef ASSERT
// We cannot verify the oop here when using ZGC: the oop may be bad if a
// safepoint occurred between a load and its load barrier.
if (!UseZGC &&
((((uintptr_t)loc & (sizeof(*loc)-1)) != 0) ||
!Universe::heap()->is_in_or_null(*loc))) {
tty->print_cr("# Found non oop pointer. Dumping state at failure");
// try to dump out some helpful debugging information
trace_codeblob_maps(fr, reg_map);
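
The alignment test above, (uintptr_t)loc & (sizeof(*loc)-1), is the standard power-of-two check: a pointer is sizeof(*loc)-aligned exactly when its low bits vanish under the (sizeof(*loc) - 1) mask. A small self-contained illustration (mine, not from the patch):

#include <cassert>
#include <cstdint>

template <typename T>
bool is_aligned_ptr(const T* p) {
  static_assert((sizeof(T) & (sizeof(T) - 1)) == 0, "size must be a power of two");
  return (reinterpret_cast<std::uintptr_t>(p) & (sizeof(T) - 1)) == 0;
}

int main() {
  alignas(8) char buf[16] = {};
  assert(is_aligned_ptr(reinterpret_cast<const std::uint64_t*>(buf)));
  assert(!is_aligned_ptr(reinterpret_cast<const std::uint64_t*>(buf + 1)));
  return 0;
}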

@ -0,0 +1,72 @@
/*
* Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc/epsilon/epsilonArguments.hpp"
#include "gc/epsilon/epsilonHeap.hpp"
#include "gc/epsilon/epsilonCollectorPolicy.hpp"
#include "gc/shared/gcArguments.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/macros.hpp"
size_t EpsilonArguments::conservative_max_heap_alignment() {
return UseLargePages ? os::large_page_size() : os::vm_page_size();
}
void EpsilonArguments::initialize() {
GCArguments::initialize();
assert(UseEpsilonGC, "Sanity");
// Forcefully exit when OOME is detected. Nothing we can do at that point.
if (FLAG_IS_DEFAULT(ExitOnOutOfMemoryError)) {
FLAG_SET_DEFAULT(ExitOnOutOfMemoryError, true);
}
if (EpsilonMaxTLABSize < MinTLABSize) {
warning("EpsilonMaxTLABSize < MinTLABSize, adjusting it to " SIZE_FORMAT, MinTLABSize);
EpsilonMaxTLABSize = MinTLABSize;
}
if (!EpsilonElasticTLAB && EpsilonElasticTLABDecay) {
warning("Disabling EpsilonElasticTLABDecay because EpsilonElasticTLAB is disabled");
FLAG_SET_DEFAULT(EpsilonElasticTLABDecay, false);
}
#ifdef COMPILER2
// Enable loop strip mining: there are still non-GC safepoints, no need to make it worse
if (FLAG_IS_DEFAULT(UseCountedLoopSafepoints)) {
FLAG_SET_DEFAULT(UseCountedLoopSafepoints, true);
if (FLAG_IS_DEFAULT(LoopStripMiningIter)) {
FLAG_SET_DEFAULT(LoopStripMiningIter, 1000);
}
}
#endif
}
CollectedHeap* EpsilonArguments::create_heap() {
return create_heap_with_policy<EpsilonHeap, EpsilonCollectorPolicy>();
}
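
The FLAG_IS_DEFAULT/FLAG_SET_DEFAULT dance in initialize() above is HotSpot's ergonomics idiom: a flag is only adjusted when the user has not set it explicitly, so command-line choices always win. A toy model of the idiom with stand-in types (the real flags machinery is far richer):

struct Flag {
  bool value = false;
  bool is_default = true;            // false once set on the command line
  void set_ergonomic(bool v) { if (is_default) value = v; }
  void set_cmdline(bool v)   { value = v; is_default = false; }
};

int main() {
  Flag exit_on_oome;
  exit_on_oome.set_ergonomic(true);  // flips: the user never touched it
  Flag user_flag;
  user_flag.set_cmdline(false);
  user_flag.set_ergonomic(true);     // no effect: the user's choice wins
  return (exit_on_oome.value && !user_flag.value) ? 0 : 1;
}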

@ -0,0 +1,39 @@
/*
* Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_GC_EPSILON_EPSILONARGUMENTS_HPP
#define SHARE_GC_EPSILON_EPSILONARGUMENTS_HPP
#include "gc/shared/gcArguments.hpp"
class CollectedHeap;
class EpsilonArguments : public GCArguments {
public:
virtual void initialize();
virtual size_t conservative_max_heap_alignment();
virtual CollectedHeap* create_heap();
};
#endif // SHARE_GC_EPSILON_EPSILONARGUMENTS_HPP

@ -0,0 +1,51 @@
/*
* Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "runtime/thread.hpp"
#include "gc/epsilon/epsilonBarrierSet.hpp"
#include "gc/epsilon/epsilonThreadLocalData.hpp"
#include "gc/shared/collectorPolicy.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER1
#include "gc/shared/c1/barrierSetC1.hpp"
#endif
#ifdef COMPILER2
#include "gc/shared/c2/barrierSetC2.hpp"
#endif
EpsilonBarrierSet::EpsilonBarrierSet() : BarrierSet(
make_barrier_set_assembler<BarrierSetAssembler>(),
make_barrier_set_c1<BarrierSetC1>(),
make_barrier_set_c2<BarrierSetC2>(),
BarrierSet::FakeRtti(BarrierSet::EpsilonBarrierSet)) {};
void EpsilonBarrierSet::on_thread_create(Thread *thread) {
EpsilonThreadLocalData::create(thread);
}
void EpsilonBarrierSet::on_thread_destroy(Thread *thread) {
EpsilonThreadLocalData::destroy(thread);
}

@ -0,0 +1,57 @@
/*
* Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_EPSILON_BARRIERSET_HPP
#define SHARE_VM_GC_EPSILON_BARRIERSET_HPP
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/barrierSet.hpp"
// No interaction with application is required for Epsilon, and therefore
// the barrier set is empty.
class EpsilonBarrierSet: public BarrierSet {
friend class VMStructs;
public:
EpsilonBarrierSet();
virtual void print_on(outputStream *st) const {}
virtual void on_thread_create(Thread* thread);
virtual void on_thread_destroy(Thread* thread);
template <DecoratorSet decorators, typename BarrierSetT = EpsilonBarrierSet>
class AccessBarrier: public BarrierSet::AccessBarrier<decorators, BarrierSetT> {};
};
template<>
struct BarrierSet::GetName<EpsilonBarrierSet> {
static const BarrierSet::Name value = BarrierSet::EpsilonBarrierSet;
};
template<>
struct BarrierSet::GetType<BarrierSet::EpsilonBarrierSet> {
typedef ::EpsilonBarrierSet type;
};
#endif // SHARE_VM_GC_EPSILON_BARRIERSET_HPP
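
The GetName/GetType specializations above form a two-way compile-time map between the barrier-set enum tag and its concrete class, so generic access code can dispatch in either direction. A stripped-down illustration with stand-in names (not the HotSpot classes):

enum class Name { CardTable, Epsilon };

struct EpsilonBS {};                         // stand-in for EpsilonBarrierSet

template <typename T> struct GetName;        // type -> enum tag
template <Name N>     struct GetType;        // enum tag -> type

template <> struct GetName<EpsilonBS> { static const Name value = Name::Epsilon; };
template <> struct GetType<Name::Epsilon> { typedef EpsilonBS type; };

// The two maps invert each other.
static_assert(GetName<GetType<Name::Epsilon>::type>::value == Name::Epsilon,
              "round trip");

int main() { return 0; }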

@ -0,0 +1,42 @@
/*
* Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_EPSILON_COLLECTORPOLICY_HPP
#define SHARE_VM_GC_EPSILON_COLLECTORPOLICY_HPP
#include "gc/shared/collectorPolicy.hpp"
class EpsilonCollectorPolicy: public CollectorPolicy {
protected:
virtual void initialize_alignments() {
size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
size_t align = MAX2((size_t)os::vm_allocation_granularity(), page_size);
_space_alignment = align;
_heap_alignment = align;
}
public:
EpsilonCollectorPolicy() : CollectorPolicy() {};
};
#endif // SHARE_VM_GC_EPSILON_COLLECTORPOLICY_HPP

@ -0,0 +1,285 @@
/*
* Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc/epsilon/epsilonHeap.hpp"
#include "gc/epsilon/epsilonMemoryPool.hpp"
#include "gc/epsilon/epsilonThreadLocalData.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
jint EpsilonHeap::initialize() {
size_t align = _policy->heap_alignment();
size_t init_byte_size = align_up(_policy->initial_heap_byte_size(), align);
size_t max_byte_size = align_up(_policy->max_heap_byte_size(), align);
// Initialize backing storage
ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size, align);
_virtual_space.initialize(heap_rs, init_byte_size);
MemRegion committed_region((HeapWord*)_virtual_space.low(), (HeapWord*)_virtual_space.high());
MemRegion reserved_region((HeapWord*)_virtual_space.low_boundary(), (HeapWord*)_virtual_space.high_boundary());
initialize_reserved_region(reserved_region.start(), reserved_region.end());
_space = new ContiguousSpace();
_space->initialize(committed_region, /* clear_space = */ true, /* mangle_space = */ true);
// Precompute hot fields
_max_tlab_size = MIN2(CollectedHeap::max_tlab_size(), EpsilonMaxTLABSize / HeapWordSize);
_step_counter_update = MIN2<size_t>(max_byte_size / 16, EpsilonUpdateCountersStep);
_step_heap_print = (EpsilonPrintHeapSteps == 0) ? SIZE_MAX : (max_byte_size / EpsilonPrintHeapSteps);
_decay_time_ns = (int64_t) EpsilonTLABDecayTime * NANOSECS_PER_MILLISEC;
// Enable monitoring
_monitoring_support = new EpsilonMonitoringSupport(this);
_last_counter_update = 0;
_last_heap_print = 0;
// Install barrier set
BarrierSet::set_barrier_set(new EpsilonBarrierSet());
// All done, print out the configuration
if (init_byte_size != max_byte_size) {
log_info(gc)("Resizeable heap; starting at " SIZE_FORMAT "M, max: " SIZE_FORMAT "M, step: " SIZE_FORMAT "M",
init_byte_size / M, max_byte_size / M, EpsilonMinHeapExpand / M);
} else {
log_info(gc)("Non-resizeable heap; start/max: " SIZE_FORMAT "M", init_byte_size / M);
}
if (UseTLAB) {
log_info(gc)("Using TLAB allocation; max: " SIZE_FORMAT "K", _max_tlab_size * HeapWordSize / K);
if (EpsilonElasticTLAB) {
log_info(gc)("Elastic TLABs enabled; elasticity: %.2fx", EpsilonTLABElasticity);
}
if (EpsilonElasticTLABDecay) {
log_info(gc)("Elastic TLABs decay enabled; decay time: " SIZE_FORMAT "ms", EpsilonTLABDecayTime);
}
} else {
log_info(gc)("Not using TLAB allocation");
}
return JNI_OK;
}
void EpsilonHeap::post_initialize() {
CollectedHeap::post_initialize();
}
void EpsilonHeap::initialize_serviceability() {
_pool = new EpsilonMemoryPool(this);
_memory_manager.add_pool(_pool);
}
GrowableArray<GCMemoryManager*> EpsilonHeap::memory_managers() {
GrowableArray<GCMemoryManager*> memory_managers(1);
memory_managers.append(&_memory_manager);
return memory_managers;
}
GrowableArray<MemoryPool*> EpsilonHeap::memory_pools() {
GrowableArray<MemoryPool*> memory_pools(1);
memory_pools.append(_pool);
return memory_pools;
}
size_t EpsilonHeap::unsafe_max_tlab_alloc(Thread* thr) const {
// Return max allocatable TLAB size, and let allocation path figure out
// the actual TLAB allocation size.
return _max_tlab_size;
}
EpsilonHeap* EpsilonHeap::heap() {
CollectedHeap* heap = Universe::heap();
assert(heap != NULL, "Uninitialized access to EpsilonHeap::heap()");
assert(heap->kind() == CollectedHeap::Epsilon, "Not an Epsilon heap");
return (EpsilonHeap*)heap;
}
HeapWord* EpsilonHeap::allocate_work(size_t size) {
HeapWord* res = _space->par_allocate(size);
while (res == NULL) {
// Allocation failed, attempt expansion, and retry:
MutexLockerEx ml(Heap_lock);
size_t space_left = max_capacity() - capacity();
size_t want_space = MAX2(size, EpsilonMinHeapExpand);
if (want_space < space_left) {
// Enough space to expand in bulk:
bool expand = _virtual_space.expand_by(want_space);
assert(expand, "Should be able to expand");
} else if (size < space_left) {
// No space to expand in bulk, and this allocation is still possible,
// take all the remaining space:
bool expand = _virtual_space.expand_by(space_left);
assert(expand, "Should be able to expand");
} else {
// No space left:
return NULL;
}
_space->set_end((HeapWord *) _virtual_space.high());
res = _space->par_allocate(size);
}
size_t used = _space->used();
// Allocation successful, update counters
{
size_t last = _last_counter_update;
if ((used - last >= _step_counter_update) && Atomic::cmpxchg(used, &_last_counter_update, last) == last) {
_monitoring_support->update_counters();
}
}
// ...and print the occupancy line, if needed
{
size_t last = _last_heap_print;
if ((used - last >= _step_heap_print) && Atomic::cmpxchg(used, &_last_heap_print, last) == last) {
log_info(gc)("Heap: " SIZE_FORMAT "M reserved, " SIZE_FORMAT "M (%.2f%%) committed, " SIZE_FORMAT "M (%.2f%%) used",
max_capacity() / M,
capacity() / M,
capacity() * 100.0 / max_capacity(),
used / M,
used * 100.0 / max_capacity());
}
}
return res;
}
HeapWord* EpsilonHeap::allocate_new_tlab(size_t min_size,
size_t requested_size,
size_t* actual_size) {
Thread* thread = Thread::current();
// Defaults in case elastic paths are not taken
bool fits = true;
size_t size = requested_size;
size_t ergo_tlab = requested_size;
int64_t time = 0;
if (EpsilonElasticTLAB) {
ergo_tlab = EpsilonThreadLocalData::ergo_tlab_size(thread);
if (EpsilonElasticTLABDecay) {
int64_t last_time = EpsilonThreadLocalData::last_tlab_time(thread);
time = (int64_t) os::javaTimeNanos();
assert(last_time <= time, "time should be monotonic");
// If the thread had not allocated recently, retract the ergonomic size.
// This conserves memory when the thread had an initial burst of allocations
// and then started allocating only sporadically.
if (last_time != 0 && (time - last_time > _decay_time_ns)) {
ergo_tlab = 0;
EpsilonThreadLocalData::set_ergo_tlab_size(thread, 0);
}
}
// If we can fit the allocation under current TLAB size, do so.
// Otherwise, we want to elastically increase the TLAB size.
fits = (requested_size <= ergo_tlab);
if (!fits) {
size = (size_t) (ergo_tlab * EpsilonTLABElasticity);
}
}
// Always honor boundaries
size = MAX2(min_size, MIN2(_max_tlab_size, size));
if (log_is_enabled(Trace, gc)) {
ResourceMark rm;
log_trace(gc)("TLAB size for \"%s\" (Requested: " SIZE_FORMAT "K, Min: " SIZE_FORMAT
"K, Max: " SIZE_FORMAT "K, Ergo: " SIZE_FORMAT "K) -> " SIZE_FORMAT "K",
thread->name(),
requested_size * HeapWordSize / K,
min_size * HeapWordSize / K,
_max_tlab_size * HeapWordSize / K,
ergo_tlab * HeapWordSize / K,
size * HeapWordSize / K);
}
// All prepared, let's do it!
HeapWord* res = allocate_work(size);
if (res != NULL) {
// Allocation successful
*actual_size = size;
if (EpsilonElasticTLABDecay) {
EpsilonThreadLocalData::set_last_tlab_time(thread, time);
}
if (EpsilonElasticTLAB && !fits) {
// If we requested expansion, this is our new ergonomic TLAB size
EpsilonThreadLocalData::set_ergo_tlab_size(thread, size);
}
} else {
// Allocation failed, reset ergonomics to try and fit smaller TLABs
if (EpsilonElasticTLAB) {
EpsilonThreadLocalData::set_ergo_tlab_size(thread, 0);
}
}
return res;
}
HeapWord* EpsilonHeap::mem_allocate(size_t size, bool *gc_overhead_limit_was_exceeded) {
*gc_overhead_limit_was_exceeded = false;
return allocate_work(size);
}
void EpsilonHeap::collect(GCCause::Cause cause) {
log_info(gc)("GC request for \"%s\" is ignored", GCCause::to_string(cause));
_monitoring_support->update_counters();
}
void EpsilonHeap::do_full_collection(bool clear_all_soft_refs) {
log_info(gc)("Full GC request for \"%s\" is ignored", GCCause::to_string(gc_cause()));
_monitoring_support->update_counters();
}
void EpsilonHeap::safe_object_iterate(ObjectClosure *cl) {
_space->safe_object_iterate(cl);
}
void EpsilonHeap::print_on(outputStream *st) const {
st->print_cr("Epsilon Heap");
// Cast away constness:
((VirtualSpace)_virtual_space).print_on(st);
st->print_cr("Allocation space:");
_space->print_on(st);
}
void EpsilonHeap::print_tracing_info() const {
Log(gc) log;
size_t allocated_kb = used() / K;
log.info("Total allocated: " SIZE_FORMAT " KB",
allocated_kb);
log.info("Average allocation rate: " SIZE_FORMAT " KB/sec",
(size_t)(allocated_kb * NANOSECS_PER_SEC / os::elapsed_counter()));
}
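
The elastic TLAB policy in allocate_new_tlab() above boils down to a per-thread ergonomic size that grows geometrically when a request does not fit and decays to zero after an idle period. A toy model of that policy (constants and names are illustrative, not HotSpot's):

#include <algorithm>
#include <cstdint>
#include <cstddef>

struct ElasticTlab {
  std::size_t ergo_size = 0;        // per-thread ergonomic size, in words
  std::int64_t last_alloc_ns = 0;

  std::size_t next_size(std::size_t requested, std::size_t max_size,
                        std::int64_t now_ns, std::int64_t decay_ns,
                        double elasticity) {
    if (last_alloc_ns != 0 && now_ns - last_alloc_ns > decay_ns) {
      ergo_size = 0;                // idle too long: retract the ergonomic size
    }
    last_alloc_ns = now_ns;
    std::size_t size = requested;
    if (requested > ergo_size) {
      // Request does not fit: expand elastically and remember the new size.
      size = std::max(requested, (std::size_t)(ergo_size * elasticity));
      ergo_size = std::min(size, max_size);
    }
    return std::min(size, max_size);
  }
};

int main() {
  ElasticTlab t;
  std::size_t s1 = t.next_size(64, 4096, 1, 1000, 1.1);     // grows to fit
  std::size_t s2 = t.next_size(32, 4096, 2, 1000, 1.1);     // fits under ergo
  std::size_t s3 = t.next_size(32, 4096, 5000, 1000, 1.1);  // decayed, refits
  return (s1 == 64 && s2 == 32 && s3 == 32) ? 0 : 1;
}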

@ -0,0 +1,147 @@
/*
* Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_EPSILON_COLLECTEDHEAP_HPP
#define SHARE_VM_GC_EPSILON_COLLECTEDHEAP_HPP
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/softRefPolicy.hpp"
#include "gc/shared/space.hpp"
#include "services/memoryManager.hpp"
#include "gc/epsilon/epsilonCollectorPolicy.hpp"
#include "gc/epsilon/epsilonMonitoringSupport.hpp"
#include "gc/epsilon/epsilonBarrierSet.hpp"
#include "gc/epsilon/epsilon_globals.hpp"
class EpsilonHeap : public CollectedHeap {
friend class VMStructs;
private:
EpsilonCollectorPolicy* _policy;
SoftRefPolicy _soft_ref_policy;
EpsilonMonitoringSupport* _monitoring_support;
MemoryPool* _pool;
GCMemoryManager _memory_manager;
ContiguousSpace* _space;
VirtualSpace _virtual_space;
size_t _max_tlab_size;
size_t _step_counter_update;
size_t _step_heap_print;
int64_t _decay_time_ns;
volatile size_t _last_counter_update;
volatile size_t _last_heap_print;
public:
static EpsilonHeap* heap();
EpsilonHeap(EpsilonCollectorPolicy* p) :
_policy(p),
_memory_manager("Epsilon Heap", "") {};
virtual Name kind() const {
return CollectedHeap::Epsilon;
}
virtual const char* name() const {
return "Epsilon";
}
virtual CollectorPolicy* collector_policy() const {
return _policy;
}
virtual SoftRefPolicy* soft_ref_policy() {
return &_soft_ref_policy;
}
virtual jint initialize();
virtual void post_initialize();
virtual void initialize_serviceability();
virtual GrowableArray<GCMemoryManager*> memory_managers();
virtual GrowableArray<MemoryPool*> memory_pools();
virtual size_t max_capacity() const { return _virtual_space.reserved_size(); }
virtual size_t capacity() const { return _virtual_space.committed_size(); }
virtual size_t used() const { return _space->used(); }
virtual bool is_in(const void* p) const {
return _space->is_in(p);
}
virtual bool is_scavengable(oop obj) {
// No GC is going to happen, therefore no objects ever move.
return false;
}
virtual bool is_maximal_no_gc() const {
// No GC is going to happen. Return "we are at max" when we are about to fail.
return used() == capacity();
}
// Allocation
HeapWord* allocate_work(size_t size);
virtual HeapWord* mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded);
virtual HeapWord* allocate_new_tlab(size_t min_size,
size_t requested_size,
size_t* actual_size);
// TLAB allocation
virtual bool supports_tlab_allocation() const { return true; }
virtual size_t tlab_capacity(Thread* thr) const { return capacity(); }
virtual size_t tlab_used(Thread* thr) const { return used(); }
virtual size_t max_tlab_size() const { return _max_tlab_size; }
virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;
virtual void collect(GCCause::Cause cause);
virtual void do_full_collection(bool clear_all_soft_refs);
// Heap walking support
virtual void safe_object_iterate(ObjectClosure* cl);
virtual void object_iterate(ObjectClosure* cl) {
safe_object_iterate(cl);
}
// No support for block parsing.
virtual HeapWord* block_start(const void* addr) const { return NULL; }
virtual size_t block_size(const HeapWord* addr) const { return 0; }
virtual bool block_is_obj(const HeapWord* addr) const { return false; }
// No GC threads
virtual void print_gc_threads_on(outputStream* st) const {}
virtual void gc_threads_do(ThreadClosure* tc) const {}
// No heap verification
virtual void prepare_for_verify() {}
virtual void verify(VerifyOption option) {}
virtual jlong millis_since_last_gc() {
// Report time since the VM start
return os::elapsed_counter() / NANOSECS_PER_MILLISEC;
}
virtual void print_on(outputStream* st) const;
virtual void print_tracing_info() const;
};
#endif // SHARE_VM_GC_EPSILON_COLLECTEDHEAP_HPP

@ -0,0 +1,45 @@
/*
* Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc/epsilon/epsilonHeap.hpp"
#include "gc/epsilon/epsilonMemoryPool.hpp"
EpsilonMemoryPool::EpsilonMemoryPool(EpsilonHeap* heap) :
CollectedMemoryPool("Epsilon Heap",
heap->capacity(),
heap->max_capacity(),
false),
_heap(heap) {
assert(UseEpsilonGC, "sanity");
}
MemoryUsage EpsilonMemoryPool::get_memory_usage() {
size_t initial_sz = initial_size();
size_t max_sz = max_size();
size_t used = used_in_bytes();
size_t committed = committed_in_bytes();
return MemoryUsage(initial_sz, used, committed, max_sz);
}

@ -0,0 +1,44 @@
/*
* Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_EPSILON_EPSILONMEMORYPOOL_HPP
#define SHARE_VM_GC_EPSILON_EPSILONMEMORYPOOL_HPP
#include "gc/epsilon/epsilonHeap.hpp"
#include "services/memoryPool.hpp"
#include "services/memoryUsage.hpp"
#include "utilities/macros.hpp"
class EpsilonMemoryPool : public CollectedMemoryPool {
private:
EpsilonHeap* _heap;
public:
EpsilonMemoryPool(EpsilonHeap* heap);
size_t committed_in_bytes() { return _heap->capacity(); }
size_t used_in_bytes() { return _heap->used(); }
size_t max_size() const { return _heap->max_capacity(); }
MemoryUsage get_memory_usage();
};
#endif // SHARE_VM_GC_EPSILON_EPSILONMEMORYPOOL_HPP

@ -0,0 +1,118 @@
/*
* Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc/epsilon/epsilonMonitoringSupport.hpp"
#include "gc/epsilon/epsilonHeap.hpp"
#include "gc/shared/generationCounters.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/metaspaceCounters.hpp"
#include "memory/resourceArea.hpp"
#include "services/memoryService.hpp"
class EpsilonSpaceCounters: public CHeapObj<mtGC> {
friend class VMStructs;
private:
PerfVariable* _capacity;
PerfVariable* _used;
char* _name_space;
public:
EpsilonSpaceCounters(const char* name,
int ordinal,
size_t max_size,
size_t initial_capacity,
GenerationCounters* gc) {
if (UsePerfData) {
EXCEPTION_MARK;
ResourceMark rm;
const char* cns = PerfDataManager::name_space(gc->name_space(), "space", ordinal);
_name_space = NEW_C_HEAP_ARRAY(char, strlen(cns)+1, mtGC);
strcpy(_name_space, cns);
const char* cname = PerfDataManager::counter_name(_name_space, "name");
PerfDataManager::create_string_constant(SUN_GC, cname, name, CHECK);
cname = PerfDataManager::counter_name(_name_space, "maxCapacity");
PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_Bytes, (jlong)max_size, CHECK);
cname = PerfDataManager::counter_name(_name_space, "capacity");
_capacity = PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes, initial_capacity, CHECK);
cname = PerfDataManager::counter_name(_name_space, "used");
_used = PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes, (jlong) 0, CHECK);
cname = PerfDataManager::counter_name(_name_space, "initCapacity");
PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_Bytes, initial_capacity, CHECK);
}
}
~EpsilonSpaceCounters() {
if (_name_space != NULL) {
FREE_C_HEAP_ARRAY(char, _name_space);
}
}
inline void update_all(size_t capacity, size_t used) {
_capacity->set_value(capacity);
_used->set_value(used);
}
};
class EpsilonGenerationCounters : public GenerationCounters {
private:
EpsilonHeap* _heap;
public:
EpsilonGenerationCounters(EpsilonHeap* heap) :
GenerationCounters("Heap", 1, 1, 0, heap->max_capacity(), heap->capacity()),
_heap(heap)
{};
virtual void update_all() {
_current_size->set_value(_heap->capacity());
}
};
EpsilonMonitoringSupport::EpsilonMonitoringSupport(EpsilonHeap* heap) {
_heap_counters = new EpsilonGenerationCounters(heap);
_space_counters = new EpsilonSpaceCounters("Heap", 0, heap->max_capacity(), 0, _heap_counters);
}
void EpsilonMonitoringSupport::update_counters() {
MemoryService::track_memory_usage();
if (UsePerfData) {
EpsilonHeap* heap = EpsilonHeap::heap();
size_t used = heap->used();
size_t capacity = heap->capacity();
_heap_counters->update_all();
_space_counters->update_all(capacity, used);
MetaspaceCounters::update_performance_counters();
CompressedClassSpaceCounters::update_performance_counters();
}
}

@ -0,0 +1,44 @@
/*
* Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_EPSILON_EPSILONMONITORINGSUPPORT_HPP
#define SHARE_VM_GC_EPSILON_EPSILONMONITORINGSUPPORT_HPP
#include "memory/allocation.hpp"
class GenerationCounters;
class EpsilonSpaceCounters;
class EpsilonHeap;
class EpsilonMonitoringSupport : public CHeapObj<mtGC> {
private:
GenerationCounters* _heap_counters;
EpsilonSpaceCounters* _space_counters;
public:
EpsilonMonitoringSupport(EpsilonHeap* heap);
void update_counters();
};
#endif // SHARE_VM_GC_EPSILON_EPSILONMONITORINGSUPPORT_HPP

@ -0,0 +1,70 @@
/*
* Copyright (c) 2018, Red Hat, Inc. All rights reserved.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_EPSILON_EPSILONTHREADLOCALDATA_HPP
#define SHARE_VM_GC_EPSILON_EPSILONTHREADLOCALDATA_HPP
#include "runtime/thread.hpp"
#include "utilities/debug.hpp"
class EpsilonThreadLocalData {
private:
size_t _ergo_tlab_size;
int64_t _last_tlab_time;
EpsilonThreadLocalData() :
_ergo_tlab_size(0),
_last_tlab_time(0) {}
static EpsilonThreadLocalData* data(Thread* thread) {
assert(UseEpsilonGC, "Sanity");
return thread->gc_data<EpsilonThreadLocalData>();
}
public:
static void create(Thread* thread) {
new (data(thread)) EpsilonThreadLocalData();
}
static void destroy(Thread* thread) {
data(thread)->~EpsilonThreadLocalData();
}
static size_t ergo_tlab_size(Thread *thread) {
return data(thread)->_ergo_tlab_size;
}
static int64_t last_tlab_time(Thread *thread) {
return data(thread)->_last_tlab_time;
}
static void set_ergo_tlab_size(Thread *thread, size_t val) {
data(thread)->_ergo_tlab_size = val;
}
static void set_last_tlab_time(Thread *thread, int64_t time) {
data(thread)->_last_tlab_time = time;
}
};
#endif // SHARE_VM_GC_EPSILON_EPSILONTHREADLOCALDATA_HPP
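
EpsilonThreadLocalData shows the contract behind Thread::gc_data<T>(): each GC keeps its per-thread state in an opaque, fixed-size buffer inside Thread, constructed with placement new and destroyed by an explicit destructor call, so Thread itself stays GC-agnostic. A self-contained sketch of the same pattern (DemoThread and DemoTld are illustrative stand-ins):

#include <new>
#include <cstdint>
#include <cstddef>

typedef std::uint64_t GCThreadLocalData[18];   // opaque storage, 144 bytes

struct DemoThread {
  GCThreadLocalData _gc_data;
  template <typename T> T* gc_data() {
    static_assert(sizeof(T) <= sizeof(GCThreadLocalData),
                  "GC data must fit the reserved buffer");
    return reinterpret_cast<T*>(&_gc_data);
  }
};

struct DemoTld {
  std::size_t ergo_tlab_size = 0;
  std::int64_t last_tlab_time = 0;
};

void on_create(DemoThread* t)  { new (t->gc_data<DemoTld>()) DemoTld(); }
void on_destroy(DemoThread* t) { t->gc_data<DemoTld>()->~DemoTld(); }

int main() {
  DemoThread t;
  on_create(&t);
  t.gc_data<DemoTld>()->ergo_tlab_size = 128;
  on_destroy(&t);
  return 0;
}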

@ -0,0 +1,96 @@
/*
* Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_EPSILON_GLOBALS_HPP
#define SHARE_VM_GC_EPSILON_GLOBALS_HPP
#include "runtime/globals.hpp"
//
// Defines all globals flags used by the Epsilon GC.
//
#define GC_EPSILON_FLAGS(develop, \
develop_pd, \
product, \
product_pd, \
diagnostic, \
diagnostic_pd, \
experimental, \
notproduct, \
manageable, \
product_rw, \
lp64_product, \
range, \
constraint, \
writeable) \
\
experimental(size_t, EpsilonPrintHeapSteps, 20, \
"Print heap occupancy stats with this number of steps. " \
"0 turns the printing off.") \
range(0, max_intx) \
\
experimental(size_t, EpsilonUpdateCountersStep, 1 * M, \
"Update heap occupancy counters after allocating this much " \
"memory. Higher values would make allocations faster at " \
"the expense of lower resolution in heap counters.") \
range(1, max_intx) \
\
experimental(size_t, EpsilonMaxTLABSize, 4 * M, \
"Max TLAB size to use with Epsilon GC. Larger value improves " \
"performance at the expense of per-thread memory waste. This " \
"asks TLAB machinery to cap TLAB sizes at this value.") \
range(1, max_intx) \
\
experimental(bool, EpsilonElasticTLAB, true, \
"Use elastic policy to manage TLAB sizes. This conserves memory " \
"for non-actively allocating threads, even when they request " \
"large TLABs for themselves. Active threads would experience " \
"smaller TLABs until the policy catches up.") \
\
experimental(bool, EpsilonElasticTLABDecay, true, \
"Use timed decays to shrink TLAB sizes. This conserves memory " \
"for the threads that allocate in bursts of different sizes, " \
"for example the small/rare allocations coming after the initial "\
"large burst.") \
\
experimental(double, EpsilonTLABElasticity, 1.1, \
"Multiplier to use when deciding on next TLAB size. Larger value "\
"improves performance at the expense of per-thread memory waste. "\
"Lower value improves memory footprint, but penalizes actively " \
"allocating threads.") \
range(1, max_intx) \
\
experimental(size_t, EpsilonTLABDecayTime, 1000, \
"TLAB sizing policy decays to initial size after thread had not " \
"allocated for this long. Time is in milliseconds. Lower value " \
"improves memory footprint, but penalizes actively allocating " \
"threads.") \
range(1, max_intx) \
\
experimental(size_t, EpsilonMinHeapExpand, 128 * M, \
"Min expansion step for heap. Larger value improves performance " \
"at the potential expense of memory waste.") \
range(1, max_intx)
#endif // SHARE_VM_GC_EPSILON_GLOBALS_HPP
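
GC_EPSILON_FLAGS is an X-macro: the flag list is written once and handed different expansions of its parameters (develop, product, experimental, ...) to generate declarations, definitions, range checks, and so on. A tiny standalone demonstration of the technique, simplified far below the real flags machinery:

#include <cstdio>

// One list of (type, name, default) triples...
#define DEMO_FLAGS(f)                       \
  f(unsigned, DemoPrintHeapSteps, 20)       \
  f(unsigned, DemoUpdateCountersStep, 1024)

// ...expanded once to define the variables...
#define DEFINE_FLAG(type, name, value) type name = value;
DEMO_FLAGS(DEFINE_FLAG)

// ...and once more to print them.
#define PRINT_FLAG(type, name, value) \
  std::printf("%s = %u (default %u)\n", #name, name, (unsigned)(value));

int main() {
  DEMO_FLAGS(PRINT_FLAG)
  return 0;
}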

@ -0,0 +1,46 @@
/*
* Copyright (c) 2018, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_GC_EPSILON_VMSTRUCTS_HPP
#define SHARE_GC_EPSILON_VMSTRUCTS_HPP
#include "gc/epsilon/epsilonHeap.hpp"
#include "gc/shared/space.hpp"
#include "memory/virtualspace.hpp"
#define VM_STRUCTS_EPSILONGC(nonstatic_field, \
volatile_nonstatic_field, \
static_field) \
nonstatic_field(EpsilonHeap, _virtual_space, VirtualSpace) \
nonstatic_field(EpsilonHeap, _space, ContiguousSpace*)
#define VM_TYPES_EPSILONGC(declare_type, \
declare_toplevel_type, \
declare_integer_type) \
declare_type(EpsilonHeap, CollectedHeap)
#define VM_INT_CONSTANTS_EPSILONGC(declare_constant, \
declare_constant_with_value)
#endif // SHARE_GC_EPSILON_VMSTRUCTS_HPP

@ -149,19 +149,16 @@ void RefProcTaskProxy::do_it(GCTaskManager* manager, uint which)
void RefProcTaskExecutor::execute(ProcessTask& task)
{
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
uint active_gc_threads = heap->gc_task_manager()->active_workers();
OopTaskQueueSet* qset = ParCompactionManager::stack_array();
ParallelTaskTerminator terminator(active_gc_threads, qset);
GCTaskQueue* q = GCTaskQueue::create();
for(uint i=0; i<active_gc_threads; i++) {
q->enqueue(new RefProcTaskProxy(task, i));
}
if (task.marks_oops_alive() && (active_gc_threads>1)) {
for (uint j=0; j<active_gc_threads; j++) {
q->enqueue(new StealMarkingTask(&terminator));
}
}
PSParallelCompact::gc_task_manager()->execute_and_wait(q);

@ -2112,7 +2112,10 @@ void PSParallelCompact::marking_phase(ParCompactionManager* cm,
ReferenceProcessorStats stats;
ReferenceProcessorPhaseTimes pt(&_gc_timer, ref_processor()->max_num_queues());
if (ref_processor()->processing_is_mt()) {
ref_processor()->set_active_mt_degree(active_gc_threads);
RefProcTaskExecutor task_executor;
stats = ref_processor()->process_discovered_references(
is_alive_closure(), &mark_and_push_closure, &follow_stack_closure,

@ -103,17 +103,17 @@ protected:
~BarrierSet() { }
template <class BarrierSetAssemblerT>
static BarrierSetAssembler* make_barrier_set_assembler() {
return NOT_ZERO(new BarrierSetAssemblerT()) ZERO_ONLY(NULL);
}
template <class BarrierSetC1T>
static BarrierSetC1* make_barrier_set_c1() {
return COMPILER1_PRESENT(new BarrierSetC1T()) NOT_COMPILER1(NULL);
}
template <class BarrierSetC2T>
static BarrierSetC2* make_barrier_set_c2() {
return COMPILER2_PRESENT(new BarrierSetC2T()) NOT_COMPILER2(NULL);
}

@ -30,7 +30,9 @@
// Do something for each concrete barrier set part of the build.
#define FOR_EACH_CONCRETE_BARRIER_SET_DO(f) \
f(CardTableBarrierSet) \
EPSILONGC_ONLY(f(EpsilonBarrierSet)) \
G1GC_ONLY(f(G1BarrierSet)) \
ZGC_ONLY(f(ZBarrierSet))
#define FOR_EACH_ABSTRACT_BARRIER_SET_DO(f) \
f(ModRef)

@ -30,8 +30,14 @@
#include "gc/shared/modRefBarrierSet.inline.hpp"
#include "gc/shared/cardTableBarrierSet.inline.hpp"
#if INCLUDE_EPSILONGC
#include "gc/epsilon/epsilonBarrierSet.hpp"
#endif
#if INCLUDE_G1GC
#include "gc/g1/g1BarrierSet.inline.hpp"
#endif
#if INCLUDE_ZGC
#include "gc/z/zBarrierSet.inline.hpp"
#endif
#endif // SHARE_VM_GC_SHARED_BARRIERSETCONFIG_INLINE_HPP

@ -89,6 +89,7 @@ class GCHeapLog : public EventLogBase<GCMessage> {
// CMSHeap
// G1CollectedHeap
// ParallelScavengeHeap
// ZCollectedHeap
//
class CollectedHeap : public CHeapObj<mtInternal> {
friend class VMStructs;
@ -206,7 +207,9 @@ class CollectedHeap : public CHeapObj<mtInternal> {
Serial,
Parallel,
CMS,
G1,
Epsilon,
Z
};
static inline size_t filler_array_max_size() {

@ -105,6 +105,21 @@ const char* GCCause::to_string(GCCause::Cause cause) {
case _dcmd_gc_run:
return "Diagnostic Command";
case _z_timer:
return "Timer";
case _z_warmup:
return "Warmup";
case _z_allocation_rate:
return "Allocation Rate";
case _z_allocation_stall:
return "Allocation Stall";
case _z_proactive:
return "Proactive";
case _last_gc_cause:
return "ILLEGAL VALUE - last gc cause - ILLEGAL VALUE";

@ -78,6 +78,12 @@ class GCCause : public AllStatic {
_dcmd_gc_run,
_z_timer,
_z_warmup,
_z_allocation_rate,
_z_allocation_stall,
_z_proactive,
_last_gc_cause
};

@ -31,6 +31,9 @@
#if INCLUDE_CMSGC
#include "gc/cms/cmsArguments.hpp"
#endif
#if INCLUDE_EPSILONGC
#include "gc/epsilon/epsilonArguments.hpp"
#endif
#if INCLUDE_G1GC
#include "gc/g1/g1Arguments.hpp"
#endif
@ -40,6 +43,9 @@
#if INCLUDE_SERIALGC
#include "gc/serial/serialArguments.hpp"
#endif
#if INCLUDE_ZGC
#include "gc/z/zArguments.hpp"
#endif
struct SupportedGC {
bool& _flag;
@ -52,18 +58,22 @@ struct SupportedGC {
};
CMSGC_ONLY(static CMSArguments cmsArguments;)
EPSILONGC_ONLY(static EpsilonArguments epsilonArguments;)
G1GC_ONLY(static G1Arguments g1Arguments;)
PARALLELGC_ONLY(static ParallelArguments parallelArguments;)
SERIALGC_ONLY(static SerialArguments serialArguments;)
ZGC_ONLY(static ZArguments zArguments;)
// Table of supported GCs, for translating between command
// line flag, CollectedHeap::Name and GCArguments instance.
static const SupportedGC SupportedGCs[] = {
CMSGC_ONLY_ARG(SupportedGC(UseConcMarkSweepGC, CollectedHeap::CMS, cmsArguments, "concurrent mark sweep gc"))
EPSILONGC_ONLY_ARG(SupportedGC(UseEpsilonGC, CollectedHeap::Epsilon, epsilonArguments, "epsilon gc"))
G1GC_ONLY_ARG(SupportedGC(UseG1GC, CollectedHeap::G1, g1Arguments, "g1 gc"))
PARALLELGC_ONLY_ARG(SupportedGC(UseParallelGC, CollectedHeap::Parallel, parallelArguments, "parallel gc"))
PARALLELGC_ONLY_ARG(SupportedGC(UseParallelOldGC, CollectedHeap::Parallel, parallelArguments, "parallel gc"))
SERIALGC_ONLY_ARG(SupportedGC(UseSerialGC, CollectedHeap::Serial, serialArguments, "serial gc"))
ZGC_ONLY_ARG(SupportedGC(UseZGC, CollectedHeap::Z, zArguments, "z gc"))
};
#define FOR_EACH_SUPPORTED_GC(var) \
@ -88,10 +98,12 @@ void GCConfig::select_gc_ergonomically() {
}
NOT_CMSGC( UNSUPPORTED_OPTION(UseConcMarkSweepGC));
NOT_EPSILONGC( UNSUPPORTED_OPTION(UseEpsilonGC);)
NOT_G1GC( UNSUPPORTED_OPTION(UseG1GC);)
NOT_PARALLELGC(UNSUPPORTED_OPTION(UseParallelGC);)
NOT_PARALLELGC(UNSUPPORTED_OPTION(UseParallelOldGC));
NOT_SERIALGC( UNSUPPORTED_OPTION(UseSerialGC);)
NOT_ZGC( UNSUPPORTED_OPTION(UseZGC);)
}
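
The SupportedGCs table above ties each selection flag to a CollectedHeap::Name and a GCArguments instance, so the config layer can scan one table instead of hard-coding per-GC branches. A compact model of that table-driven selection (types and names are illustrative, not the HotSpot ones):

#include <cstdio>

enum class HeapName { Serial, Epsilon, Z };

bool UseSerialGC = false;
bool UseEpsilonGC = true;

struct SupportedGC {
  bool* flag;          // command-line selection flag
  HeapName name;       // CollectedHeap::Name equivalent
  const char* label;   // short name for reporting
};

static const SupportedGC kSupported[] = {
  { &UseSerialGC,  HeapName::Serial,  "serial gc"  },
  { &UseEpsilonGC, HeapName::Epsilon, "epsilon gc" },
};

int main() {
  for (const SupportedGC& gc : kSupported) {
    if (*gc.flag) {
      std::printf("selected: %s\n", gc.label);
    }
  }
  return 0;
}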
bool GCConfig::is_no_gc_selected() {

@ -43,6 +43,10 @@ GCName GCConfiguration::young_collector() const {
return ParNew;
}
if (UseZGC) {
return NA;
}
return DefNew;
}
@ -59,6 +63,10 @@ GCName GCConfiguration::old_collector() const {
return ParallelOld;
}
if (UseZGC) {
return Z;
}
return SerialOld;
}

@ -38,6 +38,8 @@ enum GCName {
ConcurrentMarkSweep,
G1Old,
G1Full,
Z,
NA,
GCNameEndSentinel
};
@ -55,6 +57,8 @@ class GCNameHelper {
case ConcurrentMarkSweep: return "ConcurrentMarkSweep";
case G1Old: return "G1Old";
case G1Full: return "G1Full";
case Z: return "Z";
case NA: return "N/A";
default: ShouldNotReachHere(); return NULL;
}
}

@ -40,6 +40,6 @@
// should consider placing frequently accessed fields first in
// T, so that field offsets relative to Thread are small, which
// often allows for a more compact instruction encoding.
typedef uint64_t GCThreadLocalData[18]; // 144 bytes
#endif // SHARE_GC_SHARED_GCTHREADLOCALDATA_HPP

@ -29,6 +29,9 @@
#if INCLUDE_CMSGC
#include "gc/cms/cms_globals.hpp"
#endif
#if INCLUDE_EPSILONGC
#include "gc/epsilon/epsilon_globals.hpp"
#endif
#if INCLUDE_G1GC
#include "gc/g1/g1_globals.hpp"
#endif
@ -38,6 +41,9 @@
#if INCLUDE_SERIALGC
#include "gc/serial/serial_globals.hpp"
#endif
#if INCLUDE_ZGC
#include "gc/z/z_globals.hpp"
#endif
#define GC_FLAGS(develop, \
develop_pd, \
@ -70,6 +76,22 @@
constraint, \
writeable)) \
\
EPSILONGC_ONLY(GC_EPSILON_FLAGS( \
develop, \
develop_pd, \
product, \
product_pd, \
diagnostic, \
diagnostic_pd, \
experimental, \
notproduct, \
manageable, \
product_rw, \
lp64_product, \
range, \
constraint, \
writeable)) \
\
G1GC_ONLY(GC_G1_FLAGS( \
develop, \
develop_pd, \
@ -118,6 +140,22 @@
constraint, \
writeable)) \
\
ZGC_ONLY(GC_Z_FLAGS( \
develop, \
develop_pd, \
product, \
product_pd, \
diagnostic, \
diagnostic_pd, \
experimental, \
notproduct, \
manageable, \
product_rw, \
lp64_product, \
range, \
constraint, \
writeable)) \
\
/* gc */ \
\
product(bool, UseConcMarkSweepGC, false, \
@ -135,6 +173,12 @@
product(bool, UseParallelOldGC, false, \
"Use the Parallel Old garbage collector") \
\
experimental(bool, UseEpsilonGC, false, \
"Use the Epsilon (no-op) garbage collector") \
\
experimental(bool, UseZGC, false, \
"Use the Z garbage collector") \
\
product(uint, ParallelGCThreads, 0, \
"Number of parallel threads parallel gc will use") \
constraint(ParallelGCThreadsConstraintFunc,AfterErgo) \

@ -52,9 +52,7 @@ OopStorage::AllocateEntry::~AllocateEntry() {
assert(_next == NULL, "deleting attached block");
}
OopStorage::AllocateList::AllocateList() : _head(NULL), _tail(NULL) {}
OopStorage::AllocateList::~AllocateList() {
// ~OopStorage() empties its lists before destroying them.
@ -68,8 +66,8 @@ void OopStorage::AllocateList::push_front(const Block& block) {
assert(_tail == NULL, "invariant");
_head = _tail = &block;
} else {
block.allocate_entry()._next = old;
old->allocate_entry()._prev = &block;
_head = &block;
}
}
@ -80,14 +78,14 @@ void OopStorage::AllocateList::push_back(const Block& block) {
assert(_head == NULL, "invariant");
_head = _tail = &block;
} else {
old->allocate_entry()._next = &block;
block.allocate_entry()._prev = old;
_tail = &block;
}
}
void OopStorage::AllocateList::unlink(const Block& block) {
const AllocateEntry& block_entry = block.allocate_entry();
const Block* prev_blk = block_entry._prev;
const Block* next_blk = block_entry._next;
block_entry._prev = NULL;
@ -98,15 +96,15 @@ void OopStorage::AllocateList::unlink(const Block& block) {
_head = _tail = NULL;
} else if (prev_blk == NULL) {
assert(_head == &block, "invariant");
next_blk->allocate_entry()._prev = NULL;
_head = next_blk;
} else if (next_blk == NULL) {
assert(_tail == &block, "invariant");
prev_blk->allocate_entry()._next = NULL;
_tail = prev_blk;
} else {
next_blk->allocate_entry()._prev = prev_blk;
prev_blk->allocate_entry()._next = next_blk;
}
}
@ -232,10 +230,6 @@ OopStorage::Block::~Block() {
const_cast<OopStorage* volatile&>(_owner) = NULL;
}
size_t OopStorage::Block::allocation_size() {
// _data must be first member, so aligning Block aligns _data.
STATIC_ASSERT(_data_pos == 0);
@ -769,7 +763,7 @@ OopStorage::OopStorage(const char* name,
Mutex* active_mutex) :
_name(dup_name(name)),
_active_array(ActiveArray::create(initial_active_array_size)),
_allocate_list(),
_deferred_updates(NULL),
_allocate_mutex(allocate_mutex),
_active_mutex(active_mutex),

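Editor's note: the hunks above replace AllocateList's stored function pointer (_get_entry) with a direct call to Block::allocate_entry(). A minimal standalone sketch of the resulting intrusive-list pattern, with simplified names that are not the HotSpot types:

#include <cassert>

struct Block;

struct Entry {
  // Mutable so the list can relink through a const Block reference,
  // mirroring OopStorage::AllocateEntry.
  mutable const Block* _prev = nullptr;
  mutable const Block* _next = nullptr;
};

struct Block {
  Entry _entry;
  // Direct accessor; no function-pointer indirection needed.
  const Entry& allocate_entry() const { return _entry; }
};

struct List {
  const Block* _head = nullptr;
  void push_front(const Block& block) {
    const Block* old = _head;
    if (old != nullptr) {
      block.allocate_entry()._next = old;
      old->allocate_entry()._prev = &block;
    }
    _head = &block;
  }
};

int main() {
  Block a, b;
  List list;
  list.push_front(a);
  list.push_front(b);
  assert(list._head == &b);
  assert(b.allocate_entry()._next == &a);
  assert(a.allocate_entry()._prev == &b);
  return 0;
}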
@ -178,14 +178,13 @@ NOT_AIX( private: )
class AllocateList {
const Block* _head;
const Block* _tail;
const AllocateEntry& (*_get_entry)(const Block& block);
// Noncopyable.
AllocateList(const AllocateList&);
AllocateList& operator=(const AllocateList&);
public:
AllocateList(const AllocateEntry& (*get_entry)(const Block& block));
AllocateList();
~AllocateList();
Block* head();

@ -158,7 +158,7 @@ class OopStorage::Block /* No base class, to avoid messing up alignment. */ {
Block& operator=(const Block&);
public:
static const AllocateEntry& get_allocate_entry(const Block& block);
const AllocateEntry& allocate_entry() const;
static size_t allocation_size();
static size_t allocation_alignment_shift();
@ -214,19 +214,19 @@ inline const OopStorage::Block* OopStorage::AllocateList::ctail() const {
}
inline OopStorage::Block* OopStorage::AllocateList::prev(Block& block) {
return const_cast<Block*>(_get_entry(block)._prev);
return const_cast<Block*>(block.allocate_entry()._prev);
}
inline OopStorage::Block* OopStorage::AllocateList::next(Block& block) {
return const_cast<Block*>(_get_entry(block)._next);
return const_cast<Block*>(block.allocate_entry()._next);
}
inline const OopStorage::Block* OopStorage::AllocateList::prev(const Block& block) const {
return _get_entry(block)._prev;
return block.allocate_entry()._prev;
}
inline const OopStorage::Block* OopStorage::AllocateList::next(const Block& block) const {
return _get_entry(block)._next;
return block.allocate_entry()._next;
}
template<typename Closure>
@ -296,7 +296,11 @@ inline OopStorage::SkipNullFn<F> OopStorage::skip_null_fn(F f) {
return SkipNullFn<F>(f);
}
// Inline Block accesses for use in iteration inner loop.
// Inline Block accesses for use in iteration loops.
inline const OopStorage::AllocateEntry& OopStorage::Block::allocate_entry() const {
return _allocate_entry;
}
inline void OopStorage::Block::check_index(unsigned index) const {
assert(index < ARRAY_SIZE(_data), "Index out of bounds: %u", index);

@ -35,6 +35,9 @@
#if INCLUDE_SERIALGC
#include "gc/serial/serial_specialized_oop_closures.hpp"
#endif
#if INCLUDE_ZGC
#include "gc/z/z_specialized_oop_closures.hpp"
#endif
// The following OopClosure types get specialized versions of
// "oop_oop_iterate" that invoke the closures' do_oop methods
@ -67,7 +70,8 @@ class OopsInGenClosure;
SERIALGC_ONLY(SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_MS(f)) \
CMSGC_ONLY(SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_CMS(f)) \
G1GC_ONLY(SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_G1(f)) \
G1GC_ONLY(SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_G1FULL(f))
G1GC_ONLY(SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_G1FULL(f)) \
ZGC_ONLY(SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_Z(f))
// We separate these out, because sometime the general one has
// a different definition from the specialized ones, and sometimes it

@ -37,6 +37,9 @@
#if INCLUDE_CMSGC
#include "gc/cms/vmStructs_cms.hpp"
#endif
#if INCLUDE_EPSILONGC
#include "gc/epsilon/vmStructs_epsilon.hpp"
#endif
#if INCLUDE_G1GC
#include "gc/g1/vmStructs_g1.hpp"
#endif
@ -47,6 +50,9 @@
#include "gc/serial/defNewGeneration.hpp"
#include "gc/serial/vmStructs_serial.hpp"
#endif
#if INCLUDE_ZGC
#include "gc/z/vmStructs_z.hpp"
#endif
#define VM_STRUCTS_GC(nonstatic_field, \
volatile_nonstatic_field, \
@ -55,6 +61,9 @@
CMSGC_ONLY(VM_STRUCTS_CMSGC(nonstatic_field, \
volatile_nonstatic_field, \
static_field)) \
EPSILONGC_ONLY(VM_STRUCTS_EPSILONGC(nonstatic_field, \
volatile_nonstatic_field, \
static_field)) \
G1GC_ONLY(VM_STRUCTS_G1GC(nonstatic_field, \
volatile_nonstatic_field, \
static_field)) \
@ -64,6 +73,10 @@
SERIALGC_ONLY(VM_STRUCTS_SERIALGC(nonstatic_field, \
volatile_nonstatic_field, \
static_field)) \
ZGC_ONLY(VM_STRUCTS_ZGC(nonstatic_field, \
volatile_nonstatic_field, \
static_field)) \
\
/**********************************************************************************/ \
/* Generation and Space hierarchies */ \
/**********************************************************************************/ \
@ -153,6 +166,9 @@
CMSGC_ONLY(VM_TYPES_CMSGC(declare_type, \
declare_toplevel_type, \
declare_integer_type)) \
EPSILONGC_ONLY(VM_TYPES_EPSILONGC(declare_type, \
declare_toplevel_type, \
declare_integer_type)) \
G1GC_ONLY(VM_TYPES_G1GC(declare_type, \
declare_toplevel_type, \
declare_integer_type)) \
@ -162,6 +178,10 @@
SERIALGC_ONLY(VM_TYPES_SERIALGC(declare_type, \
declare_toplevel_type, \
declare_integer_type)) \
ZGC_ONLY(VM_TYPES_ZGC(declare_type, \
declare_toplevel_type, \
declare_integer_type)) \
\
/******************************************/ \
/* Generation and space hierarchies */ \
/* (needed for run-time type information) */ \
@ -225,12 +245,16 @@
declare_constant_with_value) \
CMSGC_ONLY(VM_INT_CONSTANTS_CMSGC(declare_constant, \
declare_constant_with_value)) \
EPSILONGC_ONLY(VM_INT_CONSTANTS_EPSILONGC(declare_constant, \
declare_constant_with_value)) \
G1GC_ONLY(VM_INT_CONSTANTS_G1GC(declare_constant, \
declare_constant_with_value)) \
PARALLELGC_ONLY(VM_INT_CONSTANTS_PARALLELGC(declare_constant, \
declare_constant_with_value)) \
SERIALGC_ONLY(VM_INT_CONSTANTS_SERIALGC(declare_constant, \
declare_constant_with_value)) \
ZGC_ONLY(VM_INT_CONSTANTS_ZGC(declare_constant, \
declare_constant_with_value)) \
\
/********************************************/ \
/* Generation and Space Hierarchy Constants */ \
@ -274,5 +298,7 @@
declare_constant(Generation::LogOfGenGrain) \
declare_constant(Generation::GenGrain) \
#define VM_LONG_CONSTANTS_GC(declare_constant) \
ZGC_ONLY(VM_LONG_CONSTANTS_ZGC(declare_constant))
#endif // SHARE_GC_SHARED_VMSTRUCTS_GC_HPP

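Editor's note: the EPSILONGC_ONLY/ZGC_ONLY wrappers used above compile their argument in only when the corresponding feature is built. A sketch of the mechanism, following the pattern of the existing *GC_ONLY macros in utilities/macros.hpp:

// Pattern sketch (cf. utilities/macros.hpp): the macro expands to its
// argument when the feature is included, and to nothing otherwise.
#if INCLUDE_ZGC
#define ZGC_ONLY(code) code
#else
#define ZGC_ONLY(code)
#endif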
@ -0,0 +1,247 @@
/*
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "c1/c1_LIR.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "gc/z/c1/zBarrierSetC1.hpp"
#include "gc/z/zBarrierSet.hpp"
#include "gc/z/zBarrierSetAssembler.hpp"
#include "gc/z/zThreadLocalData.hpp"
#include "utilities/macros.hpp"
ZLoadBarrierStubC1::ZLoadBarrierStubC1(LIRAccess& access, LIR_Opr ref, address runtime_stub) :
_decorators(access.decorators()),
_ref_addr(access.resolved_addr()),
_ref(ref),
_tmp(LIR_OprFact::illegalOpr),
_patch_info(access.patch_emit_info()),
_runtime_stub(runtime_stub) {
// Allocate tmp register if needed
if (!_ref_addr->is_register()) {
assert(_ref_addr->is_address(), "Must be an address");
if (_ref_addr->as_address_ptr()->index()->is_valid() ||
_ref_addr->as_address_ptr()->disp() != 0) {
// Has index or displacement, need tmp register to load address into
_tmp = access.gen()->new_pointer_register();
} else {
// No index or displacement, address available in base register
_ref_addr = _ref_addr->as_address_ptr()->base();
}
}
assert(_ref->is_register(), "Must be a register");
assert(_ref_addr->is_register() != _tmp->is_register(), "Only one should be a register");
}
DecoratorSet ZLoadBarrierStubC1::decorators() const {
return _decorators;
}
LIR_Opr ZLoadBarrierStubC1::ref() const {
return _ref;
}
LIR_Opr ZLoadBarrierStubC1::ref_addr() const {
return _ref_addr;
}
LIR_Opr ZLoadBarrierStubC1::tmp() const {
return _tmp;
}
LIR_PatchCode ZLoadBarrierStubC1::patch_code() const {
return (_decorators & C1_NEEDS_PATCHING) != 0 ? lir_patch_normal : lir_patch_none;
}
CodeEmitInfo*& ZLoadBarrierStubC1::patch_info() {
return _patch_info;
}
address ZLoadBarrierStubC1::runtime_stub() const {
return _runtime_stub;
}
void ZLoadBarrierStubC1::visit(LIR_OpVisitState* visitor) {
if (_patch_info != NULL) {
visitor->do_slow_case(_patch_info);
} else {
visitor->do_slow_case();
}
visitor->do_input(_ref_addr);
visitor->do_output(_ref);
if (_tmp->is_valid()) {
visitor->do_temp(_tmp);
}
}
void ZLoadBarrierStubC1::emit_code(LIR_Assembler* ce) {
ZBarrierSet::assembler()->generate_c1_load_barrier_stub(ce, this);
}
#ifndef PRODUCT
void ZLoadBarrierStubC1::print_name(outputStream* out) const {
out->print("ZLoadBarrierStubC1");
}
#endif // PRODUCT
class LIR_OpZLoadBarrierTest : public LIR_Op {
private:
LIR_Opr _opr;
public:
LIR_OpZLoadBarrierTest(LIR_Opr opr) :
LIR_Op(),
_opr(opr) {}
virtual void visit(LIR_OpVisitState* state) {
state->do_input(_opr);
}
virtual void emit_code(LIR_Assembler* ce) {
ZBarrierSet::assembler()->generate_c1_load_barrier_test(ce, _opr);
}
virtual void print_instr(outputStream* out) const {
_opr->print(out);
out->print(" ");
}
#ifndef PRODUCT
virtual const char* name() const {
return "lir_z_load_barrier_test";
}
#endif // PRODUCT
};
static bool barrier_needed(LIRAccess& access) {
return ZBarrierSet::barrier_needed(access.decorators(), access.type());
}
ZBarrierSetC1::ZBarrierSetC1() :
_load_barrier_on_oop_field_preloaded_runtime_stub(NULL),
_load_barrier_on_weak_oop_field_preloaded_runtime_stub(NULL) {}
address ZBarrierSetC1::load_barrier_on_oop_field_preloaded_runtime_stub(DecoratorSet decorators) const {
assert((decorators & ON_PHANTOM_OOP_REF) == 0, "Unsupported decorator");
//assert((decorators & ON_UNKNOWN_OOP_REF) == 0, "Unsupported decorator");
if ((decorators & ON_WEAK_OOP_REF) != 0) {
return _load_barrier_on_weak_oop_field_preloaded_runtime_stub;
} else {
return _load_barrier_on_oop_field_preloaded_runtime_stub;
}
}
#ifdef ASSERT
#define __ access.gen()->lir(__FILE__, __LINE__)->
#else
#define __ access.gen()->lir()->
#endif
void ZBarrierSetC1::load_barrier(LIRAccess& access, LIR_Opr result) const {
// Fast path
__ append(new LIR_OpZLoadBarrierTest(result));
// Slow path
const address runtime_stub = load_barrier_on_oop_field_preloaded_runtime_stub(access.decorators());
CodeStub* const stub = new ZLoadBarrierStubC1(access, result, runtime_stub);
__ branch(lir_cond_notEqual, T_ADDRESS, stub);
__ branch_destination(stub->continuation());
}
#undef __
void ZBarrierSetC1::load_at_resolved(LIRAccess& access, LIR_Opr result) {
BarrierSetC1::load_at_resolved(access, result);
if (barrier_needed(access)) {
load_barrier(access, result);
}
}
static void pre_load_barrier(LIRAccess& access) {
DecoratorSet decorators = access.decorators();
// Downgrade access to MO_UNORDERED
decorators = (decorators & ~MO_DECORATOR_MASK) | MO_UNORDERED;
// Remove C1_WRITE_ACCESS
decorators = (decorators & ~C1_WRITE_ACCESS);
// Generate synthetic load at
access.gen()->access_load_at(decorators,
access.type(),
access.base().item(),
access.offset().opr(),
access.gen()->new_register(access.type()),
NULL /* patch_emit_info */,
NULL /* load_emit_info */);
}
LIR_Opr ZBarrierSetC1::atomic_xchg_at_resolved(LIRAccess& access, LIRItem& value) {
if (barrier_needed(access)) {
pre_load_barrier(access);
}
return BarrierSetC1::atomic_xchg_at_resolved(access, value);
}
LIR_Opr ZBarrierSetC1::atomic_cmpxchg_at_resolved(LIRAccess& access, LIRItem& cmp_value, LIRItem& new_value) {
if (barrier_needed(access)) {
pre_load_barrier(access);
}
return BarrierSetC1::atomic_cmpxchg_at_resolved(access, cmp_value, new_value);
}
class ZLoadBarrierRuntimeStubCodeGenClosure : public StubAssemblerCodeGenClosure {
private:
const DecoratorSet _decorators;
public:
ZLoadBarrierRuntimeStubCodeGenClosure(DecoratorSet decorators) :
_decorators(decorators) {}
virtual OopMapSet* generate_code(StubAssembler* sasm) {
ZBarrierSet::assembler()->generate_c1_load_barrier_runtime_stub(sasm, _decorators);
return NULL;
}
};
static address generate_c1_runtime_stub(BufferBlob* blob, DecoratorSet decorators, const char* name) {
ZLoadBarrierRuntimeStubCodeGenClosure cl(decorators);
CodeBlob* const code_blob = Runtime1::generate_blob(blob, -1 /* stub_id */, name, false /* expect_oop_map*/, &cl);
return code_blob->code_begin();
}
void ZBarrierSetC1::generate_c1_runtime_stubs(BufferBlob* blob) {
_load_barrier_on_oop_field_preloaded_runtime_stub =
generate_c1_runtime_stub(blob, ON_STRONG_OOP_REF, "load_barrier_on_oop_field_preloaded_runtime_stub");
_load_barrier_on_weak_oop_field_preloaded_runtime_stub =
generate_c1_runtime_stub(blob, ON_WEAK_OOP_REF, "load_barrier_on_weak_oop_field_preloaded_runtime_stub");
}

@ -0,0 +1,80 @@
/*
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_Z_C1_ZBARRIERSETC1_HPP
#define SHARE_GC_Z_C1_ZBARRIERSETC1_HPP
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_IR.hpp"
#include "c1/c1_LIR.hpp"
#include "gc/shared/c1/barrierSetC1.hpp"
#include "oops/accessDecorators.hpp"
class ZLoadBarrierStubC1 : public CodeStub {
private:
DecoratorSet _decorators;
LIR_Opr _ref_addr;
LIR_Opr _ref;
LIR_Opr _tmp;
CodeEmitInfo* _patch_info;
address _runtime_stub;
public:
ZLoadBarrierStubC1(LIRAccess& access, LIR_Opr ref, address runtime_stub);
DecoratorSet decorators() const;
LIR_Opr ref() const;
LIR_Opr ref_addr() const;
LIR_Opr tmp() const;
LIR_PatchCode patch_code() const;
CodeEmitInfo*& patch_info();
address runtime_stub() const;
virtual void emit_code(LIR_Assembler* ce);
virtual void visit(LIR_OpVisitState* visitor);
#ifndef PRODUCT
virtual void print_name(outputStream* out) const;
#endif // PRODUCT
};
class ZBarrierSetC1 : public BarrierSetC1 {
private:
address _load_barrier_on_oop_field_preloaded_runtime_stub;
address _load_barrier_on_weak_oop_field_preloaded_runtime_stub;
address load_barrier_on_oop_field_preloaded_runtime_stub(DecoratorSet decorators) const;
void load_barrier(LIRAccess& access, LIR_Opr result) const;
protected:
virtual void load_at_resolved(LIRAccess& access, LIR_Opr result);
virtual LIR_Opr atomic_xchg_at_resolved(LIRAccess& access, LIRItem& value);
virtual LIR_Opr atomic_cmpxchg_at_resolved(LIRAccess& access, LIRItem& cmp_value, LIRItem& new_value);
public:
ZBarrierSetC1();
virtual void generate_c1_runtime_stubs(BufferBlob* blob);
};
#endif // SHARE_GC_Z_C1_ZBARRIERSETC1_HPP

File diff suppressed because it is too large

@ -0,0 +1,206 @@
/*
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_Z_C2_ZBARRIERSETC2_HPP
#define SHARE_GC_Z_C2_ZBARRIERSETC2_HPP
#include "gc/shared/c2/barrierSetC2.hpp"
#include "memory/allocation.hpp"
#include "opto/node.hpp"
#include "utilities/growableArray.hpp"
class LoadBarrierNode : public MultiNode {
private:
bool _weak;
bool _writeback; // Controls if the barrier writes the healed oop back to memory
// A swap on a memory location must never write back the healed oop
bool _oop_reload_allowed; // Controls if the barrier is allowed to reload the oop from memory
// before healing, otherwise both the oop and the address must be passed to the
// barrier from the oop
static bool is_dominator(PhaseIdealLoop* phase, bool linear_only, Node *d, Node *n);
void push_dominated_barriers(PhaseIterGVN* igvn) const;
public:
enum {
Control,
Memory,
Oop,
Address,
Number_of_Outputs = Address,
Similar,
Number_of_Inputs
};
LoadBarrierNode(Compile* C,
Node* c,
Node* mem,
Node* val,
Node* adr,
bool weak,
bool writeback,
bool oop_reload_allowed);
virtual int Opcode() const;
virtual const Type *bottom_type() const;
virtual const Type *Value(PhaseGVN *phase) const;
virtual Node *Identity(PhaseGVN *phase);
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
LoadBarrierNode* has_dominating_barrier(PhaseIdealLoop* phase,
bool linear_only,
bool look_for_similar);
void fix_similar_in_uses(PhaseIterGVN* igvn);
bool has_true_uses() const;
bool can_be_eliminated() const {
return !in(Similar)->is_top();
}
bool is_weak() const {
return _weak;
}
bool is_writeback() const {
return _writeback;
}
bool oop_reload_allowed() const {
return _oop_reload_allowed;
}
};
class LoadBarrierSlowRegNode : public LoadPNode {
public:
LoadBarrierSlowRegNode(Node *c,
Node *mem,
Node *adr,
const TypePtr *at,
const TypePtr* t,
MemOrd mo,
ControlDependency control_dependency = DependsOnlyOnTest)
: LoadPNode(c, mem, adr, at, t, mo, control_dependency) {}
virtual const char * name() {
return "LoadBarrierSlowRegNode";
}
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape) {
return NULL;
}
virtual int Opcode() const;
};
class LoadBarrierWeakSlowRegNode : public LoadPNode {
public:
LoadBarrierWeakSlowRegNode(Node *c,
Node *mem,
Node *adr,
const TypePtr *at,
const TypePtr* t,
MemOrd mo,
ControlDependency control_dependency = DependsOnlyOnTest)
: LoadPNode(c, mem, adr, at, t, mo, control_dependency) {}
virtual const char * name() {
return "LoadBarrierWeakSlowRegNode";
}
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape) {
return NULL;
}
virtual int Opcode() const;
};
class ZBarrierSetC2State : public ResourceObj {
private:
// List of load barrier nodes which need to be expanded before matching
GrowableArray<LoadBarrierNode*>* _load_barrier_nodes;
public:
ZBarrierSetC2State(Arena* comp_arena);
int load_barrier_count() const;
void add_load_barrier_node(LoadBarrierNode* n);
void remove_load_barrier_node(LoadBarrierNode* n);
LoadBarrierNode* load_barrier_node(int idx) const;
};
class ZBarrierSetC2 : public BarrierSetC2 {
private:
ZBarrierSetC2State* state() const;
Node* make_cas_loadbarrier(C2AtomicAccess& access) const;
Node* make_cmpx_loadbarrier(C2AtomicAccess& access) const;
void expand_loadbarrier_basic(PhaseMacroExpand* phase, LoadBarrierNode *barrier) const;
void expand_loadbarrier_node(PhaseMacroExpand* phase, LoadBarrierNode* barrier) const;
void expand_loadbarrier_optimized(PhaseMacroExpand* phase, LoadBarrierNode *barrier) const;
const TypeFunc* load_barrier_Type() const;
protected:
virtual Node* load_at_resolved(C2Access& access, const Type* val_type) const;
virtual Node* atomic_cmpxchg_val_at_resolved(C2AtomicAccess& access,
Node* expected_val,
Node* new_val,
const Type* val_type) const;
virtual Node* atomic_cmpxchg_bool_at_resolved(C2AtomicAccess& access,
Node* expected_val,
Node* new_val,
const Type* value_type) const;
virtual Node* atomic_xchg_at_resolved(C2AtomicAccess& access,
Node* new_val,
const Type* val_type) const;
public:
Node* load_barrier(GraphKit* kit,
Node* val,
Node* adr,
bool weak = false,
bool writeback = true,
bool oop_reload_allowed = true) const;
virtual void* create_barrier_state(Arena* comp_arena) const;
virtual bool is_gc_barrier_node(Node* node) const;
virtual void eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) const { }
virtual void eliminate_useless_gc_barriers(Unique_Node_List &useful) const;
virtual void add_users_to_worklist(Unique_Node_List* worklist) const;
virtual void enqueue_useful_gc_barrier(Unique_Node_List &worklist, Node* node) const;
virtual void register_potential_barrier_node(Node* node) const;
virtual void unregister_potential_barrier_node(Node* node) const;
virtual bool array_copy_requires_gc_barriers(BasicType type) const { return true; }
virtual Node* step_over_gc_barrier(Node* c) const { return c; }
// If the BarrierSetC2 state has kept macro nodes in its compilation unit state to be
// expanded later, then now is the time to do so.
virtual bool expand_macro_nodes(PhaseMacroExpand* macro) const;
static void find_dominating_barriers(PhaseIterGVN& igvn);
static void loop_optimize_gc_barrier(PhaseIdealLoop* phase, Node* node, bool last_round);
#ifdef ASSERT
virtual void verify_gc_barriers(bool post_parse) const;
#endif
};
#endif // SHARE_GC_Z_C2_ZBARRIERSETC2_HPP

@ -0,0 +1,38 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc/z/vmStructs_z.hpp"
ZGlobalsForVMStructs::ZGlobalsForVMStructs() :
_ZGlobalPhase(&ZGlobalPhase),
_ZAddressGoodMask(&ZAddressGoodMask),
_ZAddressBadMask(&ZAddressBadMask),
_ZAddressWeakBadMask(&ZAddressWeakBadMask),
_ZObjectAlignmentSmallShift(&ZObjectAlignmentSmallShift),
_ZObjectAlignmentSmall(&ZObjectAlignmentSmall) {
}
ZGlobalsForVMStructs ZGlobalsForVMStructs::_instance;
ZGlobalsForVMStructs* ZGlobalsForVMStructs::_instance_p = &ZGlobalsForVMStructs::_instance;

@ -0,0 +1,121 @@
/*
* Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_Z_VMSTRUCTS_Z_HPP
#define SHARE_VM_GC_Z_VMSTRUCTS_Z_HPP
#include "gc/z/zAddressRangeMap.hpp"
#include "gc/z/zCollectedHeap.hpp"
#include "gc/z/zHeap.hpp"
#include "gc/z/zPageAllocator.hpp"
#include "gc/z/zPhysicalMemory.hpp"
#include "utilities/macros.hpp"
// Expose some ZGC globals to the SA agent.
class ZGlobalsForVMStructs {
static ZGlobalsForVMStructs _instance;
public:
static ZGlobalsForVMStructs* _instance_p;
ZGlobalsForVMStructs();
uint32_t* _ZGlobalPhase;
uintptr_t* _ZAddressGoodMask;
uintptr_t* _ZAddressBadMask;
uintptr_t* _ZAddressWeakBadMask;
const int* _ZObjectAlignmentSmallShift;
const int* _ZObjectAlignmentSmall;
};
typedef ZAddressRangeMap<ZPageTableEntry, ZPageSizeMinShift> ZAddressRangeMapForPageTable;
#define VM_STRUCTS_ZGC(nonstatic_field, volatile_nonstatic_field, static_field) \
static_field(ZGlobalsForVMStructs, _instance_p, ZGlobalsForVMStructs*) \
nonstatic_field(ZGlobalsForVMStructs, _ZGlobalPhase, uint32_t*) \
nonstatic_field(ZGlobalsForVMStructs, _ZAddressGoodMask, uintptr_t*) \
nonstatic_field(ZGlobalsForVMStructs, _ZAddressBadMask, uintptr_t*) \
nonstatic_field(ZGlobalsForVMStructs, _ZAddressWeakBadMask, uintptr_t*) \
nonstatic_field(ZGlobalsForVMStructs, _ZObjectAlignmentSmallShift, const int*) \
nonstatic_field(ZGlobalsForVMStructs, _ZObjectAlignmentSmall, const int*) \
\
nonstatic_field(ZCollectedHeap, _heap, ZHeap) \
\
nonstatic_field(ZHeap, _page_allocator, ZPageAllocator) \
nonstatic_field(ZHeap, _pagetable, ZPageTable) \
\
nonstatic_field(ZPage, _type, const uint8_t) \
nonstatic_field(ZPage, _virtual, const ZVirtualMemory) \
nonstatic_field(ZPage, _forwarding, ZForwardingTable) \
\
nonstatic_field(ZPageAllocator, _physical, ZPhysicalMemoryManager) \
nonstatic_field(ZPageAllocator, _used, size_t) \
\
nonstatic_field(ZPageTable, _map, ZAddressRangeMapForPageTable) \
\
nonstatic_field(ZAddressRangeMapForPageTable, _map, ZPageTableEntry* const) \
\
nonstatic_field(ZVirtualMemory, _start, uintptr_t) \
nonstatic_field(ZVirtualMemory, _end, uintptr_t) \
\
nonstatic_field(ZForwardingTable, _table, ZForwardingTableEntry*) \
nonstatic_field(ZForwardingTable, _size, size_t) \
\
nonstatic_field(ZPhysicalMemoryManager, _max_capacity, const size_t) \
nonstatic_field(ZPhysicalMemoryManager, _capacity, size_t)
#define VM_INT_CONSTANTS_ZGC(declare_constant, declare_constant_with_value) \
declare_constant(ZPhaseRelocate) \
declare_constant(ZPageTypeSmall) \
declare_constant(ZPageTypeMedium) \
declare_constant(ZPageTypeLarge) \
declare_constant(ZObjectAlignmentMediumShift) \
declare_constant(ZObjectAlignmentLargeShift)
#define VM_LONG_CONSTANTS_ZGC(declare_constant) \
declare_constant(ZPageSizeSmallShift) \
declare_constant(ZPageSizeMediumShift) \
declare_constant(ZPageSizeMinShift) \
declare_constant(ZAddressOffsetShift) \
declare_constant(ZAddressOffsetBits) \
declare_constant(ZAddressOffsetMask) \
declare_constant(ZAddressSpaceStart)
#define VM_TYPES_ZGC(declare_type, declare_toplevel_type, declare_integer_type) \
declare_toplevel_type(ZGlobalsForVMStructs) \
declare_type(ZCollectedHeap, CollectedHeap) \
declare_toplevel_type(ZHeap) \
declare_toplevel_type(ZPage) \
declare_toplevel_type(ZPageAllocator) \
declare_toplevel_type(ZPageTable) \
declare_toplevel_type(ZPageTableEntry) \
declare_toplevel_type(ZAddressRangeMapForPageTable) \
declare_toplevel_type(ZVirtualMemory) \
declare_toplevel_type(ZForwardingTable) \
declare_toplevel_type(ZForwardingTableEntry) \
declare_toplevel_type(ZPhysicalMemoryManager)
#endif // SHARE_VM_GC_Z_VMSTRUCTS_Z_HPP

@ -0,0 +1,48 @@
/*
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zGlobals.hpp"
#include "runtime/thread.hpp"
void ZAddressMasks::set_good_mask(uintptr_t mask) {
uintptr_t old_bad_mask = ZAddressBadMask;
ZAddressGoodMask = mask;
ZAddressBadMask = ZAddressGoodMask ^ ZAddressMetadataMask;
ZAddressWeakBadMask = (ZAddressGoodMask | ZAddressMetadataRemapped | ZAddressMetadataFinalizable) ^ ZAddressMetadataMask;
}
void ZAddressMasks::initialize() {
ZAddressMetadataMarked = ZAddressMetadataMarked0;
set_good_mask(ZAddressMetadataRemapped);
}
void ZAddressMasks::flip_to_marked() {
ZAddressMetadataMarked ^= (ZAddressMetadataMarked0 | ZAddressMetadataMarked1);
set_good_mask(ZAddressMetadataMarked);
}
void ZAddressMasks::flip_to_remapped() {
set_good_mask(ZAddressMetadataRemapped);
}

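Editor's note: set_good_mask() above derives both bad masks from the good mask by XOR against the full metadata mask. A worked sketch with hypothetical low-bit constants (the real ZGC metadata bits live high up in the address):

#include <cassert>
#include <cstdint>

int main() {
  // Hypothetical bit assignments, for illustration only.
  const uintptr_t marked0     = 0x1;
  const uintptr_t marked1     = 0x2;
  const uintptr_t remapped    = 0x4;
  const uintptr_t finalizable = 0x8;
  const uintptr_t metadata_mask = marked0 | marked1 | remapped | finalizable;

  // After flip_to_marked() in a marked0 phase:
  const uintptr_t good = marked0;
  const uintptr_t bad  = good ^ metadata_mask;
  const uintptr_t weak_bad = (good | remapped | finalizable) ^ metadata_mask;

  // Everything that is not good is bad...
  assert(bad == (marked1 | remapped | finalizable));
  // ...but remapped/finalizable oops are tolerated by the weak mask.
  assert(weak_bad == marked1);
  return 0;
}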
@ -0,0 +1,66 @@
/*
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_Z_ZADDRESS_HPP
#define SHARE_GC_Z_ZADDRESS_HPP
#include "memory/allocation.hpp"
class ZAddress : public AllStatic {
public:
static bool is_null(uintptr_t value);
static bool is_bad(uintptr_t value);
static bool is_good(uintptr_t value);
static bool is_good_or_null(uintptr_t value);
static bool is_weak_bad(uintptr_t value);
static bool is_weak_good(uintptr_t value);
static bool is_weak_good_or_null(uintptr_t value);
static bool is_marked(uintptr_t value);
static bool is_finalizable(uintptr_t value);
static bool is_remapped(uintptr_t value);
static uintptr_t address(uintptr_t value);
static uintptr_t offset(uintptr_t value);
static uintptr_t good(uintptr_t value);
static uintptr_t good_or_null(uintptr_t value);
static uintptr_t finalizable_good(uintptr_t value);
static uintptr_t marked(uintptr_t value);
static uintptr_t marked0(uintptr_t value);
static uintptr_t marked1(uintptr_t value);
static uintptr_t remapped(uintptr_t value);
static uintptr_t remapped_or_null(uintptr_t value);
};
class ZAddressMasks : public AllStatic {
friend class ZAddressTest;
private:
static void set_good_mask(uintptr_t mask);
public:
static void initialize();
static void flip_to_marked();
static void flip_to_remapped();
};
#endif // SHARE_GC_Z_ZADDRESS_HPP

@ -0,0 +1,117 @@
/*
* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_Z_ZADDRESS_INLINE_HPP
#define SHARE_GC_Z_ZADDRESS_INLINE_HPP
#include "gc/z/zAddress.hpp"
#include "gc/z/zGlobals.hpp"
#include "utilities/macros.hpp"
#include OS_CPU_HEADER_INLINE(gc/z/zAddress)
inline bool ZAddress::is_null(uintptr_t value) {
return value == 0;
}
inline bool ZAddress::is_bad(uintptr_t value) {
return value & ZAddressBadMask;
}
inline bool ZAddress::is_good(uintptr_t value) {
return !is_bad(value) && !is_null(value);
}
inline bool ZAddress::is_good_or_null(uintptr_t value) {
// Checking if an address is "not bad" is an optimized version of
// checking if it's "good or null", which eliminates an explicit
// null check. However, the implicit null check only checks that
// the mask bits are zero, not that the entire address is zero.
// This means that an address without mask bits would pass through
// the barrier as if it was null. This should be harmless as such
// addresses should never be passed through the barrier.
const bool result = !is_bad(value);
assert((is_good(value) || is_null(value)) == result, "Bad address");
return result;
}
inline bool ZAddress::is_weak_bad(uintptr_t value) {
return value & ZAddressWeakBadMask;
}
inline bool ZAddress::is_weak_good(uintptr_t value) {
return !is_weak_bad(value) && !is_null(value);
}
inline bool ZAddress::is_weak_good_or_null(uintptr_t value) {
return !is_weak_bad(value);
}
inline bool ZAddress::is_marked(uintptr_t value) {
return value & ZAddressMetadataMarked;
}
inline bool ZAddress::is_finalizable(uintptr_t value) {
return value & ZAddressMetadataFinalizable;
}
inline bool ZAddress::is_remapped(uintptr_t value) {
return value & ZAddressMetadataRemapped;
}
inline uintptr_t ZAddress::offset(uintptr_t value) {
return value & ZAddressOffsetMask;
}
inline uintptr_t ZAddress::good(uintptr_t value) {
return address(offset(value) | ZAddressGoodMask);
}
inline uintptr_t ZAddress::good_or_null(uintptr_t value) {
return is_null(value) ? 0 : good(value);
}
inline uintptr_t ZAddress::finalizable_good(uintptr_t value) {
return address(offset(value) | ZAddressMetadataFinalizable | ZAddressGoodMask);
}
inline uintptr_t ZAddress::marked(uintptr_t value) {
return address(offset(value) | ZAddressMetadataMarked);
}
inline uintptr_t ZAddress::marked0(uintptr_t value) {
return address(offset(value) | ZAddressMetadataMarked0);
}
inline uintptr_t ZAddress::marked1(uintptr_t value) {
return address(offset(value) | ZAddressMetadataMarked1);
}
inline uintptr_t ZAddress::remapped(uintptr_t value) {
return address(offset(value) | ZAddressMetadataRemapped);
}
inline uintptr_t ZAddress::remapped_or_null(uintptr_t value) {
return is_null(value) ? 0 : remapped(value);
}
#endif // SHARE_GC_Z_ZADDRESS_INLINE_HPP

@ -0,0 +1,63 @@
/*
* Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_Z_ZADDRESSRANGEMAP_HPP
#define SHARE_GC_Z_ZADDRESSRANGEMAP_HPP
#include "memory/allocation.hpp"
template<typename T, size_t AddressRangeShift>
class ZAddressRangeMapIterator;
template <typename T, size_t AddressRangeShift>
class ZAddressRangeMap {
friend class VMStructs;
friend class ZAddressRangeMapIterator<T, AddressRangeShift>;
private:
T* const _map;
size_t index_for_addr(uintptr_t addr) const;
size_t size() const;
public:
ZAddressRangeMap();
~ZAddressRangeMap();
T get(uintptr_t addr) const;
void put(uintptr_t addr, T value);
};
template <typename T, size_t AddressRangeShift>
class ZAddressRangeMapIterator : public StackObj {
public:
const ZAddressRangeMap<T, AddressRangeShift>* const _map;
size_t _next;
public:
ZAddressRangeMapIterator(const ZAddressRangeMap<T, AddressRangeShift>* map);
bool next(T* value);
};
#endif // SHARE_GC_Z_ZADDRESSRANGEMAP_HPP

@ -0,0 +1,84 @@
/*
* Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_Z_ZADDRESSRANGEMAP_INLINE_HPP
#define SHARE_GC_Z_ZADDRESSRANGEMAP_INLINE_HPP
#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zAddressRangeMap.hpp"
#include "gc/z/zGlobals.hpp"
#include "memory/allocation.inline.hpp"
template <typename T, size_t AddressRangeShift>
ZAddressRangeMap<T, AddressRangeShift>::ZAddressRangeMap() :
_map(MmapArrayAllocator<T>::allocate(size(), mtGC)) {}
template <typename T, size_t AddressRangeShift>
ZAddressRangeMap<T, AddressRangeShift>::~ZAddressRangeMap() {
MmapArrayAllocator<T>::free(_map, size());
}
template <typename T, size_t AddressRangeShift>
size_t ZAddressRangeMap<T, AddressRangeShift>::index_for_addr(uintptr_t addr) const {
assert(!ZAddress::is_null(addr), "Invalid address");
const size_t index = ZAddress::offset(addr) >> AddressRangeShift;
assert(index < size(), "Invalid index");
return index;
}
template <typename T, size_t AddressRangeShift>
size_t ZAddressRangeMap<T, AddressRangeShift>::size() const {
return ZAddressOffsetMax >> AddressRangeShift;
}
template <typename T, size_t AddressRangeShift>
T ZAddressRangeMap<T, AddressRangeShift>::get(uintptr_t addr) const {
const uintptr_t index = index_for_addr(addr);
return _map[index];
}
template <typename T, size_t AddressRangeShift>
void ZAddressRangeMap<T, AddressRangeShift>::put(uintptr_t addr, T value) {
const uintptr_t index = index_for_addr(addr);
_map[index] = value;
}
template <typename T, size_t AddressRangeShift>
inline ZAddressRangeMapIterator<T, AddressRangeShift>::ZAddressRangeMapIterator(const ZAddressRangeMap<T, AddressRangeShift>* map) :
_map(map),
_next(0) {}
template <typename T, size_t AddressRangeShift>
inline bool ZAddressRangeMapIterator<T, AddressRangeShift>::next(T* value) {
if (_next < _map->size()) {
*value = _map->_map[_next++];
return true;
}
// End of map
return false;
}
#endif // SHARE_GC_Z_ZADDRESSRANGEMAP_INLINE_HPP

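Editor's note: the map above is a flat array with one slot per 2^AddressRangeShift bytes of offset space. A standalone sketch of the indexing scheme, using plain new[] and a made-up offset range instead of MmapArrayAllocator and ZAddressOffsetMax:

#include <cassert>
#include <cstddef>
#include <cstdint>

template <typename T, size_t Shift>
class RangeMap {
private:
  static const size_t OffsetMax = size_t(1) << 20; // hypothetical range
  T* const _map;

  size_t size() const { return OffsetMax >> Shift; }

  size_t index_for_offset(uintptr_t offset) const {
    const size_t index = offset >> Shift;
    assert(index < size() && "Invalid index");
    return index;
  }

public:
  RangeMap() : _map(new T[size()]()) {}
  ~RangeMap() { delete[] _map; }

  T get(uintptr_t offset) const { return _map[index_for_offset(offset)]; }
  void put(uintptr_t offset, T value) { _map[index_for_offset(offset)] = value; }
};

int main() {
  RangeMap<int, 12> map;  // one slot per 4K granule
  map.put(0x5000, 42);    // offsets 0x5000-0x5fff map to the same slot
  assert(map.get(0x5abc) == 42);
  assert(map.get(0x6000) == 0);
  return 0;
}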
@ -0,0 +1,107 @@
/*
* Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_Z_ZALLOCATIONFLAGS_HPP
#define SHARE_GC_Z_ZALLOCATIONFLAGS_HPP
#include "gc/z/zBitField.hpp"
#include "memory/allocation.hpp"
//
// Allocation flags layout
// -----------------------
//
// 7 4 3 2 1 0
// +---+-+-+-+-+-+
// |000|1|1|1|1|1|
// +---+-+-+-+-+-+
// | | | | | |
// | | | | | * 0-0 Java Thread Flag (1-bit)
// | | | | |
// | | | | * 1-1 Worker Thread Flag (1-bit)
// | | | |
// | | | * 2-2 Non-Blocking Flag (1-bit)
// | | |
// | | * 3-3 Relocation Flag (1-bit)
// | |
// | * 4-4 No Reserve Flag (1-bit)
// |
// * 7-5 Unused (3-bits)
//
class ZAllocationFlags {
private:
typedef ZBitField<uint8_t, bool, 0, 1> field_java_thread;
typedef ZBitField<uint8_t, bool, 1, 1> field_worker_thread;
typedef ZBitField<uint8_t, bool, 2, 1> field_non_blocking;
typedef ZBitField<uint8_t, bool, 3, 1> field_relocation;
typedef ZBitField<uint8_t, bool, 4, 1> field_no_reserve;
uint8_t _flags;
public:
ZAllocationFlags() :
_flags(0) {}
void set_java_thread() {
_flags |= field_java_thread::encode(true);
}
void set_worker_thread() {
_flags |= field_worker_thread::encode(true);
}
void set_non_blocking() {
_flags |= field_non_blocking::encode(true);
}
void set_relocation() {
_flags |= field_relocation::encode(true);
}
void set_no_reserve() {
_flags |= field_no_reserve::encode(true);
}
bool java_thread() const {
return field_java_thread::decode(_flags);
}
bool worker_thread() const {
return field_worker_thread::decode(_flags);
}
bool non_blocking() const {
return field_non_blocking::decode(_flags);
}
bool relocation() const {
return field_relocation::decode(_flags);
}
bool no_reserve() const {
return field_no_reserve::decode(_flags);
}
};
#endif // SHARE_GC_Z_ZALLOCATIONFLAGS_HPP

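Editor's note: ZAllocationFlags packs its five booleans into one byte through ZBitField. A minimal sketch of the single-bit encode/decode idiom it relies on (assumed shape; the real ZBitField template is more general):

#include <cassert>
#include <cstdint>

// One boolean flag stored at bit position Pos of container type S.
template <typename S, int Pos>
struct BoolBitField {
  static S encode(bool value) {
    return static_cast<S>(value ? (S(1) << Pos) : 0);
  }
  static bool decode(S container) {
    return ((container >> Pos) & 1) != 0;
  }
};

int main() {
  typedef BoolBitField<uint8_t, 2> field_non_blocking;
  uint8_t flags = 0;
  flags |= field_non_blocking::encode(true); // same idiom as the setters
  assert(field_non_blocking::decode(flags));
  assert(flags == 0x4);
  return 0;
}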
@ -0,0 +1,106 @@
/*
* Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc/z/zArguments.hpp"
#include "gc/z/zCollectedHeap.hpp"
#include "gc/z/zCollectorPolicy.hpp"
#include "gc/z/zWorkers.hpp"
#include "gc/shared/gcArguments.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/globals_extension.hpp"
size_t ZArguments::conservative_max_heap_alignment() {
return 0;
}
void ZArguments::initialize() {
GCArguments::initialize();
// Enable NUMA by default
if (FLAG_IS_DEFAULT(UseNUMA)) {
FLAG_SET_DEFAULT(UseNUMA, true);
}
// Disable biased locking by default
if (FLAG_IS_DEFAULT(UseBiasedLocking)) {
FLAG_SET_DEFAULT(UseBiasedLocking, false);
}
// Select number of parallel threads
if (FLAG_IS_DEFAULT(ParallelGCThreads)) {
FLAG_SET_DEFAULT(ParallelGCThreads, ZWorkers::calculate_nparallel());
}
if (ParallelGCThreads == 0) {
vm_exit_during_initialization("The flag -XX:+UseZGC can not be combined with -XX:ParallelGCThreads=0");
}
// Select number of concurrent threads
if (FLAG_IS_DEFAULT(ConcGCThreads)) {
FLAG_SET_DEFAULT(ConcGCThreads, ZWorkers::calculate_nconcurrent());
}
if (ConcGCThreads == 0) {
vm_exit_during_initialization("The flag -XX:+UseZGC can not be combined with -XX:ConcGCThreads=0");
}
#ifdef COMPILER2
// Enable loop strip mining by default
if (FLAG_IS_DEFAULT(UseCountedLoopSafepoints)) {
FLAG_SET_DEFAULT(UseCountedLoopSafepoints, true);
if (FLAG_IS_DEFAULT(LoopStripMiningIter)) {
FLAG_SET_DEFAULT(LoopStripMiningIter, 1000);
}
}
#endif
// To avoid asserts in set_active_workers()
FLAG_SET_DEFAULT(UseDynamicNumberOfGCThreads, true);
// CompressedOops/UseCompressedClassPointers not supported
FLAG_SET_DEFAULT(UseCompressedOops, false);
FLAG_SET_DEFAULT(UseCompressedClassPointers, false);
// ClassUnloading not (yet) supported
FLAG_SET_DEFAULT(ClassUnloading, false);
FLAG_SET_DEFAULT(ClassUnloadingWithConcurrentMark, false);
// Verification before startup and after exit not (yet) supported
FLAG_SET_DEFAULT(VerifyDuringStartup, false);
FLAG_SET_DEFAULT(VerifyBeforeExit, false);
// Verification of stacks not (yet) supported, for the same reason
// we need fixup_partial_loads
DEBUG_ONLY(FLAG_SET_DEFAULT(VerifyStack, false));
// JVMCI not (yet) supported
if (EnableJVMCI) {
vm_exit_during_initialization("The flag -XX:+UseZGC can not be combined with -XX:+EnableJVMCI");
}
}
CollectedHeap* ZArguments::create_heap() {
return create_heap_with_policy<ZCollectedHeap, ZCollectorPolicy>();
}

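Editor's note: ZArguments::initialize() leans on the FLAG_IS_DEFAULT/FLAG_SET_DEFAULT ergonomics idiom: a GC-chosen default is applied only when the user left the flag untouched. A standalone sketch of that idiom with a toy Flag type (not the HotSpot flag machinery):

#include <cassert>

struct Flag {
  bool value;
  bool is_default; // true if never set on the command line
};

// Apply a default only if the user did not specify the flag,
// mirroring FLAG_IS_DEFAULT(...) + FLAG_SET_DEFAULT(...).
static void set_default(Flag& flag, bool value) {
  if (flag.is_default) {
    flag.value = value;
  }
}

int main() {
  Flag use_numa       = { false, true  }; // untouched by the user
  Flag biased_locking = { true,  false }; // set explicitly by the user
  set_default(use_numa, true);
  set_default(biased_locking, false);
  assert(use_numa.value);       // default applied
  assert(biased_locking.value); // user's choice preserved
  return 0;
}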
@ -0,0 +1,38 @@
/*
* Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_Z_ZARGUMENTS_HPP
#define SHARE_GC_Z_ZARGUMENTS_HPP
#include "gc/shared/gcArguments.hpp"
class CollectedHeap;
class ZArguments : public GCArguments {
public:
virtual void initialize();
virtual size_t conservative_max_heap_alignment();
virtual CollectedHeap* create_heap();
};
#endif // SHARE_GC_Z_ZARGUMENTS_HPP

@ -0,0 +1,87 @@
/*
* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_Z_ZARRAY_HPP
#define SHARE_GC_Z_ZARRAY_HPP
#include "memory/allocation.hpp"
template <typename T>
class ZArray {
private:
static const size_t initial_capacity = 32;
T* _array;
size_t _size;
size_t _capacity;
// Copy and assignment are not allowed
ZArray(const ZArray<T>& array);
ZArray<T>& operator=(const ZArray<T>& array);
void expand(size_t new_capacity);
public:
ZArray();
~ZArray();
size_t size() const;
bool is_empty() const;
T at(size_t index) const;
void add(T value);
void clear();
};
template <typename T, bool parallel>
class ZArrayIteratorImpl : public StackObj {
private:
ZArray<T>* const _array;
size_t _next;
public:
ZArrayIteratorImpl(ZArray<T>* array);
bool next(T* elem);
};
// Iterator types
#define ZARRAY_SERIAL false
#define ZARRAY_PARALLEL true
template <typename T>
class ZArrayIterator : public ZArrayIteratorImpl<T, ZARRAY_SERIAL> {
public:
ZArrayIterator(ZArray<T>* array) :
ZArrayIteratorImpl<T, ZARRAY_SERIAL>(array) {}
};
template <typename T>
class ZArrayParallelIterator : public ZArrayIteratorImpl<T, ZARRAY_PARALLEL> {
public:
ZArrayParallelIterator(ZArray<T>* array) :
ZArrayIteratorImpl<T, ZARRAY_PARALLEL>(array) {}
};
#endif // SHARE_GC_Z_ZARRAY_HPP

@ -0,0 +1,111 @@
/*
* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_Z_ZARRAY_INLINE_HPP
#define SHARE_GC_Z_ZARRAY_INLINE_HPP
#include "gc/z/zArray.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/atomic.hpp"
template <typename T>
inline ZArray<T>::ZArray() :
_array(NULL),
_size(0),
_capacity(0) {}
template <typename T>
inline ZArray<T>::~ZArray() {
if (_array != NULL) {
FREE_C_HEAP_ARRAY(T, _array);
}
}
template <typename T>
inline size_t ZArray<T>::size() const {
return _size;
}
template <typename T>
inline bool ZArray<T>::is_empty() const {
return size() == 0;
}
template <typename T>
inline T ZArray<T>::at(size_t index) const {
assert(index < _size, "Index out of bounds");
return _array[index];
}
template <typename T>
inline void ZArray<T>::expand(size_t new_capacity) {
T* new_array = NEW_C_HEAP_ARRAY(T, new_capacity, mtGC);
if (_array != NULL) {
memcpy(new_array, _array, sizeof(T) * _capacity);
FREE_C_HEAP_ARRAY(T, _array);
}
_array = new_array;
_capacity = new_capacity;
}
template <typename T>
inline void ZArray<T>::add(T value) {
if (_size == _capacity) {
const size_t new_capacity = (_capacity > 0) ? _capacity * 2 : initial_capacity;
expand(new_capacity);
}
_array[_size++] = value;
}
template <typename T>
inline void ZArray<T>::clear() {
_size = 0;
}
template <typename T, bool parallel>
inline ZArrayIteratorImpl<T, parallel>::ZArrayIteratorImpl(ZArray<T>* array) :
_array(array),
_next(0) {}
template <typename T, bool parallel>
inline bool ZArrayIteratorImpl<T, parallel>::next(T* elem) {
if (parallel) {
const size_t next = Atomic::add(1u, &_next) - 1u;
if (next < _array->size()) {
*elem = _array->at(next);
return true;
}
} else {
if (_next < _array->size()) {
*elem = _array->at(_next++);
return true;
}
}
// No more elements
return false;
}
#endif // SHARE_GC_Z_ZARRAY_INLINE_HPP

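Editor's note: in the parallel case above, each thread claims the next array index with an atomic add (HotSpot's Atomic::add returns the new value, hence the trailing - 1u). A standalone sketch of the same claim protocol using std::atomic, whose fetch_add returns the old value:

#include <atomic>
#include <cassert>
#include <cstddef>
#include <thread>
#include <vector>

int main() {
  const size_t n = 1000;
  std::vector<int> array(n, 1);
  std::atomic<size_t> next(0);
  std::atomic<long> visited(0);

  auto worker = [&]() {
    for (;;) {
      const size_t i = next.fetch_add(1); // claim exactly one index
      if (i >= array.size()) {
        return; // no more elements
      }
      visited += array[i];
    }
  };

  std::thread t1(worker);
  std::thread t2(worker);
  t1.join();
  t2.join();

  // Every element was handed to exactly one thread.
  assert(visited == static_cast<long>(n));
  return 0;
}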
@ -0,0 +1,270 @@
/*
* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/z/zBarrier.inline.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zOop.inline.hpp"
#include "gc/z/zOopClosures.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/debug.hpp"
bool ZBarrier::during_mark() {
return ZGlobalPhase == ZPhaseMark;
}
bool ZBarrier::during_relocate() {
return ZGlobalPhase == ZPhaseRelocate;
}
template <bool finalizable>
bool ZBarrier::should_mark_through(uintptr_t addr) {
// Finalizable marked oops can still exist on the heap after marking
// has completed, in which case we just want to convert this into a
// good oop and not push it on the mark stack.
if (!during_mark()) {
assert(ZAddress::is_marked(addr), "Should be marked");
assert(ZAddress::is_finalizable(addr), "Should be finalizable");
return false;
}
// During marking, we mark through already marked oops to avoid having
// some large part of the object graph hidden behind a pushed, but not
// yet flushed, entry on a mutator mark stack. Always marking through
// allows the GC workers to proceed through the object graph even if a
// mutator touched an oop first, which in turn will reduce the risk of
// having to flush mark stacks multiple times to terminate marking.
//
// However, when doing finalizable marking we don't always want to mark
// through. First, marking through an already strongly marked oop would
// be wasteful, since we will then proceed to do finalizable marking on
// an object which is, or will be, marked strongly. Second, marking
// through an already finalizable marked oop would also be wasteful,
// since such oops can never end up on a mutator mark stack and can
// therefore not hide some part of the object graph from GC workers.
if (finalizable) {
return !ZAddress::is_marked(addr);
}
// Mark through
return true;
}
template <bool finalizable, bool publish>
uintptr_t ZBarrier::mark(uintptr_t addr) {
uintptr_t good_addr;
if (ZAddress::is_marked(addr)) {
// Already marked, but try to mark through anyway
good_addr = ZAddress::good(addr);
} else if (ZAddress::is_remapped(addr)) {
// Already remapped, but also needs to be marked
good_addr = ZAddress::good(addr);
} else {
// Needs to be both remapped and marked
good_addr = remap(addr);
}
// Mark
if (should_mark_through<finalizable>(addr)) {
ZHeap::heap()->mark_object<finalizable, publish>(good_addr);
}
return good_addr;
}
uintptr_t ZBarrier::remap(uintptr_t addr) {
assert(!ZAddress::is_good(addr), "Should not be good");
assert(!ZAddress::is_weak_good(addr), "Should not be weak good");
if (ZHeap::heap()->is_relocating(addr)) {
// Forward
return ZHeap::heap()->forward_object(addr);
}
// Remap
return ZAddress::good(addr);
}
uintptr_t ZBarrier::relocate(uintptr_t addr) {
assert(!ZAddress::is_good(addr), "Should not be good");
assert(!ZAddress::is_weak_good(addr), "Should not be weak good");
if (ZHeap::heap()->is_relocating(addr)) {
// Relocate
return ZHeap::heap()->relocate_object(addr);
}
// Remap
return ZAddress::good(addr);
}
uintptr_t ZBarrier::relocate_or_mark(uintptr_t addr) {
return during_relocate() ? relocate(addr) : mark<Strong, Publish>(addr);
}
uintptr_t ZBarrier::relocate_or_remap(uintptr_t addr) {
return during_relocate() ? relocate(addr) : remap(addr);
}
//
// Load barrier
//
uintptr_t ZBarrier::load_barrier_on_oop_slow_path(uintptr_t addr) {
return relocate_or_mark(addr);
}
void ZBarrier::load_barrier_on_oop_fields(oop o) {
assert(ZOop::is_good(o), "Should be good");
ZLoadBarrierOopClosure cl;
o->oop_iterate(&cl);
}
//
// Weak load barrier
//
uintptr_t ZBarrier::weak_load_barrier_on_oop_slow_path(uintptr_t addr) {
return ZAddress::is_weak_good(addr) ? ZAddress::good(addr) : relocate_or_remap(addr);
}
uintptr_t ZBarrier::weak_load_barrier_on_weak_oop_slow_path(uintptr_t addr) {
const uintptr_t good_addr = weak_load_barrier_on_oop_slow_path(addr);
if (ZHeap::heap()->is_object_strongly_live(good_addr)) {
return good_addr;
}
// Not strongly live
return 0;
}
uintptr_t ZBarrier::weak_load_barrier_on_phantom_oop_slow_path(uintptr_t addr) {
const uintptr_t good_addr = weak_load_barrier_on_oop_slow_path(addr);
if (ZHeap::heap()->is_object_live(good_addr)) {
return good_addr;
}
// Not live
return 0;
}
//
// Keep alive barrier
//
uintptr_t ZBarrier::keep_alive_barrier_on_weak_oop_slow_path(uintptr_t addr) {
const uintptr_t good_addr = weak_load_barrier_on_oop_slow_path(addr);
assert(ZHeap::heap()->is_object_strongly_live(good_addr), "Should be live");
return good_addr;
}
uintptr_t ZBarrier::keep_alive_barrier_on_phantom_oop_slow_path(uintptr_t addr) {
const uintptr_t good_addr = weak_load_barrier_on_oop_slow_path(addr);
assert(ZHeap::heap()->is_object_live(good_addr), "Should be live");
return good_addr;
}
//
// Mark barrier
//
uintptr_t ZBarrier::mark_barrier_on_oop_slow_path(uintptr_t addr) {
return mark<Strong, Overflow>(addr);
}
uintptr_t ZBarrier::mark_barrier_on_finalizable_oop_slow_path(uintptr_t addr) {
const uintptr_t good_addr = mark<Finalizable, Overflow>(addr);
if (ZAddress::is_good(addr)) {
// If the oop was already strongly marked/good, then we do
// not want to downgrade it to finalizable marked/good.
return good_addr;
}
// Make the oop finalizable marked/good, instead of normal marked/good.
// This is needed because an object might first become finalizable
// marked by the GC, and then loaded by a mutator thread. In this case,
// the mutator thread must be able to tell that the object needs to be
// strongly marked. The finalizable bit in the oop exists to make sure
// that a load of a finalizable marked oop will fall into the barrier
// slow path so that we can mark the object as strongly reachable.
return ZAddress::finalizable_good(good_addr);
}
uintptr_t ZBarrier::mark_barrier_on_root_oop_slow_path(uintptr_t addr) {
assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
assert(during_mark(), "Invalid phase");
// Mark
return mark<Strong, Publish>(addr);
}
//
// Relocate barrier
//
uintptr_t ZBarrier::relocate_barrier_on_root_oop_slow_path(uintptr_t addr) {
assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
assert(during_relocate(), "Invalid phase");
// Relocate
return relocate(addr);
}
//
// Narrow oop variants, never used.
//
oop ZBarrier::load_barrier_on_oop_field(volatile narrowOop* p) {
ShouldNotReachHere();
return NULL;
}
oop ZBarrier::load_barrier_on_oop_field_preloaded(volatile narrowOop* p, oop o) {
ShouldNotReachHere();
return NULL;
}
void ZBarrier::load_barrier_on_oop_array(volatile narrowOop* p, size_t length) {
ShouldNotReachHere();
}
oop ZBarrier::load_barrier_on_weak_oop_field_preloaded(volatile narrowOop* p, oop o) {
ShouldNotReachHere();
return NULL;
}
oop ZBarrier::load_barrier_on_phantom_oop_field_preloaded(volatile narrowOop* p, oop o) {
ShouldNotReachHere();
return NULL;
}
oop ZBarrier::weak_load_barrier_on_oop_field_preloaded(volatile narrowOop* p, oop o) {
ShouldNotReachHere();
return NULL;
}
oop ZBarrier::weak_load_barrier_on_weak_oop_field_preloaded(volatile narrowOop* p, oop o) {
ShouldNotReachHere();
return NULL;
}
oop ZBarrier::weak_load_barrier_on_phantom_oop_field_preloaded(volatile narrowOop* p, oop o) {
ShouldNotReachHere();
return NULL;
}
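The slow-path dispatch in relocate_or_mark() and relocate_or_remap() boils down to "consult the global phase, then either mark or remap/relocate the address". A minimal standalone sketch of that dispatch, where the Phase enum, the metadata bits, and the helpers are invented stand-ins for the HotSpot types, not the real implementation:

#include <cstdint>
#include <cstdio>

// Hypothetical stand-ins for ZGlobalPhase and the address metadata bits.
enum class Phase { Mark, Relocate };
static Phase g_phase = Phase::Mark;

constexpr uintptr_t kMarkedBit   = uintptr_t(1) << 62;
constexpr uintptr_t kRemappedBit = uintptr_t(1) << 61;

uintptr_t mark(uintptr_t addr)     { return (addr & ~kRemappedBit) | kMarkedBit; }
uintptr_t relocate(uintptr_t addr) { return (addr & ~kMarkedBit) | kRemappedBit; }

// Mirrors relocate_or_mark(): a load barrier taken during relocation must
// forward/remap the address, while one taken during marking must mark it.
uintptr_t relocate_or_mark(uintptr_t addr) {
  return g_phase == Phase::Relocate ? relocate(addr) : mark(addr);
}

int main() {
  const uintptr_t addr = 0x1000;
  g_phase = Phase::Mark;
  printf("mark phase:     %#lx\n", (unsigned long)relocate_or_mark(addr));
  g_phase = Phase::Relocate;
  printf("relocate phase: %#lx\n", (unsigned long)relocate_or_mark(addr));
}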

@ -0,0 +1,121 @@
/*
* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_Z_ZBARRIER_HPP
#define SHARE_GC_Z_ZBARRIER_HPP
#include "memory/allocation.hpp"
#include "oops/oop.hpp"
typedef bool (*ZBarrierFastPath)(uintptr_t);
typedef uintptr_t (*ZBarrierSlowPath)(uintptr_t);
class ZBarrier : public AllStatic {
private:
static const bool Strong = false;
static const bool Finalizable = true;
static const bool Publish = true;
static const bool Overflow = false;
template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path> static oop barrier(volatile oop* p, oop o);
template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path> static oop weak_barrier(volatile oop* p, oop o);
template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path> static void root_barrier(oop* p, oop o);
static bool is_null_fast_path(uintptr_t addr);
static bool is_good_or_null_fast_path(uintptr_t addr);
static bool is_weak_good_or_null_fast_path(uintptr_t addr);
static bool is_resurrection_blocked(volatile oop* p, oop* o);
static bool during_mark();
static bool during_relocate();
template <bool finalizable> static bool should_mark_through(uintptr_t addr);
template <bool finalizable, bool publish> static uintptr_t mark(uintptr_t addr);
static uintptr_t remap(uintptr_t addr);
static uintptr_t relocate(uintptr_t addr);
static uintptr_t relocate_or_mark(uintptr_t addr);
static uintptr_t relocate_or_remap(uintptr_t addr);
static uintptr_t load_barrier_on_oop_slow_path(uintptr_t addr);
static uintptr_t weak_load_barrier_on_oop_slow_path(uintptr_t addr);
static uintptr_t weak_load_barrier_on_weak_oop_slow_path(uintptr_t addr);
static uintptr_t weak_load_barrier_on_phantom_oop_slow_path(uintptr_t addr);
static uintptr_t keep_alive_barrier_on_weak_oop_slow_path(uintptr_t addr);
static uintptr_t keep_alive_barrier_on_phantom_oop_slow_path(uintptr_t addr);
static uintptr_t mark_barrier_on_oop_slow_path(uintptr_t addr);
static uintptr_t mark_barrier_on_finalizable_oop_slow_path(uintptr_t addr);
static uintptr_t mark_barrier_on_root_oop_slow_path(uintptr_t addr);
static uintptr_t relocate_barrier_on_root_oop_slow_path(uintptr_t addr);
public:
// Load barrier
static oop load_barrier_on_oop(oop o);
static oop load_barrier_on_oop_field(volatile oop* p);
static oop load_barrier_on_oop_field_preloaded(volatile oop* p, oop o);
static void load_barrier_on_oop_array(volatile oop* p, size_t length);
static void load_barrier_on_oop_fields(oop o);
static oop load_barrier_on_weak_oop_field_preloaded(volatile oop* p, oop o);
static oop load_barrier_on_phantom_oop_field_preloaded(volatile oop* p, oop o);
// Weak load barrier
static oop weak_load_barrier_on_oop_field_preloaded(volatile oop* p, oop o);
static oop weak_load_barrier_on_weak_oop(oop o);
static oop weak_load_barrier_on_weak_oop_field(volatile oop* p);
static oop weak_load_barrier_on_weak_oop_field_preloaded(volatile oop* p, oop o);
static oop weak_load_barrier_on_phantom_oop(oop o);
static oop weak_load_barrier_on_phantom_oop_field(volatile oop* p);
static oop weak_load_barrier_on_phantom_oop_field_preloaded(volatile oop* p, oop o);
// Is alive barrier
static bool is_alive_barrier_on_weak_oop(oop o);
static bool is_alive_barrier_on_phantom_oop(oop o);
// Keep alive barrier
static void keep_alive_barrier_on_weak_oop_field(volatile oop* p);
static void keep_alive_barrier_on_phantom_oop_field(volatile oop* p);
// Mark barrier
static void mark_barrier_on_oop_field(volatile oop* p, bool finalizable);
static void mark_barrier_on_oop_array(volatile oop* p, size_t length, bool finalizable);
static void mark_barrier_on_root_oop_field(oop* p);
// Relocate barrier
static void relocate_barrier_on_root_oop_field(oop* p);
// Narrow oop variants, never used.
static oop load_barrier_on_oop_field(volatile narrowOop* p);
static oop load_barrier_on_oop_field_preloaded(volatile narrowOop* p, oop o);
static void load_barrier_on_oop_array(volatile narrowOop* p, size_t length);
static oop load_barrier_on_weak_oop_field_preloaded(volatile narrowOop* p, oop o);
static oop load_barrier_on_phantom_oop_field_preloaded(volatile narrowOop* p, oop o);
static oop weak_load_barrier_on_oop_field_preloaded(volatile narrowOop* p, oop o);
static oop weak_load_barrier_on_weak_oop_field_preloaded(volatile narrowOop* p, oop o);
static oop weak_load_barrier_on_phantom_oop_field_preloaded(volatile narrowOop* p, oop o);
};
#endif // SHARE_GC_Z_ZBARRIER_HPP
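The ZBarrierFastPath/ZBarrierSlowPath typedefs above drive a compile-time composition: each public barrier is an instantiation of one generic template over a fast-path predicate and a slow-path function, so the predicate inlines into the common path. A reduced sketch of that pattern, with names that are illustrative rather than the HotSpot API:

#include <cstdint>
#include <cstdio>

typedef bool (*FastPath)(uintptr_t);
typedef uintptr_t (*SlowPath)(uintptr_t);

constexpr uintptr_t kGoodBit = uintptr_t(1) << 62;

bool is_good_or_null(uintptr_t addr) { return addr == 0 || (addr & kGoodBit) != 0; }
uintptr_t make_good(uintptr_t addr)  { return addr | kGoodBit; }

template <FastPath fast_path, SlowPath slow_path>
uintptr_t barrier(uintptr_t addr) {
  if (fast_path(addr)) {
    return addr;            // common case: no work
  }
  return slow_path(addr);   // rare case: fix up the address
}

int main() {
  // One instantiation per (fast path, slow path) pair, as in the
  // private slow-path list declared above.
  const uintptr_t healed = barrier<is_good_or_null, make_good>(0x1000);
  printf("%#lx\n", (unsigned long)healed);
}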

@ -0,0 +1,300 @@
/*
* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_Z_ZBARRIER_INLINE_HPP
#define SHARE_GC_Z_ZBARRIER_INLINE_HPP
#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zBarrier.hpp"
#include "gc/z/zOop.inline.hpp"
#include "gc/z/zResurrection.inline.hpp"
#include "runtime/atomic.hpp"
template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path>
inline oop ZBarrier::barrier(volatile oop* p, oop o) {
uintptr_t addr = ZOop::to_address(o);
retry:
// Fast path
if (fast_path(addr)) {
return ZOop::to_oop(addr);
}
// Slow path
const uintptr_t good_addr = slow_path(addr);
// Self heal, but only if the address was actually updated by the slow path,
// which might not be the case, e.g. when marking through an already good oop.
if (p != NULL && good_addr != addr) {
const uintptr_t prev_addr = Atomic::cmpxchg(good_addr, (volatile uintptr_t*)p, addr);
if (prev_addr != addr) {
// Some other thread overwrote the oop. If this oop was updated by a
// weak barrier the new oop might not be good, in which case we need
// to re-apply this barrier.
addr = prev_addr;
goto retry;
}
}
return ZOop::to_oop(good_addr);
}
template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path>
inline oop ZBarrier::weak_barrier(volatile oop* p, oop o) {
const uintptr_t addr = ZOop::to_address(o);
// Fast path
if (fast_path(addr)) {
// Return the good address instead of the weak good address
// to ensure that the currently active heap view is used.
return ZOop::to_oop(ZAddress::good_or_null(addr));
}
// Slow path
uintptr_t good_addr = slow_path(addr);
// Self heal unless the address returned from the slow path is null,
// in which case resurrection was blocked and we must let the reference
// processor clear the oop. Mutators are not allowed to clear oops in
// these cases, since that would be similar to calling Reference.clear(),
// which would make the reference non-discoverable or cause it to be
// silently dropped by the reference processor.
if (p != NULL && good_addr != 0) {
// The slow path returns a good/marked address, but we never mark oops
// in a weak load barrier so we always self heal with the remapped address.
const uintptr_t weak_good_addr = ZAddress::remapped(good_addr);
const uintptr_t prev_addr = Atomic::cmpxchg(weak_good_addr, (volatile uintptr_t*)p, addr);
if (prev_addr != addr) {
// Some other thread overwrote the oop. The new
// oop is guaranteed to be weak good or null.
assert(ZAddress::is_weak_good_or_null(prev_addr), "Bad weak overwrite");
// Return the good address instead of the weak good address
// to ensure that the currently active heap view is used.
good_addr = ZAddress::good_or_null(prev_addr);
}
}
return ZOop::to_oop(good_addr);
}
template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path>
inline void ZBarrier::root_barrier(oop* p, oop o) {
const uintptr_t addr = ZOop::to_address(o);
// Fast path
if (fast_path(addr)) {
return;
}
// Slow path
const uintptr_t good_addr = slow_path(addr);
// Non-atomic healing helps speed up root scanning. This is safe to do
// since we are always healing roots in a safepoint, which means we are
// never racing with mutators modifying roots while we are healing them.
// It's also safe in case multiple GC threads try to heal the same root,
// since they would always heal the root in the same way and it does not
// matter in which order it happens.
*p = ZOop::to_oop(good_addr);
}
inline bool ZBarrier::is_null_fast_path(uintptr_t addr) {
return ZAddress::is_null(addr);
}
inline bool ZBarrier::is_good_or_null_fast_path(uintptr_t addr) {
return ZAddress::is_good_or_null(addr);
}
inline bool ZBarrier::is_weak_good_or_null_fast_path(uintptr_t addr) {
return ZAddress::is_weak_good_or_null(addr);
}
inline bool ZBarrier::is_resurrection_blocked(volatile oop* p, oop* o) {
const bool is_blocked = ZResurrection::is_blocked();
// Reload oop after checking the resurrection blocked state. This is
// done to prevent a race where we first load an oop, which is logically
// null but not yet cleared, then this oop is cleared by the reference
// processor and resurrection is unblocked. At this point the mutator
// would see the unblocked state and pass this invalid oop through the
// normal barrier path, which would incorrectly try to mark this oop.
if (p != NULL) {
// First assign to reloaded_o to avoid compiler warning about
// implicit dereference of volatile oop.
const oop reloaded_o = *p;
*o = reloaded_o;
}
return is_blocked;
}
//
// Load barrier
//
inline oop ZBarrier::load_barrier_on_oop(oop o) {
return load_barrier_on_oop_field_preloaded((oop*)NULL, o);
}
inline oop ZBarrier::load_barrier_on_oop_field(volatile oop* p) {
const oop o = *p;
return load_barrier_on_oop_field_preloaded(p, o);
}
inline oop ZBarrier::load_barrier_on_oop_field_preloaded(volatile oop* p, oop o) {
return barrier<is_good_or_null_fast_path, load_barrier_on_oop_slow_path>(p, o);
}
inline void ZBarrier::load_barrier_on_oop_array(volatile oop* p, size_t length) {
for (volatile const oop* const end = p + length; p < end; p++) {
load_barrier_on_oop_field(p);
}
}
inline oop ZBarrier::load_barrier_on_weak_oop_field_preloaded(volatile oop* p, oop o) {
if (is_resurrection_blocked(p, &o)) {
return weak_barrier<is_good_or_null_fast_path, weak_load_barrier_on_weak_oop_slow_path>(p, o);
}
return load_barrier_on_oop_field_preloaded(p, o);
}
inline oop ZBarrier::load_barrier_on_phantom_oop_field_preloaded(volatile oop* p, oop o) {
if (is_resurrection_blocked(p, &o)) {
return weak_barrier<is_good_or_null_fast_path, weak_load_barrier_on_phantom_oop_slow_path>(p, o);
}
return load_barrier_on_oop_field_preloaded(p, o);
}
//
// Weak load barrier
//
inline oop ZBarrier::weak_load_barrier_on_oop_field_preloaded(volatile oop* p, oop o) {
return weak_barrier<is_weak_good_or_null_fast_path, weak_load_barrier_on_oop_slow_path>(p, o);
}
inline oop ZBarrier::weak_load_barrier_on_weak_oop(oop o) {
return weak_load_barrier_on_weak_oop_field_preloaded((oop*)NULL, o);
}
inline oop ZBarrier::weak_load_barrier_on_weak_oop_field(volatile oop* p) {
const oop o = *p;
return weak_load_barrier_on_weak_oop_field_preloaded(p, o);
}
inline oop ZBarrier::weak_load_barrier_on_weak_oop_field_preloaded(volatile oop* p, oop o) {
if (is_resurrection_blocked(p, &o)) {
return weak_barrier<is_good_or_null_fast_path, weak_load_barrier_on_weak_oop_slow_path>(p, o);
}
return weak_load_barrier_on_oop_field_preloaded(p, o);
}
inline oop ZBarrier::weak_load_barrier_on_phantom_oop(oop o) {
return weak_load_barrier_on_phantom_oop_field_preloaded((oop*)NULL, o);
}
inline oop ZBarrier::weak_load_barrier_on_phantom_oop_field(volatile oop* p) {
const oop o = *p;
return weak_load_barrier_on_phantom_oop_field_preloaded(p, o);
}
inline oop ZBarrier::weak_load_barrier_on_phantom_oop_field_preloaded(volatile oop* p, oop o) {
if (is_resurrection_blocked(p, &o)) {
return weak_barrier<is_good_or_null_fast_path, weak_load_barrier_on_phantom_oop_slow_path>(p, o);
}
return weak_load_barrier_on_oop_field_preloaded(p, o);
}
//
// Is alive barrier
//
inline bool ZBarrier::is_alive_barrier_on_weak_oop(oop o) {
// Check if oop is logically non-null. This operation
// is only valid when resurrection is blocked.
assert(ZResurrection::is_blocked(), "Invalid phase");
return weak_load_barrier_on_weak_oop(o) != NULL;
}
inline bool ZBarrier::is_alive_barrier_on_phantom_oop(oop o) {
// Check if oop is logically non-null. This operation
// is only valid when resurrection is blocked.
assert(ZResurrection::is_blocked(), "Invalid phase");
return weak_load_barrier_on_phantom_oop(o) != NULL;
}
//
// Keep alive barrier
//
inline void ZBarrier::keep_alive_barrier_on_weak_oop_field(volatile oop* p) {
// This operation is only valid when resurrection is blocked.
assert(ZResurrection::is_blocked(), "Invalid phase");
const oop o = *p;
barrier<is_good_or_null_fast_path, keep_alive_barrier_on_weak_oop_slow_path>(p, o);
}
inline void ZBarrier::keep_alive_barrier_on_phantom_oop_field(volatile oop* p) {
// This operation is only valid when resurrection is blocked.
assert(ZResurrection::is_blocked(), "Invalid phase");
const oop o = *p;
barrier<is_good_or_null_fast_path, keep_alive_barrier_on_phantom_oop_slow_path>(p, o);
}
//
// Mark barrier
//
inline void ZBarrier::mark_barrier_on_oop_field(volatile oop* p, bool finalizable) {
// The fast path only checks for null since the GC worker
// threads doing marking want to mark through good oops.
const oop o = *p;
if (finalizable) {
barrier<is_null_fast_path, mark_barrier_on_finalizable_oop_slow_path>(p, o);
} else {
barrier<is_null_fast_path, mark_barrier_on_oop_slow_path>(p, o);
}
}
inline void ZBarrier::mark_barrier_on_oop_array(volatile oop* p, size_t length, bool finalizable) {
for (volatile const oop* const end = p + length; p < end; p++) {
mark_barrier_on_oop_field(p, finalizable);
}
}
inline void ZBarrier::mark_barrier_on_root_oop_field(oop* p) {
const oop o = *p;
root_barrier<is_good_or_null_fast_path, mark_barrier_on_root_oop_slow_path>(p, o);
}
//
// Relocate barrier
//
inline void ZBarrier::relocate_barrier_on_root_oop_field(oop* p) {
const oop o = *p;
root_barrier<is_good_or_null_fast_path, relocate_barrier_on_root_oop_slow_path>(p, o);
}
#endif // SHARE_GC_Z_ZBARRIER_INLINE_HPP
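The self-healing logic in ZBarrier::barrier() above (CAS the good address into the field, and if another thread got there first, retry on the value it wrote) can be modelled with std::atomic in a few lines. The bit layout and slow_path() below are assumptions for illustration, not HotSpot code:

#include <atomic>
#include <cstdint>
#include <cstdio>

constexpr uintptr_t kGoodBit = uintptr_t(1) << 62;

bool is_good(uintptr_t addr)     { return (addr & kGoodBit) != 0; }
uintptr_t slow_path(uintptr_t a) { return a | kGoodBit; }  // stand-in fix-up

uintptr_t load_barrier(std::atomic<uintptr_t>* p) {
  uintptr_t addr = p->load(std::memory_order_relaxed);
  while (!is_good(addr)) {
    const uintptr_t good_addr = slow_path(addr);
    // Self heal: publish the fixed-up pointer so later loads take the fast
    // path. On failure, compare_exchange writes the current field value back
    // into 'addr', which is exactly the "some other thread overwrote the
    // oop, re-apply the barrier" retry above.
    if (p->compare_exchange_strong(addr, good_addr)) {
      return good_addr;
    }
  }
  return addr;
}

int main() {
  std::atomic<uintptr_t> field{0x1000};
  printf("%#lx\n", (unsigned long)load_barrier(&field));
}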

@ -0,0 +1,82 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/z/c1/zBarrierSetC1.hpp"
#include "gc/z/c2/zBarrierSetC2.hpp"
#include "gc/z/zBarrierSet.hpp"
#include "gc/z/zBarrierSetAssembler.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zThreadLocalData.hpp"
#include "runtime/thread.hpp"
ZBarrierSet::ZBarrierSet() :
BarrierSet(make_barrier_set_assembler<ZBarrierSetAssembler>(),
make_barrier_set_c1<ZBarrierSetC1>(),
make_barrier_set_c2<ZBarrierSetC2>(),
BarrierSet::FakeRtti(BarrierSet::ZBarrierSet)) {}
ZBarrierSetAssembler* ZBarrierSet::assembler() {
BarrierSetAssembler* const bsa = BarrierSet::barrier_set()->barrier_set_assembler();
return reinterpret_cast<ZBarrierSetAssembler*>(bsa);
}
bool ZBarrierSet::barrier_needed(DecoratorSet decorators, BasicType type) {
assert((decorators & AS_RAW) == 0, "Unexpected decorator");
assert((decorators & AS_NO_KEEPALIVE) == 0, "Unexpected decorator");
assert((decorators & IN_ARCHIVE_ROOT) == 0, "Unexpected decorator");
//assert((decorators & ON_UNKNOWN_OOP_REF) == 0, "Unexpected decorator");
if (type == T_OBJECT || type == T_ARRAY) {
if (((decorators & IN_HEAP) != 0) ||
((decorators & IN_CONCURRENT_ROOT) != 0) ||
((decorators & ON_PHANTOM_OOP_REF) != 0)) {
// Barrier needed
return true;
}
}
// Barrier not needed
return false;
}
void ZBarrierSet::on_thread_create(Thread* thread) {
// Create thread local data
ZThreadLocalData::create(thread);
}
void ZBarrierSet::on_thread_destroy(Thread* thread) {
// Destroy thread local data
ZThreadLocalData::destroy(thread);
}
void ZBarrierSet::on_thread_attach(JavaThread* thread) {
// Set thread local address bad mask
ZThreadLocalData::set_address_bad_mask(thread, ZAddressBadMask);
}
void ZBarrierSet::on_thread_detach(JavaThread* thread) {
// Flush and free any remaining mark stacks
ZHeap::heap()->mark_flush_and_free(thread);
}
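barrier_needed() above is a pure bit test over the access decorators: only oop-typed accesses at locations that can hold colored pointers need the load barrier. A self-contained sketch with invented decorator constants (the real DecoratorSet values live in HotSpot's access framework):

#include <cstdint>
#include <cstdio>

typedef uint64_t DecoratorSet;
constexpr DecoratorSet IN_HEAP            = 1 << 0;
constexpr DecoratorSet IN_CONCURRENT_ROOT = 1 << 1;
constexpr DecoratorSet ON_PHANTOM_OOP_REF = 1 << 2;

enum BasicType { T_INT, T_OBJECT, T_ARRAY };

bool barrier_needed(DecoratorSet decorators, BasicType type) {
  if (type != T_OBJECT && type != T_ARRAY) {
    return false;  // primitive accesses never need a ZGC load barrier
  }
  // Any one of these location properties implies a barrier.
  return (decorators & (IN_HEAP | IN_CONCURRENT_ROOT | ON_PHANTOM_OOP_REF)) != 0;
}

int main() {
  printf("%d\n", barrier_needed(IN_HEAP, T_OBJECT));  // 1
  printf("%d\n", barrier_needed(IN_HEAP, T_INT));     // 0
}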

@ -0,0 +1,109 @@
/*
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_Z_ZBARRIERSET_HPP
#define SHARE_GC_Z_ZBARRIERSET_HPP
#include "gc/shared/barrierSet.hpp"
class ZBarrierSetAssembler;
class ZBarrierSet : public BarrierSet {
public:
ZBarrierSet();
static ZBarrierSetAssembler* assembler();
static bool barrier_needed(DecoratorSet decorators, BasicType type);
virtual void on_thread_create(Thread* thread);
virtual void on_thread_destroy(Thread* thread);
virtual void on_thread_attach(JavaThread* thread);
virtual void on_thread_detach(JavaThread* thread);
virtual void print_on(outputStream* st) const {}
template <DecoratorSet decorators, typename BarrierSetT = ZBarrierSet>
class AccessBarrier : public BarrierSet::AccessBarrier<decorators, BarrierSetT> {
private:
typedef BarrierSet::AccessBarrier<decorators, BarrierSetT> Raw;
template <DecoratorSet expected>
static void verify_decorators_present();
template <DecoratorSet expected>
static void verify_decorators_absent();
static oop* field_addr(oop base, ptrdiff_t offset);
template <typename T>
static oop load_barrier_on_oop_field_preloaded(T* addr, oop o);
template <typename T>
static oop load_barrier_on_unknown_oop_field_preloaded(oop base, ptrdiff_t offset, T* addr, oop o);
public:
//
// In heap
//
template <typename T>
static oop oop_load_in_heap(T* addr);
static oop oop_load_in_heap_at(oop base, ptrdiff_t offset);
template <typename T>
static oop oop_atomic_cmpxchg_in_heap(oop new_value, T* addr, oop compare_value);
static oop oop_atomic_cmpxchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset, oop compare_value);
template <typename T>
static oop oop_atomic_xchg_in_heap(oop new_value, T* addr);
static oop oop_atomic_xchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset);
template <typename T>
static bool oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
size_t length);
static void clone_in_heap(oop src, oop dst, size_t size);
//
// Not in heap
//
template <typename T>
static oop oop_load_not_in_heap(T* addr);
template <typename T>
static oop oop_atomic_cmpxchg_not_in_heap(oop new_value, T* addr, oop compare_value);
template <typename T>
static oop oop_atomic_xchg_not_in_heap(oop new_value, T* addr);
};
};
template<> struct BarrierSet::GetName<ZBarrierSet> {
static const BarrierSet::Name value = BarrierSet::ZBarrierSet;
};
template<> struct BarrierSet::GetType<BarrierSet::ZBarrierSet> {
typedef ::ZBarrierSet type;
};
#endif // SHARE_GC_Z_ZBARRIERSET_HPP
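The GetName/GetType specializations at the end of the header implement a two-way mapping between a barrier-set type and its enum tag, which is what lets generic code dispatch on the runtime name and downcast safely. A toy version of the same trait pattern, with made-up class names:

#include <cstdio>

struct BarrierSetBase {
  enum Name { kEpsilon, kZ };
  template <typename T> struct GetName;   // type -> enum tag
  template <Name v>     struct GetType;   // enum tag -> type
};

struct MyZBarrierSet : BarrierSetBase {};

template <> struct BarrierSetBase::GetName<MyZBarrierSet> {
  static const Name value = kZ;
};
template <> struct BarrierSetBase::GetType<BarrierSetBase::kZ> {
  typedef MyZBarrierSet type;
};

int main() {
  // The two specializations round-trip: type -> tag -> type.
  static_assert(BarrierSetBase::GetName<MyZBarrierSet>::value == BarrierSetBase::kZ,
                "round trip");
  BarrierSetBase::GetType<BarrierSetBase::kZ>::type zbs;  // resolves to MyZBarrierSet
  (void)zbs;
  printf("name = %d\n", (int)BarrierSetBase::GetName<MyZBarrierSet>::value);
}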

@ -0,0 +1,243 @@
/*
* Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_Z_ZBARRIERSET_INLINE_HPP
#define SHARE_GC_Z_ZBARRIERSET_INLINE_HPP
#include "gc/shared/accessBarrierSupport.inline.hpp"
#include "gc/z/zBarrier.inline.hpp"
#include "gc/z/zBarrierSet.hpp"
#include "utilities/debug.hpp"
template <DecoratorSet decorators, typename BarrierSetT>
template <DecoratorSet expected>
inline void ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::verify_decorators_present() {
if ((decorators & expected) == 0) {
fatal("Using unsupported access decorators");
}
}
template <DecoratorSet decorators, typename BarrierSetT>
template <DecoratorSet expected>
inline void ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::verify_decorators_absent() {
if ((decorators & expected) != 0) {
fatal("Using unsupported access decorators");
}
}
template <DecoratorSet decorators, typename BarrierSetT>
inline oop* ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::field_addr(oop base, ptrdiff_t offset) {
assert(base != NULL, "Invalid base");
return reinterpret_cast<oop*>(reinterpret_cast<intptr_t>((void*)base) + offset);
}
template <DecoratorSet decorators, typename BarrierSetT>
template <typename T>
inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::load_barrier_on_oop_field_preloaded(T* addr, oop o) {
verify_decorators_absent<ON_UNKNOWN_OOP_REF>();
if (HasDecorator<decorators, AS_NO_KEEPALIVE>::value) {
if (HasDecorator<decorators, ON_STRONG_OOP_REF>::value) {
return ZBarrier::weak_load_barrier_on_oop_field_preloaded(addr, o);
} else if (HasDecorator<decorators, ON_WEAK_OOP_REF>::value) {
return ZBarrier::weak_load_barrier_on_weak_oop_field_preloaded(addr, o);
} else {
return ZBarrier::weak_load_barrier_on_phantom_oop_field_preloaded(addr, o);
}
} else {
if (HasDecorator<decorators, ON_STRONG_OOP_REF>::value) {
return ZBarrier::load_barrier_on_oop_field_preloaded(addr, o);
} else if (HasDecorator<decorators, ON_WEAK_OOP_REF>::value) {
return ZBarrier::load_barrier_on_weak_oop_field_preloaded(addr, o);
} else {
return ZBarrier::load_barrier_on_phantom_oop_field_preloaded(addr, o);
}
}
}
template <DecoratorSet decorators, typename BarrierSetT>
template <typename T>
inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::load_barrier_on_unknown_oop_field_preloaded(oop base, ptrdiff_t offset, T* addr, oop o) {
verify_decorators_present<ON_UNKNOWN_OOP_REF>();
const DecoratorSet decorators_known_strength =
AccessBarrierSupport::resolve_possibly_unknown_oop_ref_strength<decorators>(base, offset);
if (HasDecorator<decorators, AS_NO_KEEPALIVE>::value) {
if (decorators_known_strength & ON_STRONG_OOP_REF) {
return ZBarrier::weak_load_barrier_on_oop_field_preloaded(addr, o);
} else if (decorators_known_strength & ON_WEAK_OOP_REF) {
return ZBarrier::weak_load_barrier_on_weak_oop_field_preloaded(addr, o);
} else {
return ZBarrier::weak_load_barrier_on_phantom_oop_field_preloaded(addr, o);
}
} else {
if (decorators_known_strength & ON_STRONG_OOP_REF) {
return ZBarrier::load_barrier_on_oop_field_preloaded(addr, o);
} else if (decorators_known_strength & ON_WEAK_OOP_REF) {
return ZBarrier::load_barrier_on_weak_oop_field_preloaded(addr, o);
} else {
return ZBarrier::load_barrier_on_phantom_oop_field_preloaded(addr, o);
}
}
}
//
// In heap
//
template <DecoratorSet decorators, typename BarrierSetT>
template <typename T>
inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_load_in_heap(T* addr) {
verify_decorators_absent<ON_UNKNOWN_OOP_REF>();
const oop o = Raw::oop_load_in_heap(addr);
return load_barrier_on_oop_field_preloaded(addr, o);
}
template <DecoratorSet decorators, typename BarrierSetT>
inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_load_in_heap_at(oop base, ptrdiff_t offset) {
oop* const addr = field_addr(base, offset);
const oop o = Raw::oop_load_in_heap(addr);
if (HasDecorator<decorators, ON_UNKNOWN_OOP_REF>::value) {
return load_barrier_on_unknown_oop_field_preloaded(base, offset, addr, o);
}
return load_barrier_on_oop_field_preloaded(addr, o);
}
template <DecoratorSet decorators, typename BarrierSetT>
template <typename T>
inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_cmpxchg_in_heap(oop new_value, T* addr, oop compare_value) {
verify_decorators_present<ON_STRONG_OOP_REF>();
verify_decorators_absent<AS_NO_KEEPALIVE>();
ZBarrier::load_barrier_on_oop_field(addr);
return Raw::oop_atomic_cmpxchg_in_heap(new_value, addr, compare_value);
}
template <DecoratorSet decorators, typename BarrierSetT>
inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_cmpxchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset, oop compare_value) {
verify_decorators_present<ON_STRONG_OOP_REF | ON_UNKNOWN_OOP_REF>();
verify_decorators_absent<AS_NO_KEEPALIVE>();
// Through Unsafe.CompareAndExchangeObject()/CompareAndSetObject() we can receive
// calls with ON_UNKNOWN_OOP_REF set. However, we treat these as ON_STRONG_OOP_REF,
// with the motivation that if you're doing Unsafe operations on a Reference.referent
// field, then you're on your own anyway.
ZBarrier::load_barrier_on_oop_field(field_addr(base, offset));
return Raw::oop_atomic_cmpxchg_in_heap_at(new_value, base, offset, compare_value);
}
template <DecoratorSet decorators, typename BarrierSetT>
template <typename T>
inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_in_heap(oop new_value, T* addr) {
verify_decorators_present<ON_STRONG_OOP_REF>();
verify_decorators_absent<AS_NO_KEEPALIVE>();
const oop o = Raw::oop_atomic_xchg_in_heap(new_value, addr);
return ZBarrier::load_barrier_on_oop(o);
}
template <DecoratorSet decorators, typename BarrierSetT>
inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset) {
verify_decorators_present<ON_STRONG_OOP_REF>();
verify_decorators_absent<AS_NO_KEEPALIVE>();
const oop o = Raw::oop_atomic_xchg_in_heap_at(new_value, base, offset);
return ZBarrier::load_barrier_on_oop(o);
}
template <DecoratorSet decorators, typename BarrierSetT>
template <typename T>
inline bool ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
size_t length) {
T* src = arrayOopDesc::obj_offset_to_raw(src_obj, src_offset_in_bytes, src_raw);
T* dst = arrayOopDesc::obj_offset_to_raw(dst_obj, dst_offset_in_bytes, dst_raw);
if (!HasDecorator<decorators, ARRAYCOPY_CHECKCAST>::value) {
// No check cast, bulk barrier and bulk copy
ZBarrier::load_barrier_on_oop_array(src, length);
return Raw::oop_arraycopy_in_heap(NULL, 0, src, NULL, 0, dst, length);
}
// Check cast and copy each element
Klass* const dst_klass = objArrayOop(dst_obj)->element_klass();
for (const T* const end = src + length; src < end; src++, dst++) {
const oop elem = ZBarrier::load_barrier_on_oop_field(src);
if (!oopDesc::is_instanceof_or_null(elem, dst_klass)) {
// Check cast failed
return false;
}
// Cast is safe, since we know it's never a narrowOop
*(oop*)dst = elem;
}
return true;
}
template <DecoratorSet decorators, typename BarrierSetT>
inline void ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::clone_in_heap(oop src, oop dst, size_t size) {
ZBarrier::load_barrier_on_oop_fields(src);
Raw::clone_in_heap(src, dst, size);
}
//
// Not in heap
//
template <DecoratorSet decorators, typename BarrierSetT>
template <typename T>
inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_load_not_in_heap(T* addr) {
const oop o = Raw::oop_load_not_in_heap(addr);
if (HasDecorator<decorators, ON_PHANTOM_OOP_REF>::value) {
return load_barrier_on_oop_field_preloaded(addr, o);
}
verify_decorators_present<ON_STRONG_OOP_REF>();
verify_decorators_absent<AS_NO_KEEPALIVE>();
return o;
}
template <DecoratorSet decorators, typename BarrierSetT>
template <typename T>
inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_cmpxchg_not_in_heap(oop new_value, T* addr, oop compare_value) {
verify_decorators_present<ON_STRONG_OOP_REF>();
verify_decorators_absent<AS_NO_KEEPALIVE>();
return Raw::oop_atomic_cmpxchg_not_in_heap(new_value, addr, compare_value);
}
template <DecoratorSet decorators, typename BarrierSetT>
template <typename T>
inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_not_in_heap(oop new_value, T* addr) {
verify_decorators_present<ON_STRONG_OOP_REF>();
verify_decorators_absent<AS_NO_KEEPALIVE>();
return Raw::oop_atomic_xchg_not_in_heap(new_value, addr);
}
#endif // SHARE_GC_Z_ZBARRIERSET_INLINE_HPP
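The checkcast arm of oop_arraycopy_in_heap() above heals each source element through the load barrier, type-checks it against the destination element klass, and stores it only if the check passes, returning false on the first failure. A simplified sketch of that loop, with stand-in helpers in place of the HotSpot calls:

#include <cstddef>
#include <cstdio>

struct Obj { int klass_id; };

Obj* load_barrier(Obj** slot) { return *slot; }  // stand-in heal
bool is_instanceof_or_null(const Obj* o, int dst_klass) {
  return o == nullptr || o->klass_id == dst_klass;
}

// Returns false at the first element that fails the check, mirroring the
// partial-copy contract of the checkcast arraycopy above.
bool checkcast_copy(Obj** src, Obj** dst, size_t length, int dst_klass) {
  for (Obj** end = src + length; src < end; src++, dst++) {
    Obj* const elem = load_barrier(src);
    if (!is_instanceof_or_null(elem, dst_klass)) {
      return false;
    }
    *dst = elem;
  }
  return true;
}

int main() {
  Obj a{1}, b{1};
  Obj* src[2] = {&a, &b};
  Obj* dst[2] = {nullptr, nullptr};
  printf("%d\n", checkcast_copy(src, dst, 2, 1));  // 1 (all elements copied)
}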

Some files were not shown because too many files have changed in this diff.