This commit is contained in:
Jesper Wilhelmsson 2018-02-10 09:25:35 +01:00
commit 9beff15bed
384 changed files with 5988 additions and 2688 deletions
make
src
hotspot
java.base/share/classes/java/lang/invoke
jdk.aot/share/classes/jdk.tools.jaotc/src/jdk/tools/jaotc
jdk.internal.vm.compiler
.mx.graal
share/classes
org.graalvm.collections.test/src/org/graalvm/collections/test
org.graalvm.collections/src/org/graalvm/collections
org.graalvm.compiler.api.directives.test/src/org/graalvm/compiler/api/directives/test
org.graalvm.compiler.api.directives/src/org/graalvm/compiler/api/directives
org.graalvm.compiler.api.replacements/src/org/graalvm/compiler/api/replacements
org.graalvm.compiler.api.runtime/src/org/graalvm/compiler/api/runtime
org.graalvm.compiler.asm.amd64/src/org/graalvm/compiler/asm/amd64
org.graalvm.compiler.code/src/org/graalvm/compiler/code
org.graalvm.compiler.core.amd64/src/org/graalvm/compiler/core/amd64
org.graalvm.compiler.core.common/src/org/graalvm/compiler/core/common
org.graalvm.compiler.core.match.processor/src/org/graalvm/compiler/core/match/processor
org.graalvm.compiler.core.sparc/src/org/graalvm/compiler/core/sparc
org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test

@ -443,6 +443,7 @@ jdk.internal.vm.compiler_ADD_JAVAC_FLAGS += -parameters -XDstringConcat=inline \
# #
jdk.internal.vm.compiler_EXCLUDES += \ jdk.internal.vm.compiler_EXCLUDES += \
org.graalvm.collections.test \
org.graalvm.compiler.core.match.processor \ org.graalvm.compiler.core.match.processor \
org.graalvm.compiler.nodeinfo.processor \ org.graalvm.compiler.nodeinfo.processor \
org.graalvm.compiler.options.processor \ org.graalvm.compiler.options.processor \
@ -461,6 +462,7 @@ jdk.internal.vm.compiler_EXCLUDES += \
org.graalvm.compiler.graph.test \ org.graalvm.compiler.graph.test \
org.graalvm.compiler.hotspot.amd64.test \ org.graalvm.compiler.hotspot.amd64.test \
org.graalvm.compiler.hotspot.lir.test \ org.graalvm.compiler.hotspot.lir.test \
org.graalvm.compiler.hotspot.sparc.test \
org.graalvm.compiler.hotspot.test \ org.graalvm.compiler.hotspot.test \
org.graalvm.compiler.jtt \ org.graalvm.compiler.jtt \
org.graalvm.compiler.lir.jtt \ org.graalvm.compiler.lir.jtt \

@ -48,6 +48,7 @@ ifeq ($(INCLUDE_GRAAL), true)
SETUP := GENERATE_OLDBYTECODE, \ SETUP := GENERATE_OLDBYTECODE, \
SRC := \ SRC := \
$(SRC_DIR)/org.graalvm.word/src \ $(SRC_DIR)/org.graalvm.word/src \
$(SRC_DIR)/org.graalvm.collections/src \
$(SRC_DIR)/org.graalvm.compiler.core/src \ $(SRC_DIR)/org.graalvm.compiler.core/src \
$(SRC_DIR)/org.graalvm.compiler.core.common/src \ $(SRC_DIR)/org.graalvm.compiler.core.common/src \
$(SRC_DIR)/org.graalvm.compiler.core.match.processor/src \ $(SRC_DIR)/org.graalvm.compiler.core.match.processor/src \
@ -101,6 +102,7 @@ ifeq ($(INCLUDE_GRAAL), true)
$(eval $(call SetupJavaCompilation, BUILD_VM_COMPILER_OPTIONS_PROCESSOR, \ $(eval $(call SetupJavaCompilation, BUILD_VM_COMPILER_OPTIONS_PROCESSOR, \
SETUP := GENERATE_OLDBYTECODE, \ SETUP := GENERATE_OLDBYTECODE, \
SRC := \ SRC := \
$(SRC_DIR)/org.graalvm.collections/src \
$(SRC_DIR)/org.graalvm.compiler.options/src \ $(SRC_DIR)/org.graalvm.compiler.options/src \
$(SRC_DIR)/org.graalvm.compiler.options.processor/src \ $(SRC_DIR)/org.graalvm.compiler.options.processor/src \
$(SRC_DIR)/org.graalvm.util/src \ $(SRC_DIR)/org.graalvm.util/src \
@ -117,6 +119,7 @@ ifeq ($(INCLUDE_GRAAL), true)
SETUP := GENERATE_OLDBYTECODE, \ SETUP := GENERATE_OLDBYTECODE, \
SRC := \ SRC := \
$(SRC_DIR)/org.graalvm.word/src \ $(SRC_DIR)/org.graalvm.word/src \
$(SRC_DIR)/org.graalvm.collections/src \
$(SRC_DIR)/org.graalvm.compiler.replacements.verifier/src \ $(SRC_DIR)/org.graalvm.compiler.replacements.verifier/src \
$(SRC_DIR)/org.graalvm.compiler.api.replacements/src \ $(SRC_DIR)/org.graalvm.compiler.api.replacements/src \
$(SRC_DIR)/org.graalvm.compiler.code/src \ $(SRC_DIR)/org.graalvm.compiler.code/src \

@ -67,6 +67,7 @@ BUILD_HOTSPOT_JTREG_NATIVE_SRC += \
$(TOPDIR)/test/hotspot/jtreg/compiler/calls \ $(TOPDIR)/test/hotspot/jtreg/compiler/calls \
$(TOPDIR)/test/hotspot/jtreg/compiler/runtime/criticalnatives/lookup \ $(TOPDIR)/test/hotspot/jtreg/compiler/runtime/criticalnatives/lookup \
$(TOPDIR)/test/hotspot/jtreg/compiler/runtime/criticalnatives/argumentcorruption \ $(TOPDIR)/test/hotspot/jtreg/compiler/runtime/criticalnatives/argumentcorruption \
$(TOPDIR)/test/hotspot/jtreg/serviceability/jvmti/CanGenerateAllClassHook \
$(TOPDIR)/test/hotspot/jtreg/serviceability/jvmti/GetOwnedMonitorInfo \ $(TOPDIR)/test/hotspot/jtreg/serviceability/jvmti/GetOwnedMonitorInfo \
$(TOPDIR)/test/hotspot/jtreg/serviceability/jvmti/GetOwnedMonitorStackDepthInfo \ $(TOPDIR)/test/hotspot/jtreg/serviceability/jvmti/GetOwnedMonitorStackDepthInfo \
$(TOPDIR)/test/hotspot/jtreg/serviceability/jvmti/GetNamedModule \ $(TOPDIR)/test/hotspot/jtreg/serviceability/jvmti/GetNamedModule \
@ -101,6 +102,7 @@ endif
ifeq ($(TOOLCHAIN_TYPE), solstudio) ifeq ($(TOOLCHAIN_TYPE), solstudio)
BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_liboverflow := -lc BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_liboverflow := -lc
BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libSimpleClassFileLoadHook := -lc BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libSimpleClassFileLoadHook := -lc
BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libCanGenerateAllClassHook := -lc
BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libGetOwnedMonitorInfoTest := -lc BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libGetOwnedMonitorInfoTest := -lc
BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libGetOwnedMonitorStackDepthInfoTest := -lc BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libGetOwnedMonitorStackDepthInfoTest := -lc
BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libGetNamedModuleTest := -lc BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libGetNamedModuleTest := -lc

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved. * Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
@ -2532,7 +2532,7 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
ciMethodData* md = method->method_data_or_null(); ciMethodData* md = method->method_data_or_null();
assert(md != NULL, "Sanity"); assert(md != NULL, "Sanity");
ciProfileData* data = md->bci_to_data(bci); ciProfileData* data = md->bci_to_data(bci);
assert(data->is_CounterData(), "need CounterData for calls"); assert(data != NULL && data->is_CounterData(), "need CounterData for calls");
assert(op->mdo()->is_single_cpu(), "mdo must be allocated"); assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
Register mdo = op->mdo()->as_register(); Register mdo = op->mdo()->as_register();
__ mov_metadata(mdo, md->constant_encoding()); __ mov_metadata(mdo, md->constant_encoding());

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -3150,7 +3150,7 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
ciMethodData* md = method->method_data_or_null(); ciMethodData* md = method->method_data_or_null();
assert(md != NULL, "Sanity"); assert(md != NULL, "Sanity");
ciProfileData* data = md->bci_to_data(bci); ciProfileData* data = md->bci_to_data(bci);
assert(data->is_CounterData(), "need CounterData for calls"); assert(data != NULL && data->is_CounterData(), "need CounterData for calls");
assert(op->mdo()->is_single_cpu(), "mdo must be allocated"); assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
Register mdo = op->mdo()->as_register(); Register mdo = op->mdo()->as_register();
assert(op->tmp1()->is_register(), "tmp1 must be allocated"); assert(op->tmp1()->is_register(), "tmp1 must be allocated");

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2017, SAP SE. All rights reserved. * Copyright (c) 2012, 2017, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
@ -2746,7 +2746,7 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
ciMethodData* md = method->method_data_or_null(); ciMethodData* md = method->method_data_or_null();
assert(md != NULL, "Sanity"); assert(md != NULL, "Sanity");
ciProfileData* data = md->bci_to_data(bci); ciProfileData* data = md->bci_to_data(bci);
assert(data->is_CounterData(), "need CounterData for calls"); assert(data != NULL && data->is_CounterData(), "need CounterData for calls");
assert(op->mdo()->is_single_cpu(), "mdo must be allocated"); assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
Register mdo = op->mdo()->as_register(); Register mdo = op->mdo()->as_register();
#ifdef _LP64 #ifdef _LP64

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2017, SAP SE. All rights reserved. * Copyright (c) 2016, 2017, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
@ -2715,7 +2715,7 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
ciMethodData* md = method->method_data_or_null(); ciMethodData* md = method->method_data_or_null();
assert(md != NULL, "Sanity"); assert(md != NULL, "Sanity");
ciProfileData* data = md->bci_to_data(bci); ciProfileData* data = md->bci_to_data(bci);
assert(data->is_CounterData(), "need CounterData for calls"); assert(data != NULL && data->is_CounterData(), "need CounterData for calls");
assert(op->mdo()->is_single_cpu(), "mdo must be allocated"); assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
Register mdo = op->mdo()->as_register(); Register mdo = op->mdo()->as_register();
assert(op->tmp1()->is_double_cpu(), "tmp1 must be allocated"); assert(op->tmp1()->is_double_cpu(), "tmp1 must be allocated");

@ -2761,7 +2761,7 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
ciMethodData* md = method->method_data_or_null(); ciMethodData* md = method->method_data_or_null();
assert(md != NULL, "Sanity"); assert(md != NULL, "Sanity");
ciProfileData* data = md->bci_to_data(bci); ciProfileData* data = md->bci_to_data(bci);
assert(data->is_CounterData(), "need CounterData for calls"); assert(data != NULL && data->is_CounterData(), "need CounterData for calls");
assert(op->mdo()->is_single_cpu(), "mdo must be allocated"); assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
Register mdo = op->mdo()->as_register(); Register mdo = op->mdo()->as_register();
assert(op->tmp1()->is_double_cpu(), "tmp1 must be allocated"); assert(op->tmp1()->is_double_cpu(), "tmp1 must be allocated");

@ -3504,7 +3504,7 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
ciMethodData* md = method->method_data_or_null(); ciMethodData* md = method->method_data_or_null();
assert(md != NULL, "Sanity"); assert(md != NULL, "Sanity");
ciProfileData* data = md->bci_to_data(bci); ciProfileData* data = md->bci_to_data(bci);
assert(data->is_CounterData(), "need CounterData for calls"); assert(data != NULL && data->is_CounterData(), "need CounterData for calls");
assert(op->mdo()->is_single_cpu(), "mdo must be allocated"); assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
Register mdo = op->mdo()->as_register(); Register mdo = op->mdo()->as_register();
__ mov_metadata(mdo, md->constant_encoding()); __ mov_metadata(mdo, md->constant_encoding());

@ -676,6 +676,7 @@ class StubGenerator: public StubCodeGenerator {
assert_different_registers(start, count); assert_different_registers(start, count);
BarrierSet* bs = Universe::heap()->barrier_set(); BarrierSet* bs = Universe::heap()->barrier_set();
switch (bs->kind()) { switch (bs->kind()) {
#if INCLUDE_ALL_GCS
case BarrierSet::G1SATBCTLogging: case BarrierSet::G1SATBCTLogging:
// With G1, don't generate the call if we statically know that the target in uninitialized // With G1, don't generate the call if we statically know that the target in uninitialized
if (!uninitialized_target) { if (!uninitialized_target) {
@ -703,6 +704,7 @@ class StubGenerator: public StubCodeGenerator {
__ bind(filtered); __ bind(filtered);
} }
break; break;
#endif // INCLUDE_ALL_GCS
case BarrierSet::CardTableForRS: case BarrierSet::CardTableForRS:
case BarrierSet::CardTableExtension: case BarrierSet::CardTableExtension:
case BarrierSet::ModRef: case BarrierSet::ModRef:
@ -726,6 +728,7 @@ class StubGenerator: public StubCodeGenerator {
BarrierSet* bs = Universe::heap()->barrier_set(); BarrierSet* bs = Universe::heap()->barrier_set();
assert_different_registers(start, count); assert_different_registers(start, count);
switch (bs->kind()) { switch (bs->kind()) {
#if INCLUDE_ALL_GCS
case BarrierSet::G1SATBCTLogging: case BarrierSet::G1SATBCTLogging:
{ {
__ pusha(); // push registers __ pusha(); // push registers
@ -734,6 +737,7 @@ class StubGenerator: public StubCodeGenerator {
__ popa(); __ popa();
} }
break; break;
#endif // INCLUDE_ALL_GCS
case BarrierSet::CardTableForRS: case BarrierSet::CardTableForRS:
case BarrierSet::CardTableExtension: case BarrierSet::CardTableExtension:

@ -188,6 +188,7 @@ int os::Aix::_extshm = -1;
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
// local variables // local variables
static volatile jlong max_real_time = 0;
static jlong initial_time_count = 0; static jlong initial_time_count = 0;
static int clock_tics_per_sec = 100; static int clock_tics_per_sec = 100;
static sigset_t check_signal_done; // For diagnostics to print a message once (see run_periodic_checks) static sigset_t check_signal_done; // For diagnostics to print a message once (see run_periodic_checks)
@ -1076,32 +1077,50 @@ void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
nanos = jlong(time.tv_usec) * 1000; nanos = jlong(time.tv_usec) * 1000;
} }
// We use mread_real_time here.
// On AIX: If the CPU has a time register, the result will be RTC_POWER and
// it has to be converted to real time. AIX documentations suggests to do
// this unconditionally, so we do it.
//
// See: https://www.ibm.com/support/knowledgecenter/ssw_aix_61/com.ibm.aix.basetrf2/read_real_time.htm
//
// On PASE: mread_real_time will always return RTC_POWER_PC data, so no
// conversion is necessary. However, mread_real_time will not return
// monotonic results but merely matches read_real_time. So we need a tweak
// to ensure monotonic results.
//
// For PASE no public documentation exists, just word by IBM
jlong os::javaTimeNanos() { jlong os::javaTimeNanos() {
timebasestruct_t time;
int rc = mread_real_time(&time, TIMEBASE_SZ);
if (os::Aix::on_pase()) { if (os::Aix::on_pase()) {
assert(rc == RTC_POWER, "expected time format RTC_POWER from mread_real_time in PASE");
timeval time; jlong now = jlong(time.tb_high) * NANOSECS_PER_SEC + jlong(time.tb_low);
int status = gettimeofday(&time, NULL); jlong prev = max_real_time;
assert(status != -1, "PASE error at gettimeofday()"); if (now <= prev) {
jlong usecs = jlong((unsigned long long) time.tv_sec * (1000 * 1000) + time.tv_usec); return prev; // same or retrograde time;
return 1000 * usecs;
} else {
// On AIX use the precision of processors real time clock
// or time base registers.
timebasestruct_t time;
int rc;
// If the CPU has a time register, it will be used and
// we have to convert to real time first. After convertion we have following data:
// time.tb_high [seconds since 00:00:00 UTC on 1.1.1970]
// time.tb_low [nanoseconds after the last full second above]
// We better use mread_real_time here instead of read_real_time
// to ensure that we will get a monotonic increasing time.
if (mread_real_time(&time, TIMEBASE_SZ) != RTC_POWER) {
rc = time_base_to_time(&time, TIMEBASE_SZ);
assert(rc != -1, "aix error at time_base_to_time()");
} }
return jlong(time.tb_high) * (1000 * 1000 * 1000) + jlong(time.tb_low); jlong obsv = Atomic::cmpxchg(now, &max_real_time, prev);
assert(obsv >= prev, "invariant"); // Monotonicity
// If the CAS succeeded then we're done and return "now".
// If the CAS failed and the observed value "obsv" is >= now then
// we should return "obsv". If the CAS failed and now > obsv > prv then
// some other thread raced this thread and installed a new value, in which case
// we could either (a) retry the entire operation, (b) retry trying to install now
// or (c) just return obsv. We use (c). No loop is required although in some cases
// we might discard a higher "now" value in deference to a slightly lower but freshly
// installed obsv value. That's entirely benign -- it admits no new orderings compared
// to (a) or (b) -- and greatly reduces coherence traffic.
// We might also condition (c) on the magnitude of the delta between obsv and now.
// Avoiding excessive CAS operations to hot RW locations is critical.
// See https://blogs.oracle.com/dave/entry/cas_and_cache_trivia_invalidate
return (prev == obsv) ? now : obsv;
} else {
if (rc != RTC_POWER) {
rc = time_base_to_time(&time, TIMEBASE_SZ);
assert(rc != -1, "error calling time_base_to_time()");
}
return jlong(time.tb_high) * NANOSECS_PER_SEC + jlong(time.tb_low);
} }
} }

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -51,6 +51,12 @@ JVM_ENTRY_NO_ENV(void*, JVM_RegisterSignal(jint sig, void* handler))
case SIGILL: case SIGILL:
case SIGSEGV: case SIGSEGV:
#if defined(__APPLE__)
/* On Darwin, memory access errors commonly results in SIGBUS instead
* of SIGSEGV. */
case SIGBUS:
#endif
/* The following signal is used by the VM to dump thread stacks unless /* The following signal is used by the VM to dump thread stacks unless
ReduceSignalUsage is set, in which case the user is allowed to set ReduceSignalUsage is set, in which case the user is allowed to set
his own _native_ handler for this signal; thus, in either case, his own _native_ handler for this signal; thus, in either case,

@ -2206,7 +2206,7 @@ char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr,
bool os::release_memory_special(char* base, size_t bytes) { bool os::release_memory_special(char* base, size_t bytes) {
if (MemTracker::tracking_level() > NMT_minimal) { if (MemTracker::tracking_level() > NMT_minimal) {
Tracker tkr = MemTracker::get_virtual_memory_release_tracker(); Tracker tkr(Tracker::release);
// detaching the SHM segment will also delete it, see reserve_memory_special() // detaching the SHM segment will also delete it, see reserve_memory_special()
int rslt = shmdt(base); int rslt = shmdt(base);
if (rslt == 0) { if (rslt == 0) {

@ -122,35 +122,40 @@ template <typename T> int subsystem_file_contents(CgroupSubsystem* c,
char file[MAXPATHLEN+1]; char file[MAXPATHLEN+1];
char buf[MAXPATHLEN+1]; char buf[MAXPATHLEN+1];
if (c != NULL && c->subsystem_path() != NULL) { if (c == NULL) {
strncpy(file, c->subsystem_path(), MAXPATHLEN); log_debug(os, container)("subsystem_file_contents: CgroupSubsytem* is NULL");
file[MAXPATHLEN-1] = '\0'; return OSCONTAINER_ERROR;
int filelen = strlen(file); }
if ((filelen + strlen(filename)) > (MAXPATHLEN-1)) { if (c->subsystem_path() == NULL) {
log_debug(os, container)("File path too long %s, %s", file, filename); log_debug(os, container)("subsystem_file_contents: subsystem path is NULL");
return OSCONTAINER_ERROR; return OSCONTAINER_ERROR;
} }
strncat(file, filename, MAXPATHLEN-filelen);
log_trace(os, container)("Path to %s is %s", filename, file); strncpy(file, c->subsystem_path(), MAXPATHLEN);
fp = fopen(file, "r"); file[MAXPATHLEN-1] = '\0';
if (fp != NULL) { int filelen = strlen(file);
p = fgets(buf, MAXPATHLEN, fp); if ((filelen + strlen(filename)) > (MAXPATHLEN-1)) {
if (p != NULL) { log_debug(os, container)("File path too long %s, %s", file, filename);
int matched = sscanf(p, scan_fmt, returnval); return OSCONTAINER_ERROR;
if (matched == 1) { }
fclose(fp); strncat(file, filename, MAXPATHLEN-filelen);
return 0; log_trace(os, container)("Path to %s is %s", filename, file);
} else { fp = fopen(file, "r");
log_debug(os, container)("Type %s not found in file %s", if (fp != NULL) {
scan_fmt , file); p = fgets(buf, MAXPATHLEN, fp);
} if (p != NULL) {
int matched = sscanf(p, scan_fmt, returnval);
if (matched == 1) {
fclose(fp);
return 0;
} else { } else {
log_debug(os, container)("Empty file %s", file); log_debug(os, container)("Type %s not found in file %s", scan_fmt, file);
} }
} else { } else {
log_debug(os, container)("Open of file %s failed, %s", file, log_debug(os, container)("Empty file %s", file);
os::strerror(errno));
} }
} else {
log_debug(os, container)("Open of file %s failed, %s", file, os::strerror(errno));
} }
if (fp != NULL) if (fp != NULL)
fclose(fp); fclose(fp);
@ -273,7 +278,7 @@ void OSContainer::init() {
else { else {
log_debug(os, container)("Incompatible str containing cgroup and cpuset: %s", p); log_debug(os, container)("Incompatible str containing cgroup and cpuset: %s", p);
} }
} else if (strstr(p, "cpu,cpuacct") != NULL) { } else if (strstr(p, "cpu,cpuacct") != NULL || strstr(p, "cpuacct,cpu") != NULL) {
int matched = sscanf(p, "%d %d %d:%d %s %s", int matched = sscanf(p, "%d %d %d:%d %s %s",
&mountid, &mountid,
&parentid, &parentid,
@ -322,8 +327,20 @@ void OSContainer::init() {
fclose(mntinfo); fclose(mntinfo);
if (memory == NULL || cpuset == NULL || cpu == NULL || cpuacct == NULL) { if (memory == NULL) {
log_debug(os, container)("Required cgroup subsystems not found"); log_debug(os, container)("Required cgroup memory subsystem not found");
return;
}
if (cpuset == NULL) {
log_debug(os, container)("Required cgroup cpuset subsystem not found");
return;
}
if (cpu == NULL) {
log_debug(os, container)("Required cgroup cpu subsystem not found");
return;
}
if (cpuacct == NULL) {
log_debug(os, container)("Required cgroup cpuacct subsystem not found");
return; return;
} }
@ -374,7 +391,7 @@ void OSContainer::init() {
memory->set_subsystem_path(base); memory->set_subsystem_path(base);
} else if (strstr(controller, "cpuset") != NULL) { } else if (strstr(controller, "cpuset") != NULL) {
cpuset->set_subsystem_path(base); cpuset->set_subsystem_path(base);
} else if (strstr(controller, "cpu,cpuacct") != NULL) { } else if (strstr(controller, "cpu,cpuacct") != NULL || strstr(controller, "cpuacct,cpu") != NULL) {
cpu->set_subsystem_path(base); cpu->set_subsystem_path(base);
cpuacct->set_subsystem_path(base); cpuacct->set_subsystem_path(base);
} else if (strstr(controller, "cpuacct") != NULL) { } else if (strstr(controller, "cpuacct") != NULL) {

@ -3862,7 +3862,7 @@ bool os::Linux::release_memory_special_huge_tlbfs(char* base, size_t bytes) {
bool os::release_memory_special(char* base, size_t bytes) { bool os::release_memory_special(char* base, size_t bytes) {
bool res; bool res;
if (MemTracker::tracking_level() > NMT_minimal) { if (MemTracker::tracking_level() > NMT_minimal) {
Tracker tkr = MemTracker::get_virtual_memory_release_tracker(); Tracker tkr(Tracker::release);
res = os::Linux::release_memory_special_impl(base, bytes); res = os::Linux::release_memory_special_impl(base, bytes);
if (res) { if (res) {
tkr.record((address)base, bytes); tkr.record((address)base, bytes);

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -1840,7 +1840,7 @@ void PerfMemory::detach(char* addr, size_t bytes, TRAPS) {
if (MemTracker::tracking_level() > NMT_minimal) { if (MemTracker::tracking_level() > NMT_minimal) {
// it does not go through os api, the operation has to record from here // it does not go through os api, the operation has to record from here
Tracker tkr = MemTracker::get_virtual_memory_release_tracker(); Tracker tkr(Tracker::release);
remove_file_mapping(addr); remove_file_mapping(addr);
tkr.record((address)addr, bytes); tkr.record((address)addr, bytes);
} else { } else {

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -1556,8 +1556,6 @@ void GraphBuilder::method_return(Value x, bool ignore_return) {
} }
if (profile_return() && x->type()->is_object_kind()) { if (profile_return() && x->type()->is_object_kind()) {
ciMethod* caller = state()->scope()->method(); ciMethod* caller = state()->scope()->method();
ciMethodData* md = caller->method_data_or_null();
ciProfileData* data = md->bci_to_data(invoke_bci);
profile_return_type(x, method(), caller, invoke_bci); profile_return_type(x, method(), caller, invoke_bci);
} }
} }

@ -80,6 +80,9 @@
#include "trace/tracing.hpp" #include "trace/tracing.hpp"
#endif #endif
volatile size_t ClassLoaderDataGraph::_num_array_classes = 0;
volatile size_t ClassLoaderDataGraph::_num_instance_classes = 0;
ClassLoaderData * ClassLoaderData::_the_null_class_loader_data = NULL; ClassLoaderData * ClassLoaderData::_the_null_class_loader_data = NULL;
ClassLoaderData::ClassLoaderData(Handle h_class_loader, bool is_anonymous, Dependencies dependencies) : ClassLoaderData::ClassLoaderData(Handle h_class_loader, bool is_anonymous, Dependencies dependencies) :
@ -329,36 +332,36 @@ void ClassLoaderData::record_dependency(const Klass* k, TRAPS) {
ClassLoaderData * const from_cld = this; ClassLoaderData * const from_cld = this;
ClassLoaderData * const to_cld = k->class_loader_data(); ClassLoaderData * const to_cld = k->class_loader_data();
// Dependency to the null class loader data doesn't need to be recorded // Do not need to record dependency if the dependency is to a class whose
// because the null class loader data never goes away. // class loader data is never freed. (i.e. the dependency's class loader
if (to_cld->is_the_null_class_loader_data()) { // is one of the three builtin class loaders and the dependency is not
// anonymous.)
if (to_cld->is_permanent_class_loader_data()) {
return; return;
} }
oop to; oop to;
if (to_cld->is_anonymous()) { if (to_cld->is_anonymous()) {
// Just return if an anonymous class is attempting to record a dependency
// to itself. (Note that every anonymous class has its own unique class
// loader data.)
if (to_cld == from_cld) {
return;
}
// Anonymous class dependencies are through the mirror. // Anonymous class dependencies are through the mirror.
to = k->java_mirror(); to = k->java_mirror();
} else { } else {
to = to_cld->class_loader(); to = to_cld->class_loader();
oop from = from_cld->class_loader();
// If from_cld is anonymous, even if it's class_loader is a parent of 'to' // Just return if this dependency is to a class with the same or a parent
// we still have to add it. The class_loader won't keep from_cld alive. // class_loader.
if (!from_cld->is_anonymous()) { if (from == to || java_lang_ClassLoader::isAncestor(from, to)) {
// Check that this dependency isn't from the same or parent class_loader return; // this class loader is in the parent list, no need to add it.
oop from = from_cld->class_loader();
oop curr = from;
while (curr != NULL) {
if (curr == to) {
return; // this class loader is in the parent list, no need to add it.
}
curr = java_lang_ClassLoader::parent(curr);
}
} }
} }
// It's a dependency we won't find through GC, add it. This is relatively rare // It's a dependency we won't find through GC, add it. This is relatively rare.
// Must handle over GC point. // Must handle over GC point.
Handle dependency(THREAD, to); Handle dependency(THREAD, to);
from_cld->_dependencies.add(dependency, CHECK); from_cld->_dependencies.add(dependency, CHECK);
@ -443,6 +446,11 @@ void ClassLoaderData::add_class(Klass* k, bool publicize /* true */) {
// Link the new item into the list, making sure the linked class is stable // Link the new item into the list, making sure the linked class is stable
// since the list can be walked without a lock // since the list can be walked without a lock
OrderAccess::release_store(&_klasses, k); OrderAccess::release_store(&_klasses, k);
if (k->is_array_klass()) {
ClassLoaderDataGraph::inc_array_classes(1);
} else {
ClassLoaderDataGraph::inc_instance_classes(1);
}
} }
if (publicize && k->class_loader_data() != NULL) { if (publicize && k->class_loader_data() != NULL) {
@ -468,9 +476,9 @@ class ClassLoaderDataGraphKlassIteratorStatic {
InstanceKlass* try_get_next_class() { InstanceKlass* try_get_next_class() {
assert(SafepointSynchronize::is_at_safepoint(), "only called at safepoint"); assert(SafepointSynchronize::is_at_safepoint(), "only called at safepoint");
int max_classes = InstanceKlass::number_of_instance_classes(); size_t max_classes = ClassLoaderDataGraph::num_instance_classes();
assert(max_classes > 0, "should not be called with no instance classes"); assert(max_classes > 0, "should not be called with no instance classes");
for (int i = 0; i < max_classes; ) { for (size_t i = 0; i < max_classes; ) {
if (_current_class_entry != NULL) { if (_current_class_entry != NULL) {
Klass* k = _current_class_entry; Klass* k = _current_class_entry;
@ -545,6 +553,13 @@ void ClassLoaderData::remove_class(Klass* scratch_class) {
Klass* next = k->next_link(); Klass* next = k->next_link();
prev->set_next_link(next); prev->set_next_link(next);
} }
if (k->is_array_klass()) {
ClassLoaderDataGraph::dec_array_classes(1);
} else {
ClassLoaderDataGraph::dec_instance_classes(1);
}
return; return;
} }
prev = k; prev = k;
@ -639,9 +654,34 @@ bool ClassLoaderData::is_alive(BoolObjectClosure* is_alive_closure) const {
return alive; return alive;
} }
class ReleaseKlassClosure: public KlassClosure {
private:
size_t _instance_class_released;
size_t _array_class_released;
public:
ReleaseKlassClosure() : _instance_class_released(0), _array_class_released(0) { }
size_t instance_class_released() const { return _instance_class_released; }
size_t array_class_released() const { return _array_class_released; }
void do_klass(Klass* k) {
if (k->is_array_klass()) {
_array_class_released ++;
} else {
assert(k->is_instance_klass(), "Must be");
_instance_class_released ++;
InstanceKlass::release_C_heap_structures(InstanceKlass::cast(k));
}
}
};
ClassLoaderData::~ClassLoaderData() { ClassLoaderData::~ClassLoaderData() {
// Release C heap structures for all the classes. // Release C heap structures for all the classes.
classes_do(InstanceKlass::release_C_heap_structures); ReleaseKlassClosure cl;
classes_do(&cl);
ClassLoaderDataGraph::dec_array_classes(cl.array_class_released());
ClassLoaderDataGraph::dec_instance_classes(cl.instance_class_released());
// Release C heap allocated hashtable for all the packages. // Release C heap allocated hashtable for all the packages.
if (_packages != NULL) { if (_packages != NULL) {
@ -693,25 +733,37 @@ ClassLoaderData::~ClassLoaderData() {
} }
} }
// Returns true if this class loader data is for the system class loader. // Returns true if this class loader data is for the app class loader
// or a user defined system class loader. (Note that the class loader
// data may be anonymous.)
bool ClassLoaderData::is_system_class_loader_data() const { bool ClassLoaderData::is_system_class_loader_data() const {
return SystemDictionary::is_system_class_loader(class_loader()); return SystemDictionary::is_system_class_loader(class_loader());
} }
// Returns true if this class loader data is for the platform class loader. // Returns true if this class loader data is for the platform class loader.
// (Note that the class loader data may be anonymous.)
bool ClassLoaderData::is_platform_class_loader_data() const { bool ClassLoaderData::is_platform_class_loader_data() const {
return SystemDictionary::is_platform_class_loader(class_loader()); return SystemDictionary::is_platform_class_loader(class_loader());
} }
// Returns true if this class loader data is one of the 3 builtin // Returns true if the class loader for this class loader data is one of
// (boot, application/system or platform) class loaders. Note, the // the 3 builtin (boot application/system or platform) class loaders,
// builtin loaders are not freed by a GC. // including a user-defined system class loader. Note that if the class
// loader data is for an anonymous class then it may get freed by a GC
// even if its class loader is one of these loaders.
bool ClassLoaderData::is_builtin_class_loader_data() const { bool ClassLoaderData::is_builtin_class_loader_data() const {
return (is_the_null_class_loader_data() || return (is_boot_class_loader_data() ||
SystemDictionary::is_system_class_loader(class_loader()) || SystemDictionary::is_system_class_loader(class_loader()) ||
SystemDictionary::is_platform_class_loader(class_loader())); SystemDictionary::is_platform_class_loader(class_loader()));
} }
// Returns true if this class loader data is a class loader data
// that is not ever freed by a GC. It must be one of the builtin
// class loaders and not anonymous.
bool ClassLoaderData::is_permanent_class_loader_data() const {
return is_builtin_class_loader_data() && !is_anonymous();
}
Metaspace* ClassLoaderData::metaspace_non_null() { Metaspace* ClassLoaderData::metaspace_non_null() {
// If the metaspace has not been allocated, create a new one. Might want // If the metaspace has not been allocated, create a new one. Might want
// to create smaller arena for Reflection class loaders also. // to create smaller arena for Reflection class loaders also.

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -80,6 +80,9 @@ class ClassLoaderDataGraph : public AllStatic {
// allocations until class unloading // allocations until class unloading
static bool _metaspace_oom; static bool _metaspace_oom;
static volatile size_t _num_instance_classes;
static volatile size_t _num_array_classes;
static ClassLoaderData* add(Handle class_loader, bool anonymous, TRAPS); static ClassLoaderData* add(Handle class_loader, bool anonymous, TRAPS);
static void post_class_unload_events(); static void post_class_unload_events();
public: public:
@ -154,6 +157,15 @@ class ClassLoaderDataGraph : public AllStatic {
static void print_creation(outputStream* out, Handle loader, ClassLoaderData* cld, TRAPS); static void print_creation(outputStream* out, Handle loader, ClassLoaderData* cld, TRAPS);
static bool unload_list_contains(const void* x); static bool unload_list_contains(const void* x);
// instance and array class counters
static inline size_t num_instance_classes();
static inline size_t num_array_classes();
static inline void inc_instance_classes(size_t count);
static inline void dec_instance_classes(size_t count);
static inline void inc_array_classes(size_t count);
static inline void dec_array_classes(size_t count);
#ifndef PRODUCT #ifndef PRODUCT
static bool contains_loader_data(ClassLoaderData* loader_data); static bool contains_loader_data(ClassLoaderData* loader_data);
#endif #endif
@ -344,7 +356,15 @@ class ClassLoaderData : public CHeapObj<mtClass> {
} }
bool is_system_class_loader_data() const; bool is_system_class_loader_data() const;
bool is_platform_class_loader_data() const; bool is_platform_class_loader_data() const;
// Returns true if this class loader data is for the boot class loader.
// (Note that the class loader data may be anonymous.)
bool is_boot_class_loader_data() const {
return class_loader() == NULL;
}
bool is_builtin_class_loader_data() const; bool is_builtin_class_loader_data() const;
bool is_permanent_class_loader_data() const;
// The Metaspace is created lazily so may be NULL. This // The Metaspace is created lazily so may be NULL. This
// method will allocate a Metaspace if needed. // method will allocate a Metaspace if needed.

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -50,3 +50,29 @@ inline ClassLoaderData *ClassLoaderDataGraph::find_or_create(Handle loader, TRAP
} }
return ClassLoaderDataGraph::add(loader, false, THREAD); return ClassLoaderDataGraph::add(loader, false, THREAD);
} }
size_t ClassLoaderDataGraph::num_instance_classes() {
return _num_instance_classes;
}
size_t ClassLoaderDataGraph::num_array_classes() {
return _num_array_classes;
}
void ClassLoaderDataGraph::inc_instance_classes(size_t count) {
Atomic::add(count, &_num_instance_classes);
}
void ClassLoaderDataGraph::dec_instance_classes(size_t count) {
assert(count <= _num_instance_classes, "Sanity");
Atomic::sub(count, &_num_instance_classes);
}
void ClassLoaderDataGraph::inc_array_classes(size_t count) {
Atomic::add(count, &_num_array_classes);
}
void ClassLoaderDataGraph::dec_array_classes(size_t count) {
assert(count <= _num_array_classes, "Sanity");
Atomic::sub(count, &_num_array_classes);
}

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -103,7 +103,11 @@ public:
void set_shared_protection_domain(ClassLoaderData *loader_data, Handle pd); void set_shared_protection_domain(ClassLoaderData *loader_data, Handle pd);
ClassLoaderData* loader_data() const { return _loader_data; } ClassLoaderData* loader_data() const { return _loader_data; }
void set_loader_data(ClassLoaderData* l) { _loader_data = l; }
void set_loader_data(ClassLoaderData* cld) {
assert(!cld->is_anonymous(), "Unexpected anonymous class loader data");
_loader_data = cld;
}
Symbol* version() const { return _version; } Symbol* version() const { return _version; }
void set_version(Symbol* version); void set_version(Symbol* version);

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -417,7 +417,7 @@ void CompiledIC::set_to_monomorphic(CompiledICInfo& info) {
bool static_bound = info.is_optimized() || (info.cached_metadata() == NULL); bool static_bound = info.is_optimized() || (info.cached_metadata() == NULL);
#ifdef ASSERT #ifdef ASSERT
CodeBlob* cb = CodeCache::find_blob_unsafe(info.entry()); CodeBlob* cb = CodeCache::find_blob_unsafe(info.entry());
assert (cb->is_compiled(), "must be compiled!"); assert (cb != NULL && cb->is_compiled(), "must be compiled!");
#endif /* ASSERT */ #endif /* ASSERT */
// This is MT safe if we come from a clean-cache and go through a // This is MT safe if we come from a clean-cache and go through a

@ -955,6 +955,7 @@ void nmethod::verify_clean_inline_caches() {
CompiledIC *ic = CompiledIC_at(&iter); CompiledIC *ic = CompiledIC_at(&iter);
// Ok, to lookup references to zombies here // Ok, to lookup references to zombies here
CodeBlob *cb = CodeCache::find_blob_unsafe(ic->ic_destination()); CodeBlob *cb = CodeCache::find_blob_unsafe(ic->ic_destination());
assert(cb != NULL, "destination not in CodeBlob?");
nmethod* nm = cb->as_nmethod_or_null(); nmethod* nm = cb->as_nmethod_or_null();
if( nm != NULL ) { if( nm != NULL ) {
// Verify that inline caches pointing to both zombie and not_entrant methods are clean // Verify that inline caches pointing to both zombie and not_entrant methods are clean
@ -967,6 +968,7 @@ void nmethod::verify_clean_inline_caches() {
case relocInfo::static_call_type: { case relocInfo::static_call_type: {
CompiledStaticCall *csc = compiledStaticCall_at(iter.reloc()); CompiledStaticCall *csc = compiledStaticCall_at(iter.reloc());
CodeBlob *cb = CodeCache::find_blob_unsafe(csc->destination()); CodeBlob *cb = CodeCache::find_blob_unsafe(csc->destination());
assert(cb != NULL, "destination not in CodeBlob?");
nmethod* nm = cb->as_nmethod_or_null(); nmethod* nm = cb->as_nmethod_or_null();
if( nm != NULL ) { if( nm != NULL ) {
// Verify that inline caches pointing to both zombie and not_entrant methods are clean // Verify that inline caches pointing to both zombie and not_entrant methods are clean
@ -2732,7 +2734,7 @@ public:
virtual void verify_resolve_call(address dest) const { virtual void verify_resolve_call(address dest) const {
CodeBlob* db = CodeCache::find_blob_unsafe(dest); CodeBlob* db = CodeCache::find_blob_unsafe(dest);
assert(!db->is_adapter_blob(), "must use stub!"); assert(db != NULL && !db->is_adapter_blob(), "must use stub!");
} }
virtual bool is_call_to_interpreted(address dest) const { virtual bool is_call_to_interpreted(address dest) const {

@ -26,7 +26,9 @@
#include "gc/shared/oopStorage.inline.hpp" #include "gc/shared/oopStorage.inline.hpp"
#include "gc/shared/oopStorageParState.inline.hpp" #include "gc/shared/oopStorageParState.inline.hpp"
#include "logging/log.hpp" #include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp" #include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/atomic.hpp" #include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp" #include "runtime/handles.inline.hpp"
#include "runtime/mutex.hpp" #include "runtime/mutex.hpp"
@ -107,7 +109,7 @@ void OopStorage::BlockList::unlink(const Block& block) {
} }
// Blocks start with an array of BitsPerWord oop entries. That array // Blocks start with an array of BitsPerWord oop entries. That array
// is divided into conceptual BytesPerWord sections of BitsPerWord // is divided into conceptual BytesPerWord sections of BitsPerByte
// entries. Blocks are allocated aligned on section boundaries, for // entries. Blocks are allocated aligned on section boundaries, for
// the convenience of mapping from an entry to the containing block; // the convenience of mapping from an entry to the containing block;
// see block_for_ptr(). Aligning on section boundary rather than on // see block_for_ptr(). Aligning on section boundary rather than on
@ -130,7 +132,9 @@ OopStorage::Block::Block(const OopStorage* owner, void* memory) :
_owner(owner), _owner(owner),
_memory(memory), _memory(memory),
_active_entry(), _active_entry(),
_allocate_entry() _allocate_entry(),
_deferred_updates_next(NULL),
_release_refcount(0)
{ {
STATIC_ASSERT(_data_pos == 0); STATIC_ASSERT(_data_pos == 0);
STATIC_ASSERT(section_size * section_count == ARRAY_SIZE(_data)); STATIC_ASSERT(section_size * section_count == ARRAY_SIZE(_data));
@ -143,6 +147,8 @@ OopStorage::Block::Block(const OopStorage* owner, void* memory) :
#endif #endif
OopStorage::Block::~Block() { OopStorage::Block::~Block() {
assert(_release_refcount == 0, "deleting block while releasing");
assert(_deferred_updates_next == NULL, "deleting block with deferred update");
// Clear fields used by block_for_ptr and entry validation, which // Clear fields used by block_for_ptr and entry validation, which
// might help catch bugs. Volatile to prevent dead-store elimination. // might help catch bugs. Volatile to prevent dead-store elimination.
const_cast<uintx volatile&>(_allocated_bitmask) = 0; const_cast<uintx volatile&>(_allocated_bitmask) = 0;
@ -182,8 +188,24 @@ uintx OopStorage::Block::bitmask_for_entry(const oop* ptr) const {
return bitmask_for_index(get_index(ptr)); return bitmask_for_index(get_index(ptr));
} }
uintx OopStorage::Block::cmpxchg_allocated_bitmask(uintx new_value, uintx compare_value) { // A block is deletable if
return Atomic::cmpxchg(new_value, &_allocated_bitmask, compare_value); // (1) It is empty.
// (2) There is not a release() operation currently operating on it.
// (3) It is not in the deferred updates list.
// The order of tests is important for proper interaction between release()
// and concurrent deletion.
bool OopStorage::Block::is_deletable() const {
return (OrderAccess::load_acquire(&_allocated_bitmask) == 0) &&
(OrderAccess::load_acquire(&_release_refcount) == 0) &&
(OrderAccess::load_acquire(&_deferred_updates_next) == NULL);
}
OopStorage::Block* OopStorage::Block::deferred_updates_next() const {
return _deferred_updates_next;
}
void OopStorage::Block::set_deferred_updates_next(Block* block) {
_deferred_updates_next = block;
} }
bool OopStorage::Block::contains(const oop* ptr) const { bool OopStorage::Block::contains(const oop* ptr) const {
@ -203,7 +225,7 @@ oop* OopStorage::Block::allocate() {
assert(!is_full_bitmask(allocated), "attempt to allocate from full block"); assert(!is_full_bitmask(allocated), "attempt to allocate from full block");
unsigned index = count_trailing_zeros(~allocated); unsigned index = count_trailing_zeros(~allocated);
uintx new_value = allocated | bitmask_for_index(index); uintx new_value = allocated | bitmask_for_index(index);
uintx fetched = cmpxchg_allocated_bitmask(new_value, allocated); uintx fetched = Atomic::cmpxchg(new_value, &_allocated_bitmask, allocated);
if (fetched == allocated) { if (fetched == allocated) {
return get_pointer(index); // CAS succeeded; return entry for index. return get_pointer(index); // CAS succeeded; return entry for index.
} }
@ -261,20 +283,6 @@ OopStorage::Block::block_for_ptr(const OopStorage* owner, const oop* ptr) {
return NULL; return NULL;
} }
bool OopStorage::is_valid_block_locked_or_safepoint(const Block* check_block) const {
assert_locked_or_safepoint(_allocate_mutex);
// For now, simple linear search. Do something more clever if this
// is a performance bottleneck, particularly for allocation_status.
for (const Block* block = _active_list.chead();
block != NULL;
block = _active_list.next(*block)) {
if (check_block == block) {
return true;
}
}
return false;
}
#ifdef ASSERT #ifdef ASSERT
void OopStorage::assert_at_safepoint() { void OopStorage::assert_at_safepoint() {
assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
@ -291,39 +299,49 @@ void OopStorage::assert_at_safepoint() {
// kept at the end of the _allocate_list, to make it easy for empty block // kept at the end of the _allocate_list, to make it easy for empty block
// deletion to find them. // deletion to find them.
// //
// allocate(), release(), and delete_empty_blocks_concurrent() all lock the // allocate(), and delete_empty_blocks_concurrent() lock the
// _allocate_mutex while performing any list modifications. // _allocate_mutex while performing any list modifications.
// //
// allocate() and release() update a block's _allocated_bitmask using CAS // allocate() and release() update a block's _allocated_bitmask using CAS
// loops. This prevents loss of updates even though release() may perform // loops. This prevents loss of updates even though release() performs
// some updates without any locking. // its updates without any locking.
// //
// allocate() obtains the entry from the first block in the _allocate_list, // allocate() obtains the entry from the first block in the _allocate_list,
// and updates that block's _allocated_bitmask to indicate the entry is in // and updates that block's _allocated_bitmask to indicate the entry is in
// use. If this makes the block full (all entries in use), the block is // use. If this makes the block full (all entries in use), the block is
// removed from the _allocate_list so it won't be considered by future // removed from the _allocate_list so it won't be considered by future
// allocations until some entries in it are relased. // allocations until some entries in it are released.
// //
// release() looks up the block for the entry without locking. Once the block // release() is performed lock-free. release() first looks up the block for
// has been determined, its _allocated_bitmask needs to be updated, and its // the entry, using address alignment to find the enclosing block (thereby
// position in the _allocate_list may need to be updated. There are two // avoiding iteration over the _active_list). Once the block has been
// cases: // determined, its _allocated_bitmask needs to be updated, and its position in
// the _allocate_list may need to be updated. There are two cases:
// //
// (a) If the block is neither full nor would become empty with the release of // (a) If the block is neither full nor would become empty with the release of
// the entry, only its _allocated_bitmask needs to be updated. But if the CAS // the entry, only its _allocated_bitmask needs to be updated. But if the CAS
// update fails, the applicable case may change for the retry. // update fails, the applicable case may change for the retry.
// //
// (b) Otherwise, the _allocate_list will also need to be modified. This // (b) Otherwise, the _allocate_list also needs to be modified. This requires
// requires locking the _allocate_mutex, and then attempting to CAS the // locking the _allocate_mutex. To keep the release() operation lock-free,
// _allocated_bitmask. If the CAS fails, the applicable case may change for // rather than updating the _allocate_list itself, it instead performs a
// the retry. If the CAS succeeds, then update the _allocate_list according // lock-free push of the block onto the _deferred_updates list. Entries on
// to the the state changes. If the block changed from full to not full, then // that list are processed by allocate() and delete_empty_blocks_XXX(), while
// it needs to be added to the _allocate_list, for use in future allocations. // they already hold the necessary lock. That processing makes the block's
// If the block changed from not empty to empty, then it is moved to the end // list state consistent with its current _allocated_bitmask. The block is
// of the _allocate_list, for ease of empty block deletion processing. // added to the _allocate_list if not already present and the bitmask is not
// full. The block is moved to the end of the _allocated_list if the bitmask
// is empty, for ease of empty block deletion processing.
oop* OopStorage::allocate() { oop* OopStorage::allocate() {
MutexLockerEx ml(_allocate_mutex, Mutex::_no_safepoint_check_flag); MutexLockerEx ml(_allocate_mutex, Mutex::_no_safepoint_check_flag);
// Do some deferred update processing every time we allocate.
// Continue processing deferred updates if _allocate_list is empty,
// in the hope that we'll get a block from that, rather than
// allocating a new block.
while (reduce_deferred_updates() && (_allocate_list.head() == NULL)) {}
// Use the first block in _allocate_list for the allocation.
Block* block = _allocate_list.head(); Block* block = _allocate_list.head();
if (block == NULL) { if (block == NULL) {
// No available blocks; make a new one, and add to storage. // No available blocks; make a new one, and add to storage.
@ -331,7 +349,17 @@ oop* OopStorage::allocate() {
MutexUnlockerEx mul(_allocate_mutex, Mutex::_no_safepoint_check_flag); MutexUnlockerEx mul(_allocate_mutex, Mutex::_no_safepoint_check_flag);
block = Block::new_block(this); block = Block::new_block(this);
} }
if (block != NULL) { if (block == NULL) {
while (_allocate_list.head() == NULL) {
if (!reduce_deferred_updates()) {
// Failed to make new block, no other thread made a block
// available while the mutex was released, and didn't get
// one from a deferred update either, so return failure.
log_info(oopstorage, ref)("%s: failed allocation", name());
return NULL;
}
}
} else {
// Add new block to storage. // Add new block to storage.
log_info(oopstorage, blocks)("%s: new block " PTR_FORMAT, name(), p2i(block)); log_info(oopstorage, blocks)("%s: new block " PTR_FORMAT, name(), p2i(block));
@ -340,22 +368,14 @@ oop* OopStorage::allocate() {
// to allocate from non-empty blocks, to allow empty blocks to // to allocate from non-empty blocks, to allow empty blocks to
// be deleted. // be deleted.
_allocate_list.push_back(*block); _allocate_list.push_back(*block);
++_empty_block_count;
// Add to front of _active_list, and then record as the head // Add to front of _active_list, and then record as the head
// block, for concurrent iteration protocol. // block, for concurrent iteration protocol.
_active_list.push_front(*block); _active_list.push_front(*block);
++_block_count; ++_block_count;
// Ensure all setup of block is complete before making it visible. // Ensure all setup of block is complete before making it visible.
OrderAccess::release_store(&_active_head, block); OrderAccess::release_store(&_active_head, block);
} else {
log_info(oopstorage, blocks)("%s: failed new block allocation", name());
} }
block = _allocate_list.head(); block = _allocate_list.head();
if (block == NULL) {
// Failed to make new block, and no other thread made a block
// available while the mutex was released, so return failure.
return NULL;
}
} }
// Allocate from first block. // Allocate from first block.
assert(block != NULL, "invariant"); assert(block != NULL, "invariant");
@ -363,7 +383,6 @@ oop* OopStorage::allocate() {
if (block->is_empty()) { if (block->is_empty()) {
// Transitioning from empty to not empty. // Transitioning from empty to not empty.
log_debug(oopstorage, blocks)("%s: block not empty " PTR_FORMAT, name(), p2i(block)); log_debug(oopstorage, blocks)("%s: block not empty " PTR_FORMAT, name(), p2i(block));
--_empty_block_count;
} }
oop* result = block->allocate(); oop* result = block->allocate();
assert(result != NULL, "allocation failed"); assert(result != NULL, "allocation failed");
@ -384,72 +403,115 @@ OopStorage::Block* OopStorage::find_block_or_null(const oop* ptr) const {
return Block::block_for_ptr(this, ptr); return Block::block_for_ptr(this, ptr);
} }
void OopStorage::release_from_block(Block& block, uintx releasing) { static void log_release_transitions(uintx releasing,
assert(releasing != 0, "invariant"); uintx old_allocated,
uintx allocated = block.allocated_bitmask(); const OopStorage* owner,
const void* block) {
ResourceMark rm;
Log(oopstorage, blocks) log;
LogStream ls(log.debug());
if (is_full_bitmask(old_allocated)) {
ls.print_cr("%s: block not full " PTR_FORMAT, owner->name(), p2i(block));
}
if (releasing == old_allocated) {
ls.print_cr("%s: block empty " PTR_FORMAT, owner->name(), p2i(block));
}
}
void OopStorage::Block::release_entries(uintx releasing, Block* volatile* deferred_list) {
assert(releasing != 0, "preconditon");
// Prevent empty block deletion when transitioning to empty.
Atomic::inc(&_release_refcount);
// Atomically update allocated bitmask.
uintx old_allocated = _allocated_bitmask;
while (true) { while (true) {
assert(releasing == (allocated & releasing), "invariant"); assert((releasing & ~old_allocated) == 0, "releasing unallocated entries");
uintx new_value = allocated ^ releasing; uintx new_value = old_allocated ^ releasing;
// CAS new_value into block's allocated bitmask, retrying with uintx fetched = Atomic::cmpxchg(new_value, &_allocated_bitmask, old_allocated);
// updated allocated bitmask until the CAS succeeds. if (fetched == old_allocated) break; // Successful update.
uintx fetched; old_allocated = fetched; // Retry with updated bitmask.
if (!is_full_bitmask(allocated) && !is_empty_bitmask(new_value)) { }
fetched = block.cmpxchg_allocated_bitmask(new_value, allocated);
if (fetched == allocated) return; // Now that the bitmask has been updated, if we have a state transition
} else { // (updated bitmask is empty or old bitmask was full), atomically push
// Need special handling if transitioning from full to not full, // this block onto the deferred updates list. Some future call to
// or from not empty to empty. For those cases, must hold the // reduce_deferred_updates will make any needed changes related to this
// _allocation_mutex when updating the allocated bitmask, to // block and _allocate_list. This deferral avoids list updates and the
// ensure the associated list manipulations will be consistent // associated locking here.
// with the allocation bitmask that is visible to other threads if ((releasing == old_allocated) || is_full_bitmask(old_allocated)) {
// in allocate() or deleting empty blocks. // Log transitions. Both transitions are possible in a single update.
MutexLockerEx ml(_allocate_mutex, Mutex::_no_safepoint_check_flag); if (log_is_enabled(Debug, oopstorage, blocks)) {
fetched = block.cmpxchg_allocated_bitmask(new_value, allocated); log_release_transitions(releasing, old_allocated, _owner, this);
if (fetched == allocated) { }
// CAS succeeded; handle special cases, which might no longer apply. // Attempt to claim responsibility for adding this block to the deferred
if (is_full_bitmask(allocated)) { // list, by setting the link to non-NULL by self-looping. If this fails,
// Transitioning from full to not-full; add to _allocate_list. // then someone else has made such a claim and the deferred update has not
log_debug(oopstorage, blocks)("%s: block not full " PTR_FORMAT, name(), p2i(&block)); // yet been processed and will include our change, so we don't need to do
_allocate_list.push_front(block); // anything further.
assert(!block.is_full(), "invariant"); // Still not full. if (Atomic::replace_if_null(this, &_deferred_updates_next)) {
} // Successfully claimed. Push, with self-loop for end-of-list.
if (is_empty_bitmask(new_value)) { Block* head = *deferred_list;
// Transitioning from not-empty to empty; move to end of while (true) {
// _allocate_list, to make it a deletion candidate. _deferred_updates_next = (head == NULL) ? this : head;
log_debug(oopstorage, blocks)("%s: block empty " PTR_FORMAT, name(), p2i(&block)); Block* fetched = Atomic::cmpxchg(this, deferred_list, head);
_allocate_list.unlink(block); if (fetched == head) break; // Successful update.
_allocate_list.push_back(block); head = fetched; // Retry with updated head.
++_empty_block_count; }
assert(block.is_empty(), "invariant"); // Still empty. log_debug(oopstorage, blocks)("%s: deferred update " PTR_FORMAT,
} _owner->name(), p2i(this));
return; // Successful CAS and transitions handled.
}
} }
// CAS failed; retry with latest value.
allocated = fetched;
} }
// Release hold on empty block deletion.
Atomic::dec(&_release_refcount);
} }
#ifdef ASSERT // Process one available deferred update. Returns true if one was processed.
void OopStorage::check_release(const Block* block, const oop* ptr) const { bool OopStorage::reduce_deferred_updates() {
switch (allocation_status_validating_block(block, ptr)) { assert_locked_or_safepoint(_allocate_mutex);
case INVALID_ENTRY: // Atomically pop a block off the list, if any available.
fatal("Releasing invalid entry: " PTR_FORMAT, p2i(ptr)); // No ABA issue because this is only called by one thread at a time.
break; // The atomicity is wrto pushes by release().
Block* block = OrderAccess::load_acquire(&_deferred_updates);
case UNALLOCATED_ENTRY: while (true) {
fatal("Releasing unallocated entry: " PTR_FORMAT, p2i(ptr)); if (block == NULL) return false;
break; // Try atomic pop of block from list.
Block* tail = block->deferred_updates_next();
case ALLOCATED_ENTRY: if (block == tail) tail = NULL; // Handle self-loop end marker.
assert(block->contains(ptr), "invariant"); Block* fetched = Atomic::cmpxchg(tail, &_deferred_updates, block);
break; if (fetched == block) break; // Update successful.
block = fetched; // Retry with updated block.
default:
ShouldNotReachHere();
} }
block->set_deferred_updates_next(NULL); // Clear tail after updating head.
// Ensure bitmask read after pop is complete, including clearing tail, for
// ordering with release(). Without this, we may be processing a stale
// bitmask state here while blocking a release() operation from recording
// the deferred update needed for its bitmask change.
OrderAccess::storeload();
// Process popped block.
uintx allocated = block->allocated_bitmask();
// Make membership in list consistent with bitmask state.
if ((_allocate_list.ctail() != NULL) &&
((_allocate_list.ctail() == block) ||
(_allocate_list.next(*block) != NULL))) {
// Block is in the allocate list.
assert(!is_full_bitmask(allocated), "invariant");
} else if (!is_full_bitmask(allocated)) {
// Block is not in the allocate list, but now should be.
_allocate_list.push_front(*block);
} // Else block is full and not in list, which is correct.
// Move empty block to end of list, for possible deletion.
if (is_empty_bitmask(allocated)) {
_allocate_list.unlink(*block);
_allocate_list.push_back(*block);
}
log_debug(oopstorage, blocks)("%s: processed deferred update " PTR_FORMAT,
name(), p2i(block));
return true; // Processed one pending update.
} }
#endif // ASSERT
inline void check_release_entry(const oop* entry) { inline void check_release_entry(const oop* entry) {
assert(entry != NULL, "Releasing NULL"); assert(entry != NULL, "Releasing NULL");
@ -459,9 +521,9 @@ inline void check_release_entry(const oop* entry) {
void OopStorage::release(const oop* ptr) { void OopStorage::release(const oop* ptr) {
check_release_entry(ptr); check_release_entry(ptr);
Block* block = find_block_or_null(ptr); Block* block = find_block_or_null(ptr);
check_release(block, ptr); assert(block != NULL, "%s: invalid release " PTR_FORMAT, name(), p2i(ptr));
log_info(oopstorage, ref)("%s: released " PTR_FORMAT, name(), p2i(ptr)); log_info(oopstorage, ref)("%s: released " PTR_FORMAT, name(), p2i(ptr));
release_from_block(*block, block->bitmask_for_entry(ptr)); block->release_entries(block->bitmask_for_entry(ptr), &_deferred_updates);
Atomic::dec(&_allocation_count); Atomic::dec(&_allocation_count);
} }
@ -470,15 +532,15 @@ void OopStorage::release(const oop* const* ptrs, size_t size) {
while (i < size) { while (i < size) {
check_release_entry(ptrs[i]); check_release_entry(ptrs[i]);
Block* block = find_block_or_null(ptrs[i]); Block* block = find_block_or_null(ptrs[i]);
check_release(block, ptrs[i]); assert(block != NULL, "%s: invalid release " PTR_FORMAT, name(), p2i(ptrs[i]));
log_info(oopstorage, ref)("%s: released " PTR_FORMAT, name(), p2i(ptrs[i])); log_info(oopstorage, ref)("%s: released " PTR_FORMAT, name(), p2i(ptrs[i]));
size_t count = 0; size_t count = 0;
uintx releasing = 0; uintx releasing = 0;
for ( ; i < size; ++i) { for ( ; i < size; ++i) {
const oop* entry = ptrs[i]; const oop* entry = ptrs[i];
check_release_entry(entry);
// If entry not in block, finish block and resume outer loop with entry. // If entry not in block, finish block and resume outer loop with entry.
if (!block->contains(entry)) break; if (!block->contains(entry)) break;
check_release_entry(entry);
// Add entry to releasing bitmap. // Add entry to releasing bitmap.
log_info(oopstorage, ref)("%s: released " PTR_FORMAT, name(), p2i(entry)); log_info(oopstorage, ref)("%s: released " PTR_FORMAT, name(), p2i(entry));
uintx entry_bitmask = block->bitmask_for_entry(entry); uintx entry_bitmask = block->bitmask_for_entry(entry);
@ -488,7 +550,7 @@ void OopStorage::release(const oop* const* ptrs, size_t size) {
++count; ++count;
} }
// Release the contiguous entries that are in block. // Release the contiguous entries that are in block.
release_from_block(*block, releasing); block->release_entries(releasing, &_deferred_updates);
Atomic::sub(count, &_allocation_count); Atomic::sub(count, &_allocation_count);
} }
} }
@ -506,11 +568,11 @@ OopStorage::OopStorage(const char* name,
_active_list(&Block::get_active_entry), _active_list(&Block::get_active_entry),
_allocate_list(&Block::get_allocate_entry), _allocate_list(&Block::get_allocate_entry),
_active_head(NULL), _active_head(NULL),
_deferred_updates(NULL),
_allocate_mutex(allocate_mutex), _allocate_mutex(allocate_mutex),
_active_mutex(active_mutex), _active_mutex(active_mutex),
_allocation_count(0), _allocation_count(0),
_block_count(0), _block_count(0),
_empty_block_count(0),
_concurrent_iteration_active(false) _concurrent_iteration_active(false)
{ {
assert(_active_mutex->rank() < _allocate_mutex->rank(), assert(_active_mutex->rank() < _allocate_mutex->rank(),
@ -529,6 +591,10 @@ void OopStorage::delete_empty_block(const Block& block) {
OopStorage::~OopStorage() { OopStorage::~OopStorage() {
Block* block; Block* block;
while ((block = _deferred_updates) != NULL) {
_deferred_updates = block->deferred_updates_next();
block->set_deferred_updates_next(NULL);
}
while ((block = _allocate_list.head()) != NULL) { while ((block = _allocate_list.head()) != NULL) {
_allocate_list.unlink(*block); _allocate_list.unlink(*block);
} }
@ -539,43 +605,47 @@ OopStorage::~OopStorage() {
FREE_C_HEAP_ARRAY(char, _name); FREE_C_HEAP_ARRAY(char, _name);
} }
void OopStorage::delete_empty_blocks_safepoint(size_t retain) { void OopStorage::delete_empty_blocks_safepoint() {
assert_at_safepoint(); assert_at_safepoint();
// Process any pending release updates, which may make more empty
// blocks available for deletion.
while (reduce_deferred_updates()) {}
// Don't interfere with a concurrent iteration. // Don't interfere with a concurrent iteration.
if (_concurrent_iteration_active) return; if (_concurrent_iteration_active) return;
// Compute the number of blocks to remove, to minimize volatile accesses. // Delete empty (and otherwise deletable) blocks from end of _allocate_list.
size_t empty_blocks = _empty_block_count; for (const Block* block = _allocate_list.ctail();
if (retain < empty_blocks) { (block != NULL) && block->is_deletable();
size_t remove_count = empty_blocks - retain; block = _allocate_list.ctail()) {
// Update volatile counters once. _active_list.unlink(*block);
_block_count -= remove_count; _allocate_list.unlink(*block);
_empty_block_count -= remove_count; delete_empty_block(*block);
do { --_block_count;
const Block* block = _allocate_list.ctail();
assert(block != NULL, "invariant");
assert(block->is_empty(), "invariant");
// Remove block from lists, and delete it.
_active_list.unlink(*block);
_allocate_list.unlink(*block);
delete_empty_block(*block);
} while (--remove_count > 0);
// Update _active_head, in case current value was in deleted set.
_active_head = _active_list.head();
} }
// Update _active_head, in case current value was in deleted set.
_active_head = _active_list.head();
} }
void OopStorage::delete_empty_blocks_concurrent(size_t retain) { void OopStorage::delete_empty_blocks_concurrent() {
MutexLockerEx ml(_allocate_mutex, Mutex::_no_safepoint_check_flag); MutexLockerEx ml(_allocate_mutex, Mutex::_no_safepoint_check_flag);
// Other threads could be adding to the empty block count while we // Other threads could be adding to the empty block count while we
// release the mutex across the block deletions. Set an upper bound // release the mutex across the block deletions. Set an upper bound
// on how many blocks we'll try to release, so other threads can't // on how many blocks we'll try to release, so other threads can't
// cause an unbounded stay in this function. // cause an unbounded stay in this function.
if (_empty_block_count <= retain) return; size_t limit = _block_count;
size_t limit = _empty_block_count - retain;
for (size_t i = 0; (i < limit) && (retain < _empty_block_count); ++i) { for (size_t i = 0; i < limit; ++i) {
// Additional updates might become available while we dropped the
// lock. But limit number processed to limit lock duration.
reduce_deferred_updates();
const Block* block = _allocate_list.ctail(); const Block* block = _allocate_list.ctail();
assert(block != NULL, "invariant"); if ((block == NULL) || !block->is_deletable()) {
assert(block->is_empty(), "invariant"); // No block to delete, so done. There could be more pending
// deferred updates that could give us more work to do; deal with
// that in some later call, to limit lock duration here.
return;
}
{ {
MutexLockerEx aml(_active_mutex, Mutex::_no_safepoint_check_flag); MutexLockerEx aml(_active_mutex, Mutex::_no_safepoint_check_flag);
// Don't interfere with a concurrent iteration. // Don't interfere with a concurrent iteration.
@ -589,28 +659,31 @@ void OopStorage::delete_empty_blocks_concurrent(size_t retain) {
} }
// Remove block from _allocate_list and delete it. // Remove block from _allocate_list and delete it.
_allocate_list.unlink(*block); _allocate_list.unlink(*block);
--_empty_block_count;
// Release mutex while deleting block. // Release mutex while deleting block.
MutexUnlockerEx ul(_allocate_mutex, Mutex::_no_safepoint_check_flag); MutexUnlockerEx ul(_allocate_mutex, Mutex::_no_safepoint_check_flag);
delete_empty_block(*block); delete_empty_block(*block);
} }
} }
OopStorage::EntryStatus
OopStorage::allocation_status_validating_block(const Block* block,
const oop* ptr) const {
MutexLockerEx ml(_allocate_mutex, Mutex::_no_safepoint_check_flag);
if ((block == NULL) || !is_valid_block_locked_or_safepoint(block)) {
return INVALID_ENTRY;
} else if ((block->allocated_bitmask() & block->bitmask_for_entry(ptr)) != 0) {
return ALLOCATED_ENTRY;
} else {
return UNALLOCATED_ENTRY;
}
}
OopStorage::EntryStatus OopStorage::allocation_status(const oop* ptr) const { OopStorage::EntryStatus OopStorage::allocation_status(const oop* ptr) const {
return allocation_status_validating_block(find_block_or_null(ptr), ptr); const Block* block = find_block_or_null(ptr);
if (block != NULL) {
// Verify block is a real block. For now, simple linear search.
// Do something more clever if this is a performance bottleneck.
MutexLockerEx ml(_allocate_mutex, Mutex::_no_safepoint_check_flag);
for (const Block* check_block = _active_list.chead();
check_block != NULL;
check_block = _active_list.next(*check_block)) {
if (check_block == block) {
if ((block->allocated_bitmask() & block->bitmask_for_entry(ptr)) != 0) {
return ALLOCATED_ENTRY;
} else {
return UNALLOCATED_ENTRY;
}
}
}
}
return INVALID_ENTRY;
} }
size_t OopStorage::allocation_count() const { size_t OopStorage::allocation_count() const {
@ -621,10 +694,6 @@ size_t OopStorage::block_count() const {
return _block_count; return _block_count;
} }
size_t OopStorage::empty_block_count() const {
return _empty_block_count;
}
size_t OopStorage::total_memory_usage() const { size_t OopStorage::total_memory_usage() const {
size_t total_size = sizeof(OopStorage); size_t total_size = sizeof(OopStorage);
total_size += strlen(name()) + 1; total_size += strlen(name()) + 1;
@ -690,17 +759,12 @@ const char* OopStorage::name() const { return _name; }
void OopStorage::print_on(outputStream* st) const { void OopStorage::print_on(outputStream* st) const {
size_t allocations = _allocation_count; size_t allocations = _allocation_count;
size_t blocks = _block_count; size_t blocks = _block_count;
size_t empties = _empty_block_count;
// Comparison is being careful about racy accesses.
size_t used = (blocks < empties) ? 0 : (blocks - empties);
double data_size = section_size * section_count; double data_size = section_size * section_count;
double alloc_percentage = percent_of((double)allocations, used * data_size); double alloc_percentage = percent_of((double)allocations, blocks * data_size);
st->print("%s: " SIZE_FORMAT " entries in " SIZE_FORMAT " blocks (%.F%%), " st->print("%s: " SIZE_FORMAT " entries in " SIZE_FORMAT " blocks (%.F%%), " SIZE_FORMAT " bytes",
SIZE_FORMAT " empties, " SIZE_FORMAT " bytes", name(), allocations, blocks, alloc_percentage, total_memory_usage());
name(), allocations, used, alloc_percentage,
empties, total_memory_usage());
if (_concurrent_iteration_active) { if (_concurrent_iteration_active) {
st->print(", concurrent iteration active"); st->print(", concurrent iteration active");
} }

@ -84,10 +84,6 @@ public:
// The number of blocks of entries. Useful for sizing parallel iteration. // The number of blocks of entries. Useful for sizing parallel iteration.
size_t block_count() const; size_t block_count() const;
// The number of blocks with no allocated entries. Useful for sizing
// parallel iteration and scheduling block deletion.
size_t empty_block_count() const;
// Total number of blocks * memory allocation per block, plus // Total number of blocks * memory allocation per block, plus
// bookkeeping overhead, including this storage object. // bookkeeping overhead, including this storage object.
size_t total_memory_usage() const; size_t total_memory_usage() const;
@ -107,14 +103,13 @@ public:
// postcondition: *result == NULL. // postcondition: *result == NULL.
oop* allocate(); oop* allocate();
// Deallocates ptr, after setting its value to NULL. Locks _allocate_mutex. // Deallocates ptr. No locking.
// precondition: ptr is a valid allocated entry. // precondition: ptr is a valid allocated entry.
// precondition: *ptr == NULL. // precondition: *ptr == NULL.
void release(const oop* ptr); void release(const oop* ptr);
// Releases all the ptrs. Possibly faster than individual calls to // Releases all the ptrs. Possibly faster than individual calls to
// release(oop*). Best if ptrs is sorted by address. Locks // release(oop*). Best if ptrs is sorted by address. No locking.
// _allocate_mutex.
// precondition: All elements of ptrs are valid allocated entries. // precondition: All elements of ptrs are valid allocated entries.
// precondition: *ptrs[i] == NULL, for i in [0,size). // precondition: *ptrs[i] == NULL, for i in [0,size).
void release(const oop* const* ptrs, size_t size); void release(const oop* const* ptrs, size_t size);
@ -160,8 +155,8 @@ public:
// Block cleanup functions are for the exclusive use of the GC. // Block cleanup functions are for the exclusive use of the GC.
// Both stop deleting if there is an in-progress concurrent iteration. // Both stop deleting if there is an in-progress concurrent iteration.
// Concurrent deletion locks both the allocate_mutex and the active_mutex. // Concurrent deletion locks both the allocate_mutex and the active_mutex.
void delete_empty_blocks_safepoint(size_t retain = 1); void delete_empty_blocks_safepoint();
void delete_empty_blocks_concurrent(size_t retain = 1); void delete_empty_blocks_concurrent();
// Debugging and logging support. // Debugging and logging support.
const char* name() const; const char* name() const;
@ -231,6 +226,7 @@ private:
BlockList _active_list; BlockList _active_list;
BlockList _allocate_list; BlockList _allocate_list;
Block* volatile _active_head; Block* volatile _active_head;
Block* volatile _deferred_updates;
Mutex* _allocate_mutex; Mutex* _allocate_mutex;
Mutex* _active_mutex; Mutex* _active_mutex;
@ -238,16 +234,12 @@ private:
// Counts are volatile for racy unlocked accesses. // Counts are volatile for racy unlocked accesses.
volatile size_t _allocation_count; volatile size_t _allocation_count;
volatile size_t _block_count; volatile size_t _block_count;
volatile size_t _empty_block_count;
// mutable because this gets set even for const iteration. // mutable because this gets set even for const iteration.
mutable bool _concurrent_iteration_active; mutable bool _concurrent_iteration_active;
Block* find_block_or_null(const oop* ptr) const; Block* find_block_or_null(const oop* ptr) const;
bool is_valid_block_locked_or_safepoint(const Block* block) const;
EntryStatus allocation_status_validating_block(const Block* block, const oop* ptr) const;
void check_release(const Block* block, const oop* ptr) const NOT_DEBUG_RETURN;
void release_from_block(Block& block, uintx release_bitmask);
void delete_empty_block(const Block& block); void delete_empty_block(const Block& block);
bool reduce_deferred_updates();
static void assert_at_safepoint() NOT_DEBUG_RETURN; static void assert_at_safepoint() NOT_DEBUG_RETURN;

@ -44,6 +44,8 @@ class OopStorage::Block /* No base class, to avoid messing up alignment. */ {
void* _memory; // Unaligned storage containing block. void* _memory; // Unaligned storage containing block.
BlockEntry _active_entry; BlockEntry _active_entry;
BlockEntry _allocate_entry; BlockEntry _allocate_entry;
Block* volatile _deferred_updates_next;
volatile uintx _release_refcount;
Block(const OopStorage* owner, void* memory); Block(const OopStorage* owner, void* memory);
~Block(); ~Block();
@ -75,7 +77,10 @@ public:
bool is_full() const; bool is_full() const;
bool is_empty() const; bool is_empty() const;
uintx allocated_bitmask() const; uintx allocated_bitmask() const;
uintx cmpxchg_allocated_bitmask(uintx new_value, uintx compare_value); bool is_deletable() const;
Block* deferred_updates_next() const;
void set_deferred_updates_next(Block* new_next);
bool contains(const oop* ptr) const; bool contains(const oop* ptr) const;
@ -86,6 +91,8 @@ public:
static Block* new_block(const OopStorage* owner); static Block* new_block(const OopStorage* owner);
static void delete_block(const Block& block); static void delete_block(const Block& block);
void release_entries(uintx releasing, Block* volatile* deferred_list);
template<typename F> bool iterate(F f); template<typename F> bool iterate(F f);
template<typename F> bool iterate(F f) const; template<typename F> bool iterate(F f) const;
}; // class Block }; // class Block

@ -594,10 +594,6 @@ BytecodeInterpreter::run(interpreterState istate) {
VERIFY_OOP(rcvr); VERIFY_OOP(rcvr);
} }
#endif #endif
// #define HACK
#ifdef HACK
bool interesting = false;
#endif // HACK
/* QQQ this should be a stack method so we don't know actual direction */ /* QQQ this should be a stack method so we don't know actual direction */
guarantee(istate->msg() == initialize || guarantee(istate->msg() == initialize ||
@ -649,19 +645,6 @@ BytecodeInterpreter::run(interpreterState istate) {
os::breakpoint(); os::breakpoint();
} }
#ifdef HACK
{
ResourceMark rm;
char *method_name = istate->method()->name_and_sig_as_C_string();
if (strstr(method_name, "runThese$TestRunner.run()V") != NULL) {
tty->print_cr("entering: depth %d bci: %d",
(istate->_stack_base - istate->_stack),
istate->_bcp - istate->_method->code_base());
interesting = true;
}
}
#endif // HACK
// Lock method if synchronized. // Lock method if synchronized.
if (METHOD->is_synchronized()) { if (METHOD->is_synchronized()) {
// oop rcvr = locals[0].j.r; // oop rcvr = locals[0].j.r;
@ -793,18 +776,6 @@ BytecodeInterpreter::run(interpreterState istate) {
// resume // resume
os::breakpoint(); os::breakpoint();
} }
#ifdef HACK
{
ResourceMark rm;
char *method_name = istate->method()->name_and_sig_as_C_string();
if (strstr(method_name, "runThese$TestRunner.run()V") != NULL) {
tty->print_cr("resume: depth %d bci: %d",
(istate->_stack_base - istate->_stack) ,
istate->_bcp - istate->_method->code_base());
interesting = true;
}
}
#endif // HACK
// returned from a java call, continue executing. // returned from a java call, continue executing.
if (THREAD->pop_frame_pending() && !THREAD->pop_frame_in_process()) { if (THREAD->pop_frame_pending() && !THREAD->pop_frame_in_process()) {
goto handle_Pop_Frame; goto handle_Pop_Frame;

@ -66,6 +66,7 @@
LOG_TAG(exceptions) \ LOG_TAG(exceptions) \
LOG_TAG(exit) \ LOG_TAG(exit) \
LOG_TAG(fingerprint) \ LOG_TAG(fingerprint) \
LOG_TAG(free) \
LOG_TAG(freelist) \ LOG_TAG(freelist) \
LOG_TAG(gc) \ LOG_TAG(gc) \
LOG_TAG(handshake) \ LOG_TAG(handshake) \
@ -85,6 +86,7 @@
LOG_TAG(load) /* Trace all classes loaded */ \ LOG_TAG(load) /* Trace all classes loaded */ \
LOG_TAG(loader) \ LOG_TAG(loader) \
LOG_TAG(logging) \ LOG_TAG(logging) \
LOG_TAG(malloc) \
LOG_TAG(mark) \ LOG_TAG(mark) \
LOG_TAG(marking) \ LOG_TAG(marking) \
LOG_TAG(membername) \ LOG_TAG(membername) \

@ -210,18 +210,6 @@ ResourceObj::~ResourceObj() {
} }
#endif // ASSERT #endif // ASSERT
void trace_heap_malloc(size_t size, const char* name, void* p) {
// A lock is not needed here - tty uses a lock internally
tty->print_cr("Heap malloc " INTPTR_FORMAT " " SIZE_FORMAT " %s", p2i(p), size, name == NULL ? "" : name);
}
void trace_heap_free(void* p) {
// A lock is not needed here - tty uses a lock internally
tty->print_cr("Heap free " INTPTR_FORMAT, p2i(p));
}
//-------------------------------------------------------------------------------------- //--------------------------------------------------------------------------------------
// Non-product code // Non-product code

@ -33,9 +33,6 @@
// Explicit C-heap memory management // Explicit C-heap memory management
void trace_heap_malloc(size_t size, const char* name, void *p);
void trace_heap_free(void *p);
#ifndef PRODUCT #ifndef PRODUCT
// Increments unsigned long value for statistics (not atomic on MP). // Increments unsigned long value for statistics (not atomic on MP).
inline void inc_stat_counter(volatile julong* dest, julong add_value) { inline void inc_stat_counter(volatile julong* dest, julong add_value) {
@ -56,9 +53,6 @@ inline char* AllocateHeap(size_t size, MEMFLAGS flags,
const NativeCallStack& stack, const NativeCallStack& stack,
AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) { AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
char* p = (char*) os::malloc(size, flags, stack); char* p = (char*) os::malloc(size, flags, stack);
#ifdef ASSERT
if (PrintMallocFree) trace_heap_malloc(size, "AllocateHeap", p);
#endif
if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) { if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "AllocateHeap"); vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "AllocateHeap");
} }
@ -73,9 +67,6 @@ ALWAYSINLINE char* AllocateHeap(size_t size, MEMFLAGS flags,
ALWAYSINLINE char* ReallocateHeap(char *old, size_t size, MEMFLAGS flag, ALWAYSINLINE char* ReallocateHeap(char *old, size_t size, MEMFLAGS flag,
AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) { AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
char* p = (char*) os::realloc(old, size, flag, CURRENT_PC); char* p = (char*) os::realloc(old, size, flag, CURRENT_PC);
#ifdef ASSERT
if (PrintMallocFree) trace_heap_malloc(size, "ReallocateHeap", p);
#endif
if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) { if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "ReallocateHeap"); vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "ReallocateHeap");
} }
@ -83,20 +74,13 @@ ALWAYSINLINE char* ReallocateHeap(char *old, size_t size, MEMFLAGS flag,
} }
inline void FreeHeap(void* p) { inline void FreeHeap(void* p) {
#ifdef ASSERT
if (PrintMallocFree) trace_heap_free(p);
#endif
os::free(p); os::free(p);
} }
template <MEMFLAGS F> void* CHeapObj<F>::operator new(size_t size, template <MEMFLAGS F> void* CHeapObj<F>::operator new(size_t size,
const NativeCallStack& stack) throw() { const NativeCallStack& stack) throw() {
void* p = (void*)AllocateHeap(size, F, stack); return (void*)AllocateHeap(size, F, stack);
#ifdef ASSERT
if (PrintMallocFree) trace_heap_malloc(size, "CHeapObj-new", p);
#endif
return p;
} }
template <MEMFLAGS F> void* CHeapObj<F>::operator new(size_t size) throw() { template <MEMFLAGS F> void* CHeapObj<F>::operator new(size_t size) throw() {
@ -104,14 +88,9 @@ template <MEMFLAGS F> void* CHeapObj<F>::operator new(size_t size) throw() {
} }
template <MEMFLAGS F> void* CHeapObj<F>::operator new (size_t size, template <MEMFLAGS F> void* CHeapObj<F>::operator new (size_t size,
const std::nothrow_t& nothrow_constant, const NativeCallStack& stack) throw() { const std::nothrow_t& nothrow_constant, const NativeCallStack& stack) throw() {
void* p = (void*)AllocateHeap(size, F, stack, return (void*)AllocateHeap(size, F, stack, AllocFailStrategy::RETURN_NULL);
AllocFailStrategy::RETURN_NULL); }
#ifdef ASSERT
if (PrintMallocFree) trace_heap_malloc(size, "CHeapObj-new", p);
#endif
return p;
}
template <MEMFLAGS F> void* CHeapObj<F>::operator new (size_t size, template <MEMFLAGS F> void* CHeapObj<F>::operator new (size_t size,
const std::nothrow_t& nothrow_constant) throw() { const std::nothrow_t& nothrow_constant) throw() {

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -299,23 +299,11 @@ void* Arena::operator new (size_t size, const std::nothrow_t& nothrow_constant)
// dynamic memory type binding // dynamic memory type binding
void* Arena::operator new(size_t size, MEMFLAGS flags) throw() { void* Arena::operator new(size_t size, MEMFLAGS flags) throw() {
#ifdef ASSERT
void* p = (void*)AllocateHeap(size, flags, CALLER_PC);
if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p);
return p;
#else
return (void *) AllocateHeap(size, flags, CALLER_PC); return (void *) AllocateHeap(size, flags, CALLER_PC);
#endif
} }
void* Arena::operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) throw() { void* Arena::operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) throw() {
#ifdef ASSERT return (void*)AllocateHeap(size, flags, CALLER_PC, AllocFailStrategy::RETURN_NULL);
void* p = os::malloc(size, flags, CALLER_PC);
if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p);
return p;
#else
return os::malloc(size, flags, CALLER_PC);
#endif
} }
void Arena::operator delete(void* p) { void Arena::operator delete(void* p) {

@ -124,8 +124,6 @@
#endif // ndef DTRACE_ENABLED #endif // ndef DTRACE_ENABLED
volatile int InstanceKlass::_total_instanceKlass_count = 0;
static inline bool is_class_loader(const Symbol* class_name, static inline bool is_class_loader(const Symbol* class_name,
const ClassFileParser& parser) { const ClassFileParser& parser) {
assert(class_name != NULL, "invariant"); assert(class_name != NULL, "invariant");
@ -193,8 +191,6 @@ InstanceKlass* InstanceKlass::allocate_instance_klass(const ClassFileParser& par
// Add all classes to our internal class loader list here, // Add all classes to our internal class loader list here,
// including classes in the bootstrap (NULL) class loader. // including classes in the bootstrap (NULL) class loader.
loader_data->add_class(ik, publicize); loader_data->add_class(ik, publicize);
Atomic::inc(&_total_instanceKlass_count);
return ik; return ik;
} }
@ -2241,9 +2237,6 @@ void InstanceKlass::release_C_heap_structures() {
// class can't be referenced anymore). // class can't be referenced anymore).
if (_array_name != NULL) _array_name->decrement_refcount(); if (_array_name != NULL) _array_name->decrement_refcount();
if (_source_debug_extension != NULL) FREE_C_HEAP_ARRAY(char, _source_debug_extension); if (_source_debug_extension != NULL) FREE_C_HEAP_ARRAY(char, _source_debug_extension);
assert(_total_instanceKlass_count >= 1, "Sanity check");
Atomic::dec(&_total_instanceKlass_count);
} }
void InstanceKlass::set_source_debug_extension(const char* array, int length) { void InstanceKlass::set_source_debug_extension(const char* array, int length) {

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -135,10 +135,7 @@ class InstanceKlass: public Klass {
initialization_error // error happened during initialization initialization_error // error happened during initialization
}; };
static int number_of_instance_classes() { return _total_instanceKlass_count; }
private: private:
static volatile int _total_instanceKlass_count;
static InstanceKlass* allocate_instance_klass(const ClassFileParser& parser, TRAPS); static InstanceKlass* allocate_instance_klass(const ClassFileParser& parser, TRAPS);
protected: protected:

@ -3226,7 +3226,7 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist,
n->Opcode() == Op_EncodeISOArray) { n->Opcode() == Op_EncodeISOArray) {
// get the memory projection // get the memory projection
n = n->find_out_with(Op_SCMemProj); n = n->find_out_with(Op_SCMemProj);
assert(n->Opcode() == Op_SCMemProj, "memory projection required"); assert(n != NULL && n->Opcode() == Op_SCMemProj, "memory projection required");
} else { } else {
assert(n->is_Mem(), "memory node required."); assert(n->is_Mem(), "memory node required.");
Node *addr = n->in(MemNode::Address); Node *addr = n->in(MemNode::Address);
@ -3250,7 +3250,7 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist,
} else if (n->is_LoadStore()) { } else if (n->is_LoadStore()) {
// get the memory projection // get the memory projection
n = n->find_out_with(Op_SCMemProj); n = n->find_out_with(Op_SCMemProj);
assert(n->Opcode() == Op_SCMemProj, "memory projection required"); assert(n != NULL && n->Opcode() == Op_SCMemProj, "memory projection required");
} }
} }
// push user on appropriate worklist // push user on appropriate worklist

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -2264,7 +2264,7 @@ void Parse::do_one_bytecode() {
ciMethodData* methodData = method()->method_data(); ciMethodData* methodData = method()->method_data();
if (!methodData->is_mature()) break; if (!methodData->is_mature()) break;
ciProfileData* data = methodData->bci_to_data(bci()); ciProfileData* data = methodData->bci_to_data(bci());
assert( data->is_JumpData(), "" ); assert(data != NULL && data->is_JumpData(), "need JumpData for taken branch");
int taken = ((ciJumpData*)data)->taken(); int taken = ((ciJumpData*)data)->taken();
taken = method()->scale_count(taken); taken = method()->scale_count(taken);
target_block->set_count(taken); target_block->set_count(taken);

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -459,7 +459,7 @@ void Parse::profile_taken_branch(int target_bci, bool force_update) {
ciMethodData* md = method()->method_data(); ciMethodData* md = method()->method_data();
assert(md != NULL, "expected valid ciMethodData"); assert(md != NULL, "expected valid ciMethodData");
ciProfileData* data = md->bci_to_data(cur_bci); ciProfileData* data = md->bci_to_data(cur_bci);
assert(data->is_JumpData(), "need JumpData for taken branch"); assert(data != NULL && data->is_JumpData(), "need JumpData for taken branch");
increment_md_counter_at(md, data, JumpData::taken_offset()); increment_md_counter_at(md, data, JumpData::taken_offset());
} }
@ -470,6 +470,7 @@ void Parse::profile_taken_branch(int target_bci, bool force_update) {
ciMethodData* md = method()->method_data(); ciMethodData* md = method()->method_data();
if (osr_site) { if (osr_site) {
ciProfileData* data = md->bci_to_data(cur_bci); ciProfileData* data = md->bci_to_data(cur_bci);
assert(data != NULL && data->is_JumpData(), "need JumpData for taken branch");
int limit = (CompileThreshold int limit = (CompileThreshold
* (OnStackReplacePercentage - InterpreterProfilePercentage)) / 100; * (OnStackReplacePercentage - InterpreterProfilePercentage)) / 100;
test_for_osr_md_counter_at(md, data, JumpData::taken_offset(), limit); test_for_osr_md_counter_at(md, data, JumpData::taken_offset(), limit);
@ -495,7 +496,7 @@ void Parse::profile_not_taken_branch(bool force_update) {
ciMethodData* md = method()->method_data(); ciMethodData* md = method()->method_data();
assert(md != NULL, "expected valid ciMethodData"); assert(md != NULL, "expected valid ciMethodData");
ciProfileData* data = md->bci_to_data(bci()); ciProfileData* data = md->bci_to_data(bci());
assert(data->is_BranchData(), "need BranchData for not taken branch"); assert(data != NULL && data->is_BranchData(), "need BranchData for not taken branch");
increment_md_counter_at(md, data, BranchData::not_taken_offset()); increment_md_counter_at(md, data, BranchData::not_taken_offset());
} }
@ -526,7 +527,7 @@ void Parse::profile_generic_call() {
ciMethodData* md = method()->method_data(); ciMethodData* md = method()->method_data();
assert(md != NULL, "expected valid ciMethodData"); assert(md != NULL, "expected valid ciMethodData");
ciProfileData* data = md->bci_to_data(bci()); ciProfileData* data = md->bci_to_data(bci());
assert(data->is_CounterData(), "need CounterData for not taken branch"); assert(data != NULL && data->is_CounterData(), "need CounterData for not taken branch");
increment_md_counter_at(md, data, CounterData::count_offset()); increment_md_counter_at(md, data, CounterData::count_offset());
} }
@ -537,7 +538,7 @@ void Parse::profile_receiver_type(Node* receiver) {
ciMethodData* md = method()->method_data(); ciMethodData* md = method()->method_data();
assert(md != NULL, "expected valid ciMethodData"); assert(md != NULL, "expected valid ciMethodData");
ciProfileData* data = md->bci_to_data(bci()); ciProfileData* data = md->bci_to_data(bci());
assert(data->is_ReceiverTypeData(), "need ReceiverTypeData here"); assert(data != NULL && data->is_ReceiverTypeData(), "need ReceiverTypeData here");
// Skip if we aren't tracking receivers // Skip if we aren't tracking receivers
if (TypeProfileWidth < 1) { if (TypeProfileWidth < 1) {
@ -568,7 +569,7 @@ void Parse::profile_ret(int target_bci) {
ciMethodData* md = method()->method_data(); ciMethodData* md = method()->method_data();
assert(md != NULL, "expected valid ciMethodData"); assert(md != NULL, "expected valid ciMethodData");
ciProfileData* data = md->bci_to_data(bci()); ciProfileData* data = md->bci_to_data(bci());
assert(data->is_RetData(), "need RetData for ret"); assert(data != NULL && data->is_RetData(), "need RetData for ret");
ciRetData* ret_data = (ciRetData*)data->as_RetData(); ciRetData* ret_data = (ciRetData*)data->as_RetData();
// Look for the target_bci is already in the table // Look for the target_bci is already in the table
@ -601,7 +602,7 @@ void Parse::profile_null_checkcast() {
ciMethodData* md = method()->method_data(); ciMethodData* md = method()->method_data();
assert(md != NULL, "expected valid ciMethodData"); assert(md != NULL, "expected valid ciMethodData");
ciProfileData* data = md->bci_to_data(bci()); ciProfileData* data = md->bci_to_data(bci());
assert(data->is_BitData(), "need BitData for checkcast"); assert(data != NULL && data->is_BitData(), "need BitData for checkcast");
set_md_flag_at(md, data, BitData::null_seen_byte_constant()); set_md_flag_at(md, data, BitData::null_seen_byte_constant());
} }
@ -613,7 +614,7 @@ void Parse::profile_switch_case(int table_index) {
assert(md != NULL, "expected valid ciMethodData"); assert(md != NULL, "expected valid ciMethodData");
ciProfileData* data = md->bci_to_data(bci()); ciProfileData* data = md->bci_to_data(bci());
assert(data->is_MultiBranchData(), "need MultiBranchData for switch case"); assert(data != NULL && data->is_MultiBranchData(), "need MultiBranchData for switch case");
if (table_index >= 0) { if (table_index >= 0) {
increment_md_counter_at(md, data, MultiBranchData::case_count_offset(table_index)); increment_md_counter_at(md, data, MultiBranchData::case_count_offset(table_index));
} else { } else {

@ -597,12 +597,10 @@ void JvmtiExport::enter_primordial_phase() {
} }
void JvmtiExport::enter_early_start_phase() { void JvmtiExport::enter_early_start_phase() {
JvmtiManageCapabilities::recompute_always_capabilities();
set_early_vmstart_recorded(true); set_early_vmstart_recorded(true);
} }
void JvmtiExport::enter_start_phase() { void JvmtiExport::enter_start_phase() {
JvmtiManageCapabilities::recompute_always_capabilities();
JvmtiEnvBase::set_phase(JVMTI_PHASE_START); JvmtiEnvBase::set_phase(JVMTI_PHASE_START);
} }

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -29,6 +29,9 @@
#include "prims/jvmtiGetLoadedClasses.hpp" #include "prims/jvmtiGetLoadedClasses.hpp"
#include "runtime/thread.hpp" #include "runtime/thread.hpp"
#include "utilities/stack.inline.hpp" #include "utilities/stack.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#endif
// The closure for GetLoadedClasses // The closure for GetLoadedClasses
@ -38,6 +41,20 @@ private:
JvmtiEnv* _env; JvmtiEnv* _env;
Thread* _cur_thread; Thread* _cur_thread;
// Tell the GC to keep this klass alive
static void ensure_klass_alive(oop o) {
// A klass that was previously considered dead can be looked up in the
// CLD/SD, and its _java_mirror or _class_loader can be stored in a root
// or a reachable object making it alive again. The SATB part of G1 needs
// to get notified about this potential resurrection, otherwise the marking
// might not find the object.
#if INCLUDE_ALL_GCS
if (UseG1GC && o != NULL) {
G1SATBCardTableModRefBS::enqueue(o);
}
#endif
}
public: public:
LoadedClassesClosure(Thread* thread, JvmtiEnv* env) : _cur_thread(thread), _env(env) { LoadedClassesClosure(Thread* thread, JvmtiEnv* env) : _cur_thread(thread), _env(env) {
assert(_cur_thread == Thread::current(), "must be current thread"); assert(_cur_thread == Thread::current(), "must be current thread");
@ -46,6 +63,7 @@ public:
void do_klass(Klass* k) { void do_klass(Klass* k) {
// Collect all jclasses // Collect all jclasses
_classStack.push((jclass) _env->jni_reference(Handle(_cur_thread, k->java_mirror()))); _classStack.push((jclass) _env->jni_reference(Handle(_cur_thread, k->java_mirror())));
ensure_klass_alive(k->java_mirror());
} }
int extract(jclass* result_list) { int extract(jclass* result_list) {

@ -57,9 +57,6 @@ jvmtiCapabilities JvmtiManageCapabilities::acquired_capabilities;
void JvmtiManageCapabilities::initialize() { void JvmtiManageCapabilities::initialize() {
always_capabilities = init_always_capabilities(); always_capabilities = init_always_capabilities();
if (JvmtiEnv::get_phase() != JVMTI_PHASE_ONLOAD) {
recompute_always_capabilities();
}
onload_capabilities = init_onload_capabilities(); onload_capabilities = init_onload_capabilities();
always_solo_capabilities = init_always_solo_capabilities(); always_solo_capabilities = init_always_solo_capabilities();
onload_solo_capabilities = init_onload_solo_capabilities(); onload_solo_capabilities = init_onload_solo_capabilities();
@ -68,19 +65,6 @@ void JvmtiManageCapabilities::initialize() {
memset(&acquired_capabilities, 0, sizeof(acquired_capabilities)); memset(&acquired_capabilities, 0, sizeof(acquired_capabilities));
} }
// if the capability sets are initialized in the onload phase then
// it happens before class data sharing (CDS) is initialized. If it
// turns out that CDS gets disabled then we must adjust the always
// capabilities. To ensure a consistent view of the capabililties
// anything we add here should already be in the onload set.
void JvmtiManageCapabilities::recompute_always_capabilities() {
if (!UseSharedSpaces) {
jvmtiCapabilities jc = always_capabilities;
jc.can_generate_all_class_hook_events = 1;
always_capabilities = jc;
}
}
// corresponding init functions // corresponding init functions
jvmtiCapabilities JvmtiManageCapabilities::init_always_capabilities() { jvmtiCapabilities JvmtiManageCapabilities::init_always_capabilities() {
@ -94,6 +78,7 @@ jvmtiCapabilities JvmtiManageCapabilities::init_always_capabilities() {
jc.can_get_synthetic_attribute = 1; jc.can_get_synthetic_attribute = 1;
jc.can_get_monitor_info = 1; jc.can_get_monitor_info = 1;
jc.can_get_constant_pool = 1; jc.can_get_constant_pool = 1;
jc.can_generate_all_class_hook_events = 1;
jc.can_generate_monitor_events = 1; jc.can_generate_monitor_events = 1;
jc.can_generate_garbage_collection_events = 1; jc.can_generate_garbage_collection_events = 1;
jc.can_generate_compiled_method_load_events = 1; jc.can_generate_compiled_method_load_events = 1;
@ -126,7 +111,6 @@ jvmtiCapabilities JvmtiManageCapabilities::init_onload_capabilities() {
jc.can_get_source_debug_extension = 1; jc.can_get_source_debug_extension = 1;
jc.can_access_local_variables = 1; jc.can_access_local_variables = 1;
jc.can_maintain_original_method_order = 1; jc.can_maintain_original_method_order = 1;
jc.can_generate_all_class_hook_events = 1;
jc.can_generate_single_step_events = 1; jc.can_generate_single_step_events = 1;
jc.can_generate_exception_events = 1; jc.can_generate_exception_events = 1;
jc.can_generate_frame_pop_events = 1; jc.can_generate_frame_pop_events = 1;

@ -64,9 +64,6 @@ private:
public: public:
static void initialize(); static void initialize();
// may have to adjust always capabilities when VM initialization has completed
static void recompute_always_capabilities();
// queries and actions // queries and actions
static void get_potential_capabilities(const jvmtiCapabilities *current, static void get_potential_capabilities(const jvmtiCapabilities *current,
const jvmtiCapabilities *prohibited, const jvmtiCapabilities *prohibited,

@ -514,7 +514,9 @@ static SpecialFlag const special_jvm_flags[] = {
{ "DeferThrSuspendLoopCount", JDK_Version::jdk(10), JDK_Version::jdk(11), JDK_Version::jdk(12) }, { "DeferThrSuspendLoopCount", JDK_Version::jdk(10), JDK_Version::jdk(11), JDK_Version::jdk(12) },
{ "DeferPollingPageLoopCount", JDK_Version::jdk(10), JDK_Version::jdk(11), JDK_Version::jdk(12) }, { "DeferPollingPageLoopCount", JDK_Version::jdk(10), JDK_Version::jdk(11), JDK_Version::jdk(12) },
{ "IgnoreUnverifiableClassesDuringDump", JDK_Version::jdk(10), JDK_Version::undefined(), JDK_Version::undefined() }, { "IgnoreUnverifiableClassesDuringDump", JDK_Version::jdk(10), JDK_Version::undefined(), JDK_Version::undefined() },
{ "CheckEndorsedAndExtDirs", JDK_Version::jdk(10), JDK_Version::undefined(), JDK_Version::undefined() }, { "CheckEndorsedAndExtDirs", JDK_Version::jdk(10), JDK_Version::undefined(), JDK_Version::undefined() },
{ "CompilerThreadHintNoPreempt", JDK_Version::jdk(11), JDK_Version::jdk(12), JDK_Version::jdk(13) },
{ "VMThreadHintNoPreempt", JDK_Version::jdk(11), JDK_Version::jdk(12), JDK_Version::jdk(13) },
// --- Deprecated alias flags (see also aliased_jvm_flags) - sorted by obsolete_in then expired_in: // --- Deprecated alias flags (see also aliased_jvm_flags) - sorted by obsolete_in then expired_in:
{ "DefaultMaxRAMFraction", JDK_Version::jdk(8), JDK_Version::undefined(), JDK_Version::undefined() }, { "DefaultMaxRAMFraction", JDK_Version::jdk(8), JDK_Version::undefined(), JDK_Version::undefined() },
@ -527,6 +529,8 @@ static SpecialFlag const special_jvm_flags[] = {
{ "ConvertYieldToSleep", JDK_Version::jdk(9), JDK_Version::jdk(10), JDK_Version::jdk(11) }, { "ConvertYieldToSleep", JDK_Version::jdk(9), JDK_Version::jdk(10), JDK_Version::jdk(11) },
{ "MinSleepInterval", JDK_Version::jdk(9), JDK_Version::jdk(10), JDK_Version::jdk(11) }, { "MinSleepInterval", JDK_Version::jdk(9), JDK_Version::jdk(10), JDK_Version::jdk(11) },
{ "CheckAssertionStatusDirectives",JDK_Version::undefined(), JDK_Version::jdk(11), JDK_Version::jdk(12) }, { "CheckAssertionStatusDirectives",JDK_Version::undefined(), JDK_Version::jdk(11), JDK_Version::jdk(12) },
{ "PrintMallocFree", JDK_Version::undefined(), JDK_Version::jdk(11), JDK_Version::jdk(12) },
{ "PrintMalloc", JDK_Version::undefined(), JDK_Version::jdk(11), JDK_Version::jdk(12) },
{ "PermSize", JDK_Version::undefined(), JDK_Version::jdk(8), JDK_Version::undefined() }, { "PermSize", JDK_Version::undefined(), JDK_Version::jdk(8), JDK_Version::undefined() },
{ "MaxPermSize", JDK_Version::undefined(), JDK_Version::jdk(8), JDK_Version::undefined() }, { "MaxPermSize", JDK_Version::undefined(), JDK_Version::jdk(8), JDK_Version::undefined() },
{ "SharedReadWriteSize", JDK_Version::undefined(), JDK_Version::jdk(10), JDK_Version::undefined() }, { "SharedReadWriteSize", JDK_Version::undefined(), JDK_Version::jdk(10), JDK_Version::undefined() },

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -23,6 +23,7 @@
*/ */
#include "precompiled.hpp" #include "precompiled.hpp"
#include "classfile/classLoaderData.inline.hpp"
#include "code/compiledIC.hpp" #include "code/compiledIC.hpp"
#include "code/nmethod.hpp" #include "code/nmethod.hpp"
#include "code/scopeDesc.hpp" #include "code/scopeDesc.hpp"
@ -312,10 +313,10 @@ void CounterDecay::decay() {
// and hence GC's will not be going on, all Java mutators are suspended // and hence GC's will not be going on, all Java mutators are suspended
// at this point and hence SystemDictionary_lock is also not needed. // at this point and hence SystemDictionary_lock is also not needed.
assert(SafepointSynchronize::is_at_safepoint(), "can only be executed at a safepoint"); assert(SafepointSynchronize::is_at_safepoint(), "can only be executed at a safepoint");
int nclasses = InstanceKlass::number_of_instance_classes(); size_t nclasses = ClassLoaderDataGraph::num_instance_classes();
int classes_per_tick = nclasses * (CounterDecayMinIntervalLength * 1e-3 / size_t classes_per_tick = nclasses * (CounterDecayMinIntervalLength * 1e-3 /
CounterHalfLifeTime); CounterHalfLifeTime);
for (int i = 0; i < classes_per_tick; i++) { for (size_t i = 0; i < classes_per_tick; i++) {
InstanceKlass* k = ClassLoaderDataGraph::try_get_next_class(); InstanceKlass* k = ClassLoaderDataGraph::try_get_next_class();
if (k != NULL) { if (k != NULL) {
k->methods_do(do_method); k->methods_do(do_method);

@ -893,18 +893,12 @@ public:
develop(bool, TraceJavaAssertions, false, \ develop(bool, TraceJavaAssertions, false, \
"Trace java language assertions") \ "Trace java language assertions") \
\ \
notproduct(bool, PrintMallocFree, false, \
"Trace calls to C heap malloc/free allocation") \
\
notproduct(bool, VerifyCodeCache, false, \ notproduct(bool, VerifyCodeCache, false, \
"Verify code cache on memory allocation/deallocation") \ "Verify code cache on memory allocation/deallocation") \
\ \
develop(bool, UseMallocOnly, false, \ develop(bool, UseMallocOnly, false, \
"Use only malloc/free for allocation (no resource area/arena)") \ "Use only malloc/free for allocation (no resource area/arena)") \
\ \
develop(bool, PrintMalloc, false, \
"Print all malloc/free calls") \
\
develop(bool, PrintMallocStatistics, false, \ develop(bool, PrintMallocStatistics, false, \
"Print malloc/free statistics") \ "Print malloc/free statistics") \
\ \
@ -3545,7 +3539,7 @@ public:
"(-1 means no change)") \ "(-1 means no change)") \
range(-1, 127) \ range(-1, 127) \
\ \
product(bool, CompilerThreadHintNoPreempt, true, \ product(bool, CompilerThreadHintNoPreempt, false, \
"(Solaris only) Give compiler threads an extra quanta") \ "(Solaris only) Give compiler threads an extra quanta") \
\ \
product(bool, VMThreadHintNoPreempt, false, \ product(bool, VMThreadHintNoPreempt, false, \

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -23,6 +23,7 @@
*/ */
#include "precompiled.hpp" #include "precompiled.hpp"
#include "classfile/classLoaderData.inline.hpp"
#include "classfile/systemDictionary.hpp" #include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp" #include "code/codeCache.hpp"
#include "gc/shared/collectedHeap.inline.hpp" #include "gc/shared/collectedHeap.inline.hpp"
@ -116,10 +117,10 @@ void MemProfiler::do_trace() {
} }
// Print trace line in log // Print trace line in log
fprintf(_log_fp, "%6.1f,%5d,%5d," UINTX_FORMAT_W(6) "," UINTX_FORMAT_W(6) ",", fprintf(_log_fp, "%6.1f,%5d," SIZE_FORMAT_W(5) "," UINTX_FORMAT_W(6) "," UINTX_FORMAT_W(6) ",",
os::elapsedTime(), os::elapsedTime(),
jtiwh.length(), jtiwh.length(),
InstanceKlass::number_of_instance_classes(), ClassLoaderDataGraph::num_instance_classes(),
Universe::heap()->used() / K, Universe::heap()->used() / K,
Universe::heap()->capacity() / K); Universe::heap()->capacity() / K);
} }

@ -253,10 +253,10 @@ void mutex_init() {
// of some places which hold other locks while releasing a handle, including // of some places which hold other locks while releasing a handle, including
// the Patching_lock, which is of "special" rank. As a temporary workaround, // the Patching_lock, which is of "special" rank. As a temporary workaround,
// lower the JNI oopstorage lock ranks to make them super-special. // lower the JNI oopstorage lock ranks to make them super-special.
def(JNIGlobalAlloc_lock , PaddedMutex , special-1, true, Monitor::_safepoint_check_never); def(JNIGlobalAlloc_lock , PaddedMutex , nonleaf, true, Monitor::_safepoint_check_never);
def(JNIGlobalActive_lock , PaddedMutex , special-2, true, Monitor::_safepoint_check_never); def(JNIGlobalActive_lock , PaddedMutex , nonleaf-1, true, Monitor::_safepoint_check_never);
def(JNIWeakAlloc_lock , PaddedMutex , special-1, true, Monitor::_safepoint_check_never); def(JNIWeakAlloc_lock , PaddedMutex , nonleaf, true, Monitor::_safepoint_check_never);
def(JNIWeakActive_lock , PaddedMutex , special-2, true, Monitor::_safepoint_check_never); def(JNIWeakActive_lock , PaddedMutex , nonleaf-1, true, Monitor::_safepoint_check_never);
def(JNICritical_lock , PaddedMonitor, nonleaf, true, Monitor::_safepoint_check_always); // used for JNI critical regions def(JNICritical_lock , PaddedMonitor, nonleaf, true, Monitor::_safepoint_check_always); // used for JNI critical regions
def(AdapterHandlerLibrary_lock , PaddedMutex , nonleaf, true, Monitor::_safepoint_check_always); def(AdapterHandlerLibrary_lock , PaddedMutex , nonleaf, true, Monitor::_safepoint_check_always);

@ -33,6 +33,7 @@
#include "code/icBuffer.hpp" #include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp" #include "code/vtableStubs.hpp"
#include "gc/shared/vmGCOperations.hpp" #include "gc/shared/vmGCOperations.hpp"
#include "logging/log.hpp"
#include "interpreter/interpreter.hpp" #include "interpreter/interpreter.hpp"
#include "logging/log.hpp" #include "logging/log.hpp"
#include "logging/logStream.hpp" #include "logging/logStream.hpp"
@ -610,9 +611,12 @@ char* os::strdup_check_oom(const char* str, MEMFLAGS flags) {
static void verify_memory(void* ptr) { static void verify_memory(void* ptr) {
GuardedMemory guarded(ptr); GuardedMemory guarded(ptr);
if (!guarded.verify_guards()) { if (!guarded.verify_guards()) {
tty->print_cr("## nof_mallocs = " UINT64_FORMAT ", nof_frees = " UINT64_FORMAT, os::num_mallocs, os::num_frees); LogTarget(Warning, malloc, free) lt;
tty->print_cr("## memory stomp:"); ResourceMark rm;
guarded.print_on(tty); LogStream ls(lt);
ls.print_cr("## nof_mallocs = " UINT64_FORMAT ", nof_frees = " UINT64_FORMAT, os::num_mallocs, os::num_frees);
ls.print_cr("## memory stomp:");
guarded.print_on(&ls);
fatal("memory stomping error"); fatal("memory stomping error");
} }
} }
@ -684,13 +688,10 @@ void* os::malloc(size_t size, MEMFLAGS memflags, const NativeCallStack& stack) {
ptr = guarded.get_user_ptr(); ptr = guarded.get_user_ptr();
#endif #endif
if ((intptr_t)ptr == (intptr_t)MallocCatchPtr) { if ((intptr_t)ptr == (intptr_t)MallocCatchPtr) {
tty->print_cr("os::malloc caught, " SIZE_FORMAT " bytes --> " PTR_FORMAT, size, p2i(ptr)); log_warning(malloc, free)("os::malloc caught, " SIZE_FORMAT " bytes --> " PTR_FORMAT, size, p2i(ptr));
breakpoint(); breakpoint();
} }
debug_only(if (paranoid) verify_memory(ptr)); debug_only(if (paranoid) verify_memory(ptr));
if (PrintMalloc && tty != NULL) {
tty->print_cr("os::malloc " SIZE_FORMAT " bytes --> " PTR_FORMAT, size, p2i(ptr));
}
// we do not track guard memory // we do not track guard memory
return MemTracker::record_malloc((address)ptr, size, memflags, stack, level); return MemTracker::record_malloc((address)ptr, size, memflags, stack, level);
@ -727,7 +728,7 @@ void* os::realloc(void *memblock, size_t size, MEMFLAGS memflags, const NativeCa
return os::malloc(size, memflags, stack); return os::malloc(size, memflags, stack);
} }
if ((intptr_t)memblock == (intptr_t)MallocCatchPtr) { if ((intptr_t)memblock == (intptr_t)MallocCatchPtr) {
tty->print_cr("os::realloc caught " PTR_FORMAT, p2i(memblock)); log_warning(malloc, free)("os::realloc caught " PTR_FORMAT, p2i(memblock));
breakpoint(); breakpoint();
} }
// NMT support // NMT support
@ -735,18 +736,15 @@ void* os::realloc(void *memblock, size_t size, MEMFLAGS memflags, const NativeCa
verify_memory(membase); verify_memory(membase);
// always move the block // always move the block
void* ptr = os::malloc(size, memflags, stack); void* ptr = os::malloc(size, memflags, stack);
if (PrintMalloc && tty != NULL) {
tty->print_cr("os::realloc " SIZE_FORMAT " bytes, " PTR_FORMAT " --> " PTR_FORMAT, size, p2i(memblock), p2i(ptr));
}
// Copy to new memory if malloc didn't fail // Copy to new memory if malloc didn't fail
if ( ptr != NULL ) { if (ptr != NULL ) {
GuardedMemory guarded(MemTracker::malloc_base(memblock)); GuardedMemory guarded(MemTracker::malloc_base(memblock));
// Guard's user data contains NMT header // Guard's user data contains NMT header
size_t memblock_size = guarded.get_user_size() - MemTracker::malloc_header_size(memblock); size_t memblock_size = guarded.get_user_size() - MemTracker::malloc_header_size(memblock);
memcpy(ptr, memblock, MIN2(size, memblock_size)); memcpy(ptr, memblock, MIN2(size, memblock_size));
if (paranoid) verify_memory(MemTracker::malloc_base(ptr)); if (paranoid) verify_memory(MemTracker::malloc_base(ptr));
if ((intptr_t)ptr == (intptr_t)MallocCatchPtr) { if ((intptr_t)ptr == (intptr_t)MallocCatchPtr) {
tty->print_cr("os::realloc caught, " SIZE_FORMAT " bytes --> " PTR_FORMAT, size, p2i(ptr)); log_warning(malloc, free)("os::realloc caught, " SIZE_FORMAT " bytes --> " PTR_FORMAT, size, p2i(ptr));
breakpoint(); breakpoint();
} }
os::free(memblock); os::free(memblock);
@ -761,7 +759,7 @@ void os::free(void *memblock) {
#ifdef ASSERT #ifdef ASSERT
if (memblock == NULL) return; if (memblock == NULL) return;
if ((intptr_t)memblock == (intptr_t)MallocCatchPtr) { if ((intptr_t)memblock == (intptr_t)MallocCatchPtr) {
if (tty != NULL) tty->print_cr("os::free caught " PTR_FORMAT, p2i(memblock)); log_warning(malloc, free)("os::free caught " PTR_FORMAT, p2i(memblock));
breakpoint(); breakpoint();
} }
void* membase = MemTracker::record_free(memblock); void* membase = MemTracker::record_free(memblock);
@ -771,9 +769,6 @@ void os::free(void *memblock) {
size_t size = guarded.get_user_size(); size_t size = guarded.get_user_size();
inc_stat_counter(&free_bytes, size); inc_stat_counter(&free_bytes, size);
membase = guarded.release_for_freeing(); membase = guarded.release_for_freeing();
if (PrintMalloc && tty != NULL) {
fprintf(stderr, "os::free " SIZE_FORMAT " bytes --> " PTR_FORMAT "\n", size, (uintptr_t)membase);
}
::free(membase); ::free(membase);
#else #else
void* membase = MemTracker::record_free(memblock); void* membase = MemTracker::record_free(memblock);
@ -1754,7 +1749,7 @@ void os::commit_memory_or_exit(char* addr, size_t size, size_t alignment_hint,
bool os::uncommit_memory(char* addr, size_t bytes) { bool os::uncommit_memory(char* addr, size_t bytes) {
bool res; bool res;
if (MemTracker::tracking_level() > NMT_minimal) { if (MemTracker::tracking_level() > NMT_minimal) {
Tracker tkr = MemTracker::get_virtual_memory_uncommit_tracker(); Tracker tkr(Tracker::uncommit);
res = pd_uncommit_memory(addr, bytes); res = pd_uncommit_memory(addr, bytes);
if (res) { if (res) {
tkr.record((address)addr, bytes); tkr.record((address)addr, bytes);
@ -1768,7 +1763,7 @@ bool os::uncommit_memory(char* addr, size_t bytes) {
bool os::release_memory(char* addr, size_t bytes) { bool os::release_memory(char* addr, size_t bytes) {
bool res; bool res;
if (MemTracker::tracking_level() > NMT_minimal) { if (MemTracker::tracking_level() > NMT_minimal) {
Tracker tkr = MemTracker::get_virtual_memory_release_tracker(); Tracker tkr(Tracker::release);
res = pd_release_memory(addr, bytes); res = pd_release_memory(addr, bytes);
if (res) { if (res) {
tkr.record((address)addr, bytes); tkr.record((address)addr, bytes);
@ -1805,7 +1800,7 @@ char* os::remap_memory(int fd, const char* file_name, size_t file_offset,
bool os::unmap_memory(char *addr, size_t bytes) { bool os::unmap_memory(char *addr, size_t bytes) {
bool result; bool result;
if (MemTracker::tracking_level() > NMT_minimal) { if (MemTracker::tracking_level() > NMT_minimal) {
Tracker tkr = MemTracker::get_virtual_memory_release_tracker(); Tracker tkr(Tracker::release);
result = pd_unmap_memory(addr, bytes); result = pd_unmap_memory(addr, bytes);
if (result) { if (result) {
tkr.record((address)addr, bytes); tkr.record((address)addr, bytes);

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -23,6 +23,7 @@
*/ */
#include "precompiled.hpp" #include "precompiled.hpp"
#include "classfile/classLoaderData.inline.hpp"
#include "memory/allocation.hpp" #include "memory/allocation.hpp"
#include "runtime/safepoint.hpp" #include "runtime/safepoint.hpp"
#include "runtime/thread.inline.hpp" #include "runtime/thread.inline.hpp"
@ -180,7 +181,8 @@ bool MemBaseline::baseline_allocation_sites() {
bool MemBaseline::baseline(bool summaryOnly) { bool MemBaseline::baseline(bool summaryOnly) {
reset(); reset();
_class_count = InstanceKlass::number_of_instance_classes(); _instance_class_count = ClassLoaderDataGraph::num_instance_classes();
_array_class_count = ClassLoaderDataGraph::num_array_classes();
if (!baseline_summary()) { if (!baseline_summary()) {
return false; return false;

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -67,7 +67,8 @@ class MemBaseline VALUE_OBJ_CLASS_SPEC {
VirtualMemorySnapshot _virtual_memory_snapshot; VirtualMemorySnapshot _virtual_memory_snapshot;
MetaspaceSnapshot _metaspace_snapshot; MetaspaceSnapshot _metaspace_snapshot;
size_t _class_count; size_t _instance_class_count;
size_t _array_class_count;
// Allocation sites information // Allocation sites information
// Malloc allocation sites // Malloc allocation sites
@ -89,7 +90,7 @@ class MemBaseline VALUE_OBJ_CLASS_SPEC {
// create a memory baseline // create a memory baseline
MemBaseline(): MemBaseline():
_baseline_type(Not_baselined), _baseline_type(Not_baselined),
_class_count(0) { _instance_class_count(0), _array_class_count(0) {
} }
bool baseline(bool summaryOnly = true); bool baseline(bool summaryOnly = true);
@ -160,7 +161,17 @@ class MemBaseline VALUE_OBJ_CLASS_SPEC {
size_t class_count() const { size_t class_count() const {
assert(baseline_type() != Not_baselined, "Not yet baselined"); assert(baseline_type() != Not_baselined, "Not yet baselined");
return _class_count; return _instance_class_count + _array_class_count;
}
size_t instance_class_count() const {
assert(baseline_type() != Not_baselined, "Not yet baselined");
return _instance_class_count;
}
size_t array_class_count() const {
assert(baseline_type() != Not_baselined, "Not yet baselined");
return _array_class_count;
} }
size_t thread_count() const { size_t thread_count() const {
@ -172,7 +183,8 @@ class MemBaseline VALUE_OBJ_CLASS_SPEC {
void reset() { void reset() {
_baseline_type = Not_baselined; _baseline_type = Not_baselined;
// _malloc_memory_snapshot and _virtual_memory_snapshot are copied over. // _malloc_memory_snapshot and _virtual_memory_snapshot are copied over.
_class_count = 0; _instance_class_count = 0;
_array_class_count = 0;
_malloc_sites.clear(); _malloc_sites.clear();
_virtual_memory_sites.clear(); _virtual_memory_sites.clear();

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -145,7 +145,10 @@ void MemSummaryReporter::report_summary_of_type(MEMFLAGS flag,
if (flag == mtClass) { if (flag == mtClass) {
// report class count // report class count
out->print_cr("%27s (classes #" SIZE_FORMAT ")", " ", _class_count); out->print_cr("%27s (classes #" SIZE_FORMAT ")",
" ", (_instance_class_count + _array_class_count));
out->print_cr("%27s ( instance classes #" SIZE_FORMAT ", array classes #" SIZE_FORMAT ")",
" ", _instance_class_count, _array_class_count);
} else if (flag == mtThread) { } else if (flag == mtThread) {
// report thread count // report thread count
out->print_cr("%27s (thread #" SIZE_FORMAT ")", " ", _malloc_snapshot->thread_count()); out->print_cr("%27s (thread #" SIZE_FORMAT ")", " ", _malloc_snapshot->thread_count());
@ -459,6 +462,17 @@ void MemSummaryDiffReporter::diff_summary_of_type(MEMFLAGS flag,
out->print(" %+d", (int)(_current_baseline.class_count() - _early_baseline.class_count())); out->print(" %+d", (int)(_current_baseline.class_count() - _early_baseline.class_count()));
} }
out->print_cr(")"); out->print_cr(")");
out->print("%27s ( instance classes #" SIZE_FORMAT, " ", _current_baseline.instance_class_count());
if (_current_baseline.instance_class_count() != _early_baseline.instance_class_count()) {
out->print(" %+d", (int)(_current_baseline.instance_class_count() - _early_baseline.instance_class_count()));
}
out->print(", array classes #" SIZE_FORMAT, _current_baseline.array_class_count());
if (_current_baseline.array_class_count() != _early_baseline.array_class_count()) {
out->print(" %+d", (int)(_current_baseline.array_class_count() - _early_baseline.array_class_count()));
}
out->print_cr(")");
} else if (flag == mtThread) { } else if (flag == mtThread) {
// report thread count // report thread count
out->print("%27s (thread #" SIZE_FORMAT "", " ", _current_baseline.thread_count()); out->print("%27s (thread #" SIZE_FORMAT "", " ", _current_baseline.thread_count());

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -94,7 +94,8 @@ class MemSummaryReporter : public MemReporterBase {
private: private:
MallocMemorySnapshot* _malloc_snapshot; MallocMemorySnapshot* _malloc_snapshot;
VirtualMemorySnapshot* _vm_snapshot; VirtualMemorySnapshot* _vm_snapshot;
size_t _class_count; size_t _instance_class_count;
size_t _array_class_count;
public: public:
// This constructor is for normal reporting from a recent baseline. // This constructor is for normal reporting from a recent baseline.
@ -102,7 +103,8 @@ class MemSummaryReporter : public MemReporterBase {
size_t scale = K) : MemReporterBase(output, scale), size_t scale = K) : MemReporterBase(output, scale),
_malloc_snapshot(baseline.malloc_memory_snapshot()), _malloc_snapshot(baseline.malloc_memory_snapshot()),
_vm_snapshot(baseline.virtual_memory_snapshot()), _vm_snapshot(baseline.virtual_memory_snapshot()),
_class_count(baseline.class_count()) { } _instance_class_count(baseline.instance_class_count()),
_array_class_count(baseline.array_class_count()) { }
// Generate summary report // Generate summary report

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2013, 2017, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -36,8 +36,14 @@
class Tracker : public StackObj { class Tracker : public StackObj {
public: public:
Tracker() { } enum TrackerType {
void record(address addr, size_t size) { } uncommit,
release
};
Tracker(enum TrackerType type) : _type(type) { }
void record(address addr, size_t size);
private:
enum TrackerType _type;
}; };
class MemTracker : AllStatic { class MemTracker : AllStatic {
@ -63,8 +69,6 @@ class MemTracker : AllStatic {
static inline void record_virtual_memory_reserve_and_commit(void* addr, size_t size, static inline void record_virtual_memory_reserve_and_commit(void* addr, size_t size,
const NativeCallStack& stack, MEMFLAGS flag = mtNone) { } const NativeCallStack& stack, MEMFLAGS flag = mtNone) { }
static inline void record_virtual_memory_commit(void* addr, size_t size, const NativeCallStack& stack) { } static inline void record_virtual_memory_commit(void* addr, size_t size, const NativeCallStack& stack) { }
static inline Tracker get_virtual_memory_uncommit_tracker() { return Tracker(); }
static inline Tracker get_virtual_memory_release_tracker() { return Tracker(); }
static inline void record_virtual_memory_type(void* addr, MEMFLAGS flag) { } static inline void record_virtual_memory_type(void* addr, MEMFLAGS flag) { }
static inline void record_thread_stack(void* addr, size_t size) { } static inline void record_thread_stack(void* addr, size_t size) { }
static inline void release_thread_stack(void* addr, size_t size) { } static inline void release_thread_stack(void* addr, size_t size) { }
@ -227,16 +231,6 @@ class MemTracker : AllStatic {
} }
} }
static inline Tracker get_virtual_memory_uncommit_tracker() {
assert(tracking_level() >= NMT_summary, "Check by caller");
return Tracker(Tracker::uncommit);
}
static inline Tracker get_virtual_memory_release_tracker() {
assert(tracking_level() >= NMT_summary, "Check by caller");
return Tracker(Tracker::release);
}
static inline void record_virtual_memory_type(void* addr, MEMFLAGS flag) { static inline void record_virtual_memory_type(void* addr, MEMFLAGS flag) {
if (tracking_level() < NMT_summary) return; if (tracking_level() < NMT_summary) return;
if (addr != NULL) { if (addr != NULL) {

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -947,18 +947,11 @@ void ostream_exit() {
delete classlist_file; delete classlist_file;
} }
#endif #endif
{ if (tty != defaultStream::instance) {
// we temporaly disable PrintMallocFree here delete tty;
// as otherwise it'll lead to using of almost deleted }
// tty or defaultStream::instance in logging facility if (defaultStream::instance != NULL) {
// of HeapFree(), see 6391258 delete defaultStream::instance;
DEBUG_ONLY(FlagSetting fs(PrintMallocFree, false);)
if (tty != defaultStream::instance) {
delete tty;
}
if (defaultStream::instance != NULL) {
delete defaultStream::instance;
}
} }
tty = NULL; tty = NULL;
xtty = NULL; xtty = NULL;

@ -37,7 +37,7 @@ import static java.util.Objects.requireNonNull;
* unless the argument is specified to be unused or specified to accept a * unless the argument is specified to be unused or specified to accept a
* {@code null} value. * {@code null} value.
* *
* @since 10 * @since 11
*/ */
public final class ConstantBootstraps { public final class ConstantBootstraps {
// implements the upcall from the JVM, MethodHandleNatives.linkDynamicConstant: // implements the upcall from the JVM, MethodHandleNatives.linkDynamicConstant:

@ -143,7 +143,7 @@ final class DataPatchProcessor {
int alignment = data.getAlignment(); int alignment = data.getAlignment();
byte[] value = new byte[size]; byte[] value = new byte[size];
ByteBuffer buffer = ByteBuffer.wrap(value).order(ByteOrder.nativeOrder()); ByteBuffer buffer = ByteBuffer.wrap(value).order(ByteOrder.nativeOrder());
DataSection.emit(buffer, data, p -> { DataSection.emit(buffer, data, (p, c) -> {
}); });
String targetSymbol = "data.M" + methodInfo.getCodeId() + "." + dataOffset; String targetSymbol = "data.M" + methodInfo.getCodeId() + "." + dataOffset;
Symbol relocationSymbol = binaryContainer.getSymbol(targetSymbol); Symbol relocationSymbol = binaryContainer.getSymbol(targetSymbol);

@ -82,6 +82,24 @@ suite = {
"javaCompliance" : "1.8", "javaCompliance" : "1.8",
"workingSets" : "API,SDK", "workingSets" : "API,SDK",
}, },
"org.graalvm.collections" : {
"subDir" : "share/classes",
"sourceDirs" : ["src"],
"checkstyle" : "org.graalvm.word",
"javaCompliance" : "1.8",
"workingSets" : "API,SDK",
},
"org.graalvm.collections.test" : {
"subDir" : "share/classes",
"sourceDirs" : ["src"],
"dependencies" : [
"mx:JUNIT",
"org.graalvm.collections",
],
"checkstyle" : "org.graalvm.word",
"javaCompliance" : "1.8",
"workingSets" : "API,SDK,Test",
},
# ------------- Graal ------------- # ------------- Graal -------------
@ -190,6 +208,9 @@ suite = {
"org.graalvm.util" : { "org.graalvm.util" : {
"subDir" : "share/classes", "subDir" : "share/classes",
"sourceDirs" : ["src"], "sourceDirs" : ["src"],
"dependencies" : [
"org.graalvm.collections",
],
"checkstyle" : "org.graalvm.compiler.graph", "checkstyle" : "org.graalvm.compiler.graph",
"javaCompliance" : "1.8", "javaCompliance" : "1.8",
"workingSets" : "API,Graal", "workingSets" : "API,Graal",
@ -201,6 +222,7 @@ suite = {
"dependencies" : [ "dependencies" : [
"mx:JUNIT", "mx:JUNIT",
"org.graalvm.util", "org.graalvm.util",
"org.graalvm.compiler.core.test",
], ],
"checkstyle" : "org.graalvm.compiler.graph", "checkstyle" : "org.graalvm.compiler.graph",
"javaCompliance" : "1.8", "javaCompliance" : "1.8",
@ -970,10 +992,11 @@ suite = {
"workingSets" : "Graal,SPARC", "workingSets" : "Graal,SPARC",
}, },
"org.graalvm.compiler.core.sparc.test" : { "org.graalvm.compiler.hotspot.sparc.test" : {
"subDir" : "share/classes", "subDir" : "share/classes",
"sourceDirs" : ["src"], "sourceDirs" : ["src"],
"dependencies" : [ "dependencies" : [
"org.graalvm.compiler.hotspot",
"org.graalvm.compiler.lir.jtt", "org.graalvm.compiler.lir.jtt",
"JVMCI_HOTSPOT" "JVMCI_HOTSPOT"
], ],
@ -1007,6 +1030,7 @@ suite = {
"subDir" : "share/classes", "subDir" : "share/classes",
"sourceDirs" : ["src"], "sourceDirs" : ["src"],
"dependencies" : [ "dependencies" : [
"org.graalvm.collections",
"org.graalvm.compiler.debug", "org.graalvm.compiler.debug",
"org.graalvm.word", "org.graalvm.word",
], ],
@ -1037,7 +1061,6 @@ suite = {
"sourceDirs" : ["src"], "sourceDirs" : ["src"],
"dependencies" : [ "dependencies" : [
"org.graalvm.compiler.debug", "org.graalvm.compiler.debug",
"org.graalvm.util",
"mx:JUNIT", "mx:JUNIT",
], ],
"checkstyle" : "org.graalvm.compiler.graph", "checkstyle" : "org.graalvm.compiler.graph",
@ -1225,11 +1248,11 @@ suite = {
"org.graalvm.compiler.asm.amd64.test", "org.graalvm.compiler.asm.amd64.test",
"org.graalvm.compiler.core.aarch64.test", "org.graalvm.compiler.core.aarch64.test",
"org.graalvm.compiler.core.amd64.test", "org.graalvm.compiler.core.amd64.test",
"org.graalvm.compiler.core.sparc.test",
"org.graalvm.compiler.debug.test", "org.graalvm.compiler.debug.test",
"org.graalvm.compiler.hotspot.aarch64.test", "org.graalvm.compiler.hotspot.aarch64.test",
"org.graalvm.compiler.hotspot.amd64.test", "org.graalvm.compiler.hotspot.amd64.test",
"org.graalvm.compiler.hotspot.lir.test", "org.graalvm.compiler.hotspot.lir.test",
"org.graalvm.compiler.hotspot.sparc.test",
"org.graalvm.compiler.options.test", "org.graalvm.compiler.options.test",
"org.graalvm.compiler.jtt", "org.graalvm.compiler.jtt",
"org.graalvm.compiler.lir.jtt", "org.graalvm.compiler.lir.jtt",

@ -0,0 +1,126 @@
/*
* Copyright (c) 2017, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package org.graalvm.collections.test;
import java.util.Arrays;
import java.util.Iterator;
import org.graalvm.collections.EconomicMap;
import org.graalvm.collections.EconomicSet;
import org.graalvm.collections.Equivalence;
import org.graalvm.collections.UnmodifiableEconomicSet;
import org.junit.Assert;
import org.junit.Test;
/**
 * Tests exercising implementation details of the economic map/set classes:
 * {@code null}-key rejection, construction from foreign set implementations,
 * and custom {@link Equivalence} strategies.
 */
public class EconomicMapImplTest {

    /** Removing a {@code null} key must be rejected. */
    @Test(expected = UnsupportedOperationException.class)
    public void testRemoveNull() {
        EconomicMap<Integer, Integer> map = EconomicMap.create(10);
        map.removeKey(null);
    }

    /**
     * A set can be initialized from any {@link UnmodifiableEconomicSet},
     * including a hand-rolled one that is not an {@code EconomicMapImpl}.
     */
    @Test
    public void testInitFromHashSet() {
        UnmodifiableEconomicSet<Integer> set = new UnmodifiableEconomicSet<Integer>() {

            @Override
            public boolean contains(Integer element) {
                return element == 0;
            }

            @Override
            public int size() {
                return 1;
            }

            @Override
            public boolean isEmpty() {
                return false;
            }

            @Override
            public Iterator<Integer> iterator() {
                return new Iterator<Integer>() {

                    // Yields exactly one element (1), then is exhausted.
                    private boolean visited = false;

                    @Override
                    public boolean hasNext() {
                        return !visited;
                    }

                    @Override
                    public Integer next() {
                        if (visited) {
                            return null;
                        } else {
                            visited = true;
                            return 1;
                        }
                    }
                };
            }
        };

        EconomicSet<Integer> newSet = EconomicSet.create(Equivalence.DEFAULT, set);
        // Expected value first so a failure message reads correctly.
        Assert.assertEquals(1, newSet.size());
    }

    /**
     * Copy construction with IDENTITY equivalence preserves all elements,
     * and removal still works on the copy.
     */
    @Test
    public void testCopyHash() {
        EconomicSet<Integer> set = EconomicSet.create(Equivalence.IDENTITY);
        set.addAll(Arrays.asList(0, 1, 2, 3, 4, 5, 6, 7, 8, 9));
        EconomicSet<Integer> newSet = EconomicSet.create(Equivalence.IDENTITY, set);
        Assert.assertEquals(10, newSet.size());
        newSet.remove(8);
        newSet.remove(9);
        Assert.assertEquals(8, newSet.size());
    }

    /**
     * With an equivalence that never reports two objects equal, adding a value
     * that is already "present" must still succeed.
     */
    @Test
    public void testNewEquivalence() {
        EconomicSet<Integer> set = EconomicSet.create(new Equivalence() {
            @Override
            public boolean equals(Object a, Object b) {
                return false;
            }

            @Override
            public int hashCode(Object o) {
                return 0;
            }
        });
        set.addAll(Arrays.asList(0, 1, 2, 3, 4, 5, 6, 7, 8, 9));
        // Integer.valueOf avoids the deprecated Integer(int) constructor; the
        // custom equivalence never matches, so the add must succeed regardless
        // of which Integer instance is used.
        Assert.assertTrue(set.add(Integer.valueOf(0)));
    }

    /** Putting a {@code null} key must be rejected. */
    @Test(expected = UnsupportedOperationException.class)
    public void testMapPutNull() {
        EconomicMap<Integer, Integer> map = EconomicMap.create();
        map.put(null, null);
    }
}

@ -0,0 +1,231 @@
/*
* Copyright (c) 2017, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package org.graalvm.collections.test;
import java.util.Arrays;
import java.util.Collection;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.Objects;
import java.util.Random;
import org.graalvm.collections.EconomicMap;
import org.graalvm.collections.Equivalence;
import org.graalvm.collections.MapCursor;
import org.graalvm.collections.UnmodifiableMapCursor;
import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameter;
import org.junit.runners.Parameterized.Parameters;
/**
 * Differential stress test: runs identical sequences of randomized operations
 * against a map under test and a reference map, asserting that every operation
 * returns the same result on both. Parameterized over several map/equivalence
 * combinations, including a JDK-backed {@code wrapMap} reference.
 */
@RunWith(Parameterized.class)
public class EconomicMapLargeTest {

    // Injected per Parameterized run: the implementation under test, the
    // reference implementation it is compared against, and a display name.
    @Parameter(value = 0) public EconomicMap<Object, Object> testMap;
    @Parameter(value = 1) public EconomicMap<Object, Object> referenceMap;
    @Parameter(value = 2) public String name;

    /**
     * The (testMap, referenceMap, name) combinations to run. Note the map
     * instances are shared across test methods of one run, which is why each
     * test method starts by clearing both maps.
     */
    @Parameters(name = "{2}")
    public static Collection<Object[]> data() {
        return Arrays.asList(new Object[]{EconomicMap.create(Equivalence.DEFAULT), EconomicMap.create(Equivalence.DEFAULT), "EconomicMap"},
                        new Object[]{EconomicMap.create(Equivalence.IDENTITY), EconomicMap.create(Equivalence.IDENTITY), "EconomicMap(IDENTITY)"},
                        new Object[]{EconomicMap.create(Equivalence.IDENTITY_WITH_SYSTEM_HASHCODE), EconomicMap.create(Equivalence.IDENTITY_WITH_SYSTEM_HASHCODE),
                                        "EconomicMap(IDENTITY_WITH_SYSTEM_HASHCODE)"},
                        new Object[]{EconomicMap.create(Equivalence.DEFAULT), EconomicMap.wrapMap(new LinkedHashMap<>()), "EconomicMap<->wrapMap"},
                        new Object[]{EconomicMap.wrapMap(new LinkedHashMap<>()), EconomicMap.wrapMap(new LinkedHashMap<>()), "wrapMap"});
    }

    /**
     * Produces one "rarity" value per action: the action later fires when
     * {@code random.nextInt(range) == 0}, so Integer.MAX_VALUE effectively
     * disables an action for this round, 100 makes it rare, and 1..9 make it
     * frequent.
     */
    private static int[] createRandomRange(Random random, int count) {
        int[] result = new int[count];
        for (int i = 0; i < count; ++i) {
            int range = random.nextInt(14);
            if (range == 0 || range > 10) {
                range = Integer.MAX_VALUE;
            } else if (range == 10) {
                range = 100;
            }
            result[i] = range;
        }
        return result;
    }

    /**
     * Key type with a deliberately constant hash code, forcing every instance
     * into the same bucket to stress collision handling.
     */
    private static final class BadHashClass {
        private int value;

        BadHashClass(int randomInt) {
            this.value = randomInt;
        }

        @Override
        public int hashCode() {
            return 0;
        }

        @Override
        public boolean equals(Object other) {
            if (other instanceof BadHashClass) {
                BadHashClass badHashClass = (BadHashClass) other;
                return badHashClass.value == value;
            }
            return false;
        }
    }

    // One randomized operation applied to a map; the returned Object is
    // compared between test and reference map.
    interface MapAction {
        Object perform(EconomicMap<Object, Object> map, int randomInt);
    }

    // A key that is repeatedly overwritten by one of the ACTIONS below.
    static final Object EXISTING_VALUE = new Object();

    // Growth-only workload used by testVeryLarge (no removals/clears).
    static final MapAction[] INCREASE_ACTIONS = new MapAction[]{
                    (map, randomInt) -> map.put(randomInt, "value"),
                    (map, randomInt) -> map.get(randomInt)
    };

    // Full workload: insert, remove, null values, clear, replaceAll, and
    // collision-heavy BadHashClass keys.
    static final MapAction[] ACTIONS = new MapAction[]{
                    (map, randomInt) -> map.removeKey(randomInt),
                    (map, randomInt) -> map.put(randomInt, "value"),
                    (map, randomInt) -> map.put(randomInt, null),
                    (map, randomInt) -> map.put(EXISTING_VALUE, randomInt),
                    (map, randomInt) -> {
                        if (randomInt == 0) {
                            map.clear();
                        }
                        return map.isEmpty();
                    },
                    (map, randomInt) -> map.containsKey(randomInt),
                    (map, randomInt) -> map.get(randomInt),
                    (map, randomInt) -> map.put(new BadHashClass(randomInt), "unique"),
                    (map, randomInt) -> {
                        if (randomInt == 0) {
                            map.replaceAll((key, value) -> Objects.toString(value) + "!");
                        }
                        return map.isEmpty();
                    }
    };

    /**
     * Runs a large growth-only workload (200k iterations over a 10M key
     * space) and checks that the test map and the reference map agree on
     * every operation's result.
     */
    @Test
    public void testVeryLarge() {
        testMap.clear();
        referenceMap.clear();
        // Fixed seed keeps the run deterministic.
        Random random = new Random(0);
        for (int i = 0; i < 200000; ++i) {
            for (int j = 0; j < INCREASE_ACTIONS.length; ++j) {
                int nextInt = random.nextInt(10000000);
                MapAction action = INCREASE_ACTIONS[j];
                Object result = action.perform(testMap, nextInt);
                Object referenceResult = action.perform(referenceMap, nextInt);
                Assert.assertEquals(result, referenceResult);
            }
        }
    }

    /**
     * Tests a sequence of random operations on the map.
     */
    @Test
    public void testAddRemove() {
        testMap.clear();
        referenceMap.clear();
        for (int seed = 0; seed < 10; ++seed) {
            Random random = new Random(seed);
            // Per-action firing probability for this seed; see createRandomRange.
            int[] ranges = createRandomRange(random, ACTIONS.length);
            int value = random.nextInt(10000);
            for (int i = 0; i < value; ++i) {
                for (int j = 0; j < ACTIONS.length; ++j) {
                    if (random.nextInt(ranges[j]) == 0) {
                        int nextInt = random.nextInt(100);
                        MapAction action = ACTIONS[j];
                        Object result = action.perform(testMap, nextInt);
                        Object referenceResult = action.perform(referenceMap, nextInt);
                        Assert.assertEquals(result, referenceResult);
                        // NOTE(review): j only ranges over ACTIONS.length (< 100),
                        // so this guard is equivalent to j == 0; possibly
                        // i % 100 was intended — confirm before changing.
                        if (j % 100 == 0) {
                            checkEquality(testMap, referenceMap);
                        }
                    }
                }
                if (random.nextInt(20) == 0) {
                    removeElement(random.nextInt(100), testMap, referenceMap);
                }
            }
        }
    }

    /**
     * Walks both maps' cursors in lockstep, asserting identical iteration
     * order, and removes the entry at position {@code index} (if reached)
     * from both via their cursors.
     */
    private static void removeElement(int index, EconomicMap<?, ?> map, EconomicMap<?, ?> referenceMap) {
        Assert.assertEquals(referenceMap.size(), map.size());
        MapCursor<?, ?> cursor = map.getEntries();
        MapCursor<?, ?> referenceCursor = referenceMap.getEntries();
        int z = 0;
        while (cursor.advance()) {
            Assert.assertTrue(referenceCursor.advance());
            Assert.assertEquals(referenceCursor.getKey(), cursor.getKey());
            Assert.assertEquals(referenceCursor.getValue(), cursor.getValue());
            if (index == z) {
                cursor.remove();
                referenceCursor.remove();
            }
            ++z;
        }
        Assert.assertFalse(referenceCursor.advance());
    }

    /**
     * Asserts both maps have identical size, entries, key order, and value
     * order (iteration order included).
     */
    private static void checkEquality(EconomicMap<?, ?> map, EconomicMap<?, ?> referenceMap) {
        Assert.assertEquals(referenceMap.size(), map.size());

        // Check entries.
        UnmodifiableMapCursor<?, ?> cursor = map.getEntries();
        UnmodifiableMapCursor<?, ?> referenceCursor = referenceMap.getEntries();
        while (cursor.advance()) {
            Assert.assertTrue(referenceCursor.advance());
            Assert.assertEquals(referenceCursor.getKey(), cursor.getKey());
            Assert.assertEquals(referenceCursor.getValue(), cursor.getValue());
        }

        // Check keys.
        Iterator<?> iterator = map.getKeys().iterator();
        Iterator<?> referenceIterator = referenceMap.getKeys().iterator();
        while (iterator.hasNext()) {
            Assert.assertTrue(referenceIterator.hasNext());
            Assert.assertEquals(iterator.next(), referenceIterator.next());
        }

        // Check values.
        iterator = map.getValues().iterator();
        referenceIterator = referenceMap.getValues().iterator();
        while (iterator.hasNext()) {
            Assert.assertTrue(referenceIterator.hasNext());
            Assert.assertEquals(iterator.next(), referenceIterator.next());
        }
        Assert.assertFalse(referenceIterator.hasNext());
    }
}

@ -0,0 +1,72 @@
/*
* Copyright (c) 2017, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package org.graalvm.collections.test;
import java.util.LinkedHashMap;
import org.graalvm.collections.EconomicMap;
import org.graalvm.collections.UnmodifiableEconomicMap;
import org.junit.Assert;
import org.junit.Test;
/** Basic functional tests for {@link EconomicMap}. */
public class EconomicMapTest {

    /** get-with-default returns the stored value for present keys and the default otherwise. */
    @Test
    public void testMapGetDefault() {
        EconomicMap<Integer, Integer> map = EconomicMap.create();
        map.put(0, 1);
        // Expected value goes first so a failure message reads correctly.
        Assert.assertEquals(Integer.valueOf(1), map.get(0, 2));
        Assert.assertEquals(Integer.valueOf(2), map.get(1, 2));
    }

    /** putAll copies entries from another map and overwrites existing keys. */
    @Test
    public void testMapPutAll() {
        EconomicMap<Integer, Integer> map = EconomicMap.create();
        EconomicMap<Integer, Integer> newMap = EconomicMap.wrapMap(new LinkedHashMap<>());
        newMap.put(1, 1);
        newMap.put(2, 4);
        map.putAll(newMap);
        Assert.assertEquals(2, map.size());

        UnmodifiableEconomicMap<Integer, Integer> unmodifiableEconomicMap = EconomicMap.create(newMap);
        map.removeKey(1);
        map.put(2, 2);
        map.put(3, 9);

        // putAll re-adds key 1 and overwrites key 2 with the value 4 again.
        map.putAll(unmodifiableEconomicMap);
        Assert.assertEquals(3, map.size());
        Assert.assertEquals(Integer.valueOf(4), map.get(2));
    }

    /** toString lists entries in insertion order. */
    @Test
    public void testToString() {
        EconomicMap<Integer, Integer> map = EconomicMap.create();
        map.put(0, 0);
        map.put(1, 1);
        Assert.assertEquals("map(size=2, {(0,0),(1,1)})", map.toString());
    }
}

@ -0,0 +1,148 @@
/*
* Copyright (c) 2017, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package org.graalvm.collections.test;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import org.graalvm.collections.EconomicSet;
import org.graalvm.collections.Equivalence;
import org.junit.Assert;
import org.junit.Test;
/** Functional tests for {@link EconomicSet}. */
public class EconomicSetTest {

    /** add/remove/clear semantics: duplicates rejected, removing absent elements is a no-op. */
    @Test
    public void testUtilities() {
        EconomicSet<Integer> set = EconomicSet.create(0);
        set.add(0);
        Assert.assertTrue(set.add(1));
        // Expected value goes first so a failure message reads correctly.
        Assert.assertEquals(2, set.size());
        Assert.assertFalse(set.add(1));
        Assert.assertEquals(2, set.size());
        set.remove(1);
        Assert.assertEquals(1, set.size());
        set.remove(2);
        Assert.assertEquals(1, set.size());
        Assert.assertTrue(set.add(1));
        set.clear();
        Assert.assertEquals(0, set.size());
    }

    /** addAll deduplicates both within the argument and against existing elements. */
    @Test
    public void testAddAll() {
        EconomicSet<Integer> set = EconomicSet.create();
        set.addAll(Arrays.asList(0, 1, 0));
        Assert.assertEquals(2, set.size());

        EconomicSet<Integer> newSet = EconomicSet.create();
        newSet.addAll(Arrays.asList(1, 2));
        Assert.assertEquals(2, newSet.size());
        newSet.addAll(set);
        Assert.assertEquals(3, newSet.size());
    }

    /** removeAll ignores elements that are not present; a set can be drained by removing a copy of itself. */
    @Test
    public void testRemoveAll() {
        EconomicSet<Integer> set = EconomicSet.create();
        set.addAll(Arrays.asList(0, 1));
        set.removeAll(Arrays.asList(1, 2));
        Assert.assertEquals(1, set.size());

        set.removeAll(EconomicSet.create(set));
        Assert.assertEquals(0, set.size());
    }

    /** retainAll keeps only the intersection. */
    @Test
    public void testRetainAll() {
        EconomicSet<Integer> set = EconomicSet.create();
        set.addAll(Arrays.asList(0, 1, 2));
        EconomicSet<Integer> newSet = EconomicSet.create();
        newSet.addAll(Arrays.asList(2, 3));
        set.retainAll(newSet);
        Assert.assertEquals(1, set.size());
    }

    /** toArray preserves insertion order when the target array has matching length. */
    @Test
    public void testToArray() {
        EconomicSet<Integer> set = EconomicSet.create();
        set.addAll(Arrays.asList(0, 1));
        Assert.assertArrayEquals(new Integer[]{0, 1}, set.toArray(new Integer[2]));
    }

    /** toString lists elements in insertion order. */
    @Test
    public void testToString() {
        EconomicSet<Integer> set = EconomicSet.create();
        set.addAll(Arrays.asList(0, 1));
        Assert.assertEquals("set(size=2, {0,1})", set.toString());
    }

    /** toArray with a target array larger than the set is rejected. */
    @Test(expected = UnsupportedOperationException.class)
    public void testToUnalignedArray() {
        Assert.assertArrayEquals(EconomicSet.create().toArray(new Integer[2]), new Integer[0]);
    }

    /**
     * Removing elements via the iterator must not disturb iteration order:
     * the sequence seen while removing matches the initial sequence, and only
     * the single non-removed element survives.
     */
    @Test
    public void testSetRemoval() {
        ArrayList<Integer> initialList = new ArrayList<>();
        ArrayList<Integer> removalList = new ArrayList<>();
        ArrayList<Integer> finalList = new ArrayList<>();
        EconomicSet<Integer> set = EconomicSet.create(Equivalence.IDENTITY);
        set.add(1);
        set.add(2);
        set.add(3);
        set.add(4);
        set.add(5);
        set.add(6);
        set.add(7);
        set.add(8);
        set.add(9);
        Iterator<Integer> i1 = set.iterator();
        while (i1.hasNext()) {
            initialList.add(i1.next());
        }
        int size = 0;
        Iterator<Integer> i2 = set.iterator();
        while (i2.hasNext()) {
            Integer elem = i2.next();
            // Remove the first eight elements through the iterator, leaving 9.
            if (size++ < 8) {
                i2.remove();
            }
            removalList.add(elem);
        }
        Iterator<Integer> i3 = set.iterator();
        while (i3.hasNext()) {
            finalList.add(i3.next());
        }
        Assert.assertEquals(initialList, removalList);
        Assert.assertEquals(1, finalList.size());
        // Integer.valueOf avoids the deprecated Integer(int) constructor.
        Assert.assertEquals(Integer.valueOf(9), finalList.get(0));
    }
}

@ -0,0 +1,60 @@
/*
* Copyright (c) 2017, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package org.graalvm.collections.test;
import org.graalvm.collections.Equivalence;
import org.junit.Assert;
import org.junit.Test;
/**
 * Tests for the predefined {@link Equivalence} strategies: DEFAULT (content
 * equality), IDENTITY (reference equality with content hash), and
 * IDENTITY_WITH_SYSTEM_HASHCODE (reference equality with identity hash).
 */
public class EquivalenceTest {

    private static final String TEST_STRING = "Graal";
    private static final String TEST_STRING2 = "Graal2";

    /** Returns a String instance distinct from {@code s} but with equal contents. */
    private static String distinctCopy(String s) {
        return new String(s);
    }

    /** DEFAULT treats content-equal but distinct instances as equal. */
    @Test
    public void testDEFAULT() {
        Equivalence eq = Equivalence.DEFAULT;
        Assert.assertTrue(eq.equals(TEST_STRING, distinctCopy(TEST_STRING)));
        Assert.assertEquals(eq.hashCode(TEST_STRING), eq.hashCode(distinctCopy(TEST_STRING)));
        Assert.assertFalse(eq.equals(TEST_STRING, TEST_STRING2));
        Assert.assertNotEquals(eq.hashCode(TEST_STRING), eq.hashCode(TEST_STRING2));
    }

    /** IDENTITY distinguishes distinct instances while still hashing by content. */
    @Test
    public void testIDENTITY() {
        Equivalence eq = Equivalence.IDENTITY;
        Assert.assertFalse(eq.equals(TEST_STRING, distinctCopy(TEST_STRING)));
        Assert.assertEquals(eq.hashCode(TEST_STRING), eq.hashCode(distinctCopy(TEST_STRING)));
        Assert.assertFalse(eq.equals(TEST_STRING, TEST_STRING2));
        Assert.assertNotEquals(eq.hashCode(TEST_STRING), eq.hashCode(TEST_STRING2));
    }

    /** IDENTITY_WITH_SYSTEM_HASHCODE distinguishes distinct instances even in the hash. */
    @Test
    public void testIDENTITYWITHSYSTEMHASHCODE() {
        Equivalence eq = Equivalence.IDENTITY_WITH_SYSTEM_HASHCODE;
        Assert.assertFalse(eq.equals(TEST_STRING, distinctCopy(TEST_STRING)));
        Assert.assertNotEquals(eq.hashCode(TEST_STRING), eq.hashCode(distinctCopy(TEST_STRING)));
        Assert.assertFalse(eq.equals(TEST_STRING, TEST_STRING2));
        Assert.assertNotEquals(eq.hashCode(TEST_STRING), eq.hashCode(TEST_STRING2));
    }
}

@ -0,0 +1,52 @@
/*
* Copyright (c) 2017, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package org.graalvm.collections.test;
import org.graalvm.collections.Pair;
import org.junit.Assert;
import org.junit.Test;
/** Tests for {@link Pair} factory methods and utility overrides. */
public class PairTest {

    /** The factories normalize null components: any all-null pair is the empty pair. */
    @Test
    public void testCreate() {
        // Expected value goes first so a failure message reads correctly.
        Assert.assertEquals(Pair.empty(), Pair.create(null, null));
        // Deliberately exercises Pair.equals(null), which must return false.
        Assert.assertNotEquals(Pair.create(null, null), null);
        Assert.assertEquals(Pair.empty(), Pair.createLeft(null));
        Assert.assertEquals(Pair.empty(), Pair.createRight(null));
        Assert.assertEquals(Pair.createLeft(1), Pair.create(1, null));
        Assert.assertEquals(Pair.createRight(1), Pair.create(null, 1));
    }

    /** Accessors, toString, and hashCode behave consistently with the factories. */
    @Test
    public void testUtilities() {
        Pair<Integer, Integer> pair = Pair.create(1, null);
        Assert.assertEquals(Integer.valueOf(1), pair.getLeft());
        // assertNull states the intent more directly than assertEquals(..., null).
        Assert.assertNull(pair.getRight());
        Assert.assertEquals("(1, null)", pair.toString());
        Assert.assertEquals(Pair.createLeft(1).hashCode(), pair.hashCode());
    }
}

@ -4,7 +4,9 @@
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as * under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. * published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
* *
* This code is distributed in the hope that it will be useful, but WITHOUT * This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
@ -20,21 +22,34 @@
* or visit www.oracle.com if you need additional information or have any * or visit www.oracle.com if you need additional information or have any
* questions. * questions.
*/ */
package org.graalvm.util; package org.graalvm.collections;
import java.util.Iterator; import java.util.Iterator;
import java.util.Map; import java.util.Map;
import java.util.function.BiFunction; import java.util.function.BiFunction;
import org.graalvm.util.impl.EconomicMapImpl;
/** /**
* Memory efficient map data structure. * Memory efficient map data structure.
*
* @since 1.0
*/ */
public interface EconomicMap<K, V> extends UnmodifiableEconomicMap<K, V> { public interface EconomicMap<K, V> extends UnmodifiableEconomicMap<K, V> {
/**
* Associates {@code value} with {@code key} in this map. If the map previously contained a
* mapping for {@code key}, the old value is replaced by {@code value}.
*
* @return the previous value associated with {@code key}, or {@code null} if there was no
* mapping for {@code key}.
* @since 1.0
*/
V put(K key, V value); V put(K key, V value);
/**
* Copies all of the mappings from {@code other} to this map.
*
* @since 1.0
*/
default void putAll(EconomicMap<K, V> other) { default void putAll(EconomicMap<K, V> other) {
MapCursor<K, V> e = other.getEntries(); MapCursor<K, V> e = other.getEntries();
while (e.advance()) { while (e.advance()) {
@ -42,15 +57,11 @@ public interface EconomicMap<K, V> extends UnmodifiableEconomicMap<K, V> {
} }
} }
void clear(); /**
* Copies all of the mappings from {@code other} to this map.
V removeKey(K key); *
* @since 1.0
@Override */
MapCursor<K, V> getEntries();
void replaceAll(BiFunction<? super K, ? super V, ? extends V> function);
default void putAll(UnmodifiableEconomicMap<? extends K, ? extends V> other) { default void putAll(UnmodifiableEconomicMap<? extends K, ? extends V> other) {
UnmodifiableMapCursor<? extends K, ? extends V> entry = other.getEntries(); UnmodifiableMapCursor<? extends K, ? extends V> entry = other.getEntries();
while (entry.advance()) { while (entry.advance()) {
@ -58,9 +69,45 @@ public interface EconomicMap<K, V> extends UnmodifiableEconomicMap<K, V> {
} }
} }
/**
* Removes all of the mappings from this map. The map will be empty after this call returns.
*
* @since 1.0
*/
void clear();
/**
* Removes the mapping for {@code key} from this map if it is present. The map will not contain
* a mapping for {@code key} once the call returns.
*
* @return the previous value associated with {@code key}, or {@code null} if there was no
* mapping for {@code key}.
* @since 1.0
*/
V removeKey(K key);
/**
* Returns a {@link MapCursor} view of the mappings contained in this map.
*
* @since 1.0
*/
@Override
MapCursor<K, V> getEntries();
/**
* Replaces each entry's value with the result of invoking {@code function} on that entry until
* all entries have been processed or the function throws an exception. Exceptions thrown by the
* function are relayed to the caller.
*
* @since 1.0
*/
void replaceAll(BiFunction<? super K, ? super V, ? extends V> function);
/** /**
* Creates a new map that guarantees insertion order on the key set with the default * Creates a new map that guarantees insertion order on the key set with the default
* {@link Equivalence#DEFAULT} comparison strategy for keys. * {@link Equivalence#DEFAULT} comparison strategy for keys.
*
* @since 1.0
*/ */
static <K, V> EconomicMap<K, V> create() { static <K, V> EconomicMap<K, V> create() {
return EconomicMap.create(Equivalence.DEFAULT); return EconomicMap.create(Equivalence.DEFAULT);
@ -70,6 +117,8 @@ public interface EconomicMap<K, V> extends UnmodifiableEconomicMap<K, V> {
* Creates a new map that guarantees insertion order on the key set with the default * Creates a new map that guarantees insertion order on the key set with the default
* {@link Equivalence#DEFAULT} comparison strategy for keys and initializes with a specified * {@link Equivalence#DEFAULT} comparison strategy for keys and initializes with a specified
* capacity. * capacity.
*
* @since 1.0
*/ */
static <K, V> EconomicMap<K, V> create(int initialCapacity) { static <K, V> EconomicMap<K, V> create(int initialCapacity) {
return EconomicMap.create(Equivalence.DEFAULT, initialCapacity); return EconomicMap.create(Equivalence.DEFAULT, initialCapacity);
@ -78,15 +127,19 @@ public interface EconomicMap<K, V> extends UnmodifiableEconomicMap<K, V> {
/** /**
* Creates a new map that guarantees insertion order on the key set with the given comparison * Creates a new map that guarantees insertion order on the key set with the given comparison
* strategy for keys. * strategy for keys.
*
* @since 1.0
*/ */
static <K, V> EconomicMap<K, V> create(Equivalence strategy) { static <K, V> EconomicMap<K, V> create(Equivalence strategy) {
return EconomicMapImpl.create(strategy); return EconomicMapImpl.create(strategy, false);
} }
/** /**
* Creates a new map that guarantees insertion order on the key set with the default * Creates a new map that guarantees insertion order on the key set with the default
* {@link Equivalence#DEFAULT} comparison strategy for keys and copies all elements from the * {@link Equivalence#DEFAULT} comparison strategy for keys and copies all elements from the
* specified existing map. * specified existing map.
*
* @since 1.0
*/ */
static <K, V> EconomicMap<K, V> create(UnmodifiableEconomicMap<K, V> m) { static <K, V> EconomicMap<K, V> create(UnmodifiableEconomicMap<K, V> m) {
return EconomicMap.create(Equivalence.DEFAULT, m); return EconomicMap.create(Equivalence.DEFAULT, m);
@ -95,21 +148,27 @@ public interface EconomicMap<K, V> extends UnmodifiableEconomicMap<K, V> {
/** /**
* Creates a new map that guarantees insertion order on the key set and copies all elements from * Creates a new map that guarantees insertion order on the key set and copies all elements from
* the specified existing map. * the specified existing map.
*
* @since 1.0
*/ */
static <K, V> EconomicMap<K, V> create(Equivalence strategy, UnmodifiableEconomicMap<K, V> m) { static <K, V> EconomicMap<K, V> create(Equivalence strategy, UnmodifiableEconomicMap<K, V> m) {
return EconomicMapImpl.create(strategy, m); return EconomicMapImpl.create(strategy, m, false);
} }
/** /**
* Creates a new map that guarantees insertion order on the key set and initializes with a * Creates a new map that guarantees insertion order on the key set and initializes with a
* specified capacity. * specified capacity.
*
* @since 1.0
*/ */
static <K, V> EconomicMap<K, V> create(Equivalence strategy, int initialCapacity) { static <K, V> EconomicMap<K, V> create(Equivalence strategy, int initialCapacity) {
return EconomicMapImpl.create(strategy, initialCapacity); return EconomicMapImpl.create(strategy, initialCapacity, false);
} }
/** /**
* Wraps an existing {@link java.util.Map} as an {@link org.graalvm.util.EconomicMap}. * Wraps an existing {@link Map} as an {@link EconomicMap}.
*
* @since 1.0
*/ */
static <K, V> EconomicMap<K, V> wrapMap(Map<K, V> map) { static <K, V> EconomicMap<K, V> wrapMap(Map<K, V> map) {
return new EconomicMap<K, V>() { return new EconomicMap<K, V>() {

@ -4,7 +4,9 @@
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as * under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. * published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
* *
* This code is distributed in the hope that it will be useful, but WITHOUT * This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
@ -20,19 +22,12 @@
* or visit www.oracle.com if you need additional information or have any * or visit www.oracle.com if you need additional information or have any
* questions. * questions.
*/ */
package org.graalvm.util.impl; package org.graalvm.collections;
import java.util.Iterator; import java.util.Iterator;
import java.util.Objects; import java.util.Objects;
import java.util.function.BiFunction; import java.util.function.BiFunction;
import org.graalvm.util.Equivalence;
import org.graalvm.util.EconomicMap;
import org.graalvm.util.EconomicSet;
import org.graalvm.util.UnmodifiableEconomicMap;
import org.graalvm.util.UnmodifiableEconomicSet;
import org.graalvm.util.MapCursor;
/** /**
* Implementation of a map with a memory-efficient structure that always preserves insertion order * Implementation of a map with a memory-efficient structure that always preserves insertion order
* when iterating over keys. Particularly efficient when number of entries is 0 or smaller equal * when iterating over keys. Particularly efficient when number of entries is 0 or smaller equal
@ -58,7 +53,7 @@ import org.graalvm.util.MapCursor;
* map falls below a specific threshold, the map will be compressed via the * map falls below a specific threshold, the map will be compressed via the
* {@link #maybeCompress(int)} method. * {@link #maybeCompress(int)} method.
*/ */
public final class EconomicMapImpl<K, V> implements EconomicMap<K, V>, EconomicSet<K> { final class EconomicMapImpl<K, V> implements EconomicMap<K, V>, EconomicSet<K> {
/** /**
* Initial number of key/value pair entries that is allocated in the first entries array. * Initial number of key/value pair entries that is allocated in the first entries array.
@ -135,45 +130,46 @@ public final class EconomicMapImpl<K, V> implements EconomicMap<K, V>, EconomicS
return map; return map;
} }
public static <K, V> EconomicMapImpl<K, V> create(Equivalence strategy) { public static <K, V> EconomicMapImpl<K, V> create(Equivalence strategy, boolean isSet) {
return intercept(new EconomicMapImpl<>(strategy)); return intercept(new EconomicMapImpl<>(strategy, isSet));
} }
public static <K, V> EconomicMapImpl<K, V> create(Equivalence strategy, int initialCapacity) { public static <K, V> EconomicMapImpl<K, V> create(Equivalence strategy, int initialCapacity, boolean isSet) {
return intercept(new EconomicMapImpl<>(strategy, initialCapacity)); return intercept(new EconomicMapImpl<>(strategy, initialCapacity, isSet));
} }
public static <K, V> EconomicMapImpl<K, V> create(Equivalence strategy, UnmodifiableEconomicMap<K, V> other) { public static <K, V> EconomicMapImpl<K, V> create(Equivalence strategy, UnmodifiableEconomicMap<K, V> other, boolean isSet) {
return intercept(new EconomicMapImpl<>(strategy, other)); return intercept(new EconomicMapImpl<>(strategy, other, isSet));
} }
public static <K, V> EconomicMapImpl<K, V> create(Equivalence strategy, UnmodifiableEconomicSet<K> other) { public static <K, V> EconomicMapImpl<K, V> create(Equivalence strategy, UnmodifiableEconomicSet<K> other, boolean isSet) {
return intercept(new EconomicMapImpl<>(strategy, other)); return intercept(new EconomicMapImpl<>(strategy, other, isSet));
} }
private EconomicMapImpl(Equivalence strategy) { private EconomicMapImpl(Equivalence strategy, boolean isSet) {
if (strategy == Equivalence.IDENTITY) { if (strategy == Equivalence.IDENTITY) {
this.strategy = null; this.strategy = null;
} else { } else {
this.strategy = strategy; this.strategy = strategy;
} }
this.isSet = isSet;
} }
private EconomicMapImpl(Equivalence strategy, int initialCapacity) { private EconomicMapImpl(Equivalence strategy, int initialCapacity, boolean isSet) {
this(strategy); this(strategy, isSet);
init(initialCapacity); init(initialCapacity);
} }
private EconomicMapImpl(Equivalence strategy, UnmodifiableEconomicMap<K, V> other) { private EconomicMapImpl(Equivalence strategy, UnmodifiableEconomicMap<K, V> other, boolean isSet) {
this(strategy); this(strategy, isSet);
if (!initFrom(other)) { if (!initFrom(other)) {
init(other.size()); init(other.size());
putAll(other); putAll(other);
} }
} }
private EconomicMapImpl(Equivalence strategy, UnmodifiableEconomicSet<K> other) { private EconomicMapImpl(Equivalence strategy, UnmodifiableEconomicSet<K> other, boolean isSet) {
this(strategy); this(strategy, isSet);
if (!initFrom(other)) { if (!initFrom(other)) {
init(other.size()); init(other.size());
addAll(other); addAll(other);
@ -807,13 +803,22 @@ public final class EconomicMapImpl<K, V> implements EconomicMap<K, V>, EconomicS
return object; return object;
} }
private final boolean isSet;
@Override @Override
public String toString() { public String toString() {
StringBuilder builder = new StringBuilder(); StringBuilder builder = new StringBuilder();
builder.append("map(size=").append(size()).append(", {"); builder.append(isSet ? "set(size=" : "map(size=").append(size()).append(", {");
String sep = "";
MapCursor<K, V> cursor = getEntries(); MapCursor<K, V> cursor = getEntries();
while (cursor.advance()) { while (cursor.advance()) {
builder.append("(").append(cursor.getKey()).append(",").append(cursor.getValue()).append("),"); builder.append(sep);
if (isSet) {
builder.append(cursor.getKey());
} else {
builder.append("(").append(cursor.getKey()).append(",").append(cursor.getValue()).append(")");
}
sep = ",";
} }
builder.append("})"); builder.append("})");
return builder.toString(); return builder.toString();

@ -4,7 +4,9 @@
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as * under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. * published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
* *
* This code is distributed in the hope that it will be useful, but WITHOUT * This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
@ -20,56 +22,109 @@
* or visit www.oracle.com if you need additional information or have any * or visit www.oracle.com if you need additional information or have any
* questions. * questions.
*/ */
package org.graalvm.util; package org.graalvm.collections;
import java.util.Iterator; import java.util.Iterator;
import org.graalvm.util.impl.EconomicMapImpl;
/** /**
* Memory efficient set data structure. * Memory efficient set data structure.
*
* @since 1.0
*/ */
public interface EconomicSet<E> extends UnmodifiableEconomicSet<E> { public interface EconomicSet<E> extends UnmodifiableEconomicSet<E> {
/**
* Adds {@code element} to this set if it is not already present.
*
* @return {@code true} if this set did not already contain {@code element}.
* @since 1.0
*/
boolean add(E element); boolean add(E element);
/**
* Removes {@code element} from this set if it is present. This set will not contain
* {@code element} once the call returns.
*
* @since 1.0
*/
void remove(E element); void remove(E element);
/**
* Removes all of the elements from this set. The set will be empty after this call returns.
*
* @since 1.0
*/
void clear(); void clear();
default void addAll(EconomicSet<E> values) { /**
addAll(values.iterator()); * Adds all of the elements in {@code other} to this set if they're not already present.
*
* @since 1.0
*/
default void addAll(EconomicSet<E> other) {
addAll(other.iterator());
} }
/**
* Adds all of the elements in {@code values} to this set if they're not already present.
*
* @since 1.0
*/
default void addAll(Iterable<E> values) { default void addAll(Iterable<E> values) {
addAll(values.iterator()); addAll(values.iterator());
} }
default void addAll(Iterator<E> values) { /**
while (values.hasNext()) { * Adds all of the elements enumerated by {@code iterator} to this set if they're not already
add(values.next()); * present.
*
* @since 1.0
*/
default void addAll(Iterator<E> iterator) {
while (iterator.hasNext()) {
add(iterator.next());
} }
} }
default void removeAll(EconomicSet<E> values) { /**
removeAll(values.iterator()); * Removes from this set all of its elements that are contained in {@code other}.
*
* @since 1.0
*/
default void removeAll(EconomicSet<E> other) {
removeAll(other.iterator());
} }
/**
* Removes from this set all of its elements that are contained in {@code values}.
*
* @since 1.0
*/
default void removeAll(Iterable<E> values) { default void removeAll(Iterable<E> values) {
removeAll(values.iterator()); removeAll(values.iterator());
} }
default void removeAll(Iterator<E> values) { /**
while (values.hasNext()) { * Removes from this set all of its elements that are enumerated by {@code iterator}.
remove(values.next()); *
* @since 1.0
*/
default void removeAll(Iterator<E> iterator) {
while (iterator.hasNext()) {
remove(iterator.next());
} }
} }
default void retainAll(EconomicSet<E> values) { /**
* Removes from this set all of its elements that are not contained in {@code other}.
*
* @since 1.0
*/
default void retainAll(EconomicSet<E> other) {
Iterator<E> iterator = iterator(); Iterator<E> iterator = iterator();
while (iterator.hasNext()) { while (iterator.hasNext()) {
E key = iterator.next(); E key = iterator.next();
if (!values.contains(key)) { if (!other.contains(key)) {
iterator.remove(); iterator.remove();
} }
} }
@ -78,6 +133,8 @@ public interface EconomicSet<E> extends UnmodifiableEconomicSet<E> {
/** /**
* Creates a new set guaranteeing insertion order when iterating over its elements with the * Creates a new set guaranteeing insertion order when iterating over its elements with the
* default {@link Equivalence#DEFAULT} comparison strategy. * default {@link Equivalence#DEFAULT} comparison strategy.
*
* @since 1.0
*/ */
static <E> EconomicSet<E> create() { static <E> EconomicSet<E> create() {
return EconomicSet.create(Equivalence.DEFAULT); return EconomicSet.create(Equivalence.DEFAULT);
@ -85,15 +142,19 @@ public interface EconomicSet<E> extends UnmodifiableEconomicSet<E> {
/** /**
* Creates a new set guaranteeing insertion order when iterating over its elements. * Creates a new set guaranteeing insertion order when iterating over its elements.
*
* @since 1.0
*/ */
static <E> EconomicSet<E> create(Equivalence strategy) { static <E> EconomicSet<E> create(Equivalence strategy) {
return EconomicMapImpl.create(strategy); return EconomicMapImpl.create(strategy, true);
} }
/** /**
* Creates a new set guaranteeing insertion order when iterating over its elements with the * Creates a new set guaranteeing insertion order when iterating over its elements with the
* default {@link Equivalence#DEFAULT} comparison strategy and inserts all elements of the * default {@link Equivalence#DEFAULT} comparison strategy and inserts all elements of the
* specified collection. * specified collection.
*
* @since 1.0
*/ */
static <E> EconomicSet<E> create(int initialCapacity) { static <E> EconomicSet<E> create(int initialCapacity) {
return EconomicSet.create(Equivalence.DEFAULT, initialCapacity); return EconomicSet.create(Equivalence.DEFAULT, initialCapacity);
@ -103,6 +164,8 @@ public interface EconomicSet<E> extends UnmodifiableEconomicSet<E> {
* Creates a new set guaranteeing insertion order when iterating over its elements with the * Creates a new set guaranteeing insertion order when iterating over its elements with the
* default {@link Equivalence#DEFAULT} comparison strategy and inserts all elements of the * default {@link Equivalence#DEFAULT} comparison strategy and inserts all elements of the
* specified collection. * specified collection.
*
* @since 1.0
*/ */
static <E> EconomicSet<E> create(UnmodifiableEconomicSet<E> c) { static <E> EconomicSet<E> create(UnmodifiableEconomicSet<E> c) {
return EconomicSet.create(Equivalence.DEFAULT, c); return EconomicSet.create(Equivalence.DEFAULT, c);
@ -111,16 +174,20 @@ public interface EconomicSet<E> extends UnmodifiableEconomicSet<E> {
/** /**
* Creates a new set guaranteeing insertion order when iterating over its elements and * Creates a new set guaranteeing insertion order when iterating over its elements and
* initializes with the given capacity. * initializes with the given capacity.
*
* @since 1.0
*/ */
static <E> EconomicSet<E> create(Equivalence strategy, int initialCapacity) { static <E> EconomicSet<E> create(Equivalence strategy, int initialCapacity) {
return EconomicMapImpl.create(strategy, initialCapacity); return EconomicMapImpl.create(strategy, initialCapacity, true);
} }
/** /**
* Creates a new set guaranteeing insertion order when iterating over its elements and inserts * Creates a new set guaranteeing insertion order when iterating over its elements and inserts
* all elements of the specified collection. * all elements of the specified collection.
*
* @since 1.0
*/ */
static <E> EconomicSet<E> create(Equivalence strategy, UnmodifiableEconomicSet<E> c) { static <E> EconomicSet<E> create(Equivalence strategy, UnmodifiableEconomicSet<E> c) {
return EconomicMapImpl.create(strategy, c); return EconomicMapImpl.create(strategy, c, true);
} }
} }

@ -4,7 +4,9 @@
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as * under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. * published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
* *
* This code is distributed in the hope that it will be useful, but WITHOUT * This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
@ -20,11 +22,13 @@
* or visit www.oracle.com if you need additional information or have any * or visit www.oracle.com if you need additional information or have any
* questions. * questions.
*/ */
package org.graalvm.util; package org.graalvm.collections;
/** /**
* Strategy for comparing two objects. Default predefined strategies are {@link #DEFAULT}, * Strategy for comparing two objects. Default predefined strategies are {@link #DEFAULT},
* {@link #IDENTITY}, and {@link #IDENTITY_WITH_SYSTEM_HASHCODE}. * {@link #IDENTITY}, and {@link #IDENTITY_WITH_SYSTEM_HASHCODE}.
*
* @since 1.0
*/ */
public abstract class Equivalence { public abstract class Equivalence {
@ -32,6 +36,8 @@ public abstract class Equivalence {
* Default equivalence calling {@link #equals(Object)} to check equality and {@link #hashCode()} * Default equivalence calling {@link #equals(Object)} to check equality and {@link #hashCode()}
* for obtaining hash values. Do not change the logic of this class as it may be inlined in * for obtaining hash values. Do not change the logic of this class as it may be inlined in
* other places. * other places.
*
* @since 1.0
*/ */
public static final Equivalence DEFAULT = new Equivalence() { public static final Equivalence DEFAULT = new Equivalence() {
@ -49,6 +55,8 @@ public abstract class Equivalence {
/** /**
* Identity equivalence using {@code ==} to check equality and {@link #hashCode()} for obtaining * Identity equivalence using {@code ==} to check equality and {@link #hashCode()} for obtaining
* hash values. Do not change the logic of this class as it may be inlined in other places. * hash values. Do not change the logic of this class as it may be inlined in other places.
*
* @since 1.0
*/ */
public static final Equivalence IDENTITY = new Equivalence() { public static final Equivalence IDENTITY = new Equivalence() {
@ -67,6 +75,8 @@ public abstract class Equivalence {
* Identity equivalence using {@code ==} to check equality and * Identity equivalence using {@code ==} to check equality and
* {@link System#identityHashCode(Object)} for obtaining hash values. Do not change the logic of * {@link System#identityHashCode(Object)} for obtaining hash values. Do not change the logic of
* this class as it may be inlined in other places. * this class as it may be inlined in other places.
*
* @since 1.0
*/ */
public static final Equivalence IDENTITY_WITH_SYSTEM_HASHCODE = new Equivalence() { public static final Equivalence IDENTITY_WITH_SYSTEM_HASHCODE = new Equivalence() {
@ -83,11 +93,24 @@ public abstract class Equivalence {
/** /**
* Subclass for creating custom equivalence definitions. * Subclass for creating custom equivalence definitions.
*
* @since 1.0
*/ */
protected Equivalence() { protected Equivalence() {
} }
/**
* Returns {@code true} if the non-{@code null} arguments are equal to each other and
* {@code false} otherwise.
*
* @since 1.0
*/
public abstract boolean equals(Object a, Object b); public abstract boolean equals(Object a, Object b);
/**
* Returns the hash code of a non-{@code null} argument {@code o}.
*
* @since 1.0
*/
public abstract int hashCode(Object o); public abstract int hashCode(Object o);
} }

@ -4,7 +4,9 @@
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as * under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. * published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
* *
* This code is distributed in the hope that it will be useful, but WITHOUT * This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
@ -20,16 +22,20 @@
* or visit www.oracle.com if you need additional information or have any * or visit www.oracle.com if you need additional information or have any
* questions. * questions.
*/ */
package org.graalvm.util; package org.graalvm.collections;
/** /**
* Cursor to iterate over a mutable map. * Cursor to iterate over a mutable map.
*
* @since 1.0
*/ */
public interface MapCursor<K, V> extends UnmodifiableMapCursor<K, V> { public interface MapCursor<K, V> extends UnmodifiableMapCursor<K, V> {
/** /**
* Remove the current entry from the map. May only be called once. After calling * Remove the current entry from the map. May only be called once. After calling
* {@link #remove()}, it is no longer valid to call {@link #getKey()} or {@link #getValue()} on * {@link #remove()}, it is no longer valid to call {@link #getKey()} or {@link #getValue()} on
* the current entry. * the current entry.
*
* @since 1.0
*/ */
void remove(); void remove();
} }

@ -4,7 +4,9 @@
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as * under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. * published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
* *
* This code is distributed in the hope that it will be useful, but WITHOUT * This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
@ -20,24 +22,39 @@
* or visit www.oracle.com if you need additional information or have any * or visit www.oracle.com if you need additional information or have any
* questions. * questions.
*/ */
package org.graalvm.util; package org.graalvm.collections;
import java.util.Objects; import java.util.Objects;
/** /**
* Utility class representing a pair of values. * Utility class representing a pair of values.
*
* @since 1.0
*/ */
public final class Pair<L, R> { public final class Pair<L, R> {
private static final Pair<Object, Object> EMPTY = new Pair<>(null, null); private static final Pair<Object, Object> EMPTY = new Pair<>(null, null);
private final L left; private final L left;
private final R right; private final R right;
/**
* Returns an empty pair.
*
* @since 1.0
*/
@SuppressWarnings("unchecked") @SuppressWarnings("unchecked")
public static <L, R> Pair<L, R> empty() { public static <L, R> Pair<L, R> empty() {
return (Pair<L, R>) EMPTY; return (Pair<L, R>) EMPTY;
} }
/**
* Constructs a pair with its left value being {@code left}, or returns an empty pair if
* {@code left} is null.
*
* @return the constructed pair or an empty pair if {@code left} is null.
* @since 1.0
*/
public static <L, R> Pair<L, R> createLeft(L left) { public static <L, R> Pair<L, R> createLeft(L left) {
if (left == null) { if (left == null) {
return empty(); return empty();
@ -46,6 +63,13 @@ public final class Pair<L, R> {
} }
} }
/**
* Constructs a pair with its right value being {@code right}, or returns an empty pair if
* {@code right} is null.
*
* @return the constructed pair or an empty pair if {@code right} is null.
* @since 1.0
*/
public static <L, R> Pair<L, R> createRight(R right) { public static <L, R> Pair<L, R> createRight(R right) {
if (right == null) { if (right == null) {
return empty(); return empty();
@ -54,6 +78,13 @@ public final class Pair<L, R> {
} }
} }
/**
* Constructs a pair with its left value being {@code left}, and its right value being
* {@code right}, or returns an empty pair if both inputs are null.
*
* @return the constructed pair or an empty pair if both inputs are null.
* @since 1.0
*/
public static <L, R> Pair<L, R> create(L left, R right) { public static <L, R> Pair<L, R> create(L left, R right) {
if (right == null && left == null) { if (right == null && left == null) {
return empty(); return empty();
@ -67,19 +98,39 @@ public final class Pair<L, R> {
this.right = right; this.right = right;
} }
/**
* Returns the left value of this pair.
*
* @since 1.0
*/
public L getLeft() { public L getLeft() {
return left; return left;
} }
/**
* Returns the right value of this pair.
*
* @since 1.0
*/
public R getRight() { public R getRight() {
return right; return right;
} }
/**
* {@inheritDoc}
*
* @since 1.0
*/
@Override @Override
public int hashCode() { public int hashCode() {
return Objects.hashCode(left) + 31 * Objects.hashCode(right); return Objects.hashCode(left) + 31 * Objects.hashCode(right);
} }
/**
* {@inheritDoc}
*
* @since 1.0
*/
@SuppressWarnings("unchecked") @SuppressWarnings("unchecked")
@Override @Override
public boolean equals(Object obj) { public boolean equals(Object obj) {
@ -95,6 +146,11 @@ public final class Pair<L, R> {
return false; return false;
} }
/**
* {@inheritDoc}
*
* @since 1.0
*/
@Override @Override
public String toString() { public String toString() {
return String.format("(%s, %s)", left, right); return String.format("(%s, %s)", left, right);

@ -0,0 +1,97 @@
/*
* Copyright (c) 2017, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package org.graalvm.collections;
/**
* Unmodifiable memory efficient map data structure.
*
* @since 1.0
*/
public interface UnmodifiableEconomicMap<K, V> {
/**
* Returns the value to which {@code key} is mapped, or {@code null} if this map contains no
* mapping for {@code key}.
*
* @since 1.0
*/
V get(K key);
/**
* Returns the value to which {@code key} is mapped, or {@code defaultValue} if this map
* contains no mapping for {@code key}.
*
* @since 1.0
*/
default V get(K key, V defaultValue) {
V v = get(key);
if (v == null) {
return defaultValue;
}
return v;
}
/**
* Returns {@code true} if this map contains a mapping for {@code key}.
*
* @since 1.0
*/
boolean containsKey(K key);
/**
* Returns the number of key-value mappings in this map.
*
* @since 1.0
*/
int size();
/**
* Returns {@code true} if this map contains no key-value mappings.
*
* @since 1.0
*/
boolean isEmpty();
/**
* Returns a {@link Iterable} view of the values contained in this map.
*
* @since 1.0
*/
Iterable<V> getValues();
/**
* Returns a {@link Iterable} view of the keys contained in this map.
*
* @since 1.0
*/
Iterable<K> getKeys();
/**
* Returns a {@link UnmodifiableMapCursor} view of the mappings contained in this map.
*
* @since 1.0
*/
UnmodifiableMapCursor<K, V> getEntries();
}

@ -0,0 +1,77 @@
/*
* Copyright (c) 2017, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package org.graalvm.collections;
/**
* Unmodifiable memory efficient set data structure.
*
* @since 1.0
*/
public interface UnmodifiableEconomicSet<E> extends Iterable<E> {

    /**
     * Checks whether {@code element} is a member of this set.
     *
     * @since 1.0
     */
    boolean contains(E element);

    /**
     * Yields the number of elements currently held by this set.
     *
     * @since 1.0
     */
    int size();

    /**
     * Checks whether this set holds no elements at all.
     *
     * @since 1.0
     */
    boolean isEmpty();

    /**
     * Copies every element of this set into {@code target}. The length of {@code target} must
     * equal the size of this set, otherwise an {@link UnsupportedOperationException} is raised.
     *
     * @return an array containing all the elements in this set.
     * @throws UnsupportedOperationException if the length of {@code target} does not equal the size
     *             of this set.
     * @since 1.0
     */
    default E[] toArray(E[] target) {
        if (target.length != size()) {
            throw new UnsupportedOperationException("Length of target array must equal the size of the set.");
        }
        int pos = 0;
        for (E element : this) {
            target[pos] = element;
            pos++;
        }
        return target;
    }
}

@ -4,7 +4,9 @@
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as * under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. * published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
* *
* This code is distributed in the hope that it will be useful, but WITHOUT * This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
@ -20,26 +22,33 @@
* or visit www.oracle.com if you need additional information or have any * or visit www.oracle.com if you need additional information or have any
* questions. * questions.
*/ */
package org.graalvm.util; package org.graalvm.collections;
/** /**
* Cursor to iterate over a map without changing its contents. * Cursor to iterate over a map without changing its contents.
*
* @since 1.0
*/ */
public interface UnmodifiableMapCursor<K, V> { public interface UnmodifiableMapCursor<K, V> {
/** /**
* Advances to the next entry. * Advances to the next entry.
* *
* @return {@code true} if a next entry exists, {@code false} if there is no next entry. * @return {@code true} if a next entry exists, {@code false} if there is no next entry.
* @since 1.0
*/ */
boolean advance(); boolean advance();
/** /**
* The key of the current entry. * The key of the current entry.
*
* @since 1.0
*/ */
K getKey(); K getKey();
/** /**
* The value of the current entry. * The value of the current entry.
*
* @since 1.0
*/ */
V getValue(); V getValue();
} }

@ -0,0 +1,33 @@
/*
* Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/**
* The Graal-SDK collections package contains memory efficient data structures.
*
* @see org.graalvm.collections.EconomicMap
* @see org.graalvm.collections.EconomicSet
*
* @since 1.0
*/
package org.graalvm.collections;

@ -50,6 +50,21 @@ public class ProbabilityDirectiveTest extends GraalCompilerTest {
test("branchProbabilitySnippet", 5); test("branchProbabilitySnippet", 5);
} }
public static int branchProbabilitySnippet2(int arg) {
if (!GraalDirectives.injectBranchProbability(0.125, arg <= 0)) {
GraalDirectives.controlFlowAnchor(); // prevent removal of the if
return 2;
} else {
GraalDirectives.controlFlowAnchor(); // prevent removal of the if
return 1;
}
}
@Test
public void testBranchProbability2() {
test("branchProbabilitySnippet2", 5);
}
@Override @Override
protected boolean checkLowTierGraph(StructuredGraph graph) { protected boolean checkLowTierGraph(StructuredGraph graph) {
NodeIterable<IfNode> ifNodes = graph.getNodes(IfNode.TYPE); NodeIterable<IfNode> ifNodes = graph.getNodes(IfNode.TYPE);

@ -49,6 +49,13 @@ public final class GraalDirectives {
public static void deoptimizeAndInvalidate() { public static void deoptimizeAndInvalidate() {
} }
/**
* Directive for the compiler to fall back to the bytecode interpreter at this point, invalidate
* the compiled code, record a speculation and reprofile the method.
*/
public static void deoptimizeAndInvalidateWithSpeculation() {
}
/** /**
* Returns a boolean value indicating whether the method is executed in Graal-compiled code. * Returns a boolean value indicating whether the method is executed in Graal-compiled code.
*/ */

@ -26,15 +26,52 @@ import java.lang.annotation.ElementType;
import java.lang.annotation.Retention; import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy; import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target; import java.lang.annotation.Target;
import java.lang.reflect.Array;
import jdk.vm.ci.meta.Signature; import jdk.vm.ci.meta.Signature;
/** /**
* Denotes a method whose body is used by a compiler as the substitute (or intrinsification) of * Denotes a method whose body is used by a compiler as the substitute (or intrinsification) of
* another method. The exact method used to do the substitution is compiler dependent but every * another method. The exact mechanism used to do the substitution is compiler dependent but every
* compiler should require substitute methods to be annotated with {@link MethodSubstitution}. In * compiler should require substitute methods to be annotated with {@link MethodSubstitution}. In
* addition, a compiler is recommended to implement {@link MethodSubstitutionRegistry} to advertise * addition, a compiler is recommended to implement {@link MethodSubstitutionRegistry} to advertise
* the mechanism by which it supports registration of method substitutes. * the mechanism by which it supports registration of method substitutes.
*
* A compiler may support partial intrinsification where only a part of a method is implemented by
* the compiler. The unsupported path is expressed by a call to either the original or substitute
 * method from within the substitute method. Such a call is a <i>partial intrinsic exit</i>.
*
* For example, here's a HotSpot specific intrinsic for {@link Array#newInstance(Class, int)} that
* only handles the case where the VM representation of the array class to be instantiated already
* exists:
*
* <pre>
* &#64;MethodSubstitution
* public static Object newInstance(Class<?> componentType, int length) {
* if (componentType == null || loadKlassFromObject(componentType, arrayKlassOffset(INJECTED_VMCONFIG), CLASS_ARRAY_KLASS_LOCATION).isNull()) {
* // Array class not yet created - exit the intrinsic and call the original method
* return newInstance(componentType, length);
* }
* return DynamicNewArrayNode.newArray(GraalDirectives.guardingNonNull(componentType), length, JavaKind.Object);
* }
* </pre>
*
* Here's the same intrinsification where the exit is expressed as a call to the original method:
*
* <pre>
* &#64;MethodSubstitution
* public static Object newInstance(Class<?> componentType, int length) {
* if (componentType == null || loadKlassFromObject(componentType, arrayKlassOffset(INJECTED_VMCONFIG), CLASS_ARRAY_KLASS_LOCATION).isNull()) {
* // Array class not yet created - exit the intrinsic and call the original method
 * return java.lang.reflect.Array.newInstance(componentType, length);
* }
* return DynamicNewArrayNode.newArray(GraalDirectives.guardingNonNull(componentType), length, JavaKind.Object);
* }
* </pre>
*
 * A condition for a partial intrinsic exit is that it uses the unmodified parameters of the
* substitute as arguments to the partial intrinsic exit call. There must also be no side effecting
* instruction between the start of the substitute method and the partial intrinsic exit.
*/ */
@Retention(RetentionPolicy.RUNTIME) @Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.METHOD) @Target(ElementType.METHOD)

@ -90,4 +90,12 @@ public interface SnippetReflectionProvider {
* if this provider cannot provide a value of the requested type * if this provider cannot provide a value of the requested type
*/ */
<T> T getInjectedNodeIntrinsicParameter(Class<T> type); <T> T getInjectedNodeIntrinsicParameter(Class<T> type);
/**
* Get the original Java class corresponding to a {@link ResolvedJavaType}.
*
* @param type the type for which the original Java class is requested
* @return the original Java class corresponding to the {@code type} parameter
*/
Class<?> originalClass(ResolvedJavaType type);
} }

@ -22,9 +22,19 @@
*/ */
package org.graalvm.compiler.api.runtime; package org.graalvm.compiler.api.runtime;
import jdk.vm.ci.common.JVMCIError;
public interface GraalRuntime { public interface GraalRuntime {
String getName(); String getName();
<T> T getCapability(Class<T> clazz); <T> T getCapability(Class<T> clazz);
default <T> T getRequiredCapability(Class<T> clazz) {
T ret = getCapability(clazz);
if (ret == null) {
throw new JVMCIError("The VM does not expose the required Graal capability %s.", clazz.getName());
}
return ret;
}
} }

@ -208,7 +208,6 @@ public class AMD64Assembler extends Assembler {
} }
private static class VexOpcode { private static class VexOpcode {
private static final int VEX_OPCODE_NONE = 0x0;
private static final int VEX_OPCODE_0F = 0x1; private static final int VEX_OPCODE_0F = 0x1;
private static final int VEX_OPCODE_0F_38 = 0x2; private static final int VEX_OPCODE_0F_38 = 0x2;
private static final int VEX_OPCODE_0F_3A = 0x3; private static final int VEX_OPCODE_0F_3A = 0x3;
@ -861,9 +860,26 @@ public class AMD64Assembler extends Assembler {
break; break;
} }
int opc = 0;
if (isSimd) {
switch (prefix2) {
case P_0F:
opc = VexOpcode.VEX_OPCODE_0F;
break;
case P_0F38:
opc = VexOpcode.VEX_OPCODE_0F_38;
break;
case P_0F3A:
opc = VexOpcode.VEX_OPCODE_0F_3A;
break;
default:
isSimd = false;
break;
}
}
if (isSimd) { if (isSimd) {
int pre; int pre;
int opc;
boolean rexVexW = (size == QWORD) ? true : false; boolean rexVexW = (size == QWORD) ? true : false;
AMD64InstructionAttr attributes = new AMD64InstructionAttr(AvxVectorLen.AVX_128bit, rexVexW, /* legacyMode */ false, /* noMaskReg */ false, /* usesVl */ false, asm.target); AMD64InstructionAttr attributes = new AMD64InstructionAttr(AvxVectorLen.AVX_128bit, rexVexW, /* legacyMode */ false, /* noMaskReg */ false, /* usesVl */ false, asm.target);
int curPrefix = size.sizePrefix | prefix1; int curPrefix = size.sizePrefix | prefix1;
@ -881,20 +897,6 @@ public class AMD64Assembler extends Assembler {
pre = VexSimdPrefix.VEX_SIMD_NONE; pre = VexSimdPrefix.VEX_SIMD_NONE;
break; break;
} }
switch (prefix2) {
case P_0F:
opc = VexOpcode.VEX_OPCODE_0F;
break;
case P_0F38:
opc = VexOpcode.VEX_OPCODE_0F_38;
break;
case P_0F3A:
opc = VexOpcode.VEX_OPCODE_0F_3A;
break;
default:
opc = VexOpcode.VEX_OPCODE_NONE;
break;
}
int encode; int encode;
if (noNds) { if (noNds) {
encode = asm.simdPrefixAndEncode(dst, Register.None, src, pre, opc, attributes); encode = asm.simdPrefixAndEncode(dst, Register.None, src, pre, opc, attributes);
@ -938,9 +940,26 @@ public class AMD64Assembler extends Assembler {
break; break;
} }
int opc = 0;
if (isSimd) {
switch (prefix2) {
case P_0F:
opc = VexOpcode.VEX_OPCODE_0F;
break;
case P_0F38:
opc = VexOpcode.VEX_OPCODE_0F_38;
break;
case P_0F3A:
opc = VexOpcode.VEX_OPCODE_0F_3A;
break;
default:
isSimd = false;
break;
}
}
if (isSimd) { if (isSimd) {
int pre; int pre;
int opc;
boolean rexVexW = (size == QWORD) ? true : false; boolean rexVexW = (size == QWORD) ? true : false;
AMD64InstructionAttr attributes = new AMD64InstructionAttr(AvxVectorLen.AVX_128bit, rexVexW, /* legacyMode */ false, /* noMaskReg */ false, /* usesVl */ false, asm.target); AMD64InstructionAttr attributes = new AMD64InstructionAttr(AvxVectorLen.AVX_128bit, rexVexW, /* legacyMode */ false, /* noMaskReg */ false, /* usesVl */ false, asm.target);
int curPrefix = size.sizePrefix | prefix1; int curPrefix = size.sizePrefix | prefix1;
@ -958,20 +977,6 @@ public class AMD64Assembler extends Assembler {
pre = VexSimdPrefix.VEX_SIMD_NONE; pre = VexSimdPrefix.VEX_SIMD_NONE;
break; break;
} }
switch (prefix2) {
case P_0F:
opc = VexOpcode.VEX_OPCODE_0F;
break;
case P_0F38:
opc = VexOpcode.VEX_OPCODE_0F_38;
break;
case P_0F3A:
opc = VexOpcode.VEX_OPCODE_0F_3A;
break;
default:
opc = VexOpcode.VEX_OPCODE_NONE;
break;
}
if (noNds) { if (noNds) {
asm.simdPrefix(dst, Register.None, src, pre, opc, attributes); asm.simdPrefix(dst, Register.None, src, pre, opc, attributes);
} else { } else {
@ -1055,8 +1060,7 @@ public class AMD64Assembler extends Assembler {
opc = VexOpcode.VEX_OPCODE_0F_3A; opc = VexOpcode.VEX_OPCODE_0F_3A;
break; break;
default: default:
opc = VexOpcode.VEX_OPCODE_NONE; throw GraalError.shouldNotReachHere("invalid VEX instruction prefix");
break;
} }
int encode; int encode;
encode = asm.simdPrefixAndEncode(dst, nds, src, pre, opc, attributes); encode = asm.simdPrefixAndEncode(dst, nds, src, pre, opc, attributes);
@ -1096,8 +1100,7 @@ public class AMD64Assembler extends Assembler {
opc = VexOpcode.VEX_OPCODE_0F_3A; opc = VexOpcode.VEX_OPCODE_0F_3A;
break; break;
default: default:
opc = VexOpcode.VEX_OPCODE_NONE; throw GraalError.shouldNotReachHere("invalid VEX instruction prefix");
break;
} }
asm.simdPrefix(dst, nds, src, pre, opc, attributes); asm.simdPrefix(dst, nds, src, pre, opc, attributes);
asm.emitByte(op); asm.emitByte(op);
@ -1163,9 +1166,26 @@ public class AMD64Assembler extends Assembler {
break; break;
} }
int opc = 0;
if (isSimd) {
switch (prefix2) {
case P_0F:
opc = VexOpcode.VEX_OPCODE_0F;
break;
case P_0F38:
opc = VexOpcode.VEX_OPCODE_0F_38;
break;
case P_0F3A:
opc = VexOpcode.VEX_OPCODE_0F_3A;
break;
default:
isSimd = false;
break;
}
}
if (isSimd) { if (isSimd) {
int pre; int pre;
int opc;
boolean rexVexW = (size == QWORD) ? true : false; boolean rexVexW = (size == QWORD) ? true : false;
AMD64InstructionAttr attributes = new AMD64InstructionAttr(AvxVectorLen.AVX_128bit, rexVexW, /* legacyMode */ false, /* noMaskReg */ false, /* usesVl */ false, asm.target); AMD64InstructionAttr attributes = new AMD64InstructionAttr(AvxVectorLen.AVX_128bit, rexVexW, /* legacyMode */ false, /* noMaskReg */ false, /* usesVl */ false, asm.target);
int curPrefix = size.sizePrefix | prefix1; int curPrefix = size.sizePrefix | prefix1;
@ -1183,20 +1203,6 @@ public class AMD64Assembler extends Assembler {
pre = VexSimdPrefix.VEX_SIMD_NONE; pre = VexSimdPrefix.VEX_SIMD_NONE;
break; break;
} }
switch (prefix2) {
case P_0F:
opc = VexOpcode.VEX_OPCODE_0F;
break;
case P_0F38:
opc = VexOpcode.VEX_OPCODE_0F_38;
break;
case P_0F3A:
opc = VexOpcode.VEX_OPCODE_0F_3A;
break;
default:
opc = VexOpcode.VEX_OPCODE_NONE;
break;
}
int encode; int encode;
if (noNds) { if (noNds) {
encode = asm.simdPrefixAndEncode(src, Register.None, dst, pre, opc, attributes); encode = asm.simdPrefixAndEncode(src, Register.None, dst, pre, opc, attributes);
@ -1222,9 +1228,26 @@ public class AMD64Assembler extends Assembler {
break; break;
} }
int opc = 0;
if (isSimd) {
switch (prefix2) {
case P_0F:
opc = VexOpcode.VEX_OPCODE_0F;
break;
case P_0F38:
opc = VexOpcode.VEX_OPCODE_0F_38;
break;
case P_0F3A:
opc = VexOpcode.VEX_OPCODE_0F_3A;
break;
default:
isSimd = false;
break;
}
}
if (isSimd) { if (isSimd) {
int pre; int pre;
int opc;
boolean rexVexW = (size == QWORD) ? true : false; boolean rexVexW = (size == QWORD) ? true : false;
AMD64InstructionAttr attributes = new AMD64InstructionAttr(AvxVectorLen.AVX_128bit, rexVexW, /* legacyMode */ false, /* noMaskReg */ false, /* usesVl */ false, asm.target); AMD64InstructionAttr attributes = new AMD64InstructionAttr(AvxVectorLen.AVX_128bit, rexVexW, /* legacyMode */ false, /* noMaskReg */ false, /* usesVl */ false, asm.target);
int curPrefix = size.sizePrefix | prefix1; int curPrefix = size.sizePrefix | prefix1;
@ -1242,20 +1265,6 @@ public class AMD64Assembler extends Assembler {
pre = VexSimdPrefix.VEX_SIMD_NONE; pre = VexSimdPrefix.VEX_SIMD_NONE;
break; break;
} }
switch (prefix2) {
case P_0F:
opc = VexOpcode.VEX_OPCODE_0F;
break;
case P_0F38:
opc = VexOpcode.VEX_OPCODE_0F_38;
break;
case P_0F3A:
opc = VexOpcode.VEX_OPCODE_0F_3A;
break;
default:
opc = VexOpcode.VEX_OPCODE_NONE;
break;
}
asm.simdPrefix(src, Register.None, dst, pre, opc, attributes); asm.simdPrefix(src, Register.None, dst, pre, opc, attributes);
asm.emitByte(op); asm.emitByte(op);
asm.emitOperandHelper(src, dst, 0); asm.emitOperandHelper(src, dst, 0);
@ -1390,9 +1399,26 @@ public class AMD64Assembler extends Assembler {
break; break;
} }
int opc = 0;
if (isSimd) {
switch (prefix2) {
case P_0F:
opc = VexOpcode.VEX_OPCODE_0F;
break;
case P_0F38:
opc = VexOpcode.VEX_OPCODE_0F_38;
break;
case P_0F3A:
opc = VexOpcode.VEX_OPCODE_0F_3A;
break;
default:
isSimd = false;
break;
}
}
if (isSimd) { if (isSimd) {
int pre; int pre;
int opc;
AMD64InstructionAttr attributes = new AMD64InstructionAttr(AvxVectorLen.AVX_128bit, /* rexVexW */ false, /* legacyMode */ false, /* noMaskReg */ false, /* usesVl */ false, asm.target); AMD64InstructionAttr attributes = new AMD64InstructionAttr(AvxVectorLen.AVX_128bit, /* rexVexW */ false, /* legacyMode */ false, /* noMaskReg */ false, /* usesVl */ false, asm.target);
int curPrefix = size.sizePrefix | prefix1; int curPrefix = size.sizePrefix | prefix1;
switch (curPrefix) { switch (curPrefix) {
@ -1409,20 +1435,6 @@ public class AMD64Assembler extends Assembler {
pre = VexSimdPrefix.VEX_SIMD_NONE; pre = VexSimdPrefix.VEX_SIMD_NONE;
break; break;
} }
switch (prefix2) {
case P_0F:
opc = VexOpcode.VEX_OPCODE_0F;
break;
case P_0F38:
opc = VexOpcode.VEX_OPCODE_0F_38;
break;
case P_0F3A:
opc = VexOpcode.VEX_OPCODE_0F_3A;
break;
default:
opc = VexOpcode.VEX_OPCODE_NONE;
break;
}
int encode; int encode;
if (noNds) { if (noNds) {
encode = asm.simdPrefixAndEncode(dst, Register.None, src, pre, opc, attributes); encode = asm.simdPrefixAndEncode(dst, Register.None, src, pre, opc, attributes);
@ -1453,9 +1465,26 @@ public class AMD64Assembler extends Assembler {
break; break;
} }
int opc = 0;
if (isSimd) {
switch (prefix2) {
case P_0F:
opc = VexOpcode.VEX_OPCODE_0F;
break;
case P_0F38:
opc = VexOpcode.VEX_OPCODE_0F_38;
break;
case P_0F3A:
opc = VexOpcode.VEX_OPCODE_0F_3A;
break;
default:
isSimd = false;
break;
}
}
if (isSimd) { if (isSimd) {
int pre; int pre;
int opc;
AMD64InstructionAttr attributes = new AMD64InstructionAttr(AvxVectorLen.AVX_128bit, /* rexVexW */ false, /* legacyMode */ false, /* noMaskReg */ false, /* usesVl */ false, asm.target); AMD64InstructionAttr attributes = new AMD64InstructionAttr(AvxVectorLen.AVX_128bit, /* rexVexW */ false, /* legacyMode */ false, /* noMaskReg */ false, /* usesVl */ false, asm.target);
int curPrefix = size.sizePrefix | prefix1; int curPrefix = size.sizePrefix | prefix1;
switch (curPrefix) { switch (curPrefix) {
@ -1472,21 +1501,6 @@ public class AMD64Assembler extends Assembler {
pre = VexSimdPrefix.VEX_SIMD_NONE; pre = VexSimdPrefix.VEX_SIMD_NONE;
break; break;
} }
switch (prefix2) {
case P_0F:
opc = VexOpcode.VEX_OPCODE_0F;
break;
case P_0F38:
opc = VexOpcode.VEX_OPCODE_0F_38;
break;
case P_0F3A:
opc = VexOpcode.VEX_OPCODE_0F_3A;
break;
default:
opc = VexOpcode.VEX_OPCODE_NONE;
break;
}
if (noNds) { if (noNds) {
asm.simdPrefix(dst, Register.None, src, pre, opc, attributes); asm.simdPrefix(dst, Register.None, src, pre, opc, attributes);
} else { } else {

@ -33,9 +33,9 @@ import java.util.Collections;
import java.util.List; import java.util.List;
import java.util.Objects; import java.util.Objects;
import org.graalvm.collections.EconomicSet;
import org.graalvm.compiler.core.common.CompilationIdentifier; import org.graalvm.compiler.core.common.CompilationIdentifier;
import org.graalvm.compiler.graph.NodeSourcePosition; import org.graalvm.compiler.graph.NodeSourcePosition;
import org.graalvm.util.EconomicSet;
import jdk.vm.ci.code.DebugInfo; import jdk.vm.ci.code.DebugInfo;
import jdk.vm.ci.code.StackSlot; import jdk.vm.ci.code.StackSlot;

@ -40,7 +40,7 @@ public final class DataSection implements Iterable<Data> {
public interface Patches { public interface Patches {
void registerPatch(VMConstant c); void registerPatch(int position, VMConstant c);
} }
public abstract static class Data { public abstract static class Data {

@ -27,7 +27,6 @@ import org.graalvm.compiler.asm.amd64.AMD64Address.Scale;
import org.graalvm.compiler.core.common.NumUtil; import org.graalvm.compiler.core.common.NumUtil;
import org.graalvm.compiler.core.common.type.AbstractPointerStamp; import org.graalvm.compiler.core.common.type.AbstractPointerStamp;
import org.graalvm.compiler.core.common.type.IntegerStamp; import org.graalvm.compiler.core.common.type.IntegerStamp;
import org.graalvm.compiler.core.common.type.PrimitiveStamp;
import org.graalvm.compiler.debug.DebugContext; import org.graalvm.compiler.debug.DebugContext;
import org.graalvm.compiler.nodes.NodeView; import org.graalvm.compiler.nodes.NodeView;
import org.graalvm.compiler.nodes.StructuredGraph; import org.graalvm.compiler.nodes.StructuredGraph;
@ -107,7 +106,9 @@ public class AMD64AddressLowering extends AddressLowering {
ret.setBase(add.getX()); ret.setBase(add.getX());
ret.setIndex(considerNegation(graph, add.getY(), isBaseNegated)); ret.setIndex(considerNegation(graph, add.getY(), isBaseNegated));
return true; return true;
} else if (ret.getBase() == null && ret.getIndex() instanceof AddNode) { }
if (ret.getBase() == null && ret.getIndex() instanceof AddNode) {
AddNode add = (AddNode) ret.getIndex(); AddNode add = (AddNode) ret.getIndex();
ret.setBase(considerNegation(graph, add.getX(), isIndexNegated)); ret.setBase(considerNegation(graph, add.getX(), isIndexNegated));
ret.setIndex(add.getY()); ret.setIndex(add.getY());
@ -188,7 +189,7 @@ public class AMD64AddressLowering extends AddressLowering {
return improveConstDisp(address, node, c, null, shift, negateExtractedDisplacement); return improveConstDisp(address, node, c, null, shift, negateExtractedDisplacement);
} else { } else {
if (node.stamp(NodeView.DEFAULT) instanceof IntegerStamp) { if (node.stamp(NodeView.DEFAULT) instanceof IntegerStamp) {
assert PrimitiveStamp.getBits(node.stamp(NodeView.DEFAULT)) == ADDRESS_BITS; assert IntegerStamp.getBits(node.stamp(NodeView.DEFAULT)) == ADDRESS_BITS;
/* /*
* we can't swallow zero-extends because of multiple reasons: * we can't swallow zero-extends because of multiple reasons:

@ -0,0 +1,98 @@
/*
* Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package org.graalvm.compiler.core.amd64;
import jdk.vm.ci.code.Register;
import org.graalvm.compiler.asm.amd64.AMD64Address;
import org.graalvm.compiler.core.common.LIRKind;
import org.graalvm.compiler.core.common.type.StampFactory;
import org.graalvm.compiler.debug.CounterKey;
import org.graalvm.compiler.debug.DebugContext;
import org.graalvm.compiler.graph.NodeClass;
import org.graalvm.compiler.nodeinfo.NodeInfo;
import org.graalvm.compiler.nodes.CompressionNode;
import org.graalvm.compiler.nodes.NodeView;
import org.graalvm.compiler.nodes.StructuredGraph;
import org.graalvm.compiler.nodes.ValueNode;
import org.graalvm.compiler.nodes.calc.FloatingNode;
import org.graalvm.compiler.nodes.spi.LIRLowerable;
import org.graalvm.compiler.nodes.spi.NodeLIRBuilderTool;
import static org.graalvm.compiler.nodeinfo.NodeCycles.CYCLES_0;
import static org.graalvm.compiler.nodeinfo.NodeSize.SIZE_0;
public abstract class AMD64CompressAddressLowering extends AMD64AddressLowering {

    private static final CounterKey counterFoldedUncompressDuringAddressLowering = DebugContext.counter("FoldedUncompressDuringAddressLowering");

    /**
     * Delegates to the superclass improvement first; when that fails, additionally attempts to
     * fold an uncompression feeding the address's base or index into the address computation.
     */
    @Override
    protected final boolean improve(StructuredGraph graph, DebugContext debug, AMD64AddressNode addr, boolean isBaseNegated, boolean isIndexNegated) {
        if (super.improve(graph, debug, addr, isBaseNegated, isIndexNegated)) {
            return true;
        }
        // Folding is only attempted for plain, non-negated, unscaled addresses.
        if (isBaseNegated || isIndexNegated || addr.getScale() != AMD64Address.Scale.Times1) {
            return false;
        }
        ValueNode base = addr.getBase();
        ValueNode index = addr.getIndex();
        // The index is tried before the base.
        if (tryToImproveUncompression(addr, index, base) || tryToImproveUncompression(addr, base, index)) {
            counterFoldedUncompressDuringAddressLowering.increment(debug);
            return true;
        }
        return false;
    }

    // Returns true when value is an Uncompress node that the subclass managed to fold into addr.
    private boolean tryToImproveUncompression(AMD64AddressNode addr, ValueNode value, ValueNode other) {
        if (!(value instanceof CompressionNode)) {
            return false;
        }
        CompressionNode compression = (CompressionNode) value;
        if (compression.getOp() != CompressionNode.CompressionOp.Uncompress) {
            return false;
        }
        return improveUncompression(addr, compression, other);
    }

    protected abstract boolean improveUncompression(AMD64AddressNode addr, CompressionNode compression, ValueNode other);

    @NodeInfo(cycles = CYCLES_0, size = SIZE_0)
    public static class HeapBaseNode extends FloatingNode implements LIRLowerable {

        public static final NodeClass<HeapBaseNode> TYPE = NodeClass.create(HeapBaseNode.class);

        private final Register heapBaseRegister;

        public HeapBaseNode(Register heapBaseRegister) {
            super(TYPE, StampFactory.pointer());
            this.heapBaseRegister = heapBaseRegister;
        }

        @Override
        public void generate(NodeLIRBuilderTool generator) {
            LIRKind resultKind = generator.getLIRGeneratorTool().getLIRKind(stamp(NodeView.DEFAULT));
            generator.setResult(this, heapBaseRegister.asValue(resultKind));
        }
    }
}

@ -26,14 +26,14 @@ package org.graalvm.compiler.core.amd64;
import static org.graalvm.compiler.asm.amd64.AMD64Assembler.OperandSize.QWORD; import static org.graalvm.compiler.asm.amd64.AMD64Assembler.OperandSize.QWORD;
import static org.graalvm.compiler.asm.amd64.AMD64Assembler.OperandSize.WORD; import static org.graalvm.compiler.asm.amd64.AMD64Assembler.OperandSize.WORD;
import org.graalvm.collections.EconomicMap;
import org.graalvm.collections.Equivalence;
import org.graalvm.compiler.core.common.LIRKind; import org.graalvm.compiler.core.common.LIRKind;
import org.graalvm.compiler.lir.VirtualStackSlot; import org.graalvm.compiler.lir.VirtualStackSlot;
import org.graalvm.compiler.lir.amd64.AMD64LIRInstruction; import org.graalvm.compiler.lir.amd64.AMD64LIRInstruction;
import org.graalvm.compiler.lir.amd64.AMD64Move.AMD64PushPopStackMove; import org.graalvm.compiler.lir.amd64.AMD64Move.AMD64PushPopStackMove;
import org.graalvm.compiler.lir.framemap.FrameMapBuilder; import org.graalvm.compiler.lir.framemap.FrameMapBuilder;
import org.graalvm.compiler.lir.gen.LIRGeneratorTool.MoveFactory; import org.graalvm.compiler.lir.gen.LIRGeneratorTool.MoveFactory;
import org.graalvm.util.Equivalence;
import org.graalvm.util.EconomicMap;
import jdk.vm.ci.amd64.AMD64Kind; import jdk.vm.ci.amd64.AMD64Kind;
import jdk.vm.ci.code.Architecture; import jdk.vm.ci.code.Architecture;

@ -44,6 +44,7 @@ import org.graalvm.compiler.asm.amd64.AMD64Assembler.OperandSize;
import org.graalvm.compiler.asm.amd64.AMD64Assembler.SSEOp; import org.graalvm.compiler.asm.amd64.AMD64Assembler.SSEOp;
import org.graalvm.compiler.core.common.LIRKind; import org.graalvm.compiler.core.common.LIRKind;
import org.graalvm.compiler.core.common.NumUtil; import org.graalvm.compiler.core.common.NumUtil;
import org.graalvm.compiler.core.common.calc.CanonicalCondition;
import org.graalvm.compiler.core.common.calc.Condition; import org.graalvm.compiler.core.common.calc.Condition;
import org.graalvm.compiler.core.gen.NodeLIRBuilder; import org.graalvm.compiler.core.gen.NodeLIRBuilder;
import org.graalvm.compiler.core.gen.NodeMatchRules; import org.graalvm.compiler.core.gen.NodeMatchRules;
@ -128,7 +129,7 @@ public class AMD64NodeMatchRules extends NodeMatchRules {
} }
protected ComplexMatchResult emitCompareBranchMemory(IfNode ifNode, CompareNode compare, ValueNode value, LIRLowerableAccess access) { protected ComplexMatchResult emitCompareBranchMemory(IfNode ifNode, CompareNode compare, ValueNode value, LIRLowerableAccess access) {
Condition cond = compare.condition(); Condition cond = compare.condition().asCondition();
AMD64Kind kind = getMemoryKind(access); AMD64Kind kind = getMemoryKind(access);
boolean matchedAsConstant = false; // For assertion checking boolean matchedAsConstant = false; // For assertion checking
@ -303,7 +304,7 @@ public class AMD64NodeMatchRules extends NodeMatchRules {
@MatchRule("(If (FloatEquals=compare value ValueCompareAndSwap=cas))") @MatchRule("(If (FloatEquals=compare value ValueCompareAndSwap=cas))")
@MatchRule("(If (IntegerEquals=compare value ValueCompareAndSwap=cas))") @MatchRule("(If (IntegerEquals=compare value ValueCompareAndSwap=cas))")
public ComplexMatchResult ifCompareValueCas(IfNode root, CompareNode compare, ValueNode value, ValueCompareAndSwapNode cas) { public ComplexMatchResult ifCompareValueCas(IfNode root, CompareNode compare, ValueNode value, ValueCompareAndSwapNode cas) {
assert compare.condition() == Condition.EQ; assert compare.condition() == CanonicalCondition.EQ;
if (value == cas.getExpectedValue() && cas.usages().count() == 1) { if (value == cas.getExpectedValue() && cas.usages().count() == 1) {
return builder -> { return builder -> {
LIRKind kind = getLirKind(cas); LIRKind kind = getLirKind(cas);
@ -326,7 +327,7 @@ public class AMD64NodeMatchRules extends NodeMatchRules {
@MatchRule("(If (IntegerEquals=compare value LogicCompareAndSwap=cas))") @MatchRule("(If (IntegerEquals=compare value LogicCompareAndSwap=cas))")
public ComplexMatchResult ifCompareLogicCas(IfNode root, CompareNode compare, ValueNode value, LogicCompareAndSwapNode cas) { public ComplexMatchResult ifCompareLogicCas(IfNode root, CompareNode compare, ValueNode value, LogicCompareAndSwapNode cas) {
JavaConstant constant = value.asJavaConstant(); JavaConstant constant = value.asJavaConstant();
assert compare.condition() == Condition.EQ; assert compare.condition() == CanonicalCondition.EQ;
if (constant != null && cas.usages().count() == 1) { if (constant != null && cas.usages().count() == 1) {
long constantValue = constant.asLong(); long constantValue = constant.asLong();
boolean successIsTrue; boolean successIsTrue;

@ -232,6 +232,9 @@ public final class GraalOptions {
@Option(help = "", type = OptionType.Debug) @Option(help = "", type = OptionType.Debug)
public static final OptionKey<Boolean> OptScheduleOutOfLoops = new OptionKey<>(true); public static final OptionKey<Boolean> OptScheduleOutOfLoops = new OptionKey<>(true);
@Option(help = "", type = OptionType.Debug)
public static final OptionKey<Boolean> GuardPriorities = new OptionKey<>(true);
@Option(help = "", type = OptionType.Debug) @Option(help = "", type = OptionType.Debug)
public static final OptionKey<Boolean> OptEliminateGuards = new OptionKey<>(true); public static final OptionKey<Boolean> OptEliminateGuards = new OptionKey<>(true);
@ -271,4 +274,7 @@ public final class GraalOptions {
@Option(help = "Enable experimental Trace Register Allocation.", type = OptionType.Debug) @Option(help = "Enable experimental Trace Register Allocation.", type = OptionType.Debug)
public static final OptionKey<Boolean> TraceRA = new OptionKey<>(false); public static final OptionKey<Boolean> TraceRA = new OptionKey<>(false);
@Option(help = "How to trace inlining decisions, one of: None, Linear, Tree", type = OptionType.Debug)
public static final OptionKey<TraceInliningMode> TraceInlining = new OptionKey<>(TraceInliningMode.None);
} }

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2017, 2017, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -20,29 +20,20 @@
* or visit www.oracle.com if you need additional information or have any * or visit www.oracle.com if you need additional information or have any
* questions. * questions.
*/ */
package org.graalvm.util; package org.graalvm.compiler.core.common;
/** public enum TraceInliningMode {
* Unmodifiable memory efficient set data structure. None(false),
*/ Linear(true),
public interface UnmodifiableEconomicSet<E> extends Iterable<E> { Tree(true);
boolean contains(E element); private final boolean tracing;
int size(); TraceInliningMode(boolean tracing) {
this.tracing = tracing;
}
boolean isEmpty(); public boolean isTracing() {
return tracing;
default E[] toArray(E[] target) {
if (target.length != size()) {
throw new UnsupportedOperationException("Length of target array must equal the size of the set.");
}
int index = 0;
for (E element : this) {
target[index++] = element;
}
return target;
} }
} }

@ -22,9 +22,9 @@
*/ */
package org.graalvm.compiler.core.common.alloc; package org.graalvm.compiler.core.common.alloc;
import org.graalvm.collections.EconomicMap;
import org.graalvm.collections.Equivalence;
import org.graalvm.compiler.core.common.GraalOptions; import org.graalvm.compiler.core.common.GraalOptions;
import org.graalvm.util.EconomicMap;
import org.graalvm.util.Equivalence;
import jdk.vm.ci.code.Register; import jdk.vm.ci.code.Register;
import jdk.vm.ci.code.Register.RegisterCategory; import jdk.vm.ci.code.Register.RegisterCategory;

@ -0,0 +1,52 @@
/*
* Copyright (c) 2009, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package org.graalvm.compiler.core.common.calc;
import jdk.vm.ci.meta.Constant;
import jdk.vm.ci.meta.ConstantReflectionProvider;
import jdk.vm.ci.meta.PrimitiveConstant;
public enum CanonicalCondition {
EQ(Condition.EQ),
LT(Condition.LT),
BT(Condition.BT);
private final Condition condition;
CanonicalCondition(Condition condition) {
assert condition.isCanonical();
this.condition = condition;
}
public Condition asCondition() {
return condition;
}
public boolean foldCondition(Constant lt, Constant rt, ConstantReflectionProvider constantReflection, boolean unorderedIsTrue) {
return asCondition().foldCondition(lt, rt, constantReflection, unorderedIsTrue);
}
public boolean foldCondition(PrimitiveConstant lp, PrimitiveConstant rp, boolean unorderedIsTrue) {
return asCondition().foldCondition(lp, rp, unorderedIsTrue);
}
}

@ -115,6 +115,55 @@ public enum Condition {
throw new IllegalArgumentException(this.toString()); throw new IllegalArgumentException(this.toString());
} }
public static final class CanonicalizedCondition {
private final CanonicalCondition canonicalCondition;
private final boolean mirror;
private final boolean negate;
private CanonicalizedCondition(CanonicalCondition canonicalCondition, boolean mirror, boolean negate) {
this.canonicalCondition = canonicalCondition;
this.mirror = mirror;
this.negate = negate;
}
public CanonicalCondition getCanonicalCondition() {
return canonicalCondition;
}
public boolean mustMirror() {
return mirror;
}
public boolean mustNegate() {
return negate;
}
}
public CanonicalizedCondition canonicalize() {
CanonicalCondition canonicalCondition;
switch (this) {
case EQ:
case NE:
canonicalCondition = CanonicalCondition.EQ;
break;
case LT:
case LE:
case GT:
case GE:
canonicalCondition = CanonicalCondition.LT;
break;
case BT:
case BE:
case AT:
case AE:
canonicalCondition = CanonicalCondition.BT;
break;
default:
throw new IllegalArgumentException(this.toString());
}
return new CanonicalizedCondition(canonicalCondition, canonicalMirror(), canonicalNegate());
}
/** /**
* Given a condition and its negation, this method returns true for one of the two and false for * Given a condition and its negation, this method returns true for one of the two and false for
* the other one. This can be used to keep comparisons in a canonical form. * the other one. This can be used to keep comparisons in a canonical form.
@ -151,7 +200,7 @@ public enum Condition {
* Returns true if the condition needs to be mirrored to get to a canonical condition. The * Returns true if the condition needs to be mirrored to get to a canonical condition. The
* result of the mirroring operation might still need to be negated to achieve a canonical form. * result of the mirroring operation might still need to be negated to achieve a canonical form.
*/ */
public boolean canonicalMirror() { private boolean canonicalMirror() {
switch (this) { switch (this) {
case EQ: case EQ:
return false; return false;
@ -181,7 +230,7 @@ public enum Condition {
* Returns true if the condition needs to be negated to get to a canonical condition. The result * Returns true if the condition needs to be negated to get to a canonical condition. The result
* of the negation might still need to be mirrored to achieve a canonical form. * of the negation might still need to be mirrored to achieve a canonical form.
*/ */
public boolean canonicalNegate() { private boolean canonicalNegate() {
switch (this) { switch (this) {
case EQ: case EQ:
return false; return false;

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2017, 2017, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2018, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -20,32 +20,13 @@
* or visit www.oracle.com if you need additional information or have any * or visit www.oracle.com if you need additional information or have any
* questions. * questions.
*/ */
package org.graalvm.util; package org.graalvm.compiler.core.common.spi;
/** import jdk.vm.ci.meta.JavaKind;
* Unmodifiable memory efficient map data structure.
*/
public interface UnmodifiableEconomicMap<K, V> {
V get(K key); public interface ArrayOffsetProvider {
default V get(K key, V defaultValue) { int arrayBaseOffset(JavaKind elementKind);
V v = get(key);
if (v == null) {
return defaultValue;
}
return v;
}
boolean containsKey(K key); int arrayScalingFactor(JavaKind elementKind);
int size();
boolean isEmpty();
Iterable<V> getValues();
Iterable<K> getKeys();
UnmodifiableMapCursor<K, V> getEntries();
} }

@ -40,4 +40,5 @@ public interface CodeGenProviders {
ConstantReflectionProvider getConstantReflection(); ConstantReflectionProvider getConstantReflection();
ArrayOffsetProvider getArrayOffsetProvider();
} }

@ -28,6 +28,9 @@ import java.util.Arrays;
import java.util.Objects; import java.util.Objects;
import java.util.function.Function; import java.util.function.Function;
import jdk.vm.ci.meta.Constant;
import jdk.vm.ci.meta.JavaKind;
import org.graalvm.compiler.core.common.calc.FloatConvert; import org.graalvm.compiler.core.common.calc.FloatConvert;
import org.graalvm.compiler.core.common.type.ArithmeticOpTable.BinaryOp.Add; import org.graalvm.compiler.core.common.type.ArithmeticOpTable.BinaryOp.Add;
import org.graalvm.compiler.core.common.type.ArithmeticOpTable.BinaryOp.And; import org.graalvm.compiler.core.common.type.ArithmeticOpTable.BinaryOp.And;
@ -51,9 +54,6 @@ import org.graalvm.compiler.core.common.type.ArithmeticOpTable.UnaryOp.Not;
import org.graalvm.compiler.core.common.type.ArithmeticOpTable.UnaryOp.Sqrt; import org.graalvm.compiler.core.common.type.ArithmeticOpTable.UnaryOp.Sqrt;
import org.graalvm.util.CollectionsUtil; import org.graalvm.util.CollectionsUtil;
import jdk.vm.ci.meta.Constant;
import jdk.vm.ci.meta.JavaKind;
/** /**
* Information about arithmetic operations. * Information about arithmetic operations.
*/ */

@ -25,8 +25,8 @@ package org.graalvm.compiler.core.common.util;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.List; import java.util.List;
import org.graalvm.util.EconomicMap; import org.graalvm.collections.EconomicMap;
import org.graalvm.util.Equivalence; import org.graalvm.collections.Equivalence;
/** /**
* Creates an array of T objects order by the occurrence frequency of each object. The most * Creates an array of T objects order by the occurrence frequency of each object. The most

@ -58,6 +58,9 @@ import javax.tools.FileObject;
import javax.tools.JavaFileObject; import javax.tools.JavaFileObject;
import javax.tools.StandardLocation; import javax.tools.StandardLocation;
import org.graalvm.collections.EconomicMap;
import org.graalvm.collections.EconomicSet;
import org.graalvm.collections.Equivalence;
import org.graalvm.compiler.core.gen.NodeMatchRules; import org.graalvm.compiler.core.gen.NodeMatchRules;
import org.graalvm.compiler.core.match.ComplexMatchResult; import org.graalvm.compiler.core.match.ComplexMatchResult;
import org.graalvm.compiler.core.match.MatchRule; import org.graalvm.compiler.core.match.MatchRule;
@ -70,9 +73,6 @@ import org.graalvm.compiler.debug.GraalError;
import org.graalvm.compiler.graph.Position; import org.graalvm.compiler.graph.Position;
import org.graalvm.compiler.nodes.ValueNode; import org.graalvm.compiler.nodes.ValueNode;
import org.graalvm.compiler.serviceprovider.ServiceProvider; import org.graalvm.compiler.serviceprovider.ServiceProvider;
import org.graalvm.util.Equivalence;
import org.graalvm.util.EconomicMap;
import org.graalvm.util.EconomicSet;
/** /**
* Processes classes annotated with {@link MatchRule}. A {@link MatchStatementSet} service is * Processes classes annotated with {@link MatchRule}. A {@link MatchStatementSet} service is

@ -29,6 +29,7 @@ import static jdk.vm.ci.sparc.SPARCKind.WORD;
import static jdk.vm.ci.sparc.SPARCKind.XWORD; import static jdk.vm.ci.sparc.SPARCKind.XWORD;
import org.graalvm.compiler.core.common.LIRKind; import org.graalvm.compiler.core.common.LIRKind;
import org.graalvm.compiler.core.common.calc.CanonicalCondition;
import org.graalvm.compiler.core.common.calc.Condition; import org.graalvm.compiler.core.common.calc.Condition;
import org.graalvm.compiler.core.gen.NodeMatchRules; import org.graalvm.compiler.core.gen.NodeMatchRules;
import org.graalvm.compiler.core.match.ComplexMatchResult; import org.graalvm.compiler.core.match.ComplexMatchResult;
@ -147,7 +148,7 @@ public class SPARCNodeMatchRules extends NodeMatchRules {
@MatchRule("(If (IntegerEquals=compare value LogicCompareAndSwap=cas))") @MatchRule("(If (IntegerEquals=compare value LogicCompareAndSwap=cas))")
public ComplexMatchResult ifCompareLogicCas(IfNode root, CompareNode compare, ValueNode value, LogicCompareAndSwapNode cas) { public ComplexMatchResult ifCompareLogicCas(IfNode root, CompareNode compare, ValueNode value, LogicCompareAndSwapNode cas) {
JavaConstant constant = value.asJavaConstant(); JavaConstant constant = value.asJavaConstant();
assert compare.condition() == Condition.EQ; assert compare.condition() == CanonicalCondition.EQ;
if (constant != null && cas.usages().count() == 1) { if (constant != null && cas.usages().count() == 1) {
long constantValue = constant.asLong(); long constantValue = constant.asLong();
boolean successIsTrue; boolean successIsTrue;

@ -123,19 +123,19 @@ public class ConditionalEliminationTest11 extends ConditionalEliminationTestBase
public static int test6Snippet(int a) { public static int test6Snippet(int a) {
if ((a & 8) != 0) { if ((a & 8) != 0) {
GraalDirectives.deoptimizeAndInvalidate(); GraalDirectives.deoptimize();
} }
if ((a & 15) != 15) { if ((a & 15) != 15) {
GraalDirectives.deoptimizeAndInvalidate(); GraalDirectives.deoptimize();
} }
return 0; return 0;
} }
public static int reference6Snippet(int a) { public static int reference6Snippet(int a) {
if ((a & 8) != 0) { if ((a & 8) != 0) {
GraalDirectives.deoptimizeAndInvalidate(); GraalDirectives.deoptimize();
} }
GraalDirectives.deoptimizeAndInvalidate(); GraalDirectives.deoptimize();
return 0; return 0;
} }

@ -74,7 +74,6 @@ public class ConditionalEliminationTestBase extends GraalCompilerTest {
new IterativeConditionalEliminationPhase(canonicalizer, true).apply(graph, context); new IterativeConditionalEliminationPhase(canonicalizer, true).apply(graph, context);
canonicalizer.apply(graph, context); canonicalizer.apply(graph, context);
canonicalizer.apply(graph, context); canonicalizer.apply(graph, context);
new ConvertDeoptimizeToGuardPhase().apply(graph, context);
} catch (Throwable t) { } catch (Throwable t) {
debug.handle(t); debug.handle(t);
} }
@ -86,7 +85,6 @@ public class ConditionalEliminationTestBase extends GraalCompilerTest {
} }
canonicalizer.apply(referenceGraph, context); canonicalizer.apply(referenceGraph, context);
canonicalizer.apply(referenceGraph, context); canonicalizer.apply(referenceGraph, context);
new ConvertDeoptimizeToGuardPhase().apply(graph, context);
} catch (Throwable t) { } catch (Throwable t) {
debug.handle(t); debug.handle(t);
} }

@ -25,14 +25,12 @@ package org.graalvm.compiler.core.test;
import static org.graalvm.compiler.nodeinfo.NodeCycles.CYCLES_IGNORED; import static org.graalvm.compiler.nodeinfo.NodeCycles.CYCLES_IGNORED;
import static org.graalvm.compiler.nodeinfo.NodeSize.SIZE_IGNORED; import static org.graalvm.compiler.nodeinfo.NodeSize.SIZE_IGNORED;
import org.graalvm.compiler.nodes.NodeView;
import org.junit.Test;
import org.graalvm.compiler.api.directives.GraalDirectives; import org.graalvm.compiler.api.directives.GraalDirectives;
import org.graalvm.compiler.graph.NodeClass; import org.graalvm.compiler.graph.NodeClass;
import org.graalvm.compiler.loop.InductionVariable; import org.graalvm.compiler.loop.InductionVariable;
import org.graalvm.compiler.loop.LoopsData; import org.graalvm.compiler.loop.LoopsData;
import org.graalvm.compiler.nodeinfo.NodeInfo; import org.graalvm.compiler.nodeinfo.NodeInfo;
import org.graalvm.compiler.nodes.NodeView;
import org.graalvm.compiler.nodes.StructuredGraph; import org.graalvm.compiler.nodes.StructuredGraph;
import org.graalvm.compiler.nodes.ValueNode; import org.graalvm.compiler.nodes.ValueNode;
import org.graalvm.compiler.nodes.calc.FloatingNode; import org.graalvm.compiler.nodes.calc.FloatingNode;
@ -42,6 +40,7 @@ import org.graalvm.compiler.nodes.graphbuilderconf.InvocationPlugins;
import org.graalvm.compiler.nodes.graphbuilderconf.InvocationPlugins.Registration; import org.graalvm.compiler.nodes.graphbuilderconf.InvocationPlugins.Registration;
import org.graalvm.compiler.nodes.spi.LIRLowerable; import org.graalvm.compiler.nodes.spi.LIRLowerable;
import org.graalvm.compiler.nodes.spi.NodeLIRBuilderTool; import org.graalvm.compiler.nodes.spi.NodeLIRBuilderTool;
import org.junit.Test;
import jdk.vm.ci.meta.JavaKind; import jdk.vm.ci.meta.JavaKind;
import jdk.vm.ci.meta.ResolvedJavaMethod; import jdk.vm.ci.meta.ResolvedJavaMethod;
@ -117,6 +116,21 @@ public class CountedLoopTest extends GraalCompilerTest {
test("incrementSnippet", 0, 256, 3); test("incrementSnippet", 0, 256, 3);
} }
@Test
public void increment4() {
test("incrementSnippet", -10, Integer.MAX_VALUE, 1);
}
@Test
public void increment5() {
test("incrementSnippet", 256, 256, 1);
}
@Test
public void increment6() {
test("incrementSnippet", 257, 256, 1);
}
public static Result incrementEqSnippet(int start, int limit, int step) { public static Result incrementEqSnippet(int start, int limit, int step) {
int i; int i;
int inc = ((step - 1) & 0xFFFF) + 1; // make sure this value is always strictly positive int inc = ((step - 1) & 0xFFFF) + 1; // make sure this value is always strictly positive
@ -144,6 +158,21 @@ public class CountedLoopTest extends GraalCompilerTest {
test("incrementEqSnippet", 0, 256, 3); test("incrementEqSnippet", 0, 256, 3);
} }
@Test
public void incrementEq4() {
test("incrementEqSnippet", -10, 0, Integer.MAX_VALUE);
}
@Test
public void incrementEq5() {
test("incrementEqSnippet", 256, 256, 1);
}
@Test
public void incrementEq6() {
test("incrementEqSnippet", 257, 256, 1);
}
public static Result decrementSnippet(int start, int limit, int step) { public static Result decrementSnippet(int start, int limit, int step) {
int i; int i;
int dec = ((step - 1) & 0xFFFF) + 1; // make sure this value is always strictly positive int dec = ((step - 1) & 0xFFFF) + 1; // make sure this value is always strictly positive
@ -198,6 +227,11 @@ public class CountedLoopTest extends GraalCompilerTest {
test("decrementEqSnippet", 256, 0, 3); test("decrementEqSnippet", 256, 0, 3);
} }
@Test
public void decrementEq4() {
test("decrementEqSnippet", -10, 0, Integer.MAX_VALUE);
}
public static Result twoVariablesSnippet() { public static Result twoVariablesSnippet() {
Result ret = new Result(); Result ret = new Result();
int j = 0; int j = 0;

@ -27,10 +27,10 @@ import java.nio.file.DirectoryStream;
import java.nio.file.Files; import java.nio.file.Files;
import java.nio.file.Path; import java.nio.file.Path;
import org.graalvm.collections.EconomicMap;
import org.graalvm.compiler.debug.DebugOptions; import org.graalvm.compiler.debug.DebugOptions;
import org.graalvm.compiler.options.OptionKey; import org.graalvm.compiler.options.OptionKey;
import org.graalvm.compiler.options.OptionValues; import org.graalvm.compiler.options.OptionValues;
import org.graalvm.util.EconomicMap;
import org.junit.Test; import org.junit.Test;
/** /**

@ -561,7 +561,7 @@ public abstract class GraalCompilerTest extends GraalTest {
* @return a scheduled textual dump of {@code graph} . * @return a scheduled textual dump of {@code graph} .
*/ */
protected static String getScheduledGraphString(StructuredGraph graph) { protected static String getScheduledGraphString(StructuredGraph graph) {
SchedulePhase schedule = new SchedulePhase(SchedulingStrategy.EARLIEST); SchedulePhase schedule = new SchedulePhase(SchedulingStrategy.EARLIEST_WITH_GUARD_ORDER);
schedule.apply(graph); schedule.apply(graph);
ScheduleResult scheduleResult = graph.getLastSchedule(); ScheduleResult scheduleResult = graph.getLastSchedule();

@ -22,14 +22,14 @@
*/ */
package org.graalvm.compiler.core.test; package org.graalvm.compiler.core.test;
import org.graalvm.collections.EconomicMap;
import org.graalvm.compiler.debug.DebugContext; import org.graalvm.compiler.debug.DebugContext;
import org.graalvm.compiler.debug.DebugOptions;
import org.graalvm.compiler.debug.DebugContext.Scope; import org.graalvm.compiler.debug.DebugContext.Scope;
import org.graalvm.compiler.debug.DebugOptions;
import org.graalvm.compiler.nodes.StructuredGraph; import org.graalvm.compiler.nodes.StructuredGraph;
import org.graalvm.compiler.nodes.StructuredGraph.AllowAssumptions; import org.graalvm.compiler.nodes.StructuredGraph.AllowAssumptions;
import org.graalvm.compiler.options.OptionKey; import org.graalvm.compiler.options.OptionKey;
import org.graalvm.compiler.options.OptionValues; import org.graalvm.compiler.options.OptionValues;
import org.graalvm.util.EconomicMap;
import org.junit.Assert; import org.junit.Assert;
import org.junit.Test; import org.junit.Test;

Some files were not shown because too many files have changed in this diff Show More