Andy Herrick 2019-12-09 13:02:05 -05:00
commit ade72f4965
42 changed files with 890 additions and 559 deletions

@@ -45,6 +45,7 @@ CT_MODULESOURCEPATH := $(call GetModuleSrcPath)
CT_DATA_DESCRIPTION += $(TOPDIR)/make/data/symbols/symbols
COMPILECREATESYMBOLS_ADD_EXPORTS := \
--add-exports java.base/jdk.internal=java.compiler.interim,jdk.compiler.interim \
--add-exports jdk.compiler.interim/com.sun.tools.javac.api=ALL-UNNAMED \
--add-exports jdk.compiler.interim/com.sun.tools.javac.code=ALL-UNNAMED \
--add-exports jdk.compiler.interim/com.sun.tools.javac.util=ALL-UNNAMED \
@@ -58,6 +59,7 @@ $(eval $(call SetupJavaCompilation, COMPILE_CREATE_SYMBOLS, \
INCLUDES := build/tools/symbolgenerator com/sun/tools/classfile, \
BIN := $(BUILDTOOLS_OUTPUTDIR)/create_symbols, \
ADD_JAVAC_FLAGS := $(INTERIM_LANGTOOLS_ARGS) \
--patch-module java.base=$(BUILDTOOLS_OUTPUTDIR)/gensrc/java.base.interim \
$(COMPILECREATESYMBOLS_ADD_EXPORTS), \
))

@@ -7232,7 +7232,7 @@ void Assembler::vpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, in
}
void Assembler::evpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask, int vector_len) {
assert(VM_Version::supports_vpclmulqdq(), "Requires vector carryless multiplication support");
assert(VM_Version::supports_avx512_vpclmulqdq(), "Requires vector carryless multiplication support");
InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
attributes.set_is_evex_instruction();
int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);

@@ -374,22 +374,41 @@ void BarrierSetAssembler::c2i_entry_barrier(MacroAssembler* masm) {
__ cmpptr(rbx, 0); // rbx contains the incoming method for c2i adapters.
__ jcc(Assembler::equal, bad_call);
#ifdef _LP64
Register tmp1 = rscratch1;
Register tmp2 = rscratch2;
#else
Register tmp1 = rax;
Register tmp2 = rcx;
__ push(tmp1);
__ push(tmp2);
#endif // _LP64
// Pointer chase to the method holder to find out if the method is concurrently unloading.
Label method_live;
__ load_method_holder_cld(rscratch1, rbx);
__ load_method_holder_cld(tmp1, rbx);
// Is it a strong CLD?
__ movl(rscratch2, Address(rscratch1, ClassLoaderData::keep_alive_offset()));
__ cmpptr(rscratch2, 0);
// Is it a strong CLD?
__ cmpl(Address(tmp1, ClassLoaderData::keep_alive_offset()), 0);
__ jcc(Assembler::greater, method_live);
// Is it a weak but alive CLD?
__ movptr(rscratch1, Address(rscratch1, ClassLoaderData::holder_offset()));
__ resolve_weak_handle(rscratch1, rscratch2);
__ cmpptr(rscratch1, 0);
// Is it a weak but alive CLD?
__ movptr(tmp1, Address(tmp1, ClassLoaderData::holder_offset()));
__ resolve_weak_handle(tmp1, tmp2);
__ cmpptr(tmp1, 0);
__ jcc(Assembler::notEqual, method_live);
#ifndef _LP64
__ pop(tmp2);
__ pop(tmp1);
#endif
__ bind(bad_call);
__ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
__ bind(method_live);
#ifndef _LP64
__ pop(tmp2);
__ pop(tmp1);
#endif
}
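The barrier above boils down to a two-step liveness test on the method holder's ClassLoaderData: a strong CLD (keep-alive count > 0) is always live; otherwise the CLD's weak handle to its holder oop is resolved and tested. A self-contained C++ sketch of that logic, using simplified stand-in types (assumptions, not HotSpot's; the assembly reads the real fields via keep_alive_offset() and holder_offset()):

    // Stand-in for a VM weak handle; the GC nulls the referent when the
    // holder dies.
    struct WeakHandle { void* referent; };

    struct ClassLoaderData {
      int keep_alive;      // > 0 means the CLD is strongly reachable
      WeakHandle holder;   // weak reference to the class loader oop
    };

    // Stand-in for MacroAssembler::resolve_weak_handle(): yields the
    // referent, or null once it has been cleared.
    static void* resolve_weak_handle(const WeakHandle& h) { return h.referent; }

    // Mirrors the branch structure emitted above.
    bool method_holder_is_live(const ClassLoaderData* cld) {
      if (cld->keep_alive > 0) {
        return true;                                       // strong CLD
      }
      return resolve_weak_handle(cld->holder) != nullptr;  // weak but alive
    }

The 32-bit path has no free scratch registers, which is why the diff pushes and pops rax/rcx around the check on both the live and bad-call exits.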

@@ -395,52 +395,6 @@ void ShenandoahBarrierSetAssembler::load_reference_barrier_native(MacroAssembler
__ block_comment("load_reference_barrier_native { ");
}
#ifdef _LP64
void ShenandoahBarrierSetAssembler::c2i_entry_barrier(MacroAssembler* masm) {
// Use default version
BarrierSetAssembler::c2i_entry_barrier(masm);
}
#else
void ShenandoahBarrierSetAssembler::c2i_entry_barrier(MacroAssembler* masm) {
BarrierSetNMethod* bs = BarrierSet::barrier_set()->barrier_set_nmethod();
if (bs == NULL) {
return;
}
Label bad_call;
__ cmpptr(rbx, 0); // rbx contains the incoming method for c2i adapters.
__ jcc(Assembler::equal, bad_call);
Register tmp1 = rax;
Register tmp2 = rcx;
__ push(tmp1);
__ push(tmp2);
// Pointer chase to the method holder to find out if the method is concurrently unloading.
Label method_live;
__ load_method_holder_cld(tmp1, rbx);
// Is it a strong CLD?
__ cmpl(Address(tmp1, ClassLoaderData::keep_alive_offset()), 0);
__ jcc(Assembler::greater, method_live);
// Is it a weak but alive CLD?
__ movptr(tmp1, Address(tmp1, ClassLoaderData::holder_offset()));
__ resolve_weak_handle(tmp1, tmp2);
__ cmpptr(tmp1, 0);
__ jcc(Assembler::notEqual, method_live);
__ pop(tmp2);
__ pop(tmp1);
__ bind(bad_call);
__ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
__ bind(method_live);
__ pop(tmp2);
__ pop(tmp1);
}
#endif
void ShenandoahBarrierSetAssembler::storeval_barrier(MacroAssembler* masm, Register dst, Register tmp) {
if (ShenandoahStoreValEnqueueBarrier) {
storeval_barrier_impl(masm, dst, tmp);

@@ -86,8 +86,6 @@ public:
Address dst, Register val, Register tmp1, Register tmp2);
virtual void try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
Register obj, Register tmp, Label& slowpath);
virtual void c2i_entry_barrier(MacroAssembler* masm);
virtual void barrier_stubs_init();
};

@@ -8945,34 +8945,6 @@ void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, Regi
shrl(len, 4);
jcc(Assembler::zero, L_tail_restore);
// Fold total 512 bits of polynomial on each iteration
if (VM_Version::supports_vpclmulqdq()) {
Label Parallel_loop, L_No_Parallel;
cmpl(len, 8);
jccb(Assembler::less, L_No_Parallel);
movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 32));
evmovdquq(xmm1, Address(buf, 0), Assembler::AVX_512bit);
movdl(xmm5, crc);
evpxorq(xmm1, xmm1, xmm5, Assembler::AVX_512bit);
addptr(buf, 64);
subl(len, 7);
evshufi64x2(xmm0, xmm0, xmm0, 0x00, Assembler::AVX_512bit); //propagate the mask from 128 bits to 512 bits
BIND(Parallel_loop);
fold_128bit_crc32_avx512(xmm1, xmm0, xmm5, buf, 0);
addptr(buf, 64);
subl(len, 4);
jcc(Assembler::greater, Parallel_loop);
vextracti64x2(xmm2, xmm1, 0x01);
vextracti64x2(xmm3, xmm1, 0x02);
vextracti64x2(xmm4, xmm1, 0x03);
jmp(L_fold_512b);
BIND(L_No_Parallel);
}
// Fold crc into first bytes of vector
movdqa(xmm1, Address(buf, 0));
movdl(rax, xmm1);
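For reference, the removed fast path computes plain CRC-32 (reflected polynomial 0xEDB88320, the function behind java.util.zip.CRC32); it folded 512 bits per iteration with VPCLMULQDQ instead of looping bit by bit. A minimal scalar version of the same function, for comparison:

    #include <cstddef>
    #include <cstdint>

    uint32_t crc32_scalar(uint32_t crc, const uint8_t* buf, size_t len) {
      crc = ~crc;                      // CRC-32 pre-inversion
      for (size_t i = 0; i < len; i++) {
        crc ^= buf[i];
        for (int b = 0; b < 8; b++) {  // one bit at a time
          crc = (crc >> 1) ^ (0xEDB88320u & (0u - (crc & 1u)));
        }
      }
      return ~crc;                     // post-inversion
    }

The vector code reaches the same result by carryless-multiplying the running remainder with precomputed constants (the crc_by128_masks table loaded above), consuming 64 input bytes per loop iteration.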

@@ -691,7 +691,7 @@ void VM_Version::get_processor_features() {
_features &= ~CPU_AVX512BW;
_features &= ~CPU_AVX512VL;
_features &= ~CPU_AVX512_VPOPCNTDQ;
_features &= ~CPU_VPCLMULQDQ;
_features &= ~CPU_AVX512_VPCLMULQDQ;
_features &= ~CPU_VAES;
}

@@ -245,7 +245,7 @@ class VM_Version : public Abstract_VM_Version {
: 1,
gfni : 1,
vaes : 1,
vpclmulqdq : 1,
avx512_vpclmulqdq : 1,
avx512_vnni : 1,
avx512_bitalg : 1,
: 1,
@@ -338,7 +338,7 @@ protected:
#define CPU_FMA ((uint64_t)UCONST64(0x800000000)) // FMA instructions
#define CPU_VZEROUPPER ((uint64_t)UCONST64(0x1000000000)) // Vzeroupper instruction
#define CPU_AVX512_VPOPCNTDQ ((uint64_t)UCONST64(0x2000000000)) // Vector popcount
#define CPU_VPCLMULQDQ ((uint64_t)UCONST64(0x4000000000)) //Vector carryless multiplication
#define CPU_AVX512_VPCLMULQDQ ((uint64_t)UCONST64(0x4000000000)) //Vector carryless multiplication
#define CPU_VAES ((uint64_t)UCONST64(0x8000000000)) // Vector AES instructions
#define CPU_VNNI ((uint64_t)UCONST64(0x10000000000)) // Vector Neural Network Instructions
@@ -561,8 +561,8 @@ enum Extended_Family {
result |= CPU_AVX512VL;
if (_cpuid_info.sef_cpuid7_ecx.bits.avx512_vpopcntdq != 0)
result |= CPU_AVX512_VPOPCNTDQ;
if (_cpuid_info.sef_cpuid7_ecx.bits.vpclmulqdq != 0)
result |= CPU_VPCLMULQDQ;
if (_cpuid_info.sef_cpuid7_ecx.bits.avx512_vpclmulqdq != 0)
result |= CPU_AVX512_VPCLMULQDQ;
if (_cpuid_info.sef_cpuid7_ecx.bits.vaes != 0)
result |= CPU_VAES;
if (_cpuid_info.sef_cpuid7_ecx.bits.avx512_vnni != 0)
@@ -855,7 +855,7 @@ public:
static bool supports_fma() { return (_features & CPU_FMA) != 0 && supports_avx(); }
static bool supports_vzeroupper() { return (_features & CPU_VZEROUPPER) != 0; }
static bool supports_vpopcntdq() { return (_features & CPU_AVX512_VPOPCNTDQ) != 0; }
static bool supports_vpclmulqdq() { return (_features & CPU_VPCLMULQDQ) != 0; }
static bool supports_avx512_vpclmulqdq() { return (_features & CPU_AVX512_VPCLMULQDQ) != 0; }
static bool supports_vaes() { return (_features & CPU_VAES) != 0; }
static bool supports_vnni() { return (_features & CPU_VNNI) != 0; }
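The renamed feature corresponds to CPUID leaf 7, subleaf 0, ECX bit 10 (the bit Intel names simply VPCLMULQDQ); the AVX512_ prefix in the new HotSpot names reflects that the JIT only emits the EVEX-encoded form, as the assert in evpclmulqdq above shows. A standalone detection sketch, assuming GCC/Clang on x86 (the function name is illustrative, not HotSpot's):

    #include <cpuid.h>
    #include <cstdio>

    bool cpu_has_vpclmulqdq() {
      unsigned eax, ebx, ecx, edx;
      if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx)) {
        return false;                  // leaf 7 not supported
      }
      return (ecx & (1u << 10)) != 0;  // CPUID.(EAX=7,ECX=0):ECX[10]
    }

    int main() {
      std::printf("VPCLMULQDQ: %s\n", cpu_has_vpclmulqdq() ? "yes" : "no");
    }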

@@ -597,12 +597,15 @@ Node* G1BarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) c
Node* adr = access.addr().node();
Node* obj = access.base();
bool anonymous = (decorators & C2_UNSAFE_ACCESS) != 0;
bool mismatched = (decorators & C2_MISMATCHED) != 0;
bool unknown = (decorators & ON_UNKNOWN_OOP_REF) != 0;
bool in_heap = (decorators & IN_HEAP) != 0;
bool in_native = (decorators & IN_NATIVE) != 0;
bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
bool is_unordered = (decorators & MO_UNORDERED) != 0;
bool need_cpu_mem_bar = !is_unordered || mismatched || !in_heap;
bool is_mixed = !in_heap && !in_native;
bool need_cpu_mem_bar = !is_unordered || mismatched || is_mixed;
Node* top = Compile::current()->top();
Node* offset = adr->is_AddP() ? adr->in(AddPNode::Offset) : top;

@@ -98,6 +98,7 @@
#include "runtime/threadSMR.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/align.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/stack.inline.hpp"
@@ -2366,6 +2367,10 @@ void G1CollectedHeap::collection_set_iterate_all(HeapRegionClosure* cl) {
_collection_set.iterate(cl);
}
void G1CollectedHeap::collection_set_par_iterate_all(HeapRegionClosure* cl, HeapRegionClaimer* hr_claimer, uint worker_id) {
_collection_set.par_iterate(cl, hr_claimer, worker_id, workers()->active_workers());
}
void G1CollectedHeap::collection_set_iterate_increment_from(HeapRegionClosure *cl, HeapRegionClaimer* hr_claimer, uint worker_id) {
_collection_set.iterate_incremental_part_from(cl, hr_claimer, worker_id, workers()->active_workers());
}
@@ -4079,7 +4084,6 @@ void G1CollectedHeap::free_region(HeapRegion* hr,
assert(!hr->is_free(), "the region should not be free");
assert(!hr->is_empty(), "the region should not be empty");
assert(_hrm->is_available(hr->hrm_index()), "region should be committed");
assert(free_list != NULL, "pre-condition");
if (G1VerifyBitmaps) {
MemRegion mr(hr->bottom(), hr->end());
@@ -4094,7 +4098,9 @@ void G1CollectedHeap::free_region(HeapRegion* hr,
}
hr->hr_clear(skip_remset, true /* clear_space */, locked /* locked */);
_policy->remset_tracker()->update_at_free(hr);
free_list->add_ordered(hr);
if (free_list != NULL) {
free_list->add_ordered(hr);
}
}
void G1CollectedHeap::free_humongous_region(HeapRegion* hr,
@@ -4128,281 +4134,282 @@ void G1CollectedHeap::decrement_summary_bytes(size_t bytes) {
}
class G1FreeCollectionSetTask : public AbstractGangTask {
private:
// Closure applied to all regions in the collection set to do work that needs to
// be done serially in a single thread.
class G1SerialFreeCollectionSetClosure : public HeapRegionClosure {
private:
G1EvacuationInfo* _evacuation_info;
const size_t* _surviving_young_words;
// Bytes used in successfully evacuated regions before the evacuation.
size_t _before_used_bytes;
// Bytes used in unsuccessfully evacuated regions before the evacuation
size_t _after_used_bytes;
size_t _bytes_allocated_in_old_since_last_gc;
size_t _failure_used_words;
size_t _failure_waste_words;
FreeRegionList _local_free_list;
// Helper class to keep statistics for the collection set freeing
class FreeCSetStats {
size_t _before_used_bytes; // Usage in regions successfully evacuated
size_t _after_used_bytes; // Usage in regions failing evacuation
size_t _bytes_allocated_in_old_since_last_gc; // Size of young regions turned into old
size_t _failure_used_words; // Live size in failed regions
size_t _failure_waste_words; // Wasted size in failed regions
size_t _rs_length; // Remembered set size
uint _regions_freed; // Number of regions freed
public:
G1SerialFreeCollectionSetClosure(G1EvacuationInfo* evacuation_info, const size_t* surviving_young_words) :
HeapRegionClosure(),
_evacuation_info(evacuation_info),
_surviving_young_words(surviving_young_words),
_before_used_bytes(0),
_after_used_bytes(0),
_bytes_allocated_in_old_since_last_gc(0),
_failure_used_words(0),
_failure_waste_words(0),
_local_free_list("Local Region List for CSet Freeing") {
FreeCSetStats() :
_before_used_bytes(0),
_after_used_bytes(0),
_bytes_allocated_in_old_since_last_gc(0),
_failure_used_words(0),
_failure_waste_words(0),
_rs_length(0),
_regions_freed(0) { }
void merge_stats(FreeCSetStats* other) {
assert(other != NULL, "invariant");
_before_used_bytes += other->_before_used_bytes;
_after_used_bytes += other->_after_used_bytes;
_bytes_allocated_in_old_since_last_gc += other->_bytes_allocated_in_old_since_last_gc;
_failure_used_words += other->_failure_used_words;
_failure_waste_words += other->_failure_waste_words;
_rs_length += other->_rs_length;
_regions_freed += other->_regions_freed;
}
virtual bool do_heap_region(HeapRegion* r) {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
void report(G1CollectedHeap* g1h, G1EvacuationInfo* evacuation_info) {
evacuation_info->set_regions_freed(_regions_freed);
evacuation_info->increment_collectionset_used_after(_after_used_bytes);
assert(r->in_collection_set(), "Region %u should be in collection set.", r->hrm_index());
g1h->clear_region_attr(r);
g1h->decrement_summary_bytes(_before_used_bytes);
g1h->alloc_buffer_stats(G1HeapRegionAttr::Old)->add_failure_used_and_waste(_failure_used_words, _failure_waste_words);
G1Policy *policy = g1h->policy();
policy->add_bytes_allocated_in_old_since_last_gc(_bytes_allocated_in_old_since_last_gc);
policy->record_rs_length(_rs_length);
policy->cset_regions_freed();
}
void account_failed_region(HeapRegion* r) {
size_t used_words = r->marked_bytes() / HeapWordSize;
_failure_used_words += used_words;
_failure_waste_words += HeapRegion::GrainWords - used_words;
_after_used_bytes += r->used();
// When moving a young gen region to old gen, we "allocate" that whole
// region there. This is in addition to any already evacuated objects.
// Notify the policy about that. Old gen regions do not cause an
// additional allocation: both the objects still in the region and the
// ones already moved are accounted for elsewhere.
if (r->is_young()) {
_bytes_allocated_in_old_since_last_gc += HeapRegion::GrainBytes;
}
}
void account_evacuated_region(HeapRegion* r) {
_before_used_bytes += r->used();
_regions_freed += 1;
}
void account_rs_length(HeapRegion* r) {
_rs_length += r->rem_set()->occupied();
}
};
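FreeCSetStats is the usual per-worker accumulator pattern: each worker updates its own instance without synchronization during the parallel phase, and the instances are merged serially afterwards. The same shape in a self-contained sketch (generic, not G1 code):

    #include <cstddef>
    #include <vector>

    struct FreeStats {
      std::size_t before_used_bytes = 0;
      unsigned regions_freed = 0;

      void merge(const FreeStats& other) {
        before_used_bytes += other.before_used_bytes;
        regions_freed += other.regions_freed;
      }
    };

    // Serial merge after the parallel phase; only one thread runs this.
    FreeStats merge_all(const std::vector<FreeStats>& per_worker) {
      FreeStats total;
      for (const FreeStats& s : per_worker) {
        total.merge(s);
      }
      return total;
    }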
// Closure applied to all regions in the collection set.
class FreeCSetClosure : public HeapRegionClosure {
// Helper to send JFR events for regions.
class JFREventForRegion {
EventGCPhaseParallel _event;
public:
JFREventForRegion(HeapRegion* region, uint worker_id) : _event() {
_event.set_gcId(GCId::current());
_event.set_gcWorkerId(worker_id);
if (region->is_young()) {
_event.set_name(G1GCPhaseTimes::phase_name(G1GCPhaseTimes::YoungFreeCSet));
} else {
_event.set_name(G1GCPhaseTimes::phase_name(G1GCPhaseTimes::NonYoungFreeCSet));
}
}
~JFREventForRegion() {
_event.commit();
}
};
// Helper to do timing for region work.
class TimerForRegion {
Tickspan& _time;
Ticks _start_time;
public:
TimerForRegion(Tickspan& time) : _time(time), _start_time(Ticks::now()) { }
~TimerForRegion() {
_time += Ticks::now() - _start_time;
}
};
// FreeCSetClosure members
G1CollectedHeap* _g1h;
const size_t* _surviving_young_words;
uint _worker_id;
Tickspan _young_time;
Tickspan _non_young_time;
FreeCSetStats* _stats;
void assert_in_cset(HeapRegion* r) {
assert(r->young_index_in_cset() != 0 &&
(uint)r->young_index_in_cset() <= _g1h->collection_set()->young_region_length(),
"Young index %u is wrong for region %u of type %s with %u young regions",
r->young_index_in_cset(), r->hrm_index(), r->get_type_str(), _g1h->collection_set()->young_region_length());
}
void handle_evacuated_region(HeapRegion* r) {
assert(!r->is_empty(), "Region %u is an empty region in the collection set.", r->hrm_index());
stats()->account_evacuated_region(r);
// Free the region and its remembered set.
_g1h->free_region(r, NULL, false /* skip_remset */, true /* skip_hot_card_cache */, true /* locked */);
}
void handle_failed_region(HeapRegion* r) {
// Do some allocation statistics accounting. Regions that failed evacuation
// are always made old, so there is no need to update anything in the young
// gen statistics, but we need to update old gen statistics.
stats()->account_failed_region(r);
// Update the region state due to the failed evacuation.
r->handle_evacuation_failure();
// Add region to old set, need to hold lock.
MutexLocker x(OldSets_lock, Mutex::_no_safepoint_check_flag);
_g1h->old_set_add(r);
}
Tickspan& timer_for_region(HeapRegion* r) {
return r->is_young() ? _young_time : _non_young_time;
}
FreeCSetStats* stats() {
return _stats;
}
public:
FreeCSetClosure(const size_t* surviving_young_words,
uint worker_id,
FreeCSetStats* stats) :
HeapRegionClosure(),
_g1h(G1CollectedHeap::heap()),
_surviving_young_words(surviving_young_words),
_worker_id(worker_id),
_young_time(),
_non_young_time(),
_stats(stats) { }
virtual bool do_heap_region(HeapRegion* r) {
assert(r->in_collection_set(), "Invariant: %u missing from CSet", r->hrm_index());
JFREventForRegion event(r, _worker_id);
TimerForRegion timer(timer_for_region(r));
_g1h->clear_region_attr(r);
stats()->account_rs_length(r);
if (r->is_young()) {
assert(r->young_index_in_cset() != 0 && (uint)r->young_index_in_cset() <= g1h->collection_set()->young_region_length(),
"Young index %u is wrong for region %u of type %s with %u young regions",
r->young_index_in_cset(),
r->hrm_index(),
r->get_type_str(),
g1h->collection_set()->young_region_length());
size_t words_survived = _surviving_young_words[r->young_index_in_cset()];
r->record_surv_words_in_group(words_survived);
}
if (!r->evacuation_failed()) {
assert(!r->is_empty(), "Region %u is an empty region in the collection set.", r->hrm_index());
_before_used_bytes += r->used();
g1h->free_region(r,
&_local_free_list,
true, /* skip_remset */
true, /* skip_hot_card_cache */
true /* locked */);
assert_in_cset(r);
r->record_surv_words_in_group(_surviving_young_words[r->young_index_in_cset()]);
} else {
r->uninstall_surv_rate_group();
r->clear_young_index_in_cset();
r->set_evacuation_failed(false);
// When moving a young gen region to old gen, we "allocate" that whole region
// there. This is in addition to any already evacuated objects. Notify the
// policy about that.
// Old gen regions do not cause an additional allocation: both the objects
// still in the region and the ones already moved are accounted for elsewhere.
if (r->is_young()) {
_bytes_allocated_in_old_since_last_gc += HeapRegion::GrainBytes;
}
// The region is now considered to be old.
r->set_old();
// Do some allocation statistics accounting. Regions that failed evacuation
// are always made old, so there is no need to update anything in the young
// gen statistics, but we need to update old gen statistics.
size_t used_words = r->marked_bytes() / HeapWordSize;
_failure_used_words += used_words;
_failure_waste_words += HeapRegion::GrainWords - used_words;
g1h->old_set_add(r);
_after_used_bytes += r->used();
_g1h->hot_card_cache()->reset_card_counts(r);
}
if (r->evacuation_failed()) {
handle_failed_region(r);
} else {
handle_evacuated_region(r);
}
assert(!_g1h->is_on_master_free_list(r), "sanity");
return false;
}
void complete_work() {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
_evacuation_info->set_regions_freed(_local_free_list.length());
_evacuation_info->increment_collectionset_used_after(_after_used_bytes);
g1h->prepend_to_freelist(&_local_free_list);
g1h->decrement_summary_bytes(_before_used_bytes);
G1Policy* policy = g1h->policy();
policy->add_bytes_allocated_in_old_since_last_gc(_bytes_allocated_in_old_since_last_gc);
g1h->alloc_buffer_stats(G1HeapRegionAttr::Old)->add_failure_used_and_waste(_failure_used_words, _failure_waste_words);
void report_timing(Tickspan parallel_time) {
G1GCPhaseTimes* pt = _g1h->phase_times();
pt->record_time_secs(G1GCPhaseTimes::ParFreeCSet, _worker_id, parallel_time.seconds());
if (_young_time.value() > 0) {
pt->record_time_secs(G1GCPhaseTimes::YoungFreeCSet, _worker_id, _young_time.seconds());
}
if (_non_young_time.value() > 0) {
pt->record_time_secs(G1GCPhaseTimes::NonYoungFreeCSet, _worker_id, _non_young_time.seconds());
}
}
};
G1CollectionSet* _collection_set;
G1SerialFreeCollectionSetClosure _cl;
const size_t* _surviving_young_words;
// G1FreeCollectionSetTask members
G1CollectedHeap* _g1h;
G1EvacuationInfo* _evacuation_info;
FreeCSetStats* _worker_stats;
HeapRegionClaimer _claimer;
const size_t* _surviving_young_words;
uint _active_workers;
size_t _rs_length;
volatile jint _serial_work_claim;
struct WorkItem {
uint region_idx;
bool is_young;
bool evacuation_failed;
WorkItem(HeapRegion* r) {
region_idx = r->hrm_index();
is_young = r->is_young();
evacuation_failed = r->evacuation_failed();
}
};
volatile size_t _parallel_work_claim;
size_t _num_work_items;
WorkItem* _work_items;
void do_serial_work() {
// Need to grab the lock to be allowed to modify the old region list.
MutexLocker x(OldSets_lock, Mutex::_no_safepoint_check_flag);
_collection_set->iterate(&_cl);
FreeCSetStats* worker_stats(uint worker) {
return &_worker_stats[worker];
}
void do_parallel_work_for_region(uint region_idx, bool is_young, bool evacuation_failed) {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
HeapRegion* r = g1h->region_at(region_idx);
assert(!g1h->is_on_master_free_list(r), "sanity");
Atomic::add(&_rs_length, r->rem_set()->occupied());
if (!is_young) {
g1h->hot_card_cache()->reset_card_counts(r);
}
if (!evacuation_failed) {
r->rem_set()->clear_locked();
void report_statistics() {
// Merge the accounting
FreeCSetStats total_stats;
for (uint worker = 0; worker < _active_workers; worker++) {
total_stats.merge_stats(worker_stats(worker));
}
total_stats.report(_g1h, _evacuation_info);
}
class G1PrepareFreeCollectionSetClosure : public HeapRegionClosure {
private:
size_t _cur_idx;
WorkItem* _work_items;
public:
G1PrepareFreeCollectionSetClosure(WorkItem* work_items) : HeapRegionClosure(), _cur_idx(0), _work_items(work_items) { }
virtual bool do_heap_region(HeapRegion* r) {
_work_items[_cur_idx++] = WorkItem(r);
return false;
}
};
void prepare_work() {
G1PrepareFreeCollectionSetClosure cl(_work_items);
_collection_set->iterate(&cl);
}
void complete_work() {
_cl.complete_work();
G1Policy* policy = G1CollectedHeap::heap()->policy();
policy->record_rs_length(_rs_length);
policy->cset_regions_freed();
}
public:
G1FreeCollectionSetTask(G1CollectionSet* collection_set, G1EvacuationInfo* evacuation_info, const size_t* surviving_young_words) :
AbstractGangTask("G1 Free Collection Set"),
_collection_set(collection_set),
_cl(evacuation_info, surviving_young_words),
_surviving_young_words(surviving_young_words),
_rs_length(0),
_serial_work_claim(0),
_parallel_work_claim(0),
_num_work_items(collection_set->region_length()),
_work_items(NEW_C_HEAP_ARRAY(WorkItem, _num_work_items, mtGC)) {
prepare_work();
G1FreeCollectionSetTask(G1EvacuationInfo* evacuation_info, const size_t* surviving_young_words, uint active_workers) :
AbstractGangTask("G1 Free Collection Set"),
_g1h(G1CollectedHeap::heap()),
_evacuation_info(evacuation_info),
_worker_stats(NEW_C_HEAP_ARRAY(FreeCSetStats, active_workers, mtGC)),
_claimer(active_workers),
_surviving_young_words(surviving_young_words),
_active_workers(active_workers) {
for (uint worker = 0; worker < active_workers; worker++) {
::new (&_worker_stats[worker]) FreeCSetStats();
}
}
~G1FreeCollectionSetTask() {
complete_work();
FREE_C_HEAP_ARRAY(WorkItem, _work_items);
Ticks serial_time = Ticks::now();
report_statistics();
for (uint worker = 0; worker < _active_workers; worker++) {
_worker_stats[worker].~FreeCSetStats();
}
FREE_C_HEAP_ARRAY(FreeCSetStats, _worker_stats);
_g1h->phase_times()->record_serial_free_cset_time_ms((Ticks::now() - serial_time).seconds() * 1000.0);
}
// Chunk size for work distribution. The chosen value has been determined experimentally
// to be a good tradeoff between overhead and achievable parallelism.
static uint chunk_size() { return 32; }
virtual void work(uint worker_id) {
G1GCPhaseTimes* timer = G1CollectedHeap::heap()->phase_times();
EventGCPhaseParallel event;
Ticks start = Ticks::now();
FreeCSetClosure cl(_surviving_young_words, worker_id, worker_stats(worker_id));
_g1h->collection_set_par_iterate_all(&cl, &_claimer, worker_id);
// Claim serial work.
if (_serial_work_claim == 0) {
jint value = Atomic::add(&_serial_work_claim, 1) - 1;
if (value == 0) {
double serial_time = os::elapsedTime();
do_serial_work();
timer->record_serial_free_cset_time_ms((os::elapsedTime() - serial_time) * 1000.0);
}
}
// Start parallel work.
double young_time = 0.0;
bool has_young_time = false;
double non_young_time = 0.0;
bool has_non_young_time = false;
while (true) {
size_t end = Atomic::add(&_parallel_work_claim, chunk_size());
size_t cur = end - chunk_size();
if (cur >= _num_work_items) {
break;
}
EventGCPhaseParallel event;
double start_time = os::elapsedTime();
end = MIN2(end, _num_work_items);
for (; cur < end; cur++) {
bool is_young = _work_items[cur].is_young;
do_parallel_work_for_region(_work_items[cur].region_idx, is_young, _work_items[cur].evacuation_failed);
double end_time = os::elapsedTime();
double time_taken = end_time - start_time;
if (is_young) {
young_time += time_taken;
has_young_time = true;
event.commit(GCId::current(), worker_id, G1GCPhaseTimes::phase_name(G1GCPhaseTimes::YoungFreeCSet));
} else {
non_young_time += time_taken;
has_non_young_time = true;
event.commit(GCId::current(), worker_id, G1GCPhaseTimes::phase_name(G1GCPhaseTimes::NonYoungFreeCSet));
}
start_time = end_time;
}
}
if (has_young_time) {
timer->record_time_secs(G1GCPhaseTimes::YoungFreeCSet, worker_id, young_time);
}
if (has_non_young_time) {
timer->record_time_secs(G1GCPhaseTimes::NonYoungFreeCSet, worker_id, non_young_time);
}
// Report the total parallel time along with some more detailed metrics.
cl.report_timing(Ticks::now() - start);
event.commit(GCId::current(), worker_id, G1GCPhaseTimes::phase_name(G1GCPhaseTimes::ParFreeCSet));
}
};
void G1CollectedHeap::free_collection_set(G1CollectionSet* collection_set, G1EvacuationInfo& evacuation_info, const size_t* surviving_young_words) {
_eden.clear();
double free_cset_start_time = os::elapsedTime();
// Freeing the collection set is split into two tasks: the first
// frees the collection set and records which regions are free,
// and the second rebuilds the free list. This proved to be
// more efficient than inserting one sorted list into another.
Ticks free_cset_start_time = Ticks::now();
{
uint const num_regions = _collection_set.region_length();
uint const num_chunks = MAX2(num_regions / G1FreeCollectionSetTask::chunk_size(), 1U);
uint const num_workers = MIN2(workers()->active_workers(), num_chunks);
uint const num_cs_regions = _collection_set.region_length();
uint const num_workers = clamp(num_cs_regions, 1u, workers()->active_workers());
G1FreeCollectionSetTask cl(&evacuation_info, surviving_young_words, num_workers);
G1FreeCollectionSetTask cl(collection_set, &evacuation_info, surviving_young_words);
log_debug(gc, ergo)("Running %s using %u workers for collection set length %u",
cl.name(), num_workers, num_regions);
log_debug(gc, ergo)("Running %s using %u workers for collection set length %u (%u)",
cl.name(), num_workers, num_cs_regions, num_regions());
workers()->run_task(&cl, num_workers);
}
phase_times()->record_total_free_cset_time_ms((os::elapsedTime() - free_cset_start_time) * 1000.0);
Ticks free_cset_end_time = Ticks::now();
phase_times()->record_total_free_cset_time_ms((free_cset_end_time - free_cset_start_time).seconds() * 1000.0);
// Now rebuild the free region list.
hrm()->rebuild_free_list(workers());
phase_times()->record_total_rebuild_freelist_time_ms((Ticks::now() - free_cset_end_time).seconds() * 1000.0);
collection_set->clear();
}

@@ -1201,6 +1201,11 @@ public:
void heap_region_par_iterate_from_start(HeapRegionClosure* cl,
HeapRegionClaimer* hrclaimer) const;
// Iterate over all regions in the collection set in parallel.
void collection_set_par_iterate_all(HeapRegionClosure* cl,
HeapRegionClaimer* hr_claimer,
uint worker_id);
// Iterate over all regions currently in the current collection set.
void collection_set_iterate_all(HeapRegionClosure* blk);

@@ -201,6 +201,13 @@ void G1CollectionSet::iterate(HeapRegionClosure* cl) const {
}
}
void G1CollectionSet::par_iterate(HeapRegionClosure* cl,
HeapRegionClaimer* hr_claimer,
uint worker_id,
uint total_workers) const {
iterate_part_from(cl, hr_claimer, 0, cur_length(), worker_id, total_workers);
}
void G1CollectionSet::iterate_optional(HeapRegionClosure* cl) const {
assert_at_safepoint();
@@ -215,18 +222,25 @@ void G1CollectionSet::iterate_incremental_part_from(HeapRegionClosure* cl,
HeapRegionClaimer* hr_claimer,
uint worker_id,
uint total_workers) const {
assert_at_safepoint();
iterate_part_from(cl, hr_claimer, _inc_part_start, increment_length(), worker_id, total_workers);
}
size_t len = increment_length();
if (len == 0) {
void G1CollectionSet::iterate_part_from(HeapRegionClosure* cl,
HeapRegionClaimer* hr_claimer,
size_t offset,
size_t length,
uint worker_id,
uint total_workers) const {
assert_at_safepoint();
if (length == 0) {
return;
}
size_t start_pos = (worker_id * len) / total_workers;
size_t start_pos = (worker_id * length) / total_workers;
size_t cur_pos = start_pos;
do {
uint region_idx = _collection_set_regions[cur_pos + _inc_part_start];
uint region_idx = _collection_set_regions[cur_pos + offset];
if (hr_claimer == NULL || hr_claimer->claim_region(region_idx)) {
HeapRegion* r = _g1h->region_at(region_idx);
bool result = cl->do_heap_region(r);
@@ -234,7 +248,7 @@ void G1CollectionSet::iterate_incremental_part_from(HeapRegionClosure* cl,
}
cur_pos++;
if (cur_pos == len) {
if (cur_pos == length) {
cur_pos = 0;
}
} while (cur_pos != start_pos);
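The scheme above staggers each worker's starting position across the part and wraps around, so workers begin in disjoint areas and contention on the claimer stays low while every element is still covered. A self-contained sketch of the same iteration, assuming one atomic claim flag per element (names are illustrative):

    #include <atomic>
    #include <cstddef>

    template <typename Visit>
    void staggered_par_iterate(std::atomic<bool>* claimed, size_t length,
                               unsigned worker_id, unsigned total_workers,
                               Visit visit) {
      if (length == 0) {
        return;
      }
      size_t start = (size_t(worker_id) * length) / total_workers;
      size_t cur = start;
      do {
        bool expected = false;
        if (claimed[cur].compare_exchange_strong(expected, true)) {
          visit(cur);      // this worker claimed element cur
        }
        if (++cur == length) {
          cur = 0;         // wrap around to the beginning
        }
      } while (cur != start);
    }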

@@ -254,6 +254,16 @@ class G1CollectionSet {
// Select the old regions of the initial collection set and determine how many optional
// regions we might be able to evacuate in this pause.
void finalize_old_part(double time_remaining_ms);
// Iterate the part of the collection set given by the offset and length applying the given
// HeapRegionClosure. The worker_id will determine where in the part to start the iteration
// to allow for more efficient parallel iteration.
void iterate_part_from(HeapRegionClosure* cl,
HeapRegionClaimer* hr_claimer,
size_t offset,
size_t length,
uint worker_id,
uint total_workers) const;
public:
G1CollectionSet(G1CollectedHeap* g1h, G1Policy* policy);
~G1CollectionSet();
@@ -306,6 +316,10 @@ public:
// Iterate over the entire collection set (all increments calculated so far), applying
// the given HeapRegionClosure on all of them.
void iterate(HeapRegionClosure* cl) const;
void par_iterate(HeapRegionClosure* cl,
HeapRegionClaimer* hr_claimer,
uint worker_id,
uint total_workers) const;
void iterate_optional(HeapRegionClosure* cl) const;

@@ -131,8 +131,10 @@ G1GCPhaseTimes::G1GCPhaseTimes(STWGCTimer* gc_timer, uint max_gc_threads) :
_gc_par_phases[RedirtyCards] = new WorkerDataArray<double>("Parallel Redirty (ms):", max_gc_threads);
_gc_par_phases[RedirtyCards]->create_thread_work_items("Redirtied Cards:");
_gc_par_phases[ParFreeCSet] = new WorkerDataArray<double>("Parallel Free Collection Set (ms):", max_gc_threads);
_gc_par_phases[YoungFreeCSet] = new WorkerDataArray<double>("Young Free Collection Set (ms):", max_gc_threads);
_gc_par_phases[NonYoungFreeCSet] = new WorkerDataArray<double>("Non-Young Free Collection Set (ms):", max_gc_threads);
_gc_par_phases[RebuildFreeList] = new WorkerDataArray<double>("Parallel Rebuild Free List (ms):", max_gc_threads);
reset();
}
@@ -167,6 +169,8 @@ void G1GCPhaseTimes::reset() {
_recorded_start_new_cset_time_ms = 0.0;
_recorded_total_free_cset_time_ms = 0.0;
_recorded_serial_free_cset_time_ms = 0.0;
_recorded_total_rebuild_freelist_time_ms = 0.0;
_recorded_serial_rebuild_freelist_time_ms = 0.0;
_cur_fast_reclaim_humongous_time_ms = 0.0;
_cur_region_register_time = 0.0;
_cur_fast_reclaim_humongous_total = 0;
@@ -328,11 +332,11 @@ void G1GCPhaseTimes::debug_phase(WorkerDataArray<double>* phase, uint extra_inde
}
}
void G1GCPhaseTimes::trace_phase(WorkerDataArray<double>* phase, bool print_sum) const {
void G1GCPhaseTimes::trace_phase(WorkerDataArray<double>* phase, bool print_sum, uint extra_indent) const {
LogTarget(Trace, gc, phases) lt;
if (lt.is_enabled()) {
LogStream ls(lt);
log_phase(phase, 3, &ls, print_sum);
log_phase(phase, 3 + extra_indent, &ls, print_sum);
}
}
@@ -456,6 +460,7 @@ double G1GCPhaseTimes::print_post_evacuate_collection_set() const {
_cur_strong_code_root_purge_time_ms +
_recorded_redirty_logged_cards_time_ms +
_recorded_total_free_cset_time_ms +
_recorded_total_rebuild_freelist_time_ms +
_cur_fast_reclaim_humongous_time_ms +
_cur_expand_heap_time_ms +
_cur_string_deduplication_time_ms;
@@ -492,9 +497,14 @@ double G1GCPhaseTimes::print_post_evacuate_collection_set() const {
#endif
debug_time("Free Collection Set", _recorded_total_free_cset_time_ms);
trace_time("Free Collection Set Serial", _recorded_serial_free_cset_time_ms);
trace_phase(_gc_par_phases[YoungFreeCSet]);
trace_phase(_gc_par_phases[NonYoungFreeCSet]);
trace_time("Serial Free Collection Set", _recorded_serial_free_cset_time_ms);
trace_phase(_gc_par_phases[ParFreeCSet]);
trace_phase(_gc_par_phases[YoungFreeCSet], true, 1);
trace_phase(_gc_par_phases[NonYoungFreeCSet], true, 1);
debug_time("Rebuild Free List", _recorded_total_rebuild_freelist_time_ms);
trace_time("Serial Rebuild Free List ", _recorded_serial_rebuild_freelist_time_ms);
trace_phase(_gc_par_phases[RebuildFreeList]);
if (G1EagerReclaimHumongousObjects) {
debug_time("Humongous Reclaim", _cur_fast_reclaim_humongous_time_ms);
@@ -566,8 +576,10 @@ const char* G1GCPhaseTimes::phase_name(GCParPhases phase) {
"StringDedupQueueFixup",
"StringDedupTableFixup",
"RedirtyCards",
"ParFreeCSet",
"YoungFreeCSet",
"NonYoungFreeCSet",
"RebuildFreeList",
"MergePSS"
//GCParPhasesSentinel only used to tell end of enum
};

@@ -76,8 +76,10 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
StringDedupQueueFixup,
StringDedupTableFixup,
RedirtyCards,
ParFreeCSet,
YoungFreeCSet,
NonYoungFreeCSet,
RebuildFreeList,
MergePSS,
GCParPhasesSentinel
};
@@ -171,6 +173,10 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
double _recorded_serial_free_cset_time_ms;
double _recorded_total_rebuild_freelist_time_ms;
double _recorded_serial_rebuild_freelist_time_ms;
double _cur_region_register_time;
double _cur_fast_reclaim_humongous_time_ms;
@@ -195,7 +201,7 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
void log_phase(WorkerDataArray<double>* phase, uint indent, outputStream* out, bool print_sum) const;
void debug_serial_phase(WorkerDataArray<double>* phase, uint extra_indent = 0) const;
void debug_phase(WorkerDataArray<double>* phase, uint extra_indent = 0) const;
void trace_phase(WorkerDataArray<double>* phase, bool print_sum = true) const;
void trace_phase(WorkerDataArray<double>* phase, bool print_sum = true, uint extra_indent = 0) const;
void info_time(const char* name, double value) const;
void debug_time(const char* name, double value) const;
@@ -318,6 +324,14 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
_recorded_serial_free_cset_time_ms = time_ms;
}
void record_total_rebuild_freelist_time_ms(double time_ms) {
_recorded_total_rebuild_freelist_time_ms = time_ms;
}
void record_serial_rebuild_freelist_time_ms(double time_ms) {
_recorded_serial_rebuild_freelist_time_ms = time_ms;
}
void record_register_regions(double time_ms, size_t total, size_t candidates) {
_cur_region_register_time = time_ms;
_cur_fast_reclaim_humongous_total = total;
@@ -401,6 +415,10 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
return _recorded_total_free_cset_time_ms;
}
double total_rebuild_freelist_time_ms() {
return _recorded_total_rebuild_freelist_time_ms;
}
double non_young_cset_choice_time_ms() {
return _recorded_non_young_cset_choice_time_ms;
}

@@ -587,7 +587,7 @@ double G1Policy::other_time_ms(double pause_time_ms) const {
}
double G1Policy::constant_other_time_ms(double pause_time_ms) const {
return other_time_ms(pause_time_ms) - phase_times()->total_free_cset_time_ms();
return other_time_ms(pause_time_ms) - phase_times()->total_free_cset_time_ms() - phase_times()->total_rebuild_freelist_time_ms();
}
bool G1Policy::about_to_start_mixed_phase() const {

@@ -110,6 +110,19 @@ void HeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_hea
}
}
void HeapRegion::handle_evacuation_failure() {
uninstall_surv_rate_group();
clear_young_index_in_cset();
set_evacuation_failed(false);
set_old();
}
void HeapRegion::unlink_from_list() {
set_next(NULL);
set_prev(NULL);
set_containing_set(NULL);
}
void HeapRegion::hr_clear(bool keep_remset, bool clear_space, bool locked) {
assert(_humongous_start_region == NULL,
"we should have already filtered out humongous regions");

@@ -464,14 +464,16 @@ public:
void set_prev(HeapRegion* prev) { _prev = prev; }
HeapRegion* prev() { return _prev; }
void unlink_from_list();
// Every region added to a set is tagged with a reference to that
// set. This is used for doing consistency checking to make sure that
// the contents of a set are as they should be and it's only
// available in non-product builds.
#ifdef ASSERT
void set_containing_set(HeapRegionSetBase* containing_set) {
assert((containing_set == NULL && _containing_set != NULL) ||
(containing_set != NULL && _containing_set == NULL),
assert((containing_set != NULL && _containing_set == NULL) ||
containing_set == NULL,
"containing_set: " PTR_FORMAT " "
"_containing_set: " PTR_FORMAT,
p2i(containing_set), p2i(_containing_set));
@@ -559,6 +561,9 @@ public:
return (HeapWord *) obj >= next_top_at_mark_start();
}
// Update the region state after a failed evacuation.
void handle_evacuation_failure();
// Iterate over the objects overlapping the given memory region, applying cl
// to all references in the region. This is a helper for
// G1RemSet::refine_card*, and is tightly coupled with them.

@@ -614,3 +614,80 @@ bool HeapRegionClaimer::claim_region(uint region_index) {
uint old_val = Atomic::cmpxchg(&_claims[region_index], Unclaimed, Claimed);
return old_val == Unclaimed;
}
class G1RebuildFreeListTask : public AbstractGangTask {
HeapRegionManager* _hrm;
FreeRegionList* _worker_freelists;
uint _worker_chunk_size;
uint _num_workers;
public:
G1RebuildFreeListTask(HeapRegionManager* hrm, uint num_workers) :
AbstractGangTask("G1 Rebuild Free List Task"),
_hrm(hrm),
_worker_freelists(NEW_C_HEAP_ARRAY(FreeRegionList, num_workers, mtGC)),
_worker_chunk_size((_hrm->max_length() + num_workers - 1) / num_workers),
_num_workers(num_workers) {
for (uint worker = 0; worker < _num_workers; worker++) {
::new (&_worker_freelists[worker]) FreeRegionList("Appendable Worker Free List");
}
}
~G1RebuildFreeListTask() {
for (uint worker = 0; worker < _num_workers; worker++) {
_worker_freelists[worker].~FreeRegionList();
}
FREE_C_HEAP_ARRAY(FreeRegionList, _worker_freelists);
}
FreeRegionList* worker_freelist(uint worker) {
return &_worker_freelists[worker];
}
// Each worker creates a free list for a chunk of the heap. The chunks won't
// be overlapping so we don't need to do any claiming.
void work(uint worker_id) {
Ticks start_time = Ticks::now();
EventGCPhaseParallel event;
uint start = worker_id * _worker_chunk_size;
uint end = MIN2(start + _worker_chunk_size, _hrm->max_length());
// If start is outside the heap, this worker has nothing to do.
if (start > end) {
return;
}
FreeRegionList *free_list = worker_freelist(worker_id);
for (uint i = start; i < end; i++) {
HeapRegion *region = _hrm->at_or_null(i);
if (region != NULL && region->is_free()) {
// Clear the old links so the region can be added to the new free list.
region->unlink_from_list();
free_list->add_to_tail(region);
}
}
event.commit(GCId::current(), worker_id, G1GCPhaseTimes::phase_name(G1GCPhaseTimes::RebuildFreeList));
G1CollectedHeap::heap()->phase_times()->record_time_secs(G1GCPhaseTimes::RebuildFreeList, worker_id, (Ticks::now() - start_time).seconds());
}
};
void HeapRegionManager::rebuild_free_list(WorkGang* workers) {
// Abandon current free list to allow a rebuild.
_free_list.abandon();
uint const num_workers = clamp(max_length(), 1u, workers->active_workers());
G1RebuildFreeListTask task(this, num_workers);
log_debug(gc, ergo)("Running %s using %u workers for rebuilding free list of %u (%u) regions",
task.name(), num_workers, num_free_regions(), max_length());
workers->run_task(&task, num_workers);
// Link the partial free lists together.
Ticks serial_time = Ticks::now();
for (uint worker = 0; worker < num_workers; worker++) {
_free_list.append_ordered(task.worker_freelist(worker));
}
G1CollectedHeap::heap()->phase_times()->record_serial_rebuild_freelist_time_ms((Ticks::now() - serial_time).seconds() * 1000.0);
}
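The rebuild partitions the region index space into disjoint per-worker chunks (hence no claiming), has each worker collect its free regions into a sorted partial list, and concatenates the partial lists serially in worker order, which keeps the final list sorted by index. The same shape as a self-contained sketch over plain vectors:

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    std::vector<unsigned> rebuild_free_list(const std::vector<bool>& is_free,
                                            unsigned num_workers) {
      const std::size_t n = is_free.size();
      const std::size_t chunk = (n + num_workers - 1) / num_workers;
      std::vector<std::vector<unsigned>> partial(num_workers);

      // Parallel phase (shown serially here): chunks are disjoint.
      for (unsigned w = 0; w < num_workers; w++) {
        const std::size_t start = w * chunk;
        const std::size_t end = std::min(start + chunk, n);
        for (std::size_t i = start; i < end; i++) {
          if (is_free[i]) {
            partial[w].push_back(static_cast<unsigned>(i));
          }
        }
      }

      // Serial phase: append in worker order; the result stays sorted.
      std::vector<unsigned> free_list;
      for (unsigned w = 0; w < num_workers; w++) {
        free_list.insert(free_list.end(), partial[w].begin(), partial[w].end());
      }
      return free_list;
    }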

@@ -172,6 +172,9 @@ public:
// Insert the given region into the free region list.
inline void insert_into_free_list(HeapRegion* hr);
// Rebuild the free region list from scratch.
void rebuild_free_list(WorkGang* workers);
// Insert the given region list into the global free region list.
void insert_list_into_free_list(FreeRegionList* list) {
_free_list.add_ordered(list);

@@ -90,6 +90,12 @@ void FreeRegionList::set_unrealistically_long_length(uint len) {
_unrealistically_long_length = len;
}
void FreeRegionList::abandon() {
check_mt_safety();
clear();
verify_optional();
}
void FreeRegionList::remove_all() {
check_mt_safety();
verify_optional();
@@ -112,10 +118,9 @@ void FreeRegionList::remove_all() {
verify_optional();
}
void FreeRegionList::add_ordered(FreeRegionList* from_list) {
void FreeRegionList::add_list_common_start(FreeRegionList* from_list) {
check_mt_safety();
from_list->check_mt_safety();
verify_optional();
from_list->verify_optional();
@@ -138,6 +143,47 @@ void FreeRegionList::add_ordered(FreeRegionList* from_list) {
hr->set_containing_set(this);
}
#endif // ASSERT
}
void FreeRegionList::add_list_common_end(FreeRegionList* from_list) {
_length += from_list->length();
from_list->clear();
verify_optional();
from_list->verify_optional();
}
void FreeRegionList::append_ordered(FreeRegionList* from_list) {
add_list_common_start(from_list);
if (from_list->is_empty()) {
return;
}
if (is_empty()) {
// Make from_list the current list.
assert_free_region_list(length() == 0 && _tail == NULL, "invariant");
_head = from_list->_head;
_tail = from_list->_tail;
} else {
// Add the from_list to the end of the current list.
assert(_tail->hrm_index() < from_list->_head->hrm_index(), "Should be sorted %u < %u",
_tail->hrm_index(), from_list->_head->hrm_index());
_tail->set_next(from_list->_head);
from_list->_head->set_prev(_tail);
_tail = from_list->_tail;
}
add_list_common_end(from_list);
}
void FreeRegionList::add_ordered(FreeRegionList* from_list) {
add_list_common_start(from_list);
if (from_list->is_empty()) {
return;
}
if (is_empty()) {
assert_free_region_list(length() == 0 && _tail == NULL, "invariant");
@@ -178,11 +224,7 @@ void FreeRegionList::add_ordered(FreeRegionList* from_list) {
}
}
_length += from_list->length();
from_list->clear();
verify_optional();
from_list->verify_optional();
add_list_common_end(from_list);
}
void FreeRegionList::remove_starting_at(HeapRegion* first, uint num_regions) {

@@ -180,6 +180,10 @@ private:
inline void increase_length(uint node_index);
inline void decrease_length(uint node_index);
// Common checks for adding a list.
void add_list_common_start(FreeRegionList* from_list);
void add_list_common_end(FreeRegionList* from_list);
protected:
// See the comment for HeapRegionSetBase::clear()
virtual void clear();
@@ -202,6 +206,8 @@ public:
// Assumes that the list is ordered and will preserve that order. The order
// is determined by hrm_index.
inline void add_ordered(HeapRegion* hr);
// Same restrictions as above, but adds the region last in the list.
inline void add_to_tail(HeapRegion* region_to_add);
// Removes from head or tail based on the given argument.
HeapRegion* remove_region(bool from_head);
@@ -212,10 +218,15 @@ public:
// Merge two ordered lists. The result is also ordered. The order is
// determined by hrm_index.
void add_ordered(FreeRegionList* from_list);
void append_ordered(FreeRegionList* from_list);
// It empties the list by removing all regions from it.
void remove_all();
// Abandon current free list. Requires that all regions in the current list
// are taken care of separately, to allow a rebuild.
void abandon();
// Remove all (contiguous) regions from first to first + num_regions -1 from
// this list.
// Num_regions must be > 1.

@@ -50,6 +50,26 @@ inline void HeapRegionSetBase::remove(HeapRegion* hr) {
_length--;
}
inline void FreeRegionList::add_to_tail(HeapRegion* region_to_add) {
assert_free_region_list((length() == 0 && _head == NULL && _tail == NULL && _last == NULL) ||
(length() > 0 && _head != NULL && _tail != NULL && _tail->hrm_index() < region_to_add->hrm_index()),
"invariant");
// add() will verify the region and check mt safety.
add(region_to_add);
if (_head != NULL) {
// Link into list, next is already NULL, no need to set.
region_to_add->set_prev(_tail);
_tail->set_next(region_to_add);
_tail = region_to_add;
} else {
// Empty list, this region is now the list.
_head = region_to_add;
_tail = region_to_add;
}
increase_length(region_to_add->node_index());
}
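Stripped of the HeapRegion machinery, add_to_tail is a plain doubly-linked tail append; the assert enforces the caller-side invariant that the new region's hrm_index exceeds the current tail's, so the list stays sorted. A minimal sketch of the pointer manipulation:

    struct Node {
      Node* prev = nullptr;
      Node* next = nullptr;
    };

    struct List {
      Node* head = nullptr;
      Node* tail = nullptr;

      void add_to_tail(Node* n) {
        if (head != nullptr) {
          n->prev = tail;      // link in; n->next is already null
          tail->next = n;
          tail = n;
        } else {
          head = tail = n;     // empty list: n becomes the whole list
        }
      }
    };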
inline void FreeRegionList::add_ordered(HeapRegion* hr) {
assert_free_region_list((length() == 0 && _head == NULL && _tail == NULL && _last == NULL) ||
(length() > 0 && _head != NULL && _tail != NULL),

@@ -43,13 +43,16 @@ void* C2ParseAccess::barrier_set_state() const {
PhaseGVN& C2ParseAccess::gvn() const { return _kit->gvn(); }
bool C2Access::needs_cpu_membar() const {
bool mismatched = (_decorators & C2_MISMATCHED) != 0;
bool mismatched = (_decorators & C2_MISMATCHED) != 0;
bool is_unordered = (_decorators & MO_UNORDERED) != 0;
bool anonymous = (_decorators & C2_UNSAFE_ACCESS) != 0;
bool in_heap = (_decorators & IN_HEAP) != 0;
bool is_write = (_decorators & C2_WRITE_ACCESS) != 0;
bool is_read = (_decorators & C2_READ_ACCESS) != 0;
bool anonymous = (_decorators & C2_UNSAFE_ACCESS) != 0;
bool in_heap = (_decorators & IN_HEAP) != 0;
bool in_native = (_decorators & IN_NATIVE) != 0;
bool is_mixed = !in_heap && !in_native;
bool is_write = (_decorators & C2_WRITE_ACCESS) != 0;
bool is_read = (_decorators & C2_READ_ACCESS) != 0;
bool is_atomic = is_read && is_write;
if (is_atomic) {
@@ -63,9 +66,11 @@ bool C2Access::needs_cpu_membar() const {
// the barriers get omitted and the unsafe reference begins to "pollute"
// the alias analysis of the rest of the graph, either Compile::can_alias
// or Compile::must_alias will throw a diagnostic assert.)
if (!in_heap || !is_unordered || (mismatched && !_addr.type()->isa_aryptr())) {
if (is_mixed || !is_unordered || (mismatched && !_addr.type()->isa_aryptr())) {
return true;
}
} else {
assert(!is_mixed, "not unsafe");
}
return false;
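The new is_mixed flag captures accesses that carry neither IN_HEAP nor IN_NATIVE: the base may turn out to be a heap oop or a raw native pointer, so C2 must stay conservative and keep the CPU membar. A minimal sketch of the classification, using the DecoratorSet bit values from the decorator hunk below (IN_HEAP = 1 << 19, IN_NATIVE = 1 << 20):

    #include <cstdint>

    typedef uint64_t DecoratorSet;
    const DecoratorSet IN_HEAP   = uint64_t(1) << 19;
    const DecoratorSet IN_NATIVE = uint64_t(1) << 20;

    // Neither location decorator set: the access might touch heap or
    // native memory, so callers treat it conservatively.
    bool is_mixed_access(DecoratorSet decorators) {
      bool in_heap   = (decorators & IN_HEAP) != 0;
      bool in_native = (decorators & IN_NATIVE) != 0;
      return !in_heap && !in_native;
    }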
@@ -80,7 +85,7 @@ Node* BarrierSetC2::store_at_resolved(C2Access& access, C2AccessValue& val) cons
bool requires_atomic_access = (decorators & MO_UNORDERED) == 0;
bool in_native = (decorators & IN_NATIVE) != 0;
assert(!in_native, "not supported yet");
assert(!in_native || (unsafe && !access.is_oop()), "not supported yet");
MemNode::MemOrd mo = access.mem_node_mo();

@@ -176,11 +176,11 @@ const DecoratorSet ON_DECORATOR_MASK = ON_STRONG_OOP_REF | ON_WEAK_OOP_REF |
ON_PHANTOM_OOP_REF | ON_UNKNOWN_OOP_REF;
// === Access Location ===
// Accesses can take place in, e.g. the heap, old or young generation and different native roots.
// Accesses can take place in, e.g. the heap, old or young generation, different native roots, or native memory off the heap.
// The location is important to the GC as it may imply different actions. The following decorators are used:
// * IN_HEAP: The access is performed in the heap. Many barriers such as card marking will
// be omitted if this decorator is not set.
// * IN_NATIVE: The access is performed in an off-heap data structure pointing into the Java heap.
// * IN_NATIVE: The access is performed in an off-heap data structure.
const DecoratorSet IN_HEAP = UCONST64(1) << 19;
const DecoratorSet IN_NATIVE = UCONST64(1) << 20;
const DecoratorSet IN_DECORATOR_MASK = IN_HEAP | IN_NATIVE;

@@ -1595,27 +1595,34 @@ static int linear_search(const Array<Method*>* methods,
bool InstanceKlass::_disable_method_binary_search = false;
int InstanceKlass::quick_search(const Array<Method*>* methods, const Symbol* name) {
NOINLINE int linear_search(const Array<Method*>* methods, const Symbol* name) {
int len = methods->length();
int l = 0;
int h = len - 1;
while (l <= h) {
Method* m = methods->at(l);
if (m->name() == name) {
return l;
}
l++;
}
return -1;
}
inline int InstanceKlass::quick_search(const Array<Method*>* methods, const Symbol* name) {
if (_disable_method_binary_search) {
assert(DynamicDumpSharedSpaces, "must be");
// At the final stage of dynamic dumping, the methods array may not be sorted
// by ascending addresses of their names, so we can't use binary search anymore.
// However, methods with the same name are still laid out consecutively inside the
// methods array, so let's look for the first one that matches.
assert(DynamicDumpSharedSpaces, "must be");
while (l <= h) {
Method* m = methods->at(l);
if (m->name() == name) {
return l;
}
l ++;
}
return -1;
return linear_search(methods, name);
}
int len = methods->length();
int l = 0;
int h = len - 1;
// methods are sorted by ascending addresses of their names, so do binary search
while (l <= h) {
int mid = (l + h) >> 1;
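The refactored quick_search keeps binary search for the common case and falls back to the extracted NOINLINE linear_search only while dynamic CDS dumping may have left the methods array unsorted. The generic shape of that strategy, in a self-contained sketch:

    // Binary search over a sorted key array, with a linear fallback for
    // the rare case where sortedness cannot be relied on (illustrative).
    int find(const int* keys, int len, int key, bool sorted) {
      if (!sorted) {
        for (int i = 0; i < len; i++) {   // linear fallback
          if (keys[i] == key) {
            return i;
          }
        }
        return -1;
      }
      int l = 0;
      int h = len - 1;
      while (l <= h) {
        int mid = (l + h) >> 1;           // same midpoint computation as above
        if (keys[mid] == key) {
          return mid;
        }
        if (keys[mid] < key) {
          l = mid + 1;
        } else {
          h = mid - 1;
        }
      }
      return -1;
    }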

@@ -579,7 +579,7 @@ public:
bool find_field_from_offset(int offset, bool is_static, fieldDescriptor* fd) const;
private:
static int quick_search(const Array<Method*>* methods, const Symbol* name);
inline static int quick_search(const Array<Method*>* methods, const Symbol* name);
public:
static void disable_method_binary_search() {

@@ -1087,7 +1087,7 @@ Node* LibraryCallKit::generate_current_thread(Node* &tls_output) {
const Type* thread_type = TypeOopPtr::make_from_klass(thread_klass)->cast_to_ptr_type(TypePtr::NotNull);
Node* thread = _gvn.transform(new ThreadLocalNode());
Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::threadObj_offset()));
Node* threadObj = make_load(NULL, p, thread_type, T_OBJECT, MemNode::unordered);
Node* threadObj = _gvn.transform(LoadNode::make(_gvn, NULL, immutable_memory(), p, p->bottom_type()->is_ptr(), thread_type, T_OBJECT, MemNode::unordered));
tls_output = thread;
return threadObj;
}
@@ -2447,10 +2447,14 @@ bool LibraryCallKit::inline_unsafe_access(bool is_store, const BasicType type, c
offset = ConvL2X(offset);
adr = make_unsafe_address(base, offset, is_store ? ACCESS_WRITE : ACCESS_READ, type, kind == Relaxed);
if (_gvn.type(base)->isa_ptr() != TypePtr::NULL_PTR) {
heap_base_oop = base;
} else if (type == T_OBJECT) {
return false; // off-heap oop accesses are not supported
if (_gvn.type(base)->isa_ptr() == TypePtr::NULL_PTR) {
if (type != T_OBJECT) {
decorators |= IN_NATIVE; // off-heap primitive access
} else {
return false; // off-heap oop accesses are not supported
}
} else {
heap_base_oop = base; // on-heap or mixed access
}
// Can base be NULL? Otherwise, always on-heap access.

@@ -404,22 +404,8 @@ abstract class AbstractPlainDatagramSocketImpl extends DatagramSocketImpl
ExtendedSocketOptions.getInstance();
private static final Set<SocketOption<?>> datagramSocketOptions = datagramSocketOptions();
private static final Set<SocketOption<?>> multicastSocketOptions = multicastSocketOptions();
private static Set<SocketOption<?>> datagramSocketOptions() {
HashSet<SocketOption<?>> options = new HashSet<>();
options.add(StandardSocketOptions.SO_SNDBUF);
options.add(StandardSocketOptions.SO_RCVBUF);
options.add(StandardSocketOptions.SO_REUSEADDR);
options.add(StandardSocketOptions.SO_BROADCAST);
options.add(StandardSocketOptions.IP_TOS);
if (isReusePortAvailable())
options.add(StandardSocketOptions.SO_REUSEPORT);
options.addAll(ExtendedSocketOptions.datagramSocketOptions());
return Collections.unmodifiableSet(options);
}
private static Set<SocketOption<?>> multicastSocketOptions() {
HashSet<SocketOption<?>> options = new HashSet<>();
options.add(StandardSocketOptions.SO_SNDBUF);
options.add(StandardSocketOptions.SO_RCVBUF);
@@ -437,9 +423,6 @@ abstract class AbstractPlainDatagramSocketImpl extends DatagramSocketImpl
@Override
protected Set<SocketOption<?>> supportedOptions() {
if (isMulticast)
return multicastSocketOptions;
else
return datagramSocketOptions;
}

@@ -29,17 +29,15 @@ import java.io.IOException;
import java.util.Collections;
import java.util.Enumeration;
import java.util.Set;
import java.net.PortUnreachableException;
/**
* The multicast datagram socket class is useful for sending
* and receiving IP multicast packets. A MulticastSocket is
* and receiving IP multicast packets. A MulticastSocket is
* a (UDP) DatagramSocket, with additional capabilities for
* joining "groups" of other multicast hosts on the internet.
* <P>
* A multicast group is specified by a class D IP address
* and by a standard UDP port number. Class D IP addresses
* are in the range <CODE>224.0.0.0</CODE> to <CODE>239.255.255.255</CODE>,
* are in the range {@code 224.0.0.0} to {@code 239.255.255.255},
* inclusive. The address 224.0.0.0 is reserved and should not be used.
* <P>
* One would join a multicast group by first creating a MulticastSocket
@@ -50,9 +48,12 @@ import java.net.PortUnreachableException;
* // join a Multicast group and send the group salutations
* ...
* String msg = "Hello";
* InetAddress group = InetAddress.getByName("228.5.6.7");
* InetAddress mcastaddr = InetAddress.getByName("228.5.6.7");
* InetSocketAddress group = new InetSocketAddress(mcastaddr, port);
* NetworkInterface netIf = NetworkInterface.getByName("bge0");
* MulticastSocket s = new MulticastSocket(6789);
* s.joinGroup(group);
*
* s.joinGroup(group, netIf);
* byte[] msgBytes = msg.getBytes(StandardCharsets.UTF_8);
* DatagramPacket hi = new DatagramPacket(msgBytes, msgBytes.length,
* group, 6789);
@@ -63,25 +64,24 @@ import java.net.PortUnreachableException;
* s.receive(recv);
* ...
* // OK, I'm done talking - leave the group...
* s.leaveGroup(group);
* s.leaveGroup(group, netIf);
* </PRE>
*
* When one sends a message to a multicast group, <B>all</B> subscribing
* recipients to that host and port receive the message (within the
* time-to-live range of the packet, see below). The socket needn't
* time-to-live range of the packet, see below). The socket needn't
* be a member of the multicast group to send messages to it.
* <P>
* When a socket subscribes to a multicast group/port, it receives
* datagrams sent by other hosts to the group/port, as do all other
* members of the group and port. A socket relinquishes membership
* in a group by the leaveGroup(InetAddress addr) method. <B>
* Multiple MulticastSocket's</B> may subscribe to a multicast group
* in a group by the leaveGroup(SocketAddress mcastaddr, NetworkInterface netIf)
* method.
* <B>Multiple MulticastSockets</B> may subscribe to a multicast group
* and port concurrently, and they will all receive group datagrams.
* <P>
* Currently applets are not allowed to use multicast sockets.
*
* @author Pavani Diwanji
* @since 1.1
* @since 1.1
*/
public class MulticastSocket extends DatagramSocket {
@@ -287,20 +287,21 @@ public class MulticastSocket extends DatagramSocket {
* {@code setInterface} or {@code setNetworkInterface}.
*
* <p>If there is a security manager, this method first
* calls its {@code checkMulticast} method
* with the {@code mcastaddr} argument
* as its argument.
* calls its {@code checkMulticast} method with the
* {@code mcastaddr} argument as its argument.
*
* @param mcastaddr is the multicast address to join
*
* @throws IOException if there is an error joining, or when the address
* is not a multicast address, or the platform does not support
* multicasting
* @throws SecurityException if a security manager exists and its
* {@code checkMulticast} method doesn't allow the join.
*
* @see SecurityManager#checkMulticast(InetAddress)
* @param mcastaddr is the multicast address to join
* @throws IOException if there is an error joining,
* or when the address is not a multicast address,
* or the platform does not support multicasting
* @throws SecurityException if a security manager exists and its
* {@code checkMulticast} method doesn't allow the join.
* @deprecated This method does not accept the network interface on
* which to join the multicast group. Use
* {@link #joinGroup(SocketAddress, NetworkInterface)} instead.
* @see SecurityManager#checkMulticast(InetAddress)
*/
@Deprecated(since="14")
public void joinGroup(InetAddress mcastaddr) throws IOException {
if (isClosed()) {
throw new SocketException("Socket is closed");
@@ -334,18 +335,20 @@ public class MulticastSocket extends DatagramSocket {
* {@code setInterface} or {@code setNetworkInterface}.
*
* <p>If there is a security manager, this method first
* calls its {@code checkMulticast} method
* with the {@code mcastaddr} argument
* as its argument.
* calls its {@code checkMulticast} method with the
* {@code mcastaddr} argument as its argument.
*
* @param mcastaddr is the multicast address to leave
* @throws IOException if there is an error leaving
* or when the address is not a multicast address.
* @throws SecurityException if a security manager exists and its
* {@code checkMulticast} method doesn't allow the operation.
*
* @see SecurityManager#checkMulticast(InetAddress)
* @param mcastaddr is the multicast address to leave
* @throws IOException if there is an error leaving
* or when the address is not a multicast address.
* @throws SecurityException if a security manager exists and its
* {@code checkMulticast} method doesn't allow the operation.
* @deprecated This method does not accept the network interface on which
* to leave the multicast group. Use
* {@link #leaveGroup(SocketAddress, NetworkInterface)} instead.
* @see SecurityManager#checkMulticast(InetAddress)
*/
@Deprecated(since="14")
public void leaveGroup(InetAddress mcastaddr) throws IOException {
if (isClosed()) {
throw new SocketException("Socket is closed");
@ -372,22 +375,23 @@ public class MulticastSocket extends DatagramSocket {
* with the {@code mcastaddr} argument
* as its argument.
*
* @param mcastaddr is the multicast address to join
* @param netIf specifies the local interface to receive multicast
* datagram packets, or <i>null</i> to defer to the interface set by
* {@link MulticastSocket#setInterface(InetAddress)} or
* {@link MulticastSocket#setNetworkInterface(NetworkInterface)}
*
* @throws IOException if there is an error joining, or when the address
* is not a multicast address, or the platform does not support
* multicasting
* @throws SecurityException if a security manager exists and its
* {@code checkMulticast} method doesn't allow the join.
* @throws IllegalArgumentException if mcastaddr is null or is a
* SocketAddress subclass not supported by this socket
*
* @see SecurityManager#checkMulticast(InetAddress)
* @since 1.4
* @param mcastaddr is the multicast address to join
* @param netIf specifies the local interface to receive multicast
* datagram packets, or {@code null} to defer to the interface set by
* {@link MulticastSocket#setInterface(InetAddress)} or
* {@link MulticastSocket#setNetworkInterface(NetworkInterface)}.
* If {@code null}, and no interface has been set, the behaviour is
* unspecified: any interface may be selected or the operation may fail
* with a {@code SocketException}.
* @throws IOException if there is an error joining, or when the address
* is not a multicast address, or the platform does not support
* multicasting
* @throws SecurityException if a security manager exists and its
* {@code checkMulticast} method doesn't allow the join.
* @throws IllegalArgumentException if mcastaddr is {@code null} or is a
* SocketAddress subclass not supported by this socket
* @see SecurityManager#checkMulticast(InetAddress)
* @since 1.4
*/
public void joinGroup(SocketAddress mcastaddr, NetworkInterface netIf)
throws IOException {
@ -417,24 +421,25 @@ public class MulticastSocket extends DatagramSocket {
* Leave a multicast group on a specified local interface.
*
* <p>If there is a security manager, this method first
* calls its {@code checkMulticast} method
* with the {@code mcastaddr} argument
* as its argument.
* calls its {@code checkMulticast} method with the
* {@code mcastaddr} argument as its argument.
*
* @param mcastaddr is the multicast address to leave
* @param netIf specifies the local interface or <i>null</i> to defer
* to the interface set by
* {@link MulticastSocket#setInterface(InetAddress)} or
* {@link MulticastSocket#setNetworkInterface(NetworkInterface)}
* @throws IOException if there is an error leaving
* or when the address is not a multicast address.
* @throws SecurityException if a security manager exists and its
* {@code checkMulticast} method doesn't allow the operation.
* @throws IllegalArgumentException if mcastaddr is null or is a
* SocketAddress subclass not supported by this socket
*
* @see SecurityManager#checkMulticast(InetAddress)
* @since 1.4
* @param mcastaddr is the multicast address to leave
* @param netIf specifies the local interface or {@code null} to defer
* to the interface set by
* {@link MulticastSocket#setInterface(InetAddress)} or
* {@link MulticastSocket#setNetworkInterface(NetworkInterface)}.
* If {@code null}, and no interface has been set, the behaviour
* is unspecified: any interface may be selected or the operation
* may fail with a {@code SocketException}.
* @throws IOException if there is an error leaving or when the address
* is not a multicast address.
* @throws SecurityException if a security manager exists and its
* {@code checkMulticast} method doesn't allow the operation.
* @throws IllegalArgumentException if mcastaddr is {@code null} or is a
* SocketAddress subclass not supported by this socket.
* @see SecurityManager#checkMulticast(InetAddress)
* @since 1.4
*/
public void leaveGroup(SocketAddress mcastaddr, NetworkInterface netIf)
throws IOException {
@ -464,11 +469,16 @@ public class MulticastSocket extends DatagramSocket {
* Set the multicast network interface used by methods
* whose behavior would be affected by the value of the
* network interface. Useful for multihomed hosts.
* @param inf the InetAddress
* @throws SocketException if there is an error in
* the underlying protocol, such as a TCP error.
* @see #getInterface()
*
* @param inf the InetAddress
* @throws SocketException if there is an error in
* the underlying protocol, such as a TCP error.
* @deprecated The InetAddress may not uniquely identify
* the network interface. Use
* {@link #setNetworkInterface(NetworkInterface)} instead.
* @see #getInterface()
*/
@Deprecated(since="14")
public void setInterface(InetAddress inf) throws SocketException {
if (isClosed()) {
throw new SocketException("Socket is closed");
@ -485,15 +495,18 @@ public class MulticastSocket extends DatagramSocket {
* Retrieve the address of the network interface used for
* multicast packets.
*
* @return An {@code InetAddress} representing
* the address of the network interface used for
* multicast packets.
*
* @throws SocketException if there is an error in
* the underlying protocol, such as a TCP error.
*
* @see #setInterface(java.net.InetAddress)
* @return An {@code InetAddress} representing the address
* of the network interface used for multicast packets,
* or if no interface has been set, an {@code InetAddress}
* representing any local address.
* @throws SocketException if there is an error in the
* underlying protocol, such as a TCP error.
* @deprecated The network interface may not be uniquely identified by
* the InetAddress returned.
* Use {@link #getNetworkInterface()} instead.
* @see #setInterface(java.net.InetAddress)
*/
@Deprecated(since="14")
public InetAddress getInterface() throws SocketException {
if (isClosed()) {
throw new SocketException("Socket is closed");
@ -567,11 +580,13 @@ public class MulticastSocket extends DatagramSocket {
/**
* Get the multicast network interface set.
*
* @throws SocketException if there is an error in
* the underlying protocol, such as a TCP error.
* @return the multicast {@code NetworkInterface} currently set
* @see #setNetworkInterface(NetworkInterface)
* @since 1.4
* @throws SocketException if there is an error in
* the underlying protocol, such as a TCP error.
* @return The multicast {@code NetworkInterface} currently set. A placeholder
* NetworkInterface is returned when there is no interface set; it has
* a single InetAddress to represent any local address.
* @see #setNetworkInterface(NetworkInterface)
* @since 1.4
*/
public NetworkInterface getNetworkInterface() throws SocketException {
NetworkInterface ni
@ -594,11 +609,17 @@ public class MulticastSocket extends DatagramSocket {
* <p>Because this option is a hint, applications that want to
* verify what loopback mode is set to should call
* {@link #getLoopbackMode()}
* @param disable {@code true} to disable the LoopbackMode
* @throws SocketException if an error occurs while setting the value
* @since 1.4
* @see #getLoopbackMode
* @param disable {@code true} to disable the LoopbackMode
* @throws SocketException if an error occurs while setting the value
* @since 1.4
* @deprecated Use {@link #setOption(SocketOption, Object)} with
* {@link java.net.StandardSocketOptions#IP_MULTICAST_LOOP}
 *             instead. The loopback mode is enabled by default;
* {@code MulticastSocket.setOption(StandardSocketOptions.IP_MULTICAST_LOOP, false)}
* disables it.
* @see #getLoopbackMode
*/
@Deprecated(since="14")
public void setLoopbackMode(boolean disable) throws SocketException {
getImpl().setOption(SocketOptions.IP_MULTICAST_LOOP, Boolean.valueOf(disable));
}
@ -606,11 +627,15 @@ public class MulticastSocket extends DatagramSocket {
/**
* Get the setting for local loopback of multicast datagrams.
*
* @throws SocketException if an error occurs while getting the value
* @return true if the LoopbackMode has been disabled
* @since 1.4
* @see #setLoopbackMode
* @throws SocketException if an error occurs while getting the value
* @return true if the LoopbackMode has been disabled
* @since 1.4
* @deprecated Use {@link #getOption(SocketOption)} with
* {@link java.net.StandardSocketOptions#IP_MULTICAST_LOOP}
* instead.
* @see #setLoopbackMode
*/
@Deprecated(since="14")
public boolean getLoopbackMode() throws SocketException {
return ((Boolean)getImpl().getOption(SocketOptions.IP_MULTICAST_LOOP)).booleanValue();
}

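The hunks above migrate MulticastSocket callers from the deprecated InetAddress-based joinGroup/leaveGroup to the (SocketAddress, NetworkInterface) overloads. A minimal standalone sketch of the replacement calls, not part of this commit; the group address mirrors the class javadoc example, and "eth0" is a placeholder interface name:

import java.io.IOException;
import java.net.DatagramPacket;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.MulticastSocket;
import java.net.NetworkInterface;
import java.nio.charset.StandardCharsets;

public class JoinLeaveSketch {
    public static void main(String[] args) throws IOException {
        int port = 6789;
        InetAddress mcastaddr = InetAddress.getByName("228.5.6.7");
        InetSocketAddress group = new InetSocketAddress(mcastaddr, port);
        // Placeholder name; if no such interface exists this is null, and
        // per the updated javadoc the selected interface is then unspecified.
        NetworkInterface netIf = NetworkInterface.getByName("eth0");

        try (MulticastSocket s = new MulticastSocket(port)) {
            // The (SocketAddress, NetworkInterface) overloads replace the
            // deprecated InetAddress-only joinGroup/leaveGroup methods.
            s.joinGroup(group, netIf);

            byte[] msg = "Hello".getBytes(StandardCharsets.UTF_8);
            s.send(new DatagramPacket(msg, msg.length, group));

            s.leaveGroup(group, netIf);
        }
    }
}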
@ -1,5 +1,5 @@
/*
* Copyright (c) 2007, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2007, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -291,7 +291,6 @@ public final class StandardSocketOptions {
* is system dependent.
*
* @see java.nio.channels.MulticastChannel
* @see MulticastSocket#setInterface
*/
public static final SocketOption<NetworkInterface> IP_MULTICAST_IF =
new StdSocketOption<NetworkInterface>("IP_MULTICAST_IF", NetworkInterface.class);
@ -343,7 +342,6 @@ public final class StandardSocketOptions {
* binding the socket is system dependent.
*
* @see java.nio.channels.MulticastChannel
* @see MulticastSocket#setLoopbackMode
*/
public static final SocketOption<Boolean> IP_MULTICAST_LOOP =
new StdSocketOption<Boolean>("IP_MULTICAST_LOOP", Boolean.class);

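The @see references to the deprecated MulticastSocket methods are dropped because setOption/getOption with the standard socket options are now the supported path. A minimal sketch of those replacement calls, not part of this commit; "lo" is a placeholder interface name:

import java.io.IOException;
import java.net.MulticastSocket;
import java.net.NetworkInterface;
import java.net.StandardSocketOptions;

public class OptionSketch {
    public static void main(String[] args) throws IOException {
        try (MulticastSocket s = new MulticastSocket(0)) {
            // Replacement for the deprecated setLoopbackMode(true):
            // IP_MULTICAST_LOOP is the "loopback enabled" flag, so
            // disabling loopback means setting the option to false.
            s.setOption(StandardSocketOptions.IP_MULTICAST_LOOP, false);

            // Replacement for the deprecated setInterface(InetAddress):
            // IP_MULTICAST_IF takes a NetworkInterface, which identifies
            // the interface unambiguously.
            NetworkInterface nif = NetworkInterface.getByName("lo");
            if (nif != null) {
                s.setOption(StandardSocketOptions.IP_MULTICAST_IF, nif);
            }

            System.out.println(
                s.getOption(StandardSocketOptions.IP_MULTICAST_LOOP));
        }
    }
}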
@ -25,9 +25,14 @@
package java.net;
import java.io.IOException;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import jdk.internal.access.SharedSecrets;
import jdk.internal.access.JavaIOFileDescriptorAccess;
import sun.net.ext.ExtendedSocketOptions;
/**
* This class defines the plain DatagramSocketImpl that is used on
* Windows platforms greater than or equal to Windows Vista. These
@ -230,6 +235,19 @@ class DualStackPlainDatagramSocketImpl extends AbstractPlainDatagramSocketImpl
return returnValue;
}
@Override
protected Set<SocketOption<?>> supportedOptions() {
HashSet<SocketOption<?>> options = new HashSet<>();
options.add(StandardSocketOptions.SO_SNDBUF);
options.add(StandardSocketOptions.SO_RCVBUF);
options.add(StandardSocketOptions.SO_REUSEADDR);
options.add(StandardSocketOptions.SO_BROADCAST);
options.add(StandardSocketOptions.IP_TOS);
options.addAll(ExtendedSocketOptions.datagramSocketOptions());
return Collections.unmodifiableSet(options);
}
/* Multicast specific methods.
* Multicasting on a dual layer TCP/IP stack is always done with
* TwoStacksPlainDatagramSocketImpl. This is to overcome the lack

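The new supportedOptions() override returns an unmodifiable set combining the standard datagram options with any platform-specific extended options. A quick way to inspect what a given runtime actually reports; a sketch, not part of this commit:

import java.io.IOException;
import java.net.DatagramSocket;
import java.net.SocketOption;

public class ListOptions {
    public static void main(String[] args) throws IOException {
        try (DatagramSocket ds = new DatagramSocket(0)) {
            // The returned set is unmodifiable and platform dependent;
            // extended options appear only where the OS supports them.
            for (SocketOption<?> opt : ds.supportedOptions()) {
                System.out.println(opt.name());
            }
        }
    }
}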
@ -106,10 +106,19 @@ public final class KerberosPrincipal
*
* <p>If the input name does not contain a realm, the default realm
* is used. The default realm can be specified either in a Kerberos
* configuration file or via the java.security.krb5.realm
* configuration file or via the {@systemproperty java.security.krb5.realm}
* system property. For more information, see the
* {@extLink security_guide_jgss_tutorial Kerberos Requirements}.
* Additionally, if a security manager is
*
* <p>Note that when this class or any other Kerberos-related class is
* initially loaded and initialized, it may read and cache the default
* realm from the Kerberos configuration file or via the
* java.security.krb5.realm system property (the value will be empty if
* no default realm is specified), such that any subsequent calls to set
* or change the default realm by setting the java.security.krb5.realm
* system property may be ignored.
*
* <p>Additionally, if a security manager is
* installed, a {@link ServicePermission} must be granted and the service
* principal of the permission must minimally be inside the
* {@code KerberosPrincipal}'s realm. For example, if the result of
@ -146,10 +155,19 @@ public final class KerberosPrincipal
*
* <p>If the input name does not contain a realm, the default realm
* is used. The default realm can be specified either in a Kerberos
* configuration file or via the java.security.krb5.realm
* configuration file or via the {@systemproperty java.security.krb5.realm}
* system property. For more information, see the
* {@extLink security_guide_jgss_tutorial Kerberos Requirements}.
* Additionally, if a security manager is
*
* <p>Note that when this class or any other Kerberos-related class is
* initially loaded and initialized, it may read and cache the default
* realm from the Kerberos configuration file or via the
* java.security.krb5.realm system property (the value will be empty if
* no default realm is specified), such that any subsequent calls to set
* or change the default realm by setting the java.security.krb5.realm
* system property may be ignored.
*
* <p>Additionally, if a security manager is
* installed, a {@link ServicePermission} must be granted and the service
* principal of the permission must minimally be inside the
* {@code KerberosPrincipal}'s realm. For example, if the result of

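The added javadoc note warns that the default realm is read and cached when the first Kerberos-related class initializes, so the system property must be set before that point. A hedged sketch of the ordering this implies, not part of this commit; the realm, KDC host, and principal name are placeholders, and both krb5 properties are set together since configuring one without the other is rejected:

import javax.security.auth.kerberos.KerberosPrincipal;

public class RealmSketch {
    public static void main(String[] args) {
        // Set the default realm BEFORE any Kerberos-related class is
        // initialized; once cached, later changes may be ignored.
        System.setProperty("java.security.krb5.realm", "EXAMPLE.COM");
        System.setProperty("java.security.krb5.kdc", "kdc.example.com");

        // "duke" carries no realm, so the (cached) default realm is used.
        KerberosPrincipal p = new KerberosPrincipal("duke");
        System.out.println(p.getName()); // duke@EXAMPLE.COM
    }
}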
@ -38,7 +38,12 @@ main.opt.version=\
\ --version Version information
main.opt.module-path=\
\ -p, --module-path <path> Module path
\ -p, --module-path <path> Module path.\n\
\ If not specified, the JDK's jmods directory\n\
\ will be used, if it exists. If specified,\n\
\ but it does not contain the java.base module,\n\
\ the JDK's jmods directory will be added,\n\
\ if it exists.
main.opt.add-modules=\
\ --add-modules <mod>[,<mod>...] Root modules to resolve
@ -115,7 +120,7 @@ err.launcher.main.class.empty:launcher main class name cannot be empty: {0}
err.launcher.module.name.empty:launcher module name cannot be empty: {0}
err.launcher.value.format:launcher value should be of form <command>=<module>[/<main-class>]: {0}
err.output.must.be.specified:--output must be specified
err.modulepath.must.be.specified:--module-path must be specified
err.modulepath.must.be.specified:--module-path is not specified and this runtime image does not contain a jmods directory.
err.mods.must.be.specified:no modules specified to {0}
err.path.not.found=path not found: {0}
err.path.not.valid=invalid path: {0}

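With this resource change, jlink's --module-path becomes optional: the JDK's own jmods directory is used as a fallback when it exists. A sketch invoking jlink through the standard ToolProvider SPI without a module path, not part of this commit; the output directory name is illustrative:

import java.util.spi.ToolProvider;

public class JlinkSketch {
    public static void main(String[] args) {
        ToolProvider jlink = ToolProvider.findFirst("jlink")
                .orElseThrow(() -> new IllegalStateException("jlink not found"));

        // No --module-path: per the updated help text, the JDK's own
        // jmods directory is used when it exists.
        int rc = jlink.run(System.out, System.err,
                "--add-modules", "java.base",
                "--output", "myimage");
        System.exit(rc);
    }
}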
@ -145,9 +145,15 @@ public class TestGCLogMessages {
new LogMessageWithLevel("Prepare Heap Roots", Level.DEBUG),
// Free CSet
new LogMessageWithLevel("Free Collection Set", Level.DEBUG),
new LogMessageWithLevel("Free Collection Set Serial", Level.TRACE),
new LogMessageWithLevel("Serial Free Collection Set", Level.TRACE),
new LogMessageWithLevel("Parallel Free Collection Set", Level.TRACE),
new LogMessageWithLevel("Young Free Collection Set", Level.TRACE),
new LogMessageWithLevel("Non-Young Free Collection Set", Level.TRACE),
// Rebuild Free List
new LogMessageWithLevel("Rebuild Free List", Level.DEBUG),
new LogMessageWithLevel("Serial Rebuild Free List", Level.TRACE),
new LogMessageWithLevel("Parallel Rebuild Free List", Level.TRACE),
// Humongous Eager Reclaim
new LogMessageWithLevel("Humongous Reclaim", Level.DEBUG),
// Merge PSS

@ -112,35 +112,31 @@ public class ClhsdbCDSCore {
if (Platform.isOSX()) {
File coresDir = new File("/cores");
if (!coresDir.isDirectory()) {
throw new Error("cores is not a directory");
cleanup();
throw new Error(coresDir + " is not a directory");
}
// the /cores directory is usually not writable on macOS 10.15
final String osVersion = System.getProperty("os.version");
if (osVersion == null) {
throw new Error("Cannot query the 'os.version' property!");
}
if (!coresDir.canWrite()) {
if (osVersion.startsWith("10.15")) {
throw new SkippedException("/cores is not writable");
} else {
throw new Error("cores does not have write permissions");
}
cleanup();
throw new SkippedException("Directory \"" + coresDir +
"\" is not writable");
}
} else if (Platform.isLinux()) {
// Check if a crash report tool is installed.
File corePatternFile = new File(CORE_PATTERN_FILE_NAME);
Scanner scanner = new Scanner(corePatternFile);
while (scanner.hasNextLine()) {
String line = scanner.nextLine();
line = line.trim();
System.out.println(line);
if (line.startsWith("|")) {
System.out.println(
"\nThis system uses a crash report tool ($cat /proc/sys/kernel/core_pattern).\n" +
"Core files might not be generated. Please reset /proc/sys/kernel/core_pattern\n" +
"to enable core generation. Skipping this test.");
cleanup();
throw new SkippedException("This system uses a crash report tool");
try (Scanner scanner = new Scanner(corePatternFile)) {
while (scanner.hasNextLine()) {
String line = scanner.nextLine();
line = line.trim();
System.out.println(line);
if (line.startsWith("|")) {
System.out.println(
"\nThis system uses a crash report tool ($cat /proc/sys/kernel/core_pattern).\n" +
"Core files might not be generated. Please reset /proc/sys/kernel/core_pattern\n" +
"to enable core generation. Skipping this test.");
cleanup();
throw new SkippedException("This system uses a crash report tool");
}
}
}
}

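The rewritten block wraps the Scanner in try-with-resources so the file handle is closed on every exit path, including the SkippedException throws. The pattern in isolation; a sketch, not part of this commit:

import java.io.File;
import java.io.FileNotFoundException;
import java.util.Scanner;

public class ScanSketch {
    static boolean usesCrashReportTool(File corePatternFile)
            throws FileNotFoundException {
        // try-with-resources closes the Scanner even if an exception
        // propagates out of the loop.
        try (Scanner scanner = new Scanner(corePatternFile)) {
            while (scanner.hasNextLine()) {
                if (scanner.nextLine().trim().startsWith("|")) {
                    return true;
                }
            }
        }
        return false;
    }
}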
@ -45,7 +45,7 @@
*
* @requires vm.debug != true
*
* @run main/othervm
* @run main/othervm/timeout=300
* -XX:ReservedCodeCacheSize=100m
* vm.mlvm.meth.stress.compiler.deoptimize.Test
* -threadsPerCpu 4
@ -67,7 +67,7 @@
*
* @requires vm.debug == true
*
* @run main/othervm
* @run main/othervm/timeout=300
* -XX:ReservedCodeCacheSize=100m
* vm.mlvm.meth.stress.compiler.deoptimize.Test
* -threadsPerCpu 2

@ -1,5 +1,5 @@
/*
* Copyright (c) 2010, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2010, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -24,6 +24,7 @@
/**
* @test
* @bug 6916202 7041125
* @library /test/lib
* @summary More cases of invalid ldap filters accepted and processed
* LDAP API does not catch malformed filters that contain two operands
* for the ! operator
@ -87,12 +88,16 @@
import java.io.*;
import javax.naming.*;
import javax.naming.directory.*;
import java.util.Properties;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.SocketAddress;
import java.util.Hashtable;
import java.net.Socket;
import java.net.ServerSocket;
import jdk.test.lib.net.URIBuilder;
public class InvalidLdapFilters {
// Should we run the client or server in a separate thread?
//
@ -111,9 +116,13 @@ public class InvalidLdapFilters {
// If the server prematurely exits, serverReady will be set to true
// to avoid infinite hangs.
void doServerSide() throws Exception {
ServerSocket serverSock = new ServerSocket(serverPort);
ServerSocket serverSock = new ServerSocket();
SocketAddress sockAddr = new InetSocketAddress(
InetAddress.getLoopbackAddress(), serverPort);
// Bind server socket
serverSock.bind(sockAddr);
// signal client, it's ready to accecpt connection
// signal client, it's ready to accept connection
serverPort = serverSock.getLocalPort();
serverReady = true;
@ -160,10 +169,16 @@ public class InvalidLdapFilters {
}
// set up the environment for creating the initial context
Hashtable<Object, Object> env = new Hashtable<Object, Object>();
Hashtable<Object, Object> env = new Hashtable<>();
env.put(Context.INITIAL_CONTEXT_FACTORY,
"com.sun.jndi.ldap.LdapCtxFactory");
env.put(Context.PROVIDER_URL, "ldap://localhost:" + serverPort);
String providerUrl = URIBuilder.newBuilder()
.scheme("ldap")
.loopback()
.port(serverPort)
.build()
.toString();
env.put(Context.PROVIDER_URL, providerUrl);
env.put("com.sun.jndi.ldap.read.timeout", "1000");
// env.put(Context.SECURITY_AUTHENTICATION, "simple");

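The server side now binds explicitly to the loopback address and reads back the ephemeral port, and the client builds its provider URL from the same loopback address (URIBuilder is a jtreg test-library helper). The binding idiom in isolation; a sketch, not part of this commit, using plain string concatenation in place of URIBuilder:

import java.io.IOException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.ServerSocket;

public class LoopbackBind {
    public static void main(String[] args) throws IOException {
        try (ServerSocket serverSock = new ServerSocket()) {
            // Bind to loopback with port 0 so the kernel picks a free
            // ephemeral port; avoids races on fixed port numbers.
            serverSock.bind(new InetSocketAddress(
                    InetAddress.getLoopbackAddress(), 0));
            int port = serverSock.getLocalPort();
            String url = "ldap://"
                    + serverSock.getInetAddress().getHostAddress()
                    + ":" + port;
            System.out.println("listening at " + url);
        }
    }
}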
@ -25,10 +25,12 @@
* @test
* @bug 4361783
* @key intermittent
* @summary Test to see if ICMP Port Unreachable on non-connected
* DatagramSocket causes a SocketException "socket closed"
* exception on Windows 2000.
* @summary Test to see if ICMP Port Unreachable on non-connected
* DatagramSocket causes a SocketException "socket closed"
* exception on Windows 2000.
* @run main/othervm PortUnreachable
*/
import java.net.BindException;
import java.net.DatagramPacket;
import java.net.DatagramSocket;
@ -56,6 +58,7 @@ public class PortUnreachable {
b = "Greetings from the server".getBytes();
packet = new DatagramPacket(b, b.length, addr, clientPort);
sock.send(packet);
Thread.sleep(500); // give time to the kernel to send packet
sock.close();
} catch (Exception e) {
e.printStackTrace();
@ -70,15 +73,15 @@ public class PortUnreachable {
serverPort);
// it's possible that this method intermittently fails, if some other
// process running on the machine grabs the port we want before us,
// and doesn't release it before the 5 * 500 ms are elapsed...
// and doesn't release it before the 10 * 500 ms are elapsed...
while (serverSocket == null) {
try {
serverSocket = new DatagramSocket(serverPort, InetAddress.getLocalHost());
} catch (BindException bEx) {
if (retryCount++ < 5) {
sleeptime += sleepAtLeast(500);
if (retryCount++ < 10) {
sleeptime += sleepAtLeast(500);
} else {
System.out.println("Give up after 5 retries and " + sleeptime(sleeptime));
System.out.println("Give up after 10 retries and " + sleeptime(sleeptime));
System.out.println("Has some other process grabbed port " + serverPort + "?");
throw bEx;
}
@ -154,6 +157,7 @@ public class PortUnreachable {
clientSock.send(packet);
serverSend();
// try to receive
b = new byte[25];
packet = new DatagramPacket(b, b.length, addr, serverPort);
@ -166,8 +170,20 @@ public class PortUnreachable {
}
public static void main(String[] args) throws Exception {
PortUnreachable test = new PortUnreachable();
test.execute();
}
// A BindException might be thrown intermittently. In that case retry
// 3 times before propagating the exception to finish execution.
int catchCount = 0;
while (true) {
try {
PortUnreachable test = new PortUnreachable();
test.execute();
return;
} catch (BindException bEx) {
if (++catchCount > 3) {
throw bEx;
}
}
}
}
}

@ -0,0 +1,58 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 8234148
* @library /test/lib
* @summary checks that the DatagramSocket supportedOptions set contains all
* MulticastSocket socket options
* @run testng SupportedOptionsCheck
*/
import jdk.test.lib.Platform;
import org.testng.annotations.Test;
import java.net.DatagramSocket;
import java.net.StandardSocketOptions;
import java.util.Set;
import static org.testng.Assert.assertTrue;
public class SupportedOptionsCheck {
@Test
public void checkMulticastOptionsAreReturned() throws Exception {
try (DatagramSocket ds = new DatagramSocket())
{
Set<?> options = ds.supportedOptions();
Set<?> multicastOptions = Set.of(
StandardSocketOptions.IP_MULTICAST_IF,
StandardSocketOptions.IP_MULTICAST_TTL,
StandardSocketOptions.IP_MULTICAST_LOOP);
if (!Platform.isWindows())
assertTrue(options.containsAll(multicastOptions));
}
}
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -22,7 +22,8 @@
*/
import java.io.IOException;
import java.net.*;
import java.net.ServerSocket;
import java.net.Socket;
import java.util.Set;
import static java.lang.System.out;
import jdk.test.lib.net.IPSupport;
@ -54,39 +55,24 @@ public class SupportedOptionsSet {
static void first() throws IOException {
try (Socket s = new Socket();
ServerSocket ss = new ServerSocket();
DatagramSocket ds = new DatagramSocket();
MulticastSocket ms = new MulticastSocket()) {
ServerSocket ss = new ServerSocket())
{
Set<?> first = s.supportedOptions();
Set<?> second = ss.supportedOptions();
assertNotEqual(first, second,
"Socket and ServerSocket should have different options.");
first = ds.supportedOptions();
second = ms.supportedOptions();
assertNotEqual(first, second,
"DatagramSocket and MulticastSocket should have different options.");
}
}
/** Tests with the order of access to supportedOptions reversed. */
static void second() throws IOException {
try (ServerSocket ss = new ServerSocket();
Socket s = new Socket();
DatagramSocket ds = new DatagramSocket();
MulticastSocket ms = new MulticastSocket()) {
Socket s = new Socket())
{
Set<?> first = ss.supportedOptions();
Set<?> second = s.supportedOptions();
assertNotEqual(first, second,
"ServerSocket and Socket should have different options.");
first = ms.supportedOptions();
second = ds.supportedOptions();
assertNotEqual(first, second,
"MulticastSocket and DatagramSocket should have different options.");
}
}

@ -110,8 +110,10 @@ public class TestG1ParallelPhases {
"StringDedupQueueFixup",
"StringDedupTableFixup",
"RedirtyCards",
"ParFreeCSet",
"NonYoungFreeCSet",
"YoungFreeCSet"
"YoungFreeCSet",
"RebuildFreeList"
);
// Some GC phases may or may not occur depending on environment. Filter them out