8205993: ZGC: Fix typos and incorrect indentations

Reviewed-by: stefank
Per Lidén 2018-06-28 12:14:37 +02:00
parent c61ba730c1
commit 6be8921d13
30 changed files with 62 additions and 61 deletions

@ -170,7 +170,7 @@ int ZBackingFile::create_file_fd(const char* name) const {
const int fd_anon = open(path.get(), O_TMPFILE|O_EXCL|O_RDWR|O_CLOEXEC, S_IRUSR|S_IWUSR);
if (fd_anon == -1) {
ZErrno err;
log_debug(gc, init)("Failed to create anonymouns file in %s (%s)", path.get(),
log_debug(gc, init)("Failed to create anonymous file in %s (%s)", path.get(),
(err == EINVAL ? "Not supported" : err.to_string()));
} else {
// Get inode number for anonymous file
@ -224,7 +224,7 @@ int ZBackingFile::create_fd(const char* name) const {
return fd;
}
log_debug(gc, init)("Falling back to searching for an accessible moint point");
log_debug(gc, init)("Falling back to searching for an accessible mount point");
}
return create_file_fd(name);
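
For context, the two log messages fixed above belong to ZBackingFile's fallback chain: it first tries to create an unnamed heap backing file with O_TMPFILE and, if the kernel or filesystem rejects the flag with EINVAL, falls back to searching for an accessible mount point and creating a named file there. A minimal standalone sketch of that pattern, not the actual ZBackingFile code, with error handling reduced to the EINVAL case named in the log message:

  #define _GNU_SOURCE  // For O_TMPFILE with glibc
  #include <fcntl.h>
  #include <sys/stat.h>
  #include <errno.h>
  #include <stdio.h>

  // Try to create an anonymous (unnamed) file in the given directory.
  // Returns -1 when O_TMPFILE is unsupported, mirroring the "Not supported" log above.
  static int create_anonymous_file(const char* dir) {
    const int fd = open(dir, O_TMPFILE | O_EXCL | O_RDWR | O_CLOEXEC, S_IRUSR | S_IWUSR);
    if (fd == -1 && errno == EINVAL) {
      fprintf(stderr, "O_TMPFILE not supported in %s, falling back to a named file\n", dir);
    }
    return fd;
  }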

@ -31,11 +31,11 @@
#include <sys/syscall.h>
#ifndef MPOL_F_NODE
#define MPOL_F_NODE (1<<0) /* return next IL mode instead of node mask */
#define MPOL_F_NODE (1<<0) // Return next IL mode instead of node mask
#endif
#ifndef MPOL_F_ADDR
#define MPOL_F_ADDR (1<<1) /* look up vma using address */
#define MPOL_F_ADDR (1<<1) // Look up VMA using address
#endif
static int z_get_mempolicy(uint32_t* mode, const unsigned long *nmask, unsigned long maxnode, uintptr_t addr, int flags) {
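
For context, MPOL_F_NODE and MPOL_F_ADDR are flag bits of the Linux get_mempolicy(2) system call, which ZGC uses to ask which NUMA node backs a given address. The wrapper whose declaration closes this hunk presumably issues the raw syscall rather than depending on libnuma; a hedged sketch of that approach, with a hypothetical name (the actual wrapper may differ in detail):

  #include <unistd.h>
  #include <sys/syscall.h>
  #include <stdint.h>

  // Thin wrapper around the raw get_mempolicy syscall (sketch only).
  static long get_mempolicy_sketch(uint32_t* mode, const unsigned long* nmask,
                                   unsigned long maxnode, uintptr_t addr, int flags) {
    return syscall(SYS_get_mempolicy, mode, nmask, maxnode, addr, flags);
  }

  // Typical use: which NUMA node backs 'addr'?
  //   uint32_t node = 0;
  //   get_mempolicy_sketch(&node, NULL, 0, addr, MPOL_F_NODE | MPOL_F_ADDR);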

@ -292,7 +292,7 @@ LoadBarrierNode* LoadBarrierNode::has_dominating_barrier(PhaseIdealLoop* phase,
}
void LoadBarrierNode::push_dominated_barriers(PhaseIterGVN* igvn) const {
// change to that barrier may affect a dominated barrier so re-push those
// Change to that barrier may affect a dominated barrier so re-push those
Node* val = in(LoadBarrierNode::Oop);
for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) {
@ -526,7 +526,7 @@ Node* ZBarrierSetC2::make_cas_loadbarrier(C2AtomicAccess& access) const {
phi_mem2->init_req(1, scmemproj2);
kit->set_memory(phi_mem2, alias_idx);
// Merge outer flow - then check if first cas succeded
// Merge outer flow - then check if first CAS succeeded
region->set_req(1, then);
region->set_req(2, region2);
phi->set_req(1, kit->intcon(1));
@ -573,7 +573,7 @@ Node* ZBarrierSetC2::make_cmpx_loadbarrier(C2AtomicAccess& access) const {
Node* region2 = new RegionNode(3);
Node* phi2 = new PhiNode(region2, adr_type);
// Check if cmpx succeded
// Check if cmpx succeeded
Node* cmp = gvn.transform(new CmpPNode(cmpx, in_expected));
Node* bol = gvn.transform(new BoolNode(cmp, BoolTest::eq))->as_Bool();
IfNode* iff = gvn.transform(new IfNode(in_ctrl, bol, likely, COUNT_UNKNOWN))->as_If();
@ -610,7 +610,7 @@ Node* ZBarrierSetC2::make_cmpx_loadbarrier(C2AtomicAccess& access) const {
phi2->set_req(1, cmpx2);
phi2->set_req(2, barrierdata);
// Merge outer flow - then check if first cas succeded
// Merge outer flow - then check if first cas succeeded
region->set_req(1, then);
region->set_req(2, region2);
phi->set_req(1, cmpx);
@ -802,7 +802,7 @@ void ZBarrierSetC2::expand_loadbarrier_node(PhaseMacroExpand* phase, LoadBarrier
}
}
// Basic loadbarrier using conventional arg passing
// Basic loadbarrier using conventional argument passing
void ZBarrierSetC2::expand_loadbarrier_basic(PhaseMacroExpand* phase, LoadBarrierNode *barrier) const {
PhaseIterGVN &igvn = phase->igvn();
@ -862,7 +862,7 @@ void ZBarrierSetC2::expand_loadbarrier_basic(PhaseMacroExpand* phase, LoadBarrie
if (barrier->is_writeback()) {
call->init_req(TypeFunc::Parms+1, in_adr);
} else {
// when slow path is called with a null adr, the healed oop will not be written back
// When slow path is called with a null address, the healed oop will not be written back
call->init_req(TypeFunc::Parms+1, igvn.zerocon(T_OBJECT));
}
call = igvn.transform(call);
@ -877,7 +877,7 @@ void ZBarrierSetC2::expand_loadbarrier_basic(PhaseMacroExpand* phase, LoadBarrie
result_region = igvn.transform(result_region);
result_val = igvn.transform(result_val);
if (out_ctrl != NULL) { // added if cond
if (out_ctrl != NULL) { // Added if cond
igvn.replace_node(out_ctrl, result_region);
}
igvn.replace_node(out_res, result_val);
@ -934,7 +934,7 @@ void ZBarrierSetC2::expand_loadbarrier_optimized(PhaseMacroExpand* phase, LoadBa
Node *new_loadp;
new_loadp = slow_path_surrogate;
// create the final region/phi pair to converge cntl/data paths to downstream code
// Create the final region/phi pair to converge cntl/data paths to downstream code
Node* result_region = igvn.transform(new RegionNode(3));
result_region->set_req(1, then);
result_region->set_req(2, elsen);
@ -943,7 +943,7 @@ void ZBarrierSetC2::expand_loadbarrier_optimized(PhaseMacroExpand* phase, LoadBa
result_phi->set_req(1, new_loadp);
result_phi->set_req(2, barrier->in(LoadBarrierNode::Oop));
// finally, connect the original outputs to the barrier region and phi to complete the expansion/substitution
// Finally, connect the original outputs to the barrier region and phi to complete the expansion/substitution
// igvn.replace_node(out_ctrl, result_region);
if (out_ctrl != NULL) { // added if cond
igvn.replace_node(out_ctrl, result_region);
@ -980,7 +980,7 @@ bool ZBarrierSetC2::expand_macro_nodes(PhaseMacroExpand* macro) const {
int load_barrier_count = s->load_barrier_count();
LoadBarrierNode * n = s->load_barrier_node(load_barrier_count-1-skipped);
if (igvn.type(n) == Type::TOP || (n->in(0) != NULL && n->in(0)->is_top())) {
// node is unreachable, so don't try to expand it
// Node is unreachable, so don't try to expand it
s->remove_load_barrier_node(n);
continue;
}

@ -35,8 +35,8 @@ private:
bool _writeback; // Controls if the barrier writes the healed oop back to memory
// A swap on a memory location must never write back the healed oop
bool _oop_reload_allowed; // Controls if the barrier are allowed to reload the oop from memory
// before healing, otherwise both the oop and the address must be passed to the
// barrier from the oop
// before healing, otherwise both the oop and the address must be
// passed to the barrier from the oop
static bool is_dominator(PhaseIdealLoop* phase, bool linear_only, Node *d, Node *n);
void push_dominated_barriers(PhaseIterGVN* igvn) const;

@ -19,7 +19,6 @@
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"

@ -19,7 +19,6 @@
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_Z_VMSTRUCTS_Z_HPP

@ -44,7 +44,7 @@
// | |
// | * 3-3 No Reserve Flag (1-bit)
// |
// * 7-5 Unused (3-bits)
// * 7-4 Unused (4-bits)
//
class ZAllocationFlags {
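
The diagram above documents how ZAllocationFlags packs its boolean options into a single byte, and the fix corrects the range of unused high bits after the last flag (bit 3). A hedged sketch of the general idea behind such a flags byte, with illustrative masks only, not the actual ZBitField-based implementation:

  #include <cstdint>

  // Illustrative only: one flag at bit 3, bits 7-4 unused, matching the diagram above.
  class AllocationFlagsSketch {
  private:
    uint8_t _flags;
    static const uint8_t NoReserveMask = 1u << 3;

  public:
    AllocationFlagsSketch() : _flags(0) {}
    void set_no_reserve()   { _flags |= NoReserveMask; }
    bool no_reserve() const { return (_flags & NoReserveMask) != 0; }
  };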

@ -53,7 +53,7 @@ bool ZBarrierSet::barrier_needed(DecoratorSet decorators, BasicType type) {
return true;
}
// Barrier not neeed
// Barrier not needed
return false;
}

@ -141,7 +141,7 @@ inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_cmpxc
verify_decorators_present<ON_STRONG_OOP_REF | ON_UNKNOWN_OOP_REF>();
verify_decorators_absent<AS_NO_KEEPALIVE>();
// Through Unsafe.CompareAndExchangeObject()/CompareAndSetObject() we can recieve
// Through Unsafe.CompareAndExchangeObject()/CompareAndSetObject() we can receive
// calls with ON_UNKNOWN_OOP_REF set. However, we treat these as ON_STRONG_OOP_REF,
// with the motivation that if you're doing Unsafe operations on a Reference.referent
// field, then you're on your own anyway.

@ -51,16 +51,20 @@ inline bool ZBitMap::par_set_bit_pair_strong(idx_t bit, bool& inc_live) {
do {
const bm_word_t new_val = old_val | pair_mask;
if (new_val == old_val) {
// Someone else beat us to it
inc_live = false;
return false; // Someone else beat us to it.
return false;
}
const bm_word_t cur_val = Atomic::cmpxchg(new_val, addr, old_val);
if (cur_val == old_val) {
// Success
const bm_word_t marked_mask = bit_mask(bit);
inc_live = !(old_val & marked_mask);
return true; // Success.
return true;
}
old_val = cur_val; // The value changed, try again.
// The value changed, retry
old_val = cur_val;
} while (true);
}

@ -173,7 +173,7 @@ void ZCollectedHeap::collect(GCCause::Cause cause) {
void ZCollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
// These collection requests are ignored since ZGC can't run a synchronous
// GC cycle from within the VM thread. This is considered benign, since the
// only GC causes comming in here should be heap dumper and heap inspector.
// only GC causes coming in here should be heap dumper and heap inspector.
// However, neither the heap dumper nor the heap inspector really need a GC
// to happen, but the result of their heap iterations might in that case be
// less accurate since they might include objects that would otherwise have

@ -101,7 +101,7 @@ bool ZDirector::rule_allocation_rate() const {
// Perform GC if the estimated max allocation rate indicates that we
// will run out of memory. The estimated max allocation rate is based
// on the moving average of the sampled allocation rate plus a safety
// margin based on variations in the allocation rate and unforseen
// margin based on variations in the allocation rate and unforeseen
// allocation spikes.
// Calculate amount of free memory available to Java threads. Note that
@ -115,9 +115,9 @@ bool ZDirector::rule_allocation_rate() const {
// Calculate time until OOM given the max allocation rate and the amount
// of free memory. The allocation rate is a moving average and we multiply
// that with an alllcation spike tolerance factor to guard against unforseen
// that with an allocation spike tolerance factor to guard against unforeseen
// phase changes in the allocate rate. We then add ~3.3 sigma to account for
// the allocation rate variance, which means the probablility is 1 in 1000
// the allocation rate variance, which means the probability is 1 in 1000
// that a sample is outside of the confidence interval.
const double max_alloc_rate = (ZStatAllocRate::avg() * ZAllocationSpikeTolerance) + (ZStatAllocRate::avg_sd() * one_in_1000);
const double time_until_oom = free / (max_alloc_rate + 1.0); // Plus 1.0B/s to avoid division by zero
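
To make the formula at the end of this hunk concrete with made-up numbers: with an average allocation rate of 500 MB/s, ZAllocationSpikeTolerance of 2, a sampled standard deviation of 100 MB/s and one_in_1000 ≈ 3.29, the estimated max rate is 500 * 2 + 100 * 3.29 ≈ 1329 MB/s; with 4096 MB of free memory, time_until_oom ≈ 4096 / 1329 ≈ 3.1 seconds (the extra 1.0 B/s in the code only guards against division by zero). Only the formula comes from the code above; the numbers are invented for illustration.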

@ -181,11 +181,11 @@ public:
ZStatTimer timer(ZPhasePauseMarkStart);
ZServiceabilityMarkStartTracer tracer;
// Setup soft reference policy
// Set up soft reference policy
const bool clear = should_clear_soft_references();
ZHeap::heap()->set_soft_reference_policy(clear);
// Setup boost mode
// Set up boost mode
const bool boost = should_boost_worker_threads();
ZHeap::heap()->set_boost_worker_threads(boost);
@ -373,7 +373,7 @@ void ZDriver::run_gc_cycle(GCCause::Cause cause) {
ZHeap::heap()->select_relocation_set();
}
// Phase 8: Prepare Relocation Set
// Phase 8: Concurrent Prepare Relocation Set
{
ZStatTimer timer(ZPhaseConcurrentPrepareRelocationSet);
ZHeap::heap()->prepare_relocation_set();

@ -34,7 +34,7 @@ void ZForwardingTable::setup(size_t live_objects) {
// Allocate table for linear probing. The size of the table must be
// a power of two to allow for quick and inexpensive indexing/masking.
// The table is sized to have a load factor of 50%, i.e. sized to have
// double the number of entries actuallly inserted.
// double the number of entries actually inserted.
_size = ZUtils::round_up_power_of_2(live_objects * 2);
_table = MallocArrayAllocator<ZForwardingTableEntry>::allocate(_size, mtGC);
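
As a concrete, made-up example of the sizing rule above, plus a sketch of the round-up helper it relies on (not the actual ZUtils code):

  #include <cstddef>

  // Round a value up to the next power of two (sketch only).
  static size_t round_up_power_of_2_sketch(size_t value) {
    size_t result = 1;
    while (result < value) {
      result <<= 1;
    }
    return result;
  }

  // Example: 1500 live objects -> round_up_power_of_2_sketch(1500 * 2) = 4096 entries,
  // a load factor of ~37%, never above the 50% target, which keeps the linear probing
  // described in the next hunk short and cheap.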

@ -65,7 +65,7 @@ inline ZForwardingTableEntry ZForwardingTable::find(uintptr_t from_index) const
}
inline ZForwardingTableEntry ZForwardingTable::find(uintptr_t from_index, ZForwardingTableCursor* cursor) const {
// Reading entries in the table races with the atomic cas done for
// Reading entries in the table races with the atomic CAS done for
// insertion into the table. This is safe because each entry is at
// most updated once (from -1 to something else).
ZForwardingTableEntry entry = first(from_index, cursor);

@ -50,12 +50,12 @@
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
static const ZStatSampler ZSamplerHeapUsedBeforeMark("Memory", "Heap Used Before Mark", ZStatUnitBytes);
static const ZStatSampler ZSamplerHeapUsedAfterMark("Memory", "Heap Used After Mark", ZStatUnitBytes);
static const ZStatSampler ZSamplerHeapUsedBeforeRelocation("Memory", "Heap Used Before Relocation", ZStatUnitBytes);
static const ZStatSampler ZSamplerHeapUsedAfterRelocation("Memory", "Heap Used After Relocation", ZStatUnitBytes);
static const ZStatCounter ZCounterUndoPageAllocation("Memory", "Undo Page Allocation", ZStatUnitOpsPerSecond);
static const ZStatCounter ZCounterOutOfMemory("Memory", "Out Of Memory", ZStatUnitOpsPerSecond);
static const ZStatSampler ZSamplerHeapUsedBeforeMark("Memory", "Heap Used Before Mark", ZStatUnitBytes);
static const ZStatSampler ZSamplerHeapUsedAfterMark("Memory", "Heap Used After Mark", ZStatUnitBytes);
static const ZStatSampler ZSamplerHeapUsedBeforeRelocation("Memory", "Heap Used Before Relocation", ZStatUnitBytes);
static const ZStatSampler ZSamplerHeapUsedAfterRelocation("Memory", "Heap Used After Relocation", ZStatUnitBytes);
static const ZStatCounter ZCounterUndoPageAllocation("Memory", "Undo Page Allocation", ZStatUnitOpsPerSecond);
static const ZStatCounter ZCounterOutOfMemory("Memory", "Out Of Memory", ZStatUnitOpsPerSecond);
ZHeap* ZHeap::_heap = NULL;
@ -155,7 +155,7 @@ size_t ZHeap::unsafe_max_tlab_alloc() const {
// fit the smallest possible TLAB. This means that the next
// TLAB allocation will force the allocator to get a new
// backing page anyway, which in turn means that we can then
// fit the larges possible TLAB.
// fit the largest possible TLAB.
size = max_tlab_size();
}

@ -187,7 +187,7 @@ void ZHeapIterator::objects_do(ObjectClosure* cl) {
ZRootsIterator roots;
// Follow roots. Note that we also visit the JVMTI weak tag map
// as if they where strong roots to make sure we visit all tagged
// as if they were strong roots to make sure we visit all tagged
// objects, even those that might now have become unreachable.
// If we didn't do this the user would have expected to see
// ObjectFree events for unreachable objects in the tag map.

@ -29,7 +29,7 @@
template <typename T> class ZList;
// Element in a double linked list
// Element in a doubly linked list
template <typename T>
class ZListNode {
friend class ZList<T>;
@ -61,7 +61,7 @@ public:
}
};
// Double-linked list
// Doubly linked list
template <typename T>
class ZList {
private:

@ -73,7 +73,7 @@ void ZLiveMap::reset(size_t index) {
// Mark reset contention
if (!contention) {
// Count contention once, not every loop
// Count contention once
ZStatInc(ZCounterMarkSeqNumResetContention);
contention = true;
@ -95,7 +95,7 @@ void ZLiveMap::reset_segment(BitMap::idx_t segment) {
// Mark reset contention
if (!contention) {
// Count contention once, not every loop
// Count contention once
ZStatInc(ZCounterMarkSegmentResetContention);
contention = true;

@ -97,8 +97,8 @@ inline BitMap::idx_t ZLiveMap::index_to_segment(BitMap::idx_t index) const {
inline bool ZLiveMap::get(size_t index) const {
BitMap::idx_t segment = index_to_segment(index);
return is_marked() && // Page is marked
is_segment_live(segment) && // Segment is marked
return is_marked() && // Page is marked
is_segment_live(segment) && // Segment is marked
_bitmap.at(index); // Object is marked
}

@ -398,7 +398,7 @@ bool ZMark::flush(bool at_safepoint) {
}
bool ZMark::try_flush(volatile size_t* nflush) {
// Only flush if handhakes are enabled
// Only flush if handshakes are enabled
if (!ThreadLocalHandshakes) {
return false;
}
@ -681,5 +681,5 @@ void ZMark::verify_all_stacks_empty() const {
Threads::threads_do(&cl);
// Verify stripe stacks
guarantee(_stripes.is_empty(), "Should be emtpy");
guarantee(_stripes.is_empty(), "Should be empty");
}

@ -77,7 +77,7 @@ private:
public:
ZMarkStackEntry() {
// This constructor is intentionally left emtpy and does not initialize
// This constructor is intentionally left empty and does not initialize
// _entry to allow it to be optimized out when instantiating ZMarkStack,
// which has a long array of ZMarkStackEntry elements, but doesn't care
// what _entry is initialized to.

@ -40,7 +40,7 @@ private:
ZPage* alloc_page(uint8_t type, size_t size, ZAllocationFlags flags);
// Allocate an object in a shared page. Allocate and
// atomically install a new page if neccesary.
// atomically install a new page if necessary.
uintptr_t alloc_object_in_shared_page(ZPage** shared_page,
uint8_t page_type,
size_t page_size,
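
The comment fixed here describes a lock-free pattern: threads bump-allocate out of a shared page and, when it fills up, race to install a fresh page so that exactly one replacement wins. A much-simplified sketch of that install step, with hypothetical names and plain C++ atomics instead of HotSpot's Atomic class:

  #include <atomic>

  struct PageSketch;  // Stand-in for a heap page with bump-pointer allocation state

  // Try to swap in a fresh page for the full one; only one racing thread succeeds,
  // the losers continue allocating from the page installed by the winner.
  static bool install_new_page(std::atomic<PageSketch*>& shared_page,
                               PageSketch* full_page, PageSketch* fresh_page) {
    PageSketch* expected = full_page;
    // On failure a real implementation would undo or recycle 'fresh_page'.
    return shared_page.compare_exchange_strong(expected, fresh_page);
  }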

@ -145,7 +145,7 @@ void ZPageAllocator::reset_statistics() {
void ZPageAllocator::increase_used(size_t size, bool relocation) {
if (relocation) {
// Allocating a page for the purpose of relocation has a
// negative contribution to the number of relcaimed bytes.
// negative contribution to the number of reclaimed bytes.
_reclaimed -= size;
}
_allocated += size;

@ -244,7 +244,7 @@ public:
virtual void do_thread(Thread* thread) {
if (thread->is_Java_thread()) {
// Update thread local adddress bad mask
// Update thread local address bad mask
ZThreadLocalData::set_address_bad_mask(thread, ZAddressBadMask);
}

@ -149,7 +149,7 @@ ZServiceabilityManagerStatsTracer::ZServiceabilityManagerStatsTracer(bool is_gc_
ZServiceabilityCountersTracer::ZServiceabilityCountersTracer() {
// Nothing to trace with TraceCollectorStats, since ZGC has
// neither a young collector or a full collector.
// neither a young collector nor a full collector.
}
ZServiceabilityCountersTracer::~ZServiceabilityCountersTracer() {

@ -482,7 +482,7 @@ ZStatCounterData ZStatUnsampledCounter::collect_and_reset() const {
}
//
// Stat MMU (Mimimum Mutator Utilization)
// Stat MMU (Minimum Mutator Utilization)
//
ZStatMMUPause::ZStatMMUPause() :
_start(0.0),
@ -560,9 +560,8 @@ void ZStatMMU::register_pause(const Ticks& start, const Ticks& end) {
}
void ZStatMMU::print() {
log_info(gc, mmu)(
"MMU: 2ms/%.1f%%, 5ms/%.1f%%, 10ms/%.1f%%, 20ms/%.1f%%, 50ms/%.1f%%, 100ms/%.1f%%",
_mmu_2ms, _mmu_5ms, _mmu_10ms, _mmu_20ms, _mmu_50ms, _mmu_100ms);
log_info(gc, mmu)("MMU: 2ms/%.1f%%, 5ms/%.1f%%, 10ms/%.1f%%, 20ms/%.1f%%, 50ms/%.1f%%, 100ms/%.1f%%",
_mmu_2ms, _mmu_5ms, _mmu_10ms, _mmu_20ms, _mmu_50ms, _mmu_100ms);
}
//
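
For readers unfamiliar with the acronym being fixed above: minimum mutator utilization over a time window is the worst-case fraction of that window left to the application (the mutator) after subtracting GC pause time, so a single 2 ms pause inside a 20 ms window yields an MMU of (20 - 2) / 20 = 90%. The percentages printed by ZStatMMU::print are these worst-case values for the listed window sizes (2 ms up to 100 ms).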

@ -156,7 +156,7 @@ public:
};
//
// Stat MMU (Mimimum Mutator Utilization)
// Stat MMU (Minimum Mutator Utilization)
//
class ZStatMMUPause {
private:

@ -35,7 +35,7 @@ public:
static size_t round_up_power_of_2(size_t value);
static size_t round_down_power_of_2(size_t value);
// Size convertion
// Size conversion
static size_t bytes_to_words(size_t size_in_words);
static size_t words_to_bytes(size_t size_in_words);

@ -44,7 +44,7 @@ uint ZWorkers::calculate_nparallel() {
uint ZWorkers::calculate_nconcurrent() {
// Use 12.5% of the CPUs, rounded up. The number of concurrent threads we
// would like to use heavily depends on the type of workload we are running.
// Using too many threads will have a nagative impact on the application
// Using too many threads will have a negative impact on the application
// throughput, while using too few threads will prolong the GC-cycle and
// we then risk being out-run by the application. Using 12.5% of the active
// processors appears to be a fairly good balance.
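
A hedged sketch of the 12.5%-rounded-up rule described above (hypothetical helper; the real calculation may apply further heuristics):

  #include <algorithm>

  // 12.5% of the active processors, rounded up, and never fewer than one thread.
  static unsigned calculate_nconcurrent_sketch(unsigned nactive_cpus) {
    return std::max(1u, (nactive_cpus + 7) / 8);
  }

  // Example: 16 CPUs -> 2 concurrent GC worker threads, 4 CPUs -> 1.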