8025856: Fix typos in the GC code

Fix about 440 typos in comments in the VM code

Reviewed-by: mgerdin, tschatzl, coleenp, kmo, jcoomes
Jesper Wilhelmsson 2014-01-23 14:47:23 +01:00
parent 4c7c3f0613
commit 81ba2e32c0
150 changed files with 524 additions and 545 deletions

View File

@ -201,16 +201,10 @@ void ciField::initialize_from(fieldDescriptor* fd) {
return;
}
// This field just may be constant. The only cases where it will
// not be constant are:
//
// 1. The field holds a non-perm-space oop. The field is, strictly
// speaking, constant but we cannot embed non-perm-space oops into
// generated code. For the time being we need to consider the
// field to be not constant.
// 2. The field is a *special* static&final field whose value
// may change. The three examples are java.lang.System.in,
// java.lang.System.out, and java.lang.System.err.
// This field just may be constant. The only case where it will
// not be constant is when the field is a *special* static&final field
// whose value may change. The three examples are java.lang.System.in,
// java.lang.System.out, and java.lang.System.err.
KlassHandle k = _holder->get_Klass();
assert( SystemDictionary::System_klass() != NULL, "Check once per vm");

View File

@ -130,9 +130,7 @@ public:
// 1. The field is both static and final
// 2. The canonical holder of the field has undergone
// static initialization.
// 3. If the field is an object or array, then the oop
// in question is allocated in perm space.
// 4. The field is not one of the special static/final
// 3. The field is not one of the special static/final
// non-constant fields. These are java.lang.System.in
// and java.lang.System.out. Abomination.
//

View File

@ -466,7 +466,7 @@ void CMSAdaptiveSizePolicy::checkpoint_roots_initial_end(
void CMSAdaptiveSizePolicy::checkpoint_roots_final_begin() {
_STW_timer.stop();
_latest_cms_initial_mark_end_to_remark_start_secs = _STW_timer.seconds();
// Start accumumlating time for the remark in the STW timer.
// Start accumulating time for the remark in the STW timer.
_STW_timer.reset();
_STW_timer.start();
}
@ -537,8 +537,8 @@ void CMSAdaptiveSizePolicy::msc_collection_end(GCCause::Cause gc_cause) {
avg_msc_pause()->sample(msc_pause_in_seconds);
double mutator_time_in_seconds = 0.0;
if (_latest_cms_collection_end_to_collection_start_secs == 0.0) {
// This assertion may fail because of time stamp gradularity.
// Comment it out and investiage it at a later time. The large
// This assertion may fail because of time stamp granularity.
// Comment it out and investigate it at a later time. The large
// time stamp granularity occurs on some older linux systems.
#ifndef CLOCK_GRANULARITY_TOO_LARGE
assert((_latest_cms_concurrent_marking_time_secs == 0.0) &&
@ -836,7 +836,7 @@ double CMSAdaptiveSizePolicy::cms_gc_cost() const {
void CMSAdaptiveSizePolicy::ms_collection_marking_begin() {
_STW_timer.stop();
// Start accumumlating time for the marking in the STW timer.
// Start accumulating time for the marking in the STW timer.
_STW_timer.reset();
_STW_timer.start();
}
@ -1227,7 +1227,7 @@ uint CMSAdaptiveSizePolicy::compute_survivor_space_size_and_threshold(
// We use the tenuring threshold to equalize the cost of major
// and minor collections.
// ThresholdTolerance is used to indicate how sensitive the
// tenuring threshold is to differences in cost betweent the
// tenuring threshold is to differences in cost between the
// collection types.
// Get the times of interest. This involves a little work, so

View File

@ -356,7 +356,7 @@ class CMSAdaptiveSizePolicy : public AdaptiveSizePolicy {
void concurrent_sweeping_begin();
void concurrent_sweeping_end();
// Similar to the above (e.g., concurrent_marking_end()) and
// is used for both the precleaning an abortable precleaing
// is used for both the precleaning an abortable precleaning
// phases.
void concurrent_precleaning_begin();
void concurrent_precleaning_end();

View File

@ -88,8 +88,7 @@ class CMSGCAdaptivePolicyCounters : public GCAdaptivePolicyCounters {
// of the tenured generation.
PerfVariable* _avg_msc_pause_counter;
// Average for the time between the most recent end of a
// MSC collection and the beginning of the next
// MSC collection.
// MSC collection and the beginning of the next MSC collection.
PerfVariable* _avg_msc_interval_counter;
// Average for the GC cost of a MSC collection based on
// _avg_msc_pause_counter and _avg_msc_interval_counter.
@ -99,8 +98,7 @@ class CMSGCAdaptivePolicyCounters : public GCAdaptivePolicyCounters {
// of the tenured generation.
PerfVariable* _avg_ms_pause_counter;
// Average for the time between the most recent end of a
// MS collection and the beginning of the next
// MS collection.
// MS collection and the beginning of the next MS collection.
PerfVariable* _avg_ms_interval_counter;
// Average for the GC cost of a MS collection based on
// _avg_ms_pause_counter and _avg_ms_interval_counter.
@ -108,9 +106,9 @@ class CMSGCAdaptivePolicyCounters : public GCAdaptivePolicyCounters {
// Average of the bytes promoted per minor collection.
PerfVariable* _promoted_avg_counter;
// Average of the deviation of the promoted average
// Average of the deviation of the promoted average.
PerfVariable* _promoted_avg_dev_counter;
// Padded average of the bytes promoted per minor colleciton
// Padded average of the bytes promoted per minor collection.
PerfVariable* _promoted_padded_avg_counter;
// See description of the _change_young_gen_for_maj_pauses

View File

@ -258,10 +258,10 @@ class MarkRefsIntoAndScanClosure: public CMSOopsInGenClosure {
bool take_from_overflow_list();
};
// Tn this, the parallel avatar of MarkRefsIntoAndScanClosure, the revisit
// In this, the parallel avatar of MarkRefsIntoAndScanClosure, the revisit
// stack and the bitMap are shared, so access needs to be suitably
// sycnhronized. An OopTaskQueue structure, supporting efficient
// workstealing, replaces a CMSMarkStack for storing grey objects.
// synchronized. An OopTaskQueue structure, supporting efficient
// work stealing, replaces a CMSMarkStack for storing grey objects.
class Par_MarkRefsIntoAndScanClosure: public CMSOopsInGenClosure {
private:
MemRegion _span;

View File

@ -407,8 +407,8 @@ size_t CompactibleFreeListSpace::max_alloc_in_words() const {
res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
(size_t) SmallForLinearAlloc - 1));
// XXX the following could potentially be pretty slow;
// should one, pesimally for the rare cases when res
// caclulated above is less than IndexSetSize,
// should one, pessimistically for the rare cases when res
// calculated above is less than IndexSetSize,
// just return res calculated above? My reasoning was that
// those cases will be so rare that the extra time spent doesn't
// really matter....
@ -759,7 +759,7 @@ CompactibleFreeListSpace::new_dcto_cl(ExtendedOopClosure* cl,
// Note on locking for the space iteration functions:
// since the collector's iteration activities are concurrent with
// allocation activities by mutators, absent a suitable mutual exclusion
// mechanism the iterators may go awry. For instace a block being iterated
// mechanism the iterators may go awry. For instance a block being iterated
// may suddenly be allocated or divided up and part of it allocated and
// so on.
@ -2090,7 +2090,7 @@ CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
// Support for concurrent collection policy decisions.
bool CompactibleFreeListSpace::should_concurrent_collect() const {
// In the future we might want to add in frgamentation stats --
// In the future we might want to add in fragmentation stats --
// including erosion of the "mountain" into this decision as well.
return !adaptive_freelists() && linearAllocationWouldFail();
}
@ -2099,7 +2099,7 @@ bool CompactibleFreeListSpace::should_concurrent_collect() const {
void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
SCAN_AND_FORWARD(cp,end,block_is_obj,block_size);
// prepare_for_compaction() uses the space between live objects
// Prepare_for_compaction() uses the space between live objects
// so that later phase can skip dead space quickly. So verification
// of the free lists doesn't work after.
}
@ -2122,7 +2122,7 @@ void CompactibleFreeListSpace::compact() {
SCAN_AND_COMPACT(obj_size);
}
// fragmentation_metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
// Fragmentation metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
// where fbs is free block sizes
double CompactibleFreeListSpace::flsFrag() const {
size_t itabFree = totalSizeInIndexedFreeLists();
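The metric in the comment above reads as follows: it is 0.0 when all free space sits in a single block and approaches 1.0 as the same space is split into many small blocks. A minimal standalone sketch of the arithmetic, assuming a plain vector of free block sizes (the function name and signature here are illustrative, not the HotSpot code):

#include <cstddef>
#include <vector>

double fls_frag_sketch(const std::vector<size_t>& free_block_sizes) {
  double total  = 0.0;   // sum of fbs
  double sum_sq = 0.0;   // sum of fbs**2
  for (size_t fbs : free_block_sizes) {
    total  += (double)fbs;
    sum_sq += (double)fbs * (double)fbs;
  }
  if (total == 0.0) return 0.0;           // no free space: treat as unfragmented
  return 1.0 - sum_sq / (total * total);  // 0.0 = one big block; tends to 1.0 for many small blocks
}
// e.g. {100}           -> 0.0  (all free space in one block)
//      {25, 25, 25, 25} -> 0.75 (same free space split into four blocks)
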
@ -2651,7 +2651,7 @@ void CFLS_LAB::get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>*
// changes on-the-fly during a scavenge and avoid such a phase-change
// pothole. The following code is a heuristic attempt to do that.
// It is protected by a product flag until we have gained
// enough experience with this heuristic and fine-tuned its behaviour.
// enough experience with this heuristic and fine-tuned its behavior.
// WARNING: This might increase fragmentation if we overreact to
// small spikes, so some kind of historical smoothing based on
// previous experience with the greater reactivity might be useful.

View File

@ -58,7 +58,7 @@ class LinearAllocBlock VALUE_OBJ_CLASS_SPEC {
HeapWord* _ptr;
size_t _word_size;
size_t _refillSize;
size_t _allocation_size_limit; // largest size that will be allocated
size_t _allocation_size_limit; // Largest size that will be allocated
void print_on(outputStream* st) const;
};
@ -116,14 +116,14 @@ class CompactibleFreeListSpace: public CompactibleSpace {
PromotionInfo _promoInfo;
// helps to impose a global total order on freelistLock ranks;
// Helps to impose a global total order on freelistLock ranks;
// assumes that CFLSpace's are allocated in global total order
static int _lockRank;
// a lock protecting the free lists and free blocks;
// A lock protecting the free lists and free blocks;
// mutable because of ubiquity of locking even for otherwise const methods
mutable Mutex _freelistLock;
// locking verifier convenience function
// Locking verifier convenience function
void assert_locked() const PRODUCT_RETURN;
void assert_locked(const Mutex* lock) const PRODUCT_RETURN;
@ -131,12 +131,13 @@ class CompactibleFreeListSpace: public CompactibleSpace {
LinearAllocBlock _smallLinearAllocBlock;
FreeBlockDictionary<FreeChunk>::DictionaryChoice _dictionaryChoice;
AFLBinaryTreeDictionary* _dictionary; // ptr to dictionary for large size blocks
AFLBinaryTreeDictionary* _dictionary; // Pointer to dictionary for large size blocks
// Indexed array for small size blocks
AdaptiveFreeList<FreeChunk> _indexedFreeList[IndexSetSize];
// indexed array for small size blocks
// allocation stategy
bool _fitStrategy; // Use best fit strategy.
// Allocation strategy
bool _fitStrategy; // Use best fit strategy
bool _adaptive_freelists; // Use adaptive freelists
// This is an address close to the largest free chunk in the heap.
@ -157,7 +158,7 @@ class CompactibleFreeListSpace: public CompactibleSpace {
// Extra stuff to manage promotion parallelism.
// a lock protecting the dictionary during par promotion allocation.
// A lock protecting the dictionary during par promotion allocation.
mutable Mutex _parDictionaryAllocLock;
Mutex* parDictionaryAllocLock() const { return &_parDictionaryAllocLock; }
@ -275,26 +276,26 @@ class CompactibleFreeListSpace: public CompactibleSpace {
}
protected:
// reset the indexed free list to its initial empty condition.
// Reset the indexed free list to its initial empty condition.
void resetIndexedFreeListArray();
// reset to an initial state with a single free block described
// Reset to an initial state with a single free block described
// by the MemRegion parameter.
void reset(MemRegion mr);
// Return the total number of words in the indexed free lists.
size_t totalSizeInIndexedFreeLists() const;
public:
// Constructor...
// Constructor
CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr,
bool use_adaptive_freelists,
FreeBlockDictionary<FreeChunk>::DictionaryChoice);
// accessors
// Accessors
bool bestFitFirst() { return _fitStrategy == FreeBlockBestFitFirst; }
FreeBlockDictionary<FreeChunk>* dictionary() const { return _dictionary; }
HeapWord* nearLargestChunk() const { return _nearLargestChunk; }
void set_nearLargestChunk(HeapWord* v) { _nearLargestChunk = v; }
// Set CMS global values
// Set CMS global values.
static void set_cms_values();
// Return the free chunk at the end of the space. If no such
@ -305,7 +306,7 @@ class CompactibleFreeListSpace: public CompactibleSpace {
void set_collector(CMSCollector* collector) { _collector = collector; }
// Support for parallelization of rescan and marking
// Support for parallelization of rescan and marking.
const size_t rescan_task_size() const { return _rescan_task_size; }
const size_t marking_task_size() const { return _marking_task_size; }
SequentialSubTasksDone* conc_par_seq_tasks() {return &_conc_par_seq_tasks; }
@ -346,7 +347,7 @@ class CompactibleFreeListSpace: public CompactibleSpace {
// Resizing support
void set_end(HeapWord* value); // override
// mutual exclusion support
// Mutual exclusion support
Mutex* freelistLock() const { return &_freelistLock; }
// Iteration support
@ -370,7 +371,7 @@ class CompactibleFreeListSpace: public CompactibleSpace {
// If the iteration encounters an unparseable portion of the region,
// terminate the iteration and return the address of the start of the
// subregion that isn't done. Return of "NULL" indicates that the
// interation completed.
// iteration completed.
virtual HeapWord*
object_iterate_careful_m(MemRegion mr,
ObjectClosureCareful* cl);
@ -393,11 +394,11 @@ class CompactibleFreeListSpace: public CompactibleSpace {
size_t block_size_nopar(const HeapWord* p) const;
bool block_is_obj_nopar(const HeapWord* p) const;
// iteration support for promotion
// Iteration support for promotion
void save_marks();
bool no_allocs_since_save_marks();
// iteration support for sweeping
// Iteration support for sweeping
void save_sweep_limit() {
_sweep_limit = BlockOffsetArrayUseUnallocatedBlock ?
unallocated_block() : end();
@ -457,7 +458,7 @@ class CompactibleFreeListSpace: public CompactibleSpace {
FreeChunk* allocateScratch(size_t size);
// returns true if either the small or large linear allocation buffer is empty.
// Returns true if either the small or large linear allocation buffer is empty.
bool linearAllocationWouldFail() const;
// Adjust the chunk for the minimum size. This version is called in
@ -477,18 +478,18 @@ class CompactibleFreeListSpace: public CompactibleSpace {
void addChunkAndRepairOffsetTable(HeapWord* chunk, size_t size,
bool coalesced);
// Support for decisions regarding concurrent collection policy
// Support for decisions regarding concurrent collection policy.
bool should_concurrent_collect() const;
// Support for compaction
// Support for compaction.
void prepare_for_compaction(CompactPoint* cp);
void adjust_pointers();
void compact();
// reset the space to reflect the fact that a compaction of the
// Reset the space to reflect the fact that a compaction of the
// space has been done.
virtual void reset_after_compaction();
// Debugging support
// Debugging support.
void print() const;
void print_on(outputStream* st) const;
void prepare_for_verify();
@ -500,7 +501,7 @@ class CompactibleFreeListSpace: public CompactibleSpace {
// i.e. either the binary tree dictionary, the indexed free lists
// or the linear allocation block.
bool verify_chunk_in_free_list(FreeChunk* fc) const;
// Verify that the given chunk is the linear allocation block
// Verify that the given chunk is the linear allocation block.
bool verify_chunk_is_linear_alloc_block(FreeChunk* fc) const;
// Do some basic checks on the the free lists.
void check_free_list_consistency() const PRODUCT_RETURN;
@ -516,7 +517,7 @@ class CompactibleFreeListSpace: public CompactibleSpace {
size_t sumIndexedFreeListArrayReturnedBytes();
// Return the total number of chunks in the indexed free lists.
size_t totalCountInIndexedFreeLists() const;
// Return the total numberof chunks in the space.
// Return the total number of chunks in the space.
size_t totalCount();
)

View File

@ -117,10 +117,10 @@ GCCause::Cause CMSCollector::_full_gc_cause = GCCause::_no_gc;
// hide the naked CGC_lock manipulation in the baton-passing code
// further below. That's something we should try to do. Also, the proof
// of correctness of this 2-level locking scheme is far from obvious,
// and potentially quite slippery. We have an uneasy supsicion, for instance,
// and potentially quite slippery. We have an uneasy suspicion, for instance,
// that there may be a theoretical possibility of delay/starvation in the
// low-level lock/wait/notify scheme used for the baton-passing because of
// potential intereference with the priority scheme embodied in the
// potential interference with the priority scheme embodied in the
// CMS-token-passing protocol. See related comments at a CGC_lock->wait()
// invocation further below and marked with "XXX 20011219YSR".
// Indeed, as we note elsewhere, this may become yet more slippery
@ -259,7 +259,7 @@ ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
// Ideally, in the calculation below, we'd compute the dilatation
// factor as: MinChunkSize/(promoting_gen's min object size)
// Since we do not have such a general query interface for the
// promoting generation, we'll instead just use the mimimum
// promoting generation, we'll instead just use the minimum
// object size (which today is a header's worth of space);
// note that all arithmetic is in units of HeapWords.
assert(MinChunkSize >= CollectedHeap::min_fill_size(), "just checking");
@ -274,7 +274,7 @@ ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
//
// Let "f" be MinHeapFreeRatio in
//
// _intiating_occupancy = 100-f +
// _initiating_occupancy = 100-f +
// f * (CMSTriggerRatio/100)
// where CMSTriggerRatio is the argument "tr" below.
//
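As a worked example of the formula in that comment (a hedged sketch; the helper name is hypothetical and not part of the CMS sources): with MinHeapFreeRatio f = 40 and CMSTriggerRatio tr = 80, the initiating occupancy is 100 - 40 + 40 * (80/100) = 92 percent.

double initiating_occupancy_percent(double f /* MinHeapFreeRatio */,
                                    double tr /* CMSTriggerRatio */) {
  // 100-f is the occupancy at which exactly MinHeapFreeRatio of the heap is free;
  // the f * (tr/100) term lets tr percent of that reserve be used up before a
  // concurrent cycle is initiated.
  return (100.0 - f) + f * (tr / 100.0);
}
// initiating_occupancy_percent(40, 80) == 92.0
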
@ -2671,7 +2671,7 @@ bool CMSCollector::waitForForegroundGC() {
// that it's responsible for collecting, while itself doing any
// work common to all generations it's responsible for. A similar
// comment applies to the gc_epilogue()'s.
// The role of the varaible _between_prologue_and_epilogue is to
// The role of the variable _between_prologue_and_epilogue is to
// enforce the invocation protocol.
void CMSCollector::gc_prologue(bool full) {
// Call gc_prologue_work() for the CMSGen
@ -2878,10 +2878,10 @@ bool CMSCollector::have_cms_token() {
// Check reachability of the given heap address in CMS generation,
// treating all other generations as roots.
bool CMSCollector::is_cms_reachable(HeapWord* addr) {
// We could "guarantee" below, rather than assert, but i'll
// We could "guarantee" below, rather than assert, but I'll
// leave these as "asserts" so that an adventurous debugger
// could try this in the product build provided some subset of
// the conditions were met, provided they were intersted in the
// the conditions were met, provided they were interested in the
// results and knew that the computation below wouldn't interfere
// with other concurrent computations mutating the structures
// being read or written.
@ -2982,7 +2982,7 @@ bool CMSCollector::verify_after_remark(bool silent) {
// This is as intended, because by this time
// GC must already have cleared any refs that need to be cleared,
// and traced those that need to be marked; moreover,
// the marking done here is not going to intefere in any
// the marking done here is not going to interfere in any
// way with the marking information used by GC.
NoRefDiscovery no_discovery(ref_processor());
@ -3000,7 +3000,7 @@ bool CMSCollector::verify_after_remark(bool silent) {
if (CMSRemarkVerifyVariant == 1) {
// In this first variant of verification, we complete
// all marking, then check if the new marks-verctor is
// all marking, then check if the new marks-vector is
// a subset of the CMS marks-vector.
verify_after_remark_work_1();
} else if (CMSRemarkVerifyVariant == 2) {
@ -3399,7 +3399,7 @@ HeapWord* ConcurrentMarkSweepGeneration::expand_and_par_lab_allocate(CMSParGCThr
CMSExpansionCause::_allocate_par_lab);
// Now go around the loop and try alloc again;
// A competing par_promote might beat us to the expansion space,
// so we may go around the loop again if promotion fails agaion.
// so we may go around the loop again if promotion fails again.
if (GCExpandToAllocateDelayMillis > 0) {
os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
}
@ -4370,7 +4370,7 @@ void CMSConcMarkingTask::coordinator_yield() {
// should really use wait/notify, which is the recommended
// way of doing this type of interaction. Additionally, we should
// consolidate the eight methods that do the yield operation and they
// are almost identical into one for better maintenability and
// are almost identical into one for better maintainability and
// readability. See 6445193.
//
// Tony 2006.06.29
@ -4538,7 +4538,7 @@ void CMSCollector::abortable_preclean() {
// If Eden's current occupancy is below this threshold,
// immediately schedule the remark; else preclean
// past the next scavenge in an effort to
// schedule the pause as described avove. By choosing
// schedule the pause as described above. By choosing
// CMSScheduleRemarkEdenSizeThreshold >= max eden size
// we will never do an actual abortable preclean cycle.
if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) {
@ -5532,8 +5532,8 @@ CMSParRemarkTask::do_dirty_card_rescan_tasks(
// CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION!
// CAUTION: This closure has state that persists across calls to
// the work method dirty_range_iterate_clear() in that it has
// imbedded in it a (subtype of) UpwardsObjectClosure. The
// use of that state in the imbedded UpwardsObjectClosure instance
// embedded in it a (subtype of) UpwardsObjectClosure. The
// use of that state in the embedded UpwardsObjectClosure instance
// assumes that the cards are always iterated (even if in parallel
// by several threads) in monotonically increasing order per each
// thread. This is true of the implementation below which picks
@ -5548,7 +5548,7 @@ CMSParRemarkTask::do_dirty_card_rescan_tasks(
// sure that the changes there do not run counter to the
// assumptions made here and necessary for correctness and
// efficiency. Note also that this code might yield inefficient
// behaviour in the case of very large objects that span one or
// behavior in the case of very large objects that span one or
// more work chunks. Such objects would potentially be scanned
// several times redundantly. Work on 4756801 should try and
// address that performance anomaly if at all possible. XXX
@ -5574,7 +5574,7 @@ CMSParRemarkTask::do_dirty_card_rescan_tasks(
while (!pst->is_task_claimed(/* reference */ nth_task)) {
// Having claimed the nth_task, compute corresponding mem-region,
// which is a-fortiori aligned correctly (i.e. at a MUT bopundary).
// which is a-fortiori aligned correctly (i.e. at a MUT boundary).
// The alignment restriction ensures that we do not need any
// synchronization with other gang-workers while setting or
// clearing bits in thus chunk of the MUT.
@ -6365,7 +6365,7 @@ void CMSCollector::sweep(bool asynch) {
_inter_sweep_timer.reset();
_inter_sweep_timer.start();
// We need to use a monotonically non-deccreasing time in ms
// We need to use a monotonically non-decreasing time in ms
// or we will see time-warp warnings and os::javaTimeMillis()
// does not guarantee monotonicity.
jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
@ -6726,7 +6726,7 @@ bool CMSBitMap::allocate(MemRegion mr) {
warning("CMS bit map allocation failure");
return false;
}
// For now we'll just commit all of the bit map up fromt.
// For now we'll just commit all of the bit map up front.
// Later on we'll try to be more parsimonious with swap.
if (!_virtual_space.initialize(brs, brs.size())) {
warning("CMS bit map backing store failure");
@ -6833,8 +6833,8 @@ bool CMSMarkStack::allocate(size_t size) {
// XXX FIX ME !!! In the MT case we come in here holding a
// leaf lock. For printing we need to take a further lock
// which has lower rank. We need to recallibrate the two
// lock-ranks involved in order to be able to rpint the
// which has lower rank. We need to recalibrate the two
// lock-ranks involved in order to be able to print the
// messages below. (Or defer the printing to the caller.
// For now we take the expedient path of just disabling the
// messages for the problematic case.)
@ -7174,7 +7174,7 @@ size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
}
#endif // ASSERT
} else {
// an unitialized object
// An uninitialized object.
assert(_bitMap->isMarked(addr+1), "missing Printezis mark?");
HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
size = pointer_delta(nextOneAddr + 1, addr);
@ -7182,7 +7182,7 @@ size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
"alignment problem");
// Note that pre-cleaning needn't redirty the card. OopDesc::set_klass()
// will dirty the card when the klass pointer is installed in the
// object (signalling the completion of initialization).
// object (signaling the completion of initialization).
}
} else {
// Either a not yet marked object or an uninitialized object
@ -7993,7 +7993,7 @@ void PushAndMarkClosure::do_oop(oop obj) {
// we need to dirty all of the cards that the object spans,
// since the rescan of object arrays will be limited to the
// dirty cards.
// Note that no one can be intefering with us in this action
// Note that no one can be interfering with us in this action
// of dirtying the mod union table, so no locking or atomics
// are required.
if (obj->is_objArray()) {
@ -9019,7 +9019,7 @@ void CMSParDrainMarkingStackClosure::trim_queue(uint max) {
// It's OK to call this multi-threaded; the worst thing
// that can happen is that we'll get a bunch of closely
// spaced simulated oveflows, but that's OK, in fact
// spaced simulated overflows, but that's OK, in fact
// probably good as it would exercise the overflow code
// under contention.
bool CMSCollector::simulate_overflow() {
@ -9139,7 +9139,7 @@ bool CMSCollector::par_take_from_overflow_list(size_t num,
(void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
}
} else {
// Chop off the suffix and rerturn it to the global list.
// Chop off the suffix and return it to the global list.
assert(cur->mark() != BUSY, "Error");
oop suffix_head = cur->mark(); // suffix will be put back on global list
cur->set_mark(NULL); // break off suffix

View File

@ -171,19 +171,19 @@ class CMSBitMap VALUE_OBJ_CLASS_SPEC {
// Ideally this should be GrowableArray<> just like MSC's marking stack(s).
class CMSMarkStack: public CHeapObj<mtGC> {
//
friend class CMSCollector; // to get at expasion stats further below
friend class CMSCollector; // To get at expansion stats further below.
//
VirtualSpace _virtual_space; // space for the stack
oop* _base; // bottom of stack
size_t _index; // one more than last occupied index
size_t _capacity; // max #elements
Mutex _par_lock; // an advisory lock used in case of parallel access
NOT_PRODUCT(size_t _max_depth;) // max depth plumbed during run
VirtualSpace _virtual_space; // Space for the stack
oop* _base; // Bottom of stack
size_t _index; // One more than last occupied index
size_t _capacity; // Max #elements
Mutex _par_lock; // An advisory lock used in case of parallel access
NOT_PRODUCT(size_t _max_depth;) // Max depth plumbed during run
protected:
size_t _hit_limit; // we hit max stack size limit
size_t _failed_double; // we failed expansion before hitting limit
size_t _hit_limit; // We hit max stack size limit
size_t _failed_double; // We failed expansion before hitting limit
public:
CMSMarkStack():
@ -238,7 +238,7 @@ class CMSMarkStack: public CHeapObj<mtGC> {
_index = 0;
}
// Expand the stack, typically in response to an overflow condition
// Expand the stack, typically in response to an overflow condition.
void expand();
// Compute the least valued stack element.
@ -250,7 +250,7 @@ class CMSMarkStack: public CHeapObj<mtGC> {
return least;
}
// Exposed here to allow stack expansion in || case
// Exposed here to allow stack expansion in || case.
Mutex* par_lock() { return &_par_lock; }
};
@ -557,7 +557,7 @@ class CMSCollector: public CHeapObj<mtGC> {
// Manipulated with CAS in the parallel/multi-threaded case.
oop _overflow_list;
// The following array-pair keeps track of mark words
// displaced for accomodating overflow list above.
// displaced for accommodating overflow list above.
// This code will likely be revisited under RFE#4922830.
Stack<oop, mtGC> _preserved_oop_stack;
Stack<markOop, mtGC> _preserved_mark_stack;
@ -599,7 +599,7 @@ class CMSCollector: public CHeapObj<mtGC> {
void verify_after_remark_work_1();
void verify_after_remark_work_2();
// true if any verification flag is on.
// True if any verification flag is on.
bool _verifying;
bool verifying() const { return _verifying; }
void set_verifying(bool v) { _verifying = v; }
@ -611,9 +611,9 @@ class CMSCollector: public CHeapObj<mtGC> {
void set_did_compact(bool v);
// XXX Move these to CMSStats ??? FIX ME !!!
elapsedTimer _inter_sweep_timer; // time between sweeps
elapsedTimer _intra_sweep_timer; // time _in_ sweeps
// padded decaying average estimates of the above
elapsedTimer _inter_sweep_timer; // Time between sweeps
elapsedTimer _intra_sweep_timer; // Time _in_ sweeps
// Padded decaying average estimates of the above
AdaptivePaddedAverage _inter_sweep_estimate;
AdaptivePaddedAverage _intra_sweep_estimate;
@ -632,16 +632,16 @@ class CMSCollector: public CHeapObj<mtGC> {
void report_heap_summary(GCWhen::Type when);
protected:
ConcurrentMarkSweepGeneration* _cmsGen; // old gen (CMS)
MemRegion _span; // span covering above two
CardTableRS* _ct; // card table
ConcurrentMarkSweepGeneration* _cmsGen; // Old gen (CMS)
MemRegion _span; // Span covering above two
CardTableRS* _ct; // Card table
// CMS marking support structures
CMSBitMap _markBitMap;
CMSBitMap _modUnionTable;
CMSMarkStack _markStack;
HeapWord* _restart_addr; // in support of marking stack overflow
HeapWord* _restart_addr; // In support of marking stack overflow
void lower_restart_addr(HeapWord* low);
// Counters in support of marking stack / work queue overflow handling:
@ -656,12 +656,12 @@ class CMSCollector: public CHeapObj<mtGC> {
size_t _par_kac_ovflw;
NOT_PRODUCT(ssize_t _num_par_pushes;)
// ("Weak") Reference processing support
// ("Weak") Reference processing support.
ReferenceProcessor* _ref_processor;
CMSIsAliveClosure _is_alive_closure;
// keep this textually after _markBitMap and _span; c'tor dependency
// Keep this textually after _markBitMap and _span; c'tor dependency.
ConcurrentMarkSweepThread* _cmsThread; // the thread doing the work
ConcurrentMarkSweepThread* _cmsThread; // The thread doing the work
ModUnionClosure _modUnionClosure;
ModUnionClosurePar _modUnionClosurePar;
@ -697,7 +697,7 @@ class CMSCollector: public CHeapObj<mtGC> {
// State related to prologue/epilogue invocation for my generations
bool _between_prologue_and_epilogue;
// Signalling/State related to coordination between fore- and backgroud GC
// Signaling/State related to coordination between fore- and background GC
// Note: When the baton has been passed from background GC to foreground GC,
// _foregroundGCIsActive is true and _foregroundGCShouldWait is false.
static bool _foregroundGCIsActive; // true iff foreground collector is active or
@ -712,13 +712,13 @@ class CMSCollector: public CHeapObj<mtGC> {
int _numYields;
size_t _numDirtyCards;
size_t _sweep_count;
// number of full gc's since the last concurrent gc.
// Number of full gc's since the last concurrent gc.
uint _full_gcs_since_conc_gc;
// occupancy used for bootstrapping stats
// Occupancy used for bootstrapping stats
double _bootstrap_occupancy;
// timer
// Timer
elapsedTimer _timer;
// Timing, allocation and promotion statistics, used for scheduling.
@ -770,7 +770,7 @@ class CMSCollector: public CHeapObj<mtGC> {
int no_of_gc_threads);
void push_on_overflow_list(oop p);
void par_push_on_overflow_list(oop p);
// the following is, obviously, not, in general, "MT-stable"
// The following is, obviously, not, in general, "MT-stable"
bool overflow_list_is_empty() const;
void preserve_mark_if_necessary(oop p);
@ -778,24 +778,24 @@ class CMSCollector: public CHeapObj<mtGC> {
void preserve_mark_work(oop p, markOop m);
void restore_preserved_marks_if_any();
NOT_PRODUCT(bool no_preserved_marks() const;)
// in support of testing overflow code
// In support of testing overflow code
NOT_PRODUCT(int _overflow_counter;)
NOT_PRODUCT(bool simulate_overflow();) // sequential
NOT_PRODUCT(bool simulate_overflow();) // Sequential
NOT_PRODUCT(bool par_simulate_overflow();) // MT version
// CMS work methods
void checkpointRootsInitialWork(bool asynch); // initial checkpoint work
void checkpointRootsInitialWork(bool asynch); // Initial checkpoint work
// a return value of false indicates failure due to stack overflow
bool markFromRootsWork(bool asynch); // concurrent marking work
// A return value of false indicates failure due to stack overflow
bool markFromRootsWork(bool asynch); // Concurrent marking work
public: // FIX ME!!! only for testing
bool do_marking_st(bool asynch); // single-threaded marking
bool do_marking_mt(bool asynch); // multi-threaded marking
bool do_marking_st(bool asynch); // Single-threaded marking
bool do_marking_mt(bool asynch); // Multi-threaded marking
private:
// concurrent precleaning work
// Concurrent precleaning work
size_t preclean_mod_union_table(ConcurrentMarkSweepGeneration* gen,
ScanMarkedObjectsAgainCarefullyClosure* cl);
size_t preclean_card_table(ConcurrentMarkSweepGeneration* gen,
@ -811,26 +811,26 @@ class CMSCollector: public CHeapObj<mtGC> {
// Resets (i.e. clears) the per-thread plab sample vectors
void reset_survivor_plab_arrays();
// final (second) checkpoint work
// Final (second) checkpoint work
void checkpointRootsFinalWork(bool asynch, bool clear_all_soft_refs,
bool init_mark_was_synchronous);
// work routine for parallel version of remark
// Work routine for parallel version of remark
void do_remark_parallel();
// work routine for non-parallel version of remark
// Work routine for non-parallel version of remark
void do_remark_non_parallel();
// reference processing work routine (during second checkpoint)
// Reference processing work routine (during second checkpoint)
void refProcessingWork(bool asynch, bool clear_all_soft_refs);
// concurrent sweeping work
// Concurrent sweeping work
void sweepWork(ConcurrentMarkSweepGeneration* gen, bool asynch);
// (concurrent) resetting of support data structures
// (Concurrent) resetting of support data structures
void reset(bool asynch);
// Clear _expansion_cause fields of constituent generations
void clear_expansion_cause();
// An auxilliary method used to record the ends of
// An auxiliary method used to record the ends of
// used regions of each generation to limit the extent of sweep
void save_sweep_limits();
@ -854,7 +854,7 @@ class CMSCollector: public CHeapObj<mtGC> {
bool is_external_interruption();
void report_concurrent_mode_interruption();
// If the backgrould GC is active, acquire control from the background
// If the background GC is active, acquire control from the background
// GC and do the collection.
void acquire_control_and_collect(bool full, bool clear_all_soft_refs);
@ -893,7 +893,7 @@ class CMSCollector: public CHeapObj<mtGC> {
ConcurrentMarkSweepGeneration* cmsGen() { return _cmsGen; }
// locking checks
// Locking checks
NOT_PRODUCT(static bool have_cms_token();)
// XXXPERM bool should_collect(bool full, size_t size, bool tlab);
@ -958,7 +958,7 @@ class CMSCollector: public CHeapObj<mtGC> {
CMSBitMap* markBitMap() { return &_markBitMap; }
void directAllocated(HeapWord* start, size_t size);
// main CMS steps and related support
// Main CMS steps and related support
void checkpointRootsInitial(bool asynch);
bool markFromRoots(bool asynch); // a return value of false indicates failure
// due to stack overflow
@ -977,7 +977,7 @@ class CMSCollector: public CHeapObj<mtGC> {
// Performance Counter Support
CollectorCounters* counters() { return _gc_counters; }
// timer stuff
// Timer stuff
void startTimer() { assert(!_timer.is_active(), "Error"); _timer.start(); }
void stopTimer() { assert( _timer.is_active(), "Error"); _timer.stop(); }
void resetTimer() { assert(!_timer.is_active(), "Error"); _timer.reset(); }
@ -1014,18 +1014,18 @@ class CMSCollector: public CHeapObj<mtGC> {
static void print_on_error(outputStream* st);
// debugging
// Debugging
void verify();
bool verify_after_remark(bool silent = VerifySilently);
void verify_ok_to_terminate() const PRODUCT_RETURN;
void verify_work_stacks_empty() const PRODUCT_RETURN;
void verify_overflow_empty() const PRODUCT_RETURN;
// convenience methods in support of debugging
// Convenience methods in support of debugging
static const size_t skip_header_HeapWords() PRODUCT_RETURN0;
HeapWord* block_start(const void* p) const PRODUCT_RETURN0;
// accessors
// Accessors
CMSMarkStack* verification_mark_stack() { return &_markStack; }
CMSBitMap* verification_mark_bm() { return &_verification_mark_bm; }
@ -1109,7 +1109,7 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
CollectionTypes _debug_collection_type;
// True if a compactiing collection was done.
// True if a compacting collection was done.
bool _did_compact;
bool did_compact() { return _did_compact; }
@ -1203,7 +1203,7 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
// Support for compaction
CompactibleSpace* first_compaction_space() const;
// Adjust quantites in the generation affected by
// Adjust quantities in the generation affected by
// the compaction.
void reset_after_compaction();
@ -1301,7 +1301,7 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
void setNearLargestChunk();
bool isNearLargestChunk(HeapWord* addr);
// Get the chunk at the end of the space. Delagates to
// Get the chunk at the end of the space. Delegates to
// the space.
FreeChunk* find_chunk_at_end();
@ -1422,7 +1422,6 @@ class MarkFromRootsClosure: public BitMapClosure {
// marking from the roots following the first checkpoint.
// XXX This should really be a subclass of The serial version
// above, but i have not had the time to refactor things cleanly.
// That willbe done for Dolphin.
class Par_MarkFromRootsClosure: public BitMapClosure {
CMSCollector* _collector;
MemRegion _whole_span;
@ -1780,7 +1779,7 @@ class SweepClosure: public BlkClosureCareful {
void do_already_free_chunk(FreeChunk *fc);
// Work method called when processing an already free or a
// freshly garbage chunk to do a lookahead and possibly a
// premptive flush if crossing over _limit.
// preemptive flush if crossing over _limit.
void lookahead_and_flush(FreeChunk* fc, size_t chunkSize);
// Process a garbage chunk during sweeping.
size_t do_garbage_chunk(FreeChunk *fc);
@ -1879,7 +1878,7 @@ class CMSParDrainMarkingStackClosure: public VoidClosure {
};
// Allow yielding or short-circuiting of reference list
// prelceaning work.
// precleaning work.
class CMSPrecleanRefsYieldClosure: public YieldClosure {
CMSCollector* _collector;
void do_yield_work();

View File

@ -197,13 +197,13 @@ inline HeapWord* CMSBitMap::getNextMarkedWordAddress(
}
// Return the HeapWord address corrsponding to the next "0" bit
// Return the HeapWord address corresponding to the next "0" bit
// (inclusive).
inline HeapWord* CMSBitMap::getNextUnmarkedWordAddress(HeapWord* addr) const {
return getNextUnmarkedWordAddress(addr, endWord());
}
// Return the HeapWord address corrsponding to the next "0" bit
// Return the HeapWord address corresponding to the next "0" bit
// (inclusive).
inline HeapWord* CMSBitMap::getNextUnmarkedWordAddress(
HeapWord* start_addr, HeapWord* end_addr) const {

View File

@ -164,7 +164,7 @@ class ConcurrentMarkSweepThread: public ConcurrentGCThread {
// _pending_yields that holds the sum (of both sync and async requests), and
// a second counter _pending_decrements that only holds the async requests,
// for greater efficiency, since in a typical CMS run, there are many more
// pontential (i.e. static) yield points than there are actual
// potential (i.e. static) yield points than there are actual
// (i.e. dynamic) yields because of requests, which are few and far between.
//
// Note that, while "_pending_yields >= _pending_decrements" is an invariant,

View File

@ -279,7 +279,7 @@ void PromotionInfo::print_statistics(uint worker_id) const {
// When _spoolTail is NULL, then the set of slots with displaced headers
// is all those starting at the slot <_spoolHead, _firstIndex> and
// going up to the last slot of last block in the linked list.
// In this lartter case, _splice_point points to the tail block of
// In this latter case, _splice_point points to the tail block of
// this linked list of blocks holding displaced headers.
void PromotionInfo::verify() const {
// Verify the following:

View File

@ -39,7 +39,7 @@
// up, the wrapped closure is applied to all elements, keeping track of
// this elapsed time of this process, and leaving the array empty.
// The caller must be sure to call "done" to process any unprocessed
// buffered entriess.
// buffered entries.
class Generation;
class HeapRegion;

View File

@ -33,7 +33,7 @@ ConcurrentG1Refine::ConcurrentG1Refine(G1CollectedHeap* g1h) :
_threads(NULL), _n_threads(0),
_hot_card_cache(g1h)
{
// Ergomonically select initial concurrent refinement parameters
// Ergonomically select initial concurrent refinement parameters
if (FLAG_IS_DEFAULT(G1ConcRefinementGreenZone)) {
FLAG_SET_DEFAULT(G1ConcRefinementGreenZone, MAX2<int>(ParallelGCThreads, 1));
}

View File

@ -44,8 +44,8 @@ ConcurrentG1RefineThread(ConcurrentG1Refine* cg1r, ConcurrentG1RefineThread *nex
_vtime_accum(0.0)
{
// Each thread has its own monitor. The i-th thread is responsible for signalling
// to thread i+1 if the number of buffers in the queue exceeds a threashold for this
// Each thread has its own monitor. The i-th thread is responsible for signaling
// to thread i+1 if the number of buffers in the queue exceeds a threshold for this
// thread. Monitors are also used to wake up the threads during termination.
// The 0th worker in notified by mutator threads and has a special monitor.
// The last worker is used for young gen rset size sampling.

View File

@ -909,7 +909,7 @@ void ConcurrentMark::checkpointRootsInitialPre() {
}
#endif
// Initialise marking structures. This has to be done in a STW phase.
// Initialize marking structures. This has to be done in a STW phase.
reset();
// For each region note start of marking.
@ -923,8 +923,8 @@ void ConcurrentMark::checkpointRootsInitialPost() {
// If we force an overflow during remark, the remark operation will
// actually abort and we'll restart concurrent marking. If we always
// force an oveflow during remark we'll never actually complete the
// marking phase. So, we initilize this here, at the start of the
// force an overflow during remark we'll never actually complete the
// marking phase. So, we initialize this here, at the start of the
// cycle, so that at the remaining overflow number will decrease at
// every remark and we'll eventually not need to cause one.
force_overflow_stw()->init();
@ -959,7 +959,7 @@ void ConcurrentMark::checkpointRootsInitialPost() {
*
* Note, however, that this code is also used during remark and in
* this case we should not attempt to leave / enter the STS, otherwise
* we'll either hit an asseert (debug / fastdebug) or deadlock
* we'll either hit an assert (debug / fastdebug) or deadlock
* (product). So we should only leave / enter the STS if we are
* operating concurrently.
*
@ -1001,7 +1001,7 @@ void ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
// task 0 is responsible for clearing the global data structures
// We should be here because of an overflow. During STW we should
// not clear the overflow flag since we rely on it being true when
// we exit this method to abort the pause and restart concurent
// we exit this method to abort the pause and restart concurrent
// marking.
reset_marking_state(true /* clear_overflow */);
force_overflow()->update();
@ -1251,7 +1251,7 @@ void ConcurrentMark::markFromRoots() {
CMConcurrentMarkingTask markingTask(this, cmThread());
if (use_parallel_marking_threads()) {
_parallel_workers->set_active_workers((int)active_workers);
// Don't set _n_par_threads because it affects MT in proceess_strong_roots()
// Don't set _n_par_threads because it affects MT in process_strong_roots()
// and the decisions on that MT processing is made elsewhere.
assert(_parallel_workers->active_workers() > 0, "Should have been set");
_parallel_workers->run_task(&markingTask);
@ -1484,7 +1484,7 @@ public:
}
// Set the marked bytes for the current region so that
// it can be queried by a calling verificiation routine
// it can be queried by a calling verification routine
_region_marked_bytes = marked_bytes;
return false;
@ -2306,7 +2306,7 @@ class G1CMDrainMarkingStackClosure: public VoidClosure {
// oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
//
// CMTask::do_marking_step() is called in a loop, which we'll exit
// if there's nothing more to do (i.e. we'completely drained the
// if there's nothing more to do (i.e. we've completely drained the
// entries that were pushed as a a result of applying the 'keep alive'
// closure to the entries on the discovered ref lists) or we overflow
// the global marking stack.
@ -2469,7 +2469,7 @@ void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
// reference processing is not multi-threaded and is thus
// performed by the current thread instead of a gang worker).
//
// The gang tasks involved in parallel reference procssing create
// The gang tasks involved in parallel reference processing create
// their own instances of these closures, which do their own
// synchronization among themselves.
G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
@ -2546,7 +2546,7 @@ private:
public:
void work(uint worker_id) {
// Since all available tasks are actually started, we should
// only proceed if we're supposed to be actived.
// only proceed if we're supposed to be active.
if (worker_id < _cm->active_tasks()) {
CMTask* task = _cm->task(worker_id);
task->record_start_time();
@ -3066,7 +3066,7 @@ class AggregateCountDataHRClosure: public HeapRegionClosure {
// 'start' should be in the heap.
assert(_g1h->is_in_g1_reserved(start) && _ct_bs->is_card_aligned(start), "sanity");
// 'end' *may* be just beyone the end of the heap (if hr is the last region)
// 'end' *may* be just beyond the end of the heap (if hr is the last region)
assert(!_g1h->is_in_g1_reserved(end) || _ct_bs->is_card_aligned(end), "sanity");
BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
@ -4414,7 +4414,7 @@ void CMTask::do_marking_step(double time_target_ms,
// overflow was raised. This means we have to restart the
// marking phase and start iterating over regions. However, in
// order to do this we have to make sure that all tasks stop
// what they are doing and re-initialise in a safe manner. We
// what they are doing and re-initialize in a safe manner. We
// will achieve this with the use of two barrier sync points.
if (_cm->verbose_low()) {
@ -4428,7 +4428,7 @@ void CMTask::do_marking_step(double time_target_ms,
// When we exit this sync barrier we know that all tasks have
// stopped doing marking work. So, it's now safe to
// re-initialise our data structures. At the end of this method,
// re-initialize our data structures. At the end of this method,
// task 0 will clear the global data structures.
}

View File

@ -378,19 +378,19 @@ class ConcurrentMark: public CHeapObj<mtGC> {
friend class G1CMDrainMarkingStackClosure;
protected:
ConcurrentMarkThread* _cmThread; // the thread doing the work
G1CollectedHeap* _g1h; // the heap.
uint _parallel_marking_threads; // the number of marking
// threads we're use
uint _max_parallel_marking_threads; // max number of marking
// threads we'll ever use
double _sleep_factor; // how much we have to sleep, with
ConcurrentMarkThread* _cmThread; // The thread doing the work
G1CollectedHeap* _g1h; // The heap
uint _parallel_marking_threads; // The number of marking
// threads we're using
uint _max_parallel_marking_threads; // Max number of marking
// threads we'll ever use
double _sleep_factor; // How much we have to sleep, with
// respect to the work we just did, to
// meet the marking overhead goal
double _marking_task_overhead; // marking target overhead for
double _marking_task_overhead; // Marking target overhead for
// a single task
// same as the two above, but for the cleanup task
// Same as the two above, but for the cleanup task
double _cleanup_sleep_factor;
double _cleanup_task_overhead;
@ -399,8 +399,8 @@ protected:
// Concurrent marking support structures
CMBitMap _markBitMap1;
CMBitMap _markBitMap2;
CMBitMapRO* _prevMarkBitMap; // completed mark bitmap
CMBitMap* _nextMarkBitMap; // under-construction mark bitmap
CMBitMapRO* _prevMarkBitMap; // Completed mark bitmap
CMBitMap* _nextMarkBitMap; // Under-construction mark bitmap
BitMap _region_bm;
BitMap _card_bm;
@ -409,43 +409,43 @@ protected:
HeapWord* _heap_start;
HeapWord* _heap_end;
// Root region tracking and claiming.
// Root region tracking and claiming
CMRootRegions _root_regions;
// For gray objects
CMMarkStack _markStack; // Grey objects behind global finger.
HeapWord* volatile _finger; // the global finger, region aligned,
CMMarkStack _markStack; // Grey objects behind global finger
HeapWord* volatile _finger; // The global finger, region aligned,
// always points to the end of the
// last claimed region
// marking tasks
uint _max_worker_id;// maximum worker id
uint _active_tasks; // task num currently active
CMTask** _tasks; // task queue array (max_worker_id len)
CMTaskQueueSet* _task_queues; // task queue set
ParallelTaskTerminator _terminator; // for termination
// Marking tasks
uint _max_worker_id;// Maximum worker id
uint _active_tasks; // Task num currently active
CMTask** _tasks; // Task queue array (max_worker_id len)
CMTaskQueueSet* _task_queues; // Task queue set
ParallelTaskTerminator _terminator; // For termination
// Two sync barriers that are used to synchronise tasks when an
// Two sync barriers that are used to synchronize tasks when an
// overflow occurs. The algorithm is the following. All tasks enter
// the first one to ensure that they have all stopped manipulating
// the global data structures. After they exit it, they re-initialise
// their data structures and task 0 re-initialises the global data
// the global data structures. After they exit it, they re-initialize
// their data structures and task 0 re-initializes the global data
// structures. Then, they enter the second sync barrier. This
// ensure, that no task starts doing work before all data
// structures (local and global) have been re-initialised. When they
// structures (local and global) have been re-initialized. When they
// exit it, they are free to start working again.
WorkGangBarrierSync _first_overflow_barrier_sync;
WorkGangBarrierSync _second_overflow_barrier_sync;
// this is set by any task, when an overflow on the global data
// structures is detected.
// This is set by any task, when an overflow on the global data
// structures is detected
volatile bool _has_overflown;
// true: marking is concurrent, false: we're in remark
// True: marking is concurrent, false: we're in remark
volatile bool _concurrent;
// set at the end of a Full GC so that marking aborts
// Set at the end of a Full GC so that marking aborts
volatile bool _has_aborted;
// used when remark aborts due to an overflow to indicate that
// Used when remark aborts due to an overflow to indicate that
// another concurrent marking phase should start
volatile bool _restart_for_overflow;
@ -455,10 +455,10 @@ protected:
// time of remark.
volatile bool _concurrent_marking_in_progress;
// verbose level
// Verbose level
CMVerboseLevel _verbose_level;
// All of these times are in ms.
// All of these times are in ms
NumberSeq _init_times;
NumberSeq _remark_times;
NumberSeq _remark_mark_times;
@ -467,7 +467,7 @@ protected:
double _total_counting_time;
double _total_rs_scrub_time;
double* _accum_task_vtime; // accumulated task vtime
double* _accum_task_vtime; // Accumulated task vtime
FlexibleWorkGang* _parallel_workers;
@ -487,7 +487,7 @@ protected:
void reset_marking_state(bool clear_overflow = true);
// We do this after we're done with marking so that the marking data
// structures are initialised to a sensible and predictable state.
// structures are initialized to a sensible and predictable state.
void set_non_marking_state();
// Called to indicate how many threads are currently active.
@ -497,14 +497,14 @@ protected:
// mark or remark) and how many threads are currently active.
void set_concurrency_and_phase(uint active_tasks, bool concurrent);
// prints all gathered CM-related statistics
// Prints all gathered CM-related statistics
void print_stats();
bool cleanup_list_is_empty() {
return _cleanup_list.is_empty();
}
// accessor methods
// Accessor methods
uint parallel_marking_threads() const { return _parallel_marking_threads; }
uint max_parallel_marking_threads() const { return _max_parallel_marking_threads;}
double sleep_factor() { return _sleep_factor; }
@ -542,7 +542,7 @@ protected:
// frequently.
HeapRegion* claim_region(uint worker_id);
// It determines whether we've run out of regions to scan.
// It determines whether we've run out of regions to scan
bool out_of_regions() { return _finger == _heap_end; }
// Returns the task with the given id
@ -816,7 +816,7 @@ public:
inline bool do_yield_check(uint worker_i = 0);
inline bool should_yield();
// Called to abort the marking cycle after a Full GC takes palce.
// Called to abort the marking cycle after a Full GC takes place.
void abort();
bool has_aborted() { return _has_aborted; }
@ -933,11 +933,11 @@ public:
// Similar to the above routine but there are times when we cannot
// safely calculate the size of obj due to races and we, therefore,
// pass the size in as a parameter. It is the caller's reponsibility
// pass the size in as a parameter. It is the caller's responsibility
// to ensure that the size passed in for obj is valid.
inline bool par_mark_and_count(oop obj, size_t word_size, uint worker_id);
// Unconditionally mark the given object, and unconditinally count
// Unconditionally mark the given object, and unconditionally count
// the object in the counting structures for worker id 0.
// Should *not* be called from parallel code.
inline bool mark_and_count(oop obj, HeapRegion* hr);

View File

@ -105,7 +105,7 @@ inline void ConcurrentMark::count_region(MemRegion mr, HeapRegion* hr,
// will then correspond to a (non-existent) card that is also
// just beyond the heap.
if (g1h->is_in_g1_reserved(end) && !ct_bs->is_card_aligned(end)) {
// end of region is not card aligned - incremement to cover
// end of region is not card aligned - increment to cover
// all the cards spanned by the region.
end_idx += 1;
}
@ -222,7 +222,7 @@ inline bool ConcurrentMark::par_mark_and_count(oop obj,
return false;
}
// Unconditionally mark the given object, and unconditinally count
// Unconditionally mark the given object, and unconditionally count
// the object in the counting structures for worker id 0.
// Should *not* be called from parallel code.
inline bool ConcurrentMark::mark_and_count(oop obj, HeapRegion* hr) {

View File

@ -70,7 +70,7 @@ inline HeapWord* G1AllocRegion::attempt_allocation(size_t word_size,
inline HeapWord* G1AllocRegion::attempt_allocation_locked(size_t word_size,
bool bot_updates) {
// First we have to tedo the allocation, assuming we're holding the
// First we have to redo the allocation, assuming we're holding the
// appropriate lock, in case another thread changed the region while
// we were waiting to get the lock.
HeapWord* result = attempt_allocation(word_size, bot_updates);

View File

@ -448,7 +448,7 @@ HeapWord* G1BlockOffsetArray::block_start_careful(const void* addr) const {
// Otherwise, find the block start using the table, but taking
// care (cf block_start_unsafe() above) not to parse any objects/blocks
// on the cards themsleves.
// on the cards themselves.
size_t index = _array->index_for(addr);
assert(_array->address_for_index(index) == addr,
"arg should be start of card");

View File

@ -169,7 +169,7 @@ void G1CardCounts::clear_region(HeapRegion* hr) {
// We use the last address in hr as hr could be the
// last region in the heap. In which case trying to find
// the card for hr->end() will be an OOB accesss to the
// the card for hr->end() will be an OOB access to the
// card table.
HeapWord* last = hr->end() - 1;
assert(_g1h->g1_committed().contains(last),


@ -1373,7 +1373,7 @@ public:
// Divide the heap region sequence into "chunks" of some size (the number
// of regions divided by the number of parallel threads times some
// overpartition factor, currently 4). Assumes that this will be called
// in parallel by ParallelGCThreads worker threads with discinct worker
// in parallel by ParallelGCThreads worker threads with distinct worker
// ids in the range [0..max(ParallelGCThreads-1, 1)], that all parallel
// calls will use the same "claim_value", and that that claim value is
// different from the claim_value of any heap region before the start of
@ -1518,7 +1518,7 @@ public:
// Returns "true" iff the given word_size is "very large".
static bool isHumongous(size_t word_size) {
// Note this has to be strictly greater-than as the TLABs
// are capped at the humongous thresold and we want to
// are capped at the humongous threshold and we want to
// ensure that we don't try to allocate a TLAB as
// humongous and that we don't allocate a humongous
// object in a TLAB.
@ -1648,24 +1648,24 @@ public:
// Optimized nmethod scanning support routines
// Register the given nmethod with the G1 heap
// Register the given nmethod with the G1 heap.
virtual void register_nmethod(nmethod* nm);
// Unregister the given nmethod from the G1 heap
// Unregister the given nmethod from the G1 heap.
virtual void unregister_nmethod(nmethod* nm);
// Migrate the nmethods in the code root lists of the regions
// in the collection set to regions in to-space. In the event
// of an evacuation failure, nmethods that reference objects
// that were not successfullly evacuated are not migrated.
// that were not successfully evacuated are not migrated.
void migrate_strong_code_roots();
// During an initial mark pause, mark all the code roots that
// point into regions *not* in the collection set.
void mark_strong_code_roots(uint worker_id);
// Rebuild the stong code root lists for each region
// after a full GC
// Rebuild the strong code root lists for each region
// after a full GC.
void rebuild_strong_code_roots();
// Delete entries for dead interned string and clean up unreferenced symbols


@ -1075,7 +1075,7 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, Evacua
}
_short_lived_surv_rate_group->start_adding_regions();
// do that for any other surv rate groupsx
// Do that for any other surv rate groups
if (update_stats) {
double cost_per_card_ms = 0.0;
@ -1741,7 +1741,7 @@ void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_l
_inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms;
_inc_cset_bytes_used_before += used_bytes;
// Cache the values we have added to the aggregated informtion
// Cache the values we have added to the aggregated information
// in the heap region in case we have to remove this region from
// the incremental collection set, or it is updated by the
// rset sampling code


@ -116,7 +116,7 @@ class TraceGen1TimeData : public CHeapObj<mtGC> {
// If only -XX:NewRatio is set we should use the specified ratio of the heap
// as both min and max. This will be interpreted as "fixed" just like the
// NewSize==MaxNewSize case above. But we will update the min and max
// everytime the heap size changes.
// every time the heap size changes.
//
// NewSize and MaxNewSize override NewRatio. So, NewRatio is ignored if it is
// combined with either NewSize or MaxNewSize. (A warning message is printed.)
@ -523,9 +523,9 @@ private:
// synchronize updates to this field.
size_t _inc_cset_recorded_rs_lengths;
// A concurrent refinement thread periodcially samples the young
// A concurrent refinement thread periodically samples the young
// region RSets and needs to update _inc_cset_recorded_rs_lengths as
// the RSets grow. Instead of having to syncronize updates to that
// the RSets grow. Instead of having to synchronize updates to that
// field we accumulate them in this field and add it to
// _inc_cset_recorded_rs_lengths_diffs at the start of a GC.
ssize_t _inc_cset_recorded_rs_lengths_diffs;
@ -604,7 +604,7 @@ private:
// Calculate and return the maximum young list target length that
// can fit into the pause time goal. The parameters are: rs_lengths
// represent the prediction of how large the young RSet lengths will
// be, base_min_length is the alreay existing number of regions in
// be, base_min_length is the already existing number of regions in
// the young list, min_length and max_length are the desired min and
// max young list length according to the user's inputs.
uint calculate_young_list_target_length(size_t rs_lengths,


@ -103,7 +103,7 @@ private:
// The data structure implemented is a circular queue.
// Head "points" to the most recent addition, tail to the oldest one.
// The array is of fixed size and I don't think we'll need more than
// two or three entries with the current behaviour of G1 pauses.
// two or three entries with the current behavior of G1 pauses.
// If the array is full, an easy fix is to look for the pauses with
// the shortest gap between them and consolidate them.
// For now, we have taken the expedient alternative of forgetting


@ -176,7 +176,7 @@ void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
// any hash values from the mark word. These hash values are
// used when verifying the dictionaries and so removing them
// from the mark word can make verification of the dictionaries
// fail. At the end of the GC, the orginal mark word values
// fail. At the end of the GC, the original mark word values
// (including hash values) are restored to the appropriate
// objects.
if (!VerifySilently) {


@ -112,7 +112,7 @@ G1MonitoringSupport::G1MonitoringSupport(G1CollectedHeap* g1h) :
// take_sample() only returns "used". When sampling was used, there
// were some anomolous values emitted which may have been the consequence
// of not updating all values simultaneously (i.e., see the calculation done
// in eden_space_used(), is it possbile that the values used to
// in eden_space_used(), is it possible that the values used to
// calculate either eden_used or survivor_used are being updated by
// the collector when the sample is being done?).
const bool sampled = false;
@ -135,7 +135,7 @@ G1MonitoringSupport::G1MonitoringSupport(G1CollectedHeap* g1h) :
// Young collection set
// name "generation.0". This is logically the young generation.
// The "0, 3" are paremeters for the n-th genertaion (=0) with 3 spaces.
// The "0, 3" are parameters for the n-th generation (=0) with 3 spaces.
// See _old_collection_counters for additional counters
_young_collection_counters = new G1YoungGenerationCounters(this, "young");
@ -254,7 +254,7 @@ void G1MonitoringSupport::update_sizes() {
eden_counters()->update_capacity(pad_capacity(eden_space_committed()));
eden_counters()->update_used(eden_space_used());
// only the to survivor space (s1) is active, so we don't need to
// update the counteres for the from survivor space (s0)
// update the counters for the from survivor space (s0)
to_counters()->update_capacity(pad_capacity(survivor_space_committed()));
to_counters()->update_used(survivor_space_used());
old_space_counters()->update_capacity(pad_capacity(old_space_committed()));


@ -108,7 +108,7 @@ class G1CollectedHeap;
// is that all the above sizes need to be recalculated when the old
// gen changes capacity (after a GC or after a humongous allocation)
// but only the eden occupancy changes when a new eden region is
// allocated. So, in the latter case we have minimal recalcuation to
// allocated. So, in the latter case we have minimal recalculation to
// do which is important as we want to keep the eden region allocation
// path as low-overhead as possible.


@ -177,7 +177,7 @@ inline void G1UpdateRSOrPushRefOopClosure::do_oop_nv(T* p) {
// The _record_refs_into_cset flag is true during the RSet
// updating part of an evacuation pause. It is false at all
// other times:
// * rebuilding the rembered sets after a full GC
// * rebuilding the remembered sets after a full GC
// * during concurrent refinement.
// * updating the remembered sets of regions in the collection
// set in the event of an evacuation failure (when deferred


@ -195,7 +195,7 @@ public:
HeapRegionRemSetIterator iter(hrrs);
size_t card_index;
// We claim cards in block so as to recude the contention. The block size is determined by
// We claim cards in block so as to reduce the contention. The block size is determined by
// the G1RSetScanBlockSize parameter.
size_t jump_to_card = hrrs->iter_claimed_next(_block_size);
for (size_t current_card = 0; iter.has_next(card_index); current_card++) {
@ -587,7 +587,7 @@ bool G1RemSet::refine_card(jbyte* card_ptr, int worker_i,
// While we are processing RSet buffers during the collection, we
// actually don't want to scan any cards on the collection set,
// since we don't want to update remebered sets with entries that
// since we don't want to update remembered sets with entries that
// point into the collection set, given that live objects from the
// collection set are about to move and such entries will be stale
// very soon. This change also deals with a reliability issue which


@ -1027,7 +1027,7 @@ void HeapRegion::verify(VerifyOption vo,
}
}
// Loook up end - 1
// Look up end - 1
HeapWord* addr_4 = the_end - 1;
HeapWord* b_start_4 = _offsets.block_start_const(addr_4);
if (b_start_4 != p) {
@ -1111,7 +1111,7 @@ void G1OffsetTableContigSpace::set_saved_mark() {
// will be false, and it will pick up top() as the high water mark
// of region. If it does so after _gc_time_stamp = ..., then it
// will pick up the right saved_mark_word() as the high water mark
// of the region. Either way, the behaviour will be correct.
// of the region. Either way, the behavior will be correct.
ContiguousSpace::set_saved_mark();
OrderAccess::storestore();
_gc_time_stamp = curr_gc_time_stamp;


@ -97,7 +97,7 @@ class HeapRegionSeq: public CHeapObj<mtGC> {
HeapWord* heap_end() const {return _regions.end_address_mapped(); }
public:
// Empty contructor, we'll initialize it with the initialize() method.
// Empty constructor, we'll initialize it with the initialize() method.
HeapRegionSeq() : _regions(), _committed_length(0), _next_search_index(0), _allocated_length(0) { }
void initialize(HeapWord* bottom, HeapWord* end);


@ -71,7 +71,7 @@ void PtrQueue::locking_enqueue_completed_buffer(void** buf) {
assert(_lock->owned_by_self(), "Required.");
// We have to unlock _lock (which may be Shared_DirtyCardQ_lock) before
// we acquire DirtyCardQ_CBL_mon inside enqeue_complete_buffer as they
// we acquire DirtyCardQ_CBL_mon inside enqueue_complete_buffer as they
// have the same rank and we may get the "possible deadlock" message
_lock->unlock();
@ -151,7 +151,7 @@ void PtrQueue::handle_zero_index() {
// The current PtrQ may be the shared dirty card queue and
// may be being manipulated by more than one worker thread
// during a pause. Since the enqueuing of the completed
// during a pause. Since the enqueueing of the completed
// buffer unlocks the Shared_DirtyCardQ_lock more than one
// worker thread can 'race' on reading the shared queue attributes
// (_buf and _index) and multiple threads can call into this
@ -170,7 +170,7 @@ void PtrQueue::handle_zero_index() {
locking_enqueue_completed_buffer(buf); // enqueue completed buffer
// While the current thread was enqueuing the buffer another thread
// While the current thread was enqueueing the buffer another thread
// may have allocated a new buffer and inserted it into this pointer
// queue. If that happens then we just return so that the current
// thread doesn't overwrite the buffer allocated by the other thread


@ -144,7 +144,7 @@ public:
// Attempts to ensure that the given card_index in the given region is in
// the sparse table. If successful (because the card was already
// present, or because it was successfullly added) returns "true".
// present, or because it was successfully added) returns "true".
// Otherwise, returns "false" to indicate that the addition would
// overflow the entry for the region. The caller must transfer these
// entries to a larger-capacity representation.
@ -201,8 +201,7 @@ public:
bool has_next(size_t& card_index);
};
// Concurrent accesss to a SparsePRT must be serialized by some external
// mutex.
// Concurrent access to a SparsePRT must be serialized by some external mutex.
class SparsePRTIter;
class SparsePRTCleanupTask;
@ -248,7 +247,7 @@ public:
// Attempts to ensure that the given card_index in the given region is in
// the sparse table. If successful (because the card was already
// present, or because it was successfullly added) returns "true".
// present, or because it was successfully added) returns "true".
// Otherwise, returns "false" to indicate that the addition would
// overflow the entry for the region. The caller must transfer these
// entries to a larger-capacity representation.


@ -154,7 +154,7 @@ bool ASParNewGeneration::resize_generation(size_t eden_size,
// There used to be this guarantee there.
// guarantee ((eden_size + 2*survivor_size) <= _max_gen_size, "incorrect input arguments");
// Code below forces this requirement. In addition the desired eden
// size and disired survivor sizes are desired goals and may
// size and desired survivor sizes are desired goals and may
// exceed the total generation size.
assert(min_gen_size() <= orig_size && orig_size <= max_gen_size(),


@ -213,7 +213,7 @@ process_chunk_boundaries(Space* sp,
&& sp->block_is_obj(first_block) // first block is an object
&& !(oop(first_block)->is_objArray() // first block is not an array (arrays are precisely dirtied)
|| oop(first_block)->is_typeArray())) {
// Find our least non-clean card, so that a left neighbour
// Find our least non-clean card, so that a left neighbor
// does not scan an object straddling the mutual boundary
// too far to the right, and attempt to scan a portion of
// that object twice.
@ -247,14 +247,14 @@ process_chunk_boundaries(Space* sp,
} NOISY(else {
tty->print_cr(" LNC: Found no dirty card in current chunk; leaving LNC entry NULL");
// In the future, we could have this thread look for a non-NULL value to copy from its
// right neighbour (up to the end of the first object).
// right neighbor (up to the end of the first object).
if (last_card_of_cur_chunk < last_card_of_first_obj) {
tty->print_cr(" LNC: BEWARE!!! first obj straddles past right end of chunk:\n"
" might be efficient to get value from right neighbour?");
}
})
} else {
// In this case we can help our neighbour by just asking them
// In this case we can help our neighbor by just asking them
// to stop at our first card (even though it may not be dirty).
NOISY(tty->print_cr(" LNC: first block is not a non-array object; setting LNC to first card of current chunk");)
assert(lowest_non_clean[cur_chunk_index] == NULL, "Write once : value should be stable hereafter");


@ -1070,7 +1070,7 @@ void ParNewGeneration::collect(bool full,
size_policy->avg_survived()->sample(from()->used());
}
// We need to use a monotonically non-deccreasing time in ms
// We need to use a monotonically non-decreasing time in ms
// or we will see time-warp warnings and os::javaTimeMillis()
// does not guarantee monotonicity.
jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
@ -1402,7 +1402,7 @@ oop ParNewGeneration::copy_to_survivor_space_with_undo(
#ifndef PRODUCT
// It's OK to call this multi-threaded; the worst thing
// that can happen is that we'll get a bunch of closely
// spaced simulated oveflows, but that's OK, in fact
// spaced simulated overflows, but that's OK, in fact
// probably good as it would exercise the overflow code
// under contention.
bool ParNewGeneration::should_simulate_overflow() {


@ -118,8 +118,8 @@ size_t AdjoiningGenerations::reserved_byte_size() {
// Make checks on the current sizes of the generations and
// the contraints on the sizes of the generations. Push
// up the boundary within the contraints. A partial
// the constraints on the sizes of the generations. Push
// up the boundary within the constraints. A partial
// push can occur.
void AdjoiningGenerations::request_old_gen_expansion(size_t expand_in_bytes) {
assert(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary, "runtime check");


@ -69,7 +69,7 @@ class AdjoiningGenerations : public CHeapObj<mtGC> {
// the available space and attempt to move the boundary if more space
// is needed. The growth is not guaranteed to occur.
void adjust_boundary_for_old_gen_needs(size_t desired_change_in_bytes);
// Similary for a growth of the young generation.
// Similarly for a growth of the young generation.
void adjust_boundary_for_young_gen_needs(size_t eden_size, size_t survivor_size);
// Return the total byte size of the reserved space


@ -65,7 +65,7 @@ class CheckForUnmarkedOops : public OopClosure {
}
};
// Checks all objects for the existance of some type of mark,
// Checks all objects for the existence of some type of mark,
// precise or imprecise, dirty or newgen.
class CheckForUnmarkedObjects : public ObjectClosure {
private:
@ -84,7 +84,7 @@ class CheckForUnmarkedObjects : public ObjectClosure {
}
// Card marks are not precise. The current system can leave us with
// a mismash of precise marks and beginning of object marks. This means
// a mismatch of precise marks and beginning of object marks. This means
// we test for missing precise marks first. If any are found, we don't
// fail unless the object head is also unmarked.
virtual void do_object(oop obj) {


@ -202,12 +202,12 @@ void GCTaskQueue::enqueue(GCTaskQueue* list) {
list->print("list:");
}
if (list->is_empty()) {
// Enqueuing the empty list: nothing to do.
// Enqueueing the empty list: nothing to do.
return;
}
uint list_length = list->length();
if (is_empty()) {
// Enqueuing to empty list: just acquire elements.
// Enqueueing to empty list: just acquire elements.
set_insert_end(list->insert_end());
set_remove_end(list->remove_end());
set_length(list_length);


@ -303,7 +303,7 @@ protected:
// load balancing (i.e., over partitioning). The last task to be
// executed by a GC thread in a job is a work stealing task. A
// GC thread that gets a work stealing task continues to execute
// that task until the job is done. In the static number of GC theads
// that task until the job is done. In the static number of GC threads
// case, tasks are added to a queue (FIFO). The work stealing tasks are
// the last to be added. Once the tasks are added, the GC threads grab
// a task and go. A single thread can do all the non-work stealing tasks


@ -673,7 +673,7 @@ ParallelScavengeHeap* ParallelScavengeHeap::heap() {
// Before delegating the resize to the young generation,
// the reserved space for the young and old generations
// may be changed to accomodate the desired resize.
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_young_gen(size_t eden_size,
size_t survivor_size) {
if (UseAdaptiveGCBoundary) {
@ -690,7 +690,7 @@ void ParallelScavengeHeap::resize_young_gen(size_t eden_size,
// Before delegating the resize to the old generation,
// the reserved space for the young and old generations
// may be changed to accomodate the desired resize.
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_old_gen(size_t desired_free_space) {
if (UseAdaptiveGCBoundary) {
if (size_policy()->bytes_absorbed_from_eden() != 0) {


@ -45,7 +45,7 @@
// the do_it() method of a ThreadRootsMarkingTask is executed, it
// starts marking from the thread's roots.
//
// The enqueuing of the MarkFromRootsTask and ThreadRootsMarkingTask
// The enqueueing of the MarkFromRootsTask and ThreadRootsMarkingTask
// do little more than create the task and put it on a queue. The
// queue is a GCTaskQueue and threads steal tasks from this GCTaskQueue.
//


@ -482,7 +482,7 @@ void PSAdaptiveSizePolicy::compute_old_gen_free_space(
// adjust down the total heap size. Adjust down the larger of the
// generations.
// Add some checks for a threshhold for a change. For example,
// Add some checks for a threshold for a change. For example,
// a change less than the necessary alignment is probably not worth
// attempting.
@ -1161,7 +1161,7 @@ uint PSAdaptiveSizePolicy::compute_survivor_space_size_and_threshold(
// We use the tenuring threshold to equalize the cost of major
// and minor collections.
// ThresholdTolerance is used to indicate how sensitive the
// tenuring threshold is to differences in cost betweent the
// tenuring threshold is to differences in cost between the
// collection types.
// Get the times of interest. This involves a little work, so


@ -37,7 +37,7 @@
//
// It also computes an optimal tenuring threshold between the young
// and old generations, so as to equalize the cost of collections
// of those generations, as well as optimial survivor space sizes
// of those generations, as well as optimal survivor space sizes
// for the young generation.
//
// While this class is specifically intended for a generational system
@ -113,7 +113,7 @@ class PSAdaptiveSizePolicy : public AdaptiveSizePolicy {
// Changing the generation sizing depends on the data that is
// gathered about the effects of changes on the pause times and
// throughput. These variable count the number of data points
// gathered. The policy may use these counters as a threshhold
// gathered. The policy may use these counters as a threshold
// for reliable data.
julong _young_gen_change_for_major_pause_count;


@ -195,7 +195,7 @@ class PSGCAdaptivePolicyCounters : public GCAdaptivePolicyCounters {
// Update all the counters that can be updated from the size policy.
// This should be called after all policy changes have been made
// and reflected internall in the size policy.
// and reflected internally in the size policy.
void update_counters_from_policy();
// Update counters that can be updated from fields internal to the


@ -661,7 +661,7 @@ void PSMarkSweep::mark_sweep_phase4() {
}
jlong PSMarkSweep::millis_since_last_gc() {
// We need a monotonically non-deccreasing time in ms but
// We need a monotonically non-decreasing time in ms but
// os::javaTimeMillis() does not guarantee monotonicity.
jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
jlong ret_val = now - _time_of_last_gc;
@ -674,7 +674,7 @@ jlong PSMarkSweep::millis_since_last_gc() {
}
void PSMarkSweep::reset_millis_since_last_gc() {
// We need a monotonically non-deccreasing time in ms but
// We need a monotonically non-decreasing time in ms but
// os::javaTimeMillis() does not guarantee monotonicity.
_time_of_last_gc = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
}


@ -280,7 +280,7 @@ bool PSOldGen::expand_by(size_t bytes) {
"Should be true before post_resize()");
MemRegion mangle_region(object_space()->end(), virtual_space_high);
// Note that the object space has not yet been updated to
// coincede with the new underlying virtual space.
// coincide with the new underlying virtual space.
SpaceMangler::mangle_region(mangle_region);
}
post_resize();


@ -187,7 +187,7 @@ class PSOldGen : public CHeapObj<mtGC> {
void space_invariants() PRODUCT_RETURN;
// Performace Counter support
// Performance Counter support
void update_counters();
// Printing support


@ -2176,7 +2176,7 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
heap->resize_all_tlabs();
// Resize the metaspace capactiy after a collection
// Resize the metaspace capacity after a collection
MetaspaceGC::compute_new_size();
if (TraceGen1Time) accumulated_time()->stop();
@ -3285,7 +3285,7 @@ PSParallelCompact::move_and_update(ParCompactionManager* cm, SpaceId space_id) {
}
jlong PSParallelCompact::millis_since_last_gc() {
// We need a monotonically non-deccreasing time in ms but
// We need a monotonically non-decreasing time in ms but
// os::javaTimeMillis() does not guarantee monotonicity.
jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
jlong ret_val = now - _time_of_last_gc;
@ -3298,7 +3298,7 @@ jlong PSParallelCompact::millis_since_last_gc() {
}
void PSParallelCompact::reset_millis_since_last_gc() {
// We need a monotonically non-deccreasing time in ms but
// We need a monotonically non-decreasing time in ms but
// os::javaTimeMillis() does not guarantee monotonicity.
_time_of_last_gc = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
}


@ -877,7 +877,7 @@ inline void ParMarkBitMapClosure::decrement_words_remaining(size_t words) {
// The summary phase calculates the total live data to the left of each region
// XXX. Based on that total and the bottom of the space, it can calculate the
// starting location of the live data in XXX. The summary phase calculates for
// each region XXX quantites such as
// each region XXX quantities such as
//
// - the amount of live data at the beginning of a region from an object
// entering the region.


@ -78,7 +78,7 @@ class PSPromotionLAB : public CHeapObj<mtGC> {
// Returns a subregion containing all objects in this space.
MemRegion used_region() { return MemRegion(bottom(), top()); }
// Boolean querries.
// Boolean queries.
bool is_empty() const { return used() == 0; }
bool not_empty() const { return used() > 0; }
bool contains(const void* p) const { return _bottom <= p && p < _end; }


@ -558,7 +558,7 @@ bool PSScavenge::invoke_no_policy() {
((gc_cause != GCCause::_java_lang_system_gc) ||
UseAdaptiveSizePolicyWithSystemGC)) {
// Calculate optimial free space amounts
// Calculate optimal free space amounts
assert(young_gen->max_size() >
young_gen->from_space()->capacity_in_bytes() +
young_gen->to_space()->capacity_in_bytes(),


@ -35,7 +35,7 @@
class PSVirtualSpace : public CHeapObj<mtGC> {
friend class VMStructs;
protected:
// The space is committed/uncommited in chunks of size _alignment. The
// The space is committed/uncommitted in chunks of size _alignment. The
// ReservedSpace passed to initialize() must be aligned to this value.
const size_t _alignment;


@ -136,7 +136,7 @@ void PSYoungGen::initialize_work() {
// generation - the less space committed, the smaller the survivor
// space, possibly as small as an alignment. However, we are interested
// in the case where the young generation is 100% committed, as this
// is the point where eden reachs its maximum size. At this point,
// is the point where eden reaches its maximum size. At this point,
// the size of a survivor space is max_survivor_size.
max_eden_size = size - 2 * max_survivor_size;
}
@ -288,7 +288,7 @@ bool PSYoungGen::resize_generation(size_t eden_size, size_t survivor_size) {
// There used to be this guarantee there.
// guarantee ((eden_size + 2*survivor_size) <= _max_gen_size, "incorrect input arguments");
// Code below forces this requirement. In addition the desired eden
// size and disired survivor sizes are desired goals and may
// size and desired survivor sizes are desired goals and may
// exceed the total generation size.
assert(min_gen_size() <= orig_size && orig_size <= max_size(), "just checking");


@ -121,7 +121,7 @@ int AdaptiveSizePolicy::calc_default_active_workers(uintx total_workers,
// Choose a number of GC threads based on the current size
// of the heap. This may be complicated because the size of
// the heap depends on factors such as the thoughput goal.
// the heap depends on factors such as the throughput goal.
// Still a large heap should be collected by more GC threads.
active_workers_by_heap_size =
MAX2((size_t) 2U, Universe::heap()->capacity() / HeapSizePerGCThread);
@ -445,7 +445,7 @@ void AdaptiveSizePolicy::check_gc_overhead_limit(
// into account (i.e., don't trigger if the amount of free
// space has suddenly jumped up). If the current is much
// higher than the average, use the average since it represents
// the longer term behavor.
// the longer term behavior.
const size_t live_in_eden =
MIN2(eden_live, (size_t) avg_eden_live()->average());
const size_t free_in_eden = max_eden_size > live_in_eden ?


@ -74,7 +74,7 @@ class AdaptiveSizePolicy : public CHeapObj<mtGC> {
};
// Goal for the fraction of the total time during which application
// threads run.
// threads run
const double _throughput_goal;
// Last calculated sizes, in bytes, and aligned
@ -83,21 +83,21 @@ class AdaptiveSizePolicy : public CHeapObj<mtGC> {
size_t _survivor_size; // calculated survivor size in bytes
// This is a hint for the heap: we've detected that gc times
// This is a hint for the heap: we've detected that GC times
// are taking longer than GCTimeLimit allows.
bool _gc_overhead_limit_exceeded;
// Use for diagnostics only. If UseGCOverheadLimit is false,
// this variable is still set.
bool _print_gc_overhead_limit_would_be_exceeded;
// Count of consecutive GC that have exceeded the
// GC time limit criterion.
// GC time limit criterion
uint _gc_overhead_limit_count;
// This flag signals that GCTimeLimit is being exceeded
// but may not have done so for the required number of consequetive
// collections.
// but may not have done so for the required number of consecutive
// collections
// Minor collection timers used to determine both
// pause and interval times for collections.
// pause and interval times for collections
static elapsedTimer _minor_timer;
// Major collection timers, used to determine both
@ -120,7 +120,7 @@ class AdaptiveSizePolicy : public CHeapObj<mtGC> {
// Statistics for survivor space calculation for young generation
AdaptivePaddedAverage* _avg_survived;
// Objects that have been directly allocated in the old generation.
// Objects that have been directly allocated in the old generation
AdaptivePaddedNoZeroDevAverage* _avg_pretenured;
// Variable for estimating the major and minor pause times.
@ -142,33 +142,33 @@ class AdaptiveSizePolicy : public CHeapObj<mtGC> {
// for making ergonomic decisions.
double _latest_minor_mutator_interval_seconds;
// Allowed difference between major and minor gc times, used
// for computing tenuring_threshold.
// Allowed difference between major and minor GC times, used
// for computing tenuring_threshold
const double _threshold_tolerance_percent;
const double _gc_pause_goal_sec; // goal for maximum gc pause
const double _gc_pause_goal_sec; // Goal for maximum GC pause
// Flag indicating that the adaptive policy is ready to use
bool _young_gen_policy_is_ready;
// decrease/increase the young generation for minor pause time
// Decrease/increase the young generation for minor pause time
int _change_young_gen_for_min_pauses;
// decrease/increase the old generation for major pause time
// Decrease/increase the old generation for major pause time
int _change_old_gen_for_maj_pauses;
// change old geneneration for throughput
// change old generation for throughput
int _change_old_gen_for_throughput;
// change young generation for throughput
int _change_young_gen_for_throughput;
// Flag indicating that the policy would
// increase the tenuring threshold because of the total major gc cost
// is greater than the total minor gc cost
// increase the tenuring threshold because of the total major GC cost
// is greater than the total minor GC cost
bool _increment_tenuring_threshold_for_gc_cost;
// decrease the tenuring threshold because of the the total minor gc
// cost is greater than the total major gc cost
// decrease the tenuring threshold because of the the total minor GC
// cost is greater than the total major GC cost
bool _decrement_tenuring_threshold_for_gc_cost;
// decrease due to survivor size limit
bool _decrement_tenuring_threshold_for_survivor_limit;
@ -182,7 +182,7 @@ class AdaptiveSizePolicy : public CHeapObj<mtGC> {
// Changing the generation sizing depends on the data that is
// gathered about the effects of changes on the pause times and
// throughput. These variable count the number of data points
// gathered. The policy may use these counters as a threshhold
// gathered. The policy may use these counters as a threshold
// for reliable data.
julong _young_gen_change_for_minor_throughput;
julong _old_gen_change_for_major_throughput;
@ -225,7 +225,7 @@ class AdaptiveSizePolicy : public CHeapObj<mtGC> {
// larger than 1.0 if just the sum of the minor cost the
// the major cost is used. Worse than that is the
// fact that the minor cost and the major cost each
// tend toward 1.0 in the extreme of high gc costs.
// tend toward 1.0 in the extreme of high GC costs.
// Limit the value of gc_cost to 1.0 so that the mutator
// cost stays non-negative.
virtual double gc_cost() const {
@ -238,23 +238,23 @@ class AdaptiveSizePolicy : public CHeapObj<mtGC> {
virtual double time_since_major_gc() const;
// Average interval between major collections to be used
// in calculating the decaying major gc cost. An overestimate
// in calculating the decaying major GC cost. An overestimate
// of this time would be a conservative estimate because
// this time is used to decide if the major GC cost
// should be decayed (i.e., if the time since the last
// major gc is long compared to the time returned here,
// major GC is long compared to the time returned here,
// then the major GC cost will be decayed). See the
// implementations for the specifics.
virtual double major_gc_interval_average_for_decay() const {
return _avg_major_interval->average();
}
// Return the cost of the GC where the major gc cost
// Return the cost of the GC where the major GC cost
// has been decayed based on the time since the last
// major collection.
double decaying_gc_cost() const;
// Decay the major gc cost. Use this only for decisions on
// Decay the major GC cost. Use this only for decisions on
// whether to adjust, not to determine by how much to adjust.
// This approximation is crude and may not be good enough for the
// latter.


@ -49,11 +49,11 @@ class AllocationStats VALUE_OBJ_CLASS_SPEC {
// estimates.
AdaptivePaddedAverage _demand_rate_estimate;
ssize_t _desired; // Demand stimate computed as described above
ssize_t _desired; // Demand estimate computed as described above
ssize_t _coal_desired; // desired +/- small-percent for tuning coalescing
ssize_t _surplus; // count - (desired +/- small-percent),
// used to tune splitting in best fit
ssize_t _bfr_surp; // surplus at start of current sweep
ssize_t _prev_sweep; // count from end of previous sweep
ssize_t _before_sweep; // count from before current sweep


@ -54,7 +54,7 @@ void ConcurrentGCThread::safepoint_desynchronize() {
void ConcurrentGCThread::create_and_start() {
if (os::create_thread(this, os::cgc_thread)) {
// XXX: need to set this to low priority
// unless "agressive mode" set; priority
// unless "aggressive mode" set; priority
// should be just less than that of VMThread.
os::set_priority(this, NearMaxPriority);
if (!_should_terminate && !DisableStartThread) {


@ -159,7 +159,7 @@ double LinearLeastSquareFit::y(double x) {
// that no calculation of the slope has yet been done. Returning true
// for a slope equal to 0 reflects the intuitive expectation of the
// dependence on the slope. Don't use the complement of these functions
// since that untuitive expectation is not built into the complement.
// since that intuitive expectation is not built into the complement.
bool LinearLeastSquareFit::decrement_will_decrease() {
return (_slope >= 0.00);
}


@ -210,7 +210,7 @@ class LinearLeastSquareFit : public CHeapObj<mtGC> {
double y(double x);
double slope() { return _slope; }
// Methods to decide if a change in the dependent variable will
// achive a desired goal. Note that these methods are not
// achieve a desired goal. Note that these methods are not
// complementary and both are needed.
bool decrement_will_decrease();
bool increment_will_decrease();


@ -72,7 +72,7 @@ void MutableNUMASpace::check_mangled_unused_area_complete() {
#endif // NOT_PRODUCT
// There may be unallocated holes in the middle chunks
// that should be filled with dead objects to ensure parseability.
// that should be filled with dead objects to ensure parsability.
void MutableNUMASpace::ensure_parsability() {
for (int i = 0; i < lgrp_spaces()->length(); i++) {
LGRPSpace *ls = lgrp_spaces()->at(i);
@ -880,8 +880,8 @@ void MutableNUMASpace::print_on(outputStream* st) const {
}
void MutableNUMASpace::verify() {
// This can be called after setting an arbitary value to the space's top,
// so an object can cross the chunk boundary. We ensure the parsablity
// This can be called after setting an arbitrary value to the space's top,
// so an object can cross the chunk boundary. We ensure the parsability
// of the space and just walk the objects in linear fashion.
ensure_parsability();
MutableSpace::verify();


@ -31,7 +31,7 @@
// A MutableSpace is a subtype of ImmutableSpace that supports the
// concept of allocation. This includes the concepts that a space may
// be only partially full, and the querry methods that go with such
// be only partially full, and the query methods that go with such
// an assumption. MutableSpace is also responsible for minimizing the
// page allocation time by having the memory pretouched (with
// AlwaysPretouch) and for optimizing page placement on NUMA systems
@ -111,7 +111,7 @@ class MutableSpace: public ImmutableSpace {
virtual void mangle_region(MemRegion mr) PRODUCT_RETURN;
// Boolean querries.
// Boolean queries.
bool is_empty() const { return used_in_words() == 0; }
bool not_empty() const { return used_in_words() > 0; }
bool contains(const void* p) const { return _bottom <= p && p < _end; }


@ -152,7 +152,7 @@ ParGCAllocBufferWithBOT::ParGCAllocBufferWithBOT(size_t word_sz,
// The buffer comes with its own BOT, with a shared (obviously) underlying
// BlockOffsetSharedArray. We manipulate this BOT in the normal way
// as we would for any contiguous space. However, on accasion we
// as we would for any contiguous space. However, on occasion we
// need to do some buffer surgery at the extremities before we
// start using the body of the buffer for allocations. Such surgery
// (as explained elsewhere) is to prevent allocation on a card that


@ -92,7 +92,7 @@ public:
}
// The total (word) size of the buffer, including both allocated and
// unallocted space.
// unallocated space.
size_t word_sz() { return _word_sz; }
// Should only be done if we are about to reset with a new buffer of the


@ -75,7 +75,7 @@ class SpaceMangler: public CHeapObj<mtGC> {
// High water mark for allocations. Typically, the space above
// this point have been mangle previously and don't need to be
// touched again. Space belows this point has been allocated
// touched again. Space below this point has been allocated
// and remangling is needed between the current top and this
// high water mark.
HeapWord* _top_for_allocations;


@ -82,7 +82,7 @@ void VM_GC_Operation::release_and_notify_pending_list_lock() {
// Allocations may fail in several threads at about the same time,
// resulting in multiple gc requests. We only want to do one of them.
// In case a GC locker is active and the need for a GC is already signalled,
// In case a GC locker is active and the need for a GC is already signaled,
// we want to skip this GC attempt altogether, without doing a futile
// safepoint operation.
bool VM_GC_Operation::skip_operation() const {


@ -31,7 +31,7 @@
// This class exposes implementation details of the various
// collector(s), and we need to be very careful with it. If
// use of this class grows, we should split it into public
// and implemenation-private "causes".
// and implementation-private "causes".
//
class GCCause : public AllStatic {


@ -576,8 +576,8 @@ class ResourceObj ALLOCATION_SUPER_CLASS_SPEC {
bool allocated_on_res_area() const { return get_allocation_type() == RESOURCE_AREA; }
bool allocated_on_C_heap() const { return get_allocation_type() == C_HEAP; }
bool allocated_on_arena() const { return get_allocation_type() == ARENA; }
ResourceObj(); // default construtor
ResourceObj(const ResourceObj& r); // default copy construtor
ResourceObj(); // default constructor
ResourceObj(const ResourceObj& r); // default copy constructor
ResourceObj& operator=(const ResourceObj& r); // default copy assignment
~ResourceObj();
#endif // ASSERT


@ -124,7 +124,7 @@ public:
virtual bool has_read_region_opt() = 0;
virtual bool has_write_region_opt() = 0;
// These operations should assert false unless the correponding operation
// These operations should assert false unless the corresponding operation
// above returns true. Otherwise, they should perform an appropriate
// barrier for an array whose elements are all in the given memory region.
virtual void read_ref_array(MemRegion mr) = 0;
@ -165,7 +165,7 @@ public:
// normally reserve space for such tables, and commit parts of the table
// "covering" parts of the heap that are committed. The constructor is
// passed the maximum number of independently committable subregions to
// be covered, and the "resize_covoered_region" function allows the
// be covered, and the "resize_covered_region" function allows the
// sub-parts of the heap to inform the barrier set of changes of their
// sizes.
BarrierSet(int max_covered_regions) :


@ -56,7 +56,7 @@ TreeChunk<Chunk_t, FreeList_t>* TreeChunk<Chunk_t, FreeList_t>::as_TreeChunk(Chu
template <class Chunk_t, template <class> class FreeList_t>
void TreeChunk<Chunk_t, FreeList_t>::verify_tree_chunk_list() const {
TreeChunk<Chunk_t, FreeList_t>* nextTC = (TreeChunk<Chunk_t, FreeList_t>*)next();
if (prev() != NULL) { // interior list node shouldn'r have tree fields
if (prev() != NULL) { // interior list node shouldn't have tree fields
guarantee(embedded_list()->parent() == NULL && embedded_list()->left() == NULL &&
embedded_list()->right() == NULL, "should be clear");
}
@ -247,7 +247,7 @@ TreeList<Chunk_t, FreeList_t>* TreeList<Chunk_t, FreeList_t>::remove_chunk_repla
prevFC->link_after(nextTC);
}
// Below this point the embeded TreeList<Chunk_t, FreeList_t> being used for the
// Below this point the embedded TreeList<Chunk_t, FreeList_t> being used for the
// tree node may have changed. Don't use "this"
// TreeList<Chunk_t, FreeList_t>*.
// chunk should still be a free chunk (bit set in _prev)
@ -703,7 +703,7 @@ TreeList<Chunk_t, FreeList_t>* BinaryTreeDictionary<Chunk_t, FreeList_t>::remove
// The only use of this method would not pass the root of the
// tree (as indicated by the assertion above that the tree list
// has a parent) but the specification does not explicitly exclude the
// passing of the root so accomodate it.
// passing of the root so accommodate it.
set_root(NULL);
}
debug_only(


@ -322,7 +322,7 @@ class BinaryTreeDictionary: public FreeBlockDictionary<Chunk_t> {
void set_tree_hints(void);
// Reset statistics for all the lists in the tree.
void clear_tree_census(void);
// Print the statistcis for all the lists in the tree. Also may
// Print the statistics for all the lists in the tree. Also may
// print out summaries.
void print_dict_census(void) const;
void print_free_lists(outputStream* st) const;


@ -590,7 +590,7 @@ HeapWord* BlockOffsetArrayNonContigSpace::block_start_careful(
// Otherwise, find the block start using the table, but taking
// care (cf block_start_unsafe() above) not to parse any objects/blocks
// on the cards themsleves.
// on the cards themselves.
size_t index = _array->index_for(addr);
assert(_array->address_for_index(index) == addr,
"arg should be start of card");


@ -424,7 +424,7 @@ class BlockOffsetArrayNonContigSpace: public BlockOffsetArray {
BlockOffsetArray(array, mr, false),
_unallocated_block(_bottom) { }
// accessor
// Accessor
HeapWord* unallocated_block() const {
assert(BlockOffsetArrayUseUnallocatedBlock,
"_unallocated_block is not being maintained");


@ -98,7 +98,7 @@ CardTableModRefBS::CardTableModRefBS(MemRegion whole_heap,
"card marking array");
}
// The assember store_check code will do an unsigned shift of the oop,
// The assembler store_check code will do an unsigned shift of the oop,
// then add it to byte_map_base, i.e.
//
// _byte_map = byte_map_base + (uintptr_t(low_bound) >> card_shift)
@ -243,7 +243,7 @@ void CardTableModRefBS::resize_covered_region(MemRegion new_region) {
if (new_region.word_size() != old_region.word_size()) {
// Commit new or uncommit old pages, if necessary.
MemRegion cur_committed = _committed[ind];
// Extend the end of this _commited region
// Extend the end of this _committed region
// to cover the end of any lower _committed regions.
// This forms overlapping regions, but never interior regions.
HeapWord* const max_prev_end = largest_prev_committed_end(ind);
@ -448,7 +448,7 @@ void CardTableModRefBS::non_clean_card_iterate_possibly_parallel(Space* sp,
// off parallelism is used, then active_workers can be used in
// place of n_par_threads.
// This is an example of a path where n_par_threads is
// set to 0 to turn off parallism.
// set to 0 to turn off parallelism.
// [7] CardTableModRefBS::non_clean_card_iterate()
// [8] CardTableRS::younger_refs_in_space_iterate()
// [9] Generation::younger_refs_in_space_iterate()


@ -590,7 +590,7 @@ void CardTableRS::verify_space(Space* s, HeapWord* gen_boundary) {
// Then, the case analysis above reveals that, in the worst case,
// any such stale card will be scanned unnecessarily at most twice.
//
// It is nonethelss advisable to try and get rid of some of this
// It is nonetheless advisable to try and get rid of some of this
// redundant work in a subsequent (low priority) re-design of
// the card-scanning code, if only to simplify the underlying
// state machine analysis/proof. ysr 1/28/2002. XXX


@ -45,7 +45,7 @@
#include "gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp"
#endif // INCLUDE_ALL_GCS
// CollectorPolicy methods.
// CollectorPolicy methods
CollectorPolicy::CollectorPolicy() :
_space_alignment(0),
@ -185,7 +185,7 @@ size_t CollectorPolicy::compute_heap_alignment() {
// other collectors should also be updated to do their own alignment and then
// this use of lcm() should be removed.
if (UseLargePages && !UseParallelGC) {
// in presence of large pages we have to make sure that our
// In presence of large pages we have to make sure that our
// alignment is large page aware
alignment = lcm(os::large_page_size(), alignment);
}
@ -193,7 +193,7 @@ size_t CollectorPolicy::compute_heap_alignment() {
return alignment;
}
// GenCollectorPolicy methods.
// GenCollectorPolicy methods
GenCollectorPolicy::GenCollectorPolicy() :
_min_gen0_size(0),
@ -375,10 +375,10 @@ void TwoGenerationCollectorPolicy::initialize_flags() {
_initial_heap_byte_size = InitialHeapSize;
}
// adjust max heap size if necessary
// Adjust NewSize and OldSize or MaxHeapSize to match each other
if (NewSize + OldSize > MaxHeapSize) {
if (_max_heap_size_cmdline) {
// somebody set a maximum heap size with the intention that we should not
// Somebody has set a maximum heap size with the intention that we should not
// exceed it. Adjust New/OldSize as necessary.
uintx calculated_size = NewSize + OldSize;
double shrink_factor = (double) MaxHeapSize / calculated_size;
@ -439,9 +439,8 @@ void GenCollectorPolicy::initialize_size_info() {
// minimum gen0 sizes.
if (_max_heap_byte_size == _min_heap_byte_size) {
// The maximum and minimum heap sizes are the same so
// the generations minimum and initial must be the
// same as its maximum.
// The maximum and minimum heap sizes are the same so the generations
// minimum and initial must be the same as its maximum.
_min_gen0_size = max_new_size;
_initial_gen0_size = max_new_size;
_max_gen0_size = max_new_size;
@ -463,8 +462,7 @@ void GenCollectorPolicy::initialize_size_info() {
// For the case where NewSize is the default, use NewRatio
// to size the minimum and initial generation sizes.
// Use the default NewSize as the floor for these values. If
// NewRatio is overly large, the resulting sizes can be too
// small.
// NewRatio is overly large, the resulting sizes can be too small.
_min_gen0_size = MAX2(scale_by_NewRatio_aligned(_min_heap_byte_size), NewSize);
desired_new_size =
MAX2(scale_by_NewRatio_aligned(_initial_heap_byte_size), NewSize);
@ -483,8 +481,7 @@ void GenCollectorPolicy::initialize_size_info() {
_max_gen0_size = bound_minus_alignment(_max_gen0_size, _max_heap_byte_size);
// At this point all three sizes have been checked against the
// maximum sizes but have not been checked for consistency
// among the three.
// maximum sizes but have not been checked for consistency among the three.
// Final check min <= initial <= max
_min_gen0_size = MIN2(_min_gen0_size, _max_gen0_size);
@ -492,7 +489,7 @@ void GenCollectorPolicy::initialize_size_info() {
_min_gen0_size = MIN2(_min_gen0_size, _initial_gen0_size);
}
// Write back to flags if necessary
// Write back to flags if necessary.
if (NewSize != _initial_gen0_size) {
FLAG_SET_ERGO(uintx, NewSize, _initial_gen0_size);
}
@ -538,7 +535,7 @@ bool TwoGenerationCollectorPolicy::adjust_gen0_sizes(size_t* gen0_size_ptr,
}
// Minimum sizes of the generations may be different than
// the initial sizes. An inconsistently is permitted here
// the initial sizes. An inconsistency is permitted here
// in the total size that can be specified explicitly by
// command line specification of OldSize and NewSize and
// also a command line specification of -Xms. Issue a warning
@ -550,12 +547,12 @@ void TwoGenerationCollectorPolicy::initialize_size_info() {
// At this point the minimum, initial and maximum sizes
// of the overall heap and of gen0 have been determined.
// The maximum gen1 size can be determined from the maximum gen0
// and maximum heap size since no explicit flags exits
// and maximum heap size since no explicit flags exist
// for setting the gen1 maximum.
_max_gen1_size = MAX2(_max_heap_byte_size - _max_gen0_size, _gen_alignment);
// If no explicit command line flag has been set for the
// gen1 size, use what is left for gen1.
// gen1 size, use what is left for gen1
if (!FLAG_IS_CMDLINE(OldSize)) {
// The user has not specified any value but the ergonomics
// may have chosen a value (which may or may not be consistent
@ -567,14 +564,14 @@ void TwoGenerationCollectorPolicy::initialize_size_info() {
// _max_gen1_size has already been made consistent above
FLAG_SET_ERGO(uintx, OldSize, _initial_gen1_size);
} else {
// It's been explicitly set on the command line. Use the
// OldSize has been explicitly set on the command line. Use the
// OldSize and then determine the consequences.
_min_gen1_size = MIN2(OldSize, _min_heap_byte_size - _min_gen0_size);
_initial_gen1_size = OldSize;
// If the user has explicitly set an OldSize that is inconsistent
// with other command line flags, issue a warning.
// The generation minimums and the overall heap mimimum should
// The generation minimums and the overall heap minimum should
// be within one generation alignment.
if ((_min_gen1_size + _min_gen0_size + _gen_alignment) < _min_heap_byte_size) {
warning("Inconsistency between minimum heap size and minimum "
@ -596,7 +593,7 @@ void TwoGenerationCollectorPolicy::initialize_size_info() {
_min_gen0_size, _initial_gen0_size, _max_gen0_size);
}
}
// Initial size
// The same as above for the old gen initial size.
if (adjust_gen0_sizes(&_initial_gen0_size, &_initial_gen1_size,
_initial_heap_byte_size)) {
if (PrintGCDetails && Verbose) {
@ -606,10 +603,10 @@ void TwoGenerationCollectorPolicy::initialize_size_info() {
}
}
}
// Enforce the maximum gen1 size.
_min_gen1_size = MIN2(_min_gen1_size, _max_gen1_size);
// Check that min gen1 <= initial gen1 <= max gen1
// Make sure that min gen1 <= initial gen1 <= max gen1.
_initial_gen1_size = MAX2(_initial_gen1_size, _min_gen1_size);
_initial_gen1_size = MIN2(_initial_gen1_size, _max_gen1_size);
@ -650,10 +647,9 @@ HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
HeapWord* result = NULL;
// Loop until the allocation is satisified,
// or unsatisfied after GC.
// Loop until the allocation is satisfied, or unsatisfied after GC.
for (int try_count = 1, gclocker_stalled_count = 0; /* return or throw */; try_count += 1) {
HandleMark hm; // discard any handles allocated in each iteration
HandleMark hm; // Discard any handles allocated in each iteration.
// First allocation attempt is lock-free.
Generation *gen0 = gch->get_gen(0);
@ -666,7 +662,7 @@ HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
return result;
}
}
unsigned int gc_count_before; // read inside the Heap_lock locked region
unsigned int gc_count_before; // Read inside the Heap_lock locked region.
{
MutexLocker ml(Heap_lock);
if (PrintGC && Verbose) {
@ -685,19 +681,19 @@ HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
if (GC_locker::is_active_and_needs_gc()) {
if (is_tlab) {
return NULL; // Caller will retry allocating individual object
return NULL; // Caller will retry allocating individual object.
}
if (!gch->is_maximal_no_gc()) {
// Try and expand heap to satisfy request
// Try and expand heap to satisfy request.
result = expand_heap_and_allocate(size, is_tlab);
// result could be null if we are out of space
// Result could be null if we are out of space.
if (result != NULL) {
return result;
}
}
if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
return NULL; // we didn't get to do a GC and we didn't get any memory
return NULL; // We didn't get to do a GC and we didn't get any memory.
}
// If this thread is not in a jni critical section, we stall
@ -732,7 +728,7 @@ HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
result = op.result();
if (op.gc_locked()) {
assert(result == NULL, "must be NULL if gc_locked() is true");
continue; // retry and/or stall as necessary
continue; // Retry and/or stall as necessary.
}
// Allocation has failed and a collection
@ -793,7 +789,7 @@ HeapWord* GenCollectorPolicy::satisfy_failed_allocation(size_t size,
if (!gch->is_maximal_no_gc()) {
result = expand_heap_and_allocate(size, is_tlab);
}
return result; // could be null if we are out of space
return result; // Could be null if we are out of space.
} else if (!gch->incremental_collection_will_fail(false /* don't consult_young */)) {
// Do an incremental collection.
gch->do_collection(false /* full */,
@ -915,10 +911,8 @@ MetaWord* CollectorPolicy::satisfy_failed_metadata_allocation(
GCCause::_metadata_GC_threshold);
VMThread::execute(&op);
// If GC was locked out, try again. Check
// before checking success because the prologue
// could have succeeded and the GC still have
// been locked out.
// If GC was locked out, try again. Check before checking success because the
// prologue could have succeeded and the GC still have been locked out.
if (op.gc_locked()) {
continue;
}
@ -979,7 +973,7 @@ void MarkSweepPolicy::initialize_generations() {
}
void MarkSweepPolicy::initialize_gc_policy_counters() {
// initialize the policy counters - 2 collectors, 3 generations
// Initialize the policy counters - 2 collectors, 3 generations.
if (UseParNewGC) {
_gc_policy_counters = new GCPolicyCounters("ParNew:MSC", 2, 3);
} else {


@ -76,10 +76,10 @@ class CollectorPolicy : public CHeapObj<mtGC> {
size_t _heap_alignment;
// Needed to keep information if MaxHeapSize was set on the command line
// when the flag value is aligned etc by ergonomics
// when the flag value is aligned etc by ergonomics.
bool _max_heap_size_cmdline;
// The sizing of the heap are controlled by a sizing policy.
// The sizing of the heap is controlled by a sizing policy.
AdaptiveSizePolicy* _size_policy;
// Set to true when policy wants soft refs cleared.
@ -102,7 +102,7 @@ class CollectorPolicy : public CHeapObj<mtGC> {
initialize_size_info();
}
// Return maximum heap alignment that may be imposed by the policy
// Return maximum heap alignment that may be imposed by the policy.
static size_t compute_heap_alignment();
size_t space_alignment() { return _space_alignment; }
@ -180,7 +180,7 @@ class CollectorPolicy : public CHeapObj<mtGC> {
size_t size,
Metaspace::MetadataType mdtype);
// Performace Counter support
// Performance Counter support
GCPolicyCounters* counters() { return _gc_policy_counters; }
// Create the jstat counters for the GC policy. By default, policy's
@ -231,9 +231,8 @@ class GenCollectorPolicy : public CollectorPolicy {
GenerationSpec **_generations;
// Return true if an allocation should be attempted in the older
// generation if it fails in the younger generation. Return
// false, otherwise.
// Return true if an allocation should be attempted in the older generation
// if it fails in the younger generation. Return false, otherwise.
virtual bool should_try_older_generation_allocation(size_t word_size) const;
void initialize_flags();
@ -245,7 +244,7 @@ class GenCollectorPolicy : public CollectorPolicy {
// Try to allocate space by expanding the heap.
virtual HeapWord* expand_heap_and_allocate(size_t size, bool is_tlab);
// Compute max heap alignment
// Compute max heap alignment.
size_t compute_max_alignment();
// Scale the base_size by NewRatio according to
@ -253,7 +252,7 @@ class GenCollectorPolicy : public CollectorPolicy {
// and align by min_alignment()
size_t scale_by_NewRatio_aligned(size_t base_size);
// Bound the value by the given maximum minus the min_alignment
// Bound the value by the given maximum minus the min_alignment.
size_t bound_minus_alignment(size_t desired_size, size_t maximum_size);
public:


@ -126,7 +126,7 @@ jint GenCollectedHeap::initialize() {
(HeapWord*)(heap_rs.base() + heap_rs.size()));
// It is important to do this in a way such that concurrent readers can't
// temporarily think somethings in the heap. (Seen this happen in asserts.)
// temporarily think something is in the heap. (Seen this happen in asserts.)
_reserved.set_word_size(0);
_reserved.set_start((HeapWord*)heap_rs.base());
size_t actual_heap_size = heap_rs.size();
@ -1262,7 +1262,7 @@ class GenTimeOfLastGCClosure: public GenCollectedHeap::GenClosure {
};
jlong GenCollectedHeap::millis_since_last_gc() {
// We need a monotonically non-deccreasing time in ms but
// We need a monotonically non-decreasing time in ms but
// os::javaTimeMillis() does not guarantee monotonicity.
jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
GenTimeOfLastGCClosure tolgc_cl(now);
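
The comment being fixed here, like the matching ones in GenMarkSweep and ReferenceProcessor further down, records the same workaround: derive a non-decreasing millisecond timestamp from the monotonic nanosecond clock instead of calling the wall-clock millisecond API. A small sketch of that conversion, using std::chrono::steady_clock as a stand-in for os::javaTimeNanos():

#include <chrono>
#include <cstdint>

// Monotonic milliseconds derived from a monotonic nanosecond source, so the
// value never goes backwards even if the wall clock is adjusted.
static int64_t monotonic_millis() {
  using namespace std::chrono;
  const int64_t NANOSECS_PER_MILLISEC = 1000000;
  int64_t now_ns = duration_cast<nanoseconds>(
      steady_clock::now().time_since_epoch()).count();
  return now_ns / NANOSECS_PER_MILLISEC;
}
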


@ -315,7 +315,7 @@ public:
}
// Update the gc statistics for each generation.
// "level" is the level of the lastest collection
// "level" is the level of the latest collection.
void update_gc_stats(int current_level, bool full) {
for (int i = 0; i < _n_gens; i++) {
_gens[i]->update_gc_stats(current_level, full);


@ -148,8 +148,8 @@ void GenMarkSweep::invoke_at_safepoint(int level, ReferenceProcessor* rp, bool c
Universe::update_heap_info_at_gc();
// Update time of last gc for all generations we collected
// (which curently is all the generations in the heap).
// We need to use a monotonically non-deccreasing time in ms
// (which currently is all the generations in the heap).
// We need to use a monotonically non-decreasing time in ms
// or we will see time-warp warnings and os::javaTimeMillis()
// does not guarantee monotonicity.
jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;


@ -27,7 +27,7 @@
#include "oops/oop.hpp"
// A GenRemSet provides ways of iterating over pointers accross generations.
// A GenRemSet provides ways of iterating over pointers across generations.
// (This is especially useful for older-to-younger.)
class Generation;
@ -58,7 +58,7 @@ public:
// These are for dynamic downcasts. Unfortunately that it names the
// possible subtypes (but not that they are subtypes!) Return NULL if
// the cast is invalide.
// the cast is invalid.
virtual CardTableRS* as_CardTableRS() { return NULL; }
// Return the barrier set associated with "this."
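
The as_CardTableRS() declaration above is the usual hand-rolled downcast idiom: the base class answers NULL for every as_X() query and each concrete subtype overrides only its own query. A stripped-down sketch of that pattern; the two classes below are simplified placeholders, not the real declarations.

#include <cstddef>

class CardTableRS;

class GenRemSet {
 public:
  virtual ~GenRemSet() {}
  // Invalid cast: a generic remembered set is not a CardTableRS.
  virtual CardTableRS* as_CardTableRS() { return NULL; }
};

class CardTableRS : public GenRemSet {
 public:
  // Valid cast: a CardTableRS can answer with itself.
  virtual CardTableRS* as_CardTableRS() { return this; }
};
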


@ -289,7 +289,7 @@ class Generation: public CHeapObj<mtGC> {
// These functions return the addresses of the fields that define the
// boundaries of the contiguous allocation area. (These fields should be
// physicall near to one another.)
// physically near to one another.)
virtual HeapWord** top_addr() const { return NULL; }
virtual HeapWord** end_addr() const { return NULL; }
@ -485,7 +485,7 @@ class Generation: public CHeapObj<mtGC> {
// General signature...
virtual void oop_since_save_marks_iterate_v(OopsInGenClosure* cl) = 0;
// ...and specializations for de-virtualization. (The general
// implemention of the _nv versions call the virtual version.
// implementation of the _nv versions call the virtual version.
// Note that the _nv suffix is not really semantically necessary,
// but it avoids some not-so-useful warnings on Solaris.)
#define Generation_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \


@ -183,7 +183,7 @@ void* CodeHeap::allocate(size_t instance_size, bool is_critical) {
size_t number_of_segments = size_to_segments(instance_size + sizeof(HeapBlock));
assert(segments_to_size(number_of_segments) >= sizeof(FreeBlock), "not enough room for FreeList");
// First check if we can satify request from freelist
// First check if we can satisfy request from freelist
debug_only(verify());
HeapBlock* block = search_freelist(number_of_segments, is_critical);
debug_only(if (VerifyCodeCacheOften) verify());
@ -372,7 +372,7 @@ void CodeHeap::add_to_freelist(HeapBlock *a) {
}
// Scan for right place to put into list. List
// is sorted by increasing addresseses
// is sorted by increasing addresses
FreeBlock* prev = NULL;
FreeBlock* cur = _freelist;
while(cur != NULL && cur < b) {
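
The loop that begins above walks the freelist kept in increasing address order; spelled out, the insertion it performs looks roughly like the following sketch. FreeBlock here is a stand-in struct, not the real CodeHeap type.

#include <cstddef>

struct FreeBlock {
  FreeBlock* link;   // next block in the address-ordered freelist
};

// Insert block b so the list stays sorted by increasing address.
static void add_to_freelist(FreeBlock*& freelist, FreeBlock* b) {
  FreeBlock* prev = NULL;
  FreeBlock* cur  = freelist;
  while (cur != NULL && cur < b) {   // walk to the first block past b
    prev = cur;
    cur  = cur->link;
  }
  b->link = cur;                     // splice b in front of cur
  if (prev == NULL) {
    freelist = b;                    // b becomes the new head
  } else {
    prev->link = b;
  }
}
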


@ -127,8 +127,8 @@ class CodeHeap : public CHeapObj<mtCode> {
// Heap extents
bool reserve(size_t reserved_size, size_t committed_size, size_t segment_size);
void release(); // releases all allocated memory
bool expand_by(size_t size); // expands commited memory by size
void shrink_by(size_t size); // shrinks commited memory by size
bool expand_by(size_t size); // expands committed memory by size
void shrink_by(size_t size); // shrinks committed memory by size
void clear(); // clears all heap contents
// Memory allocation


@ -347,7 +347,7 @@ class KlassInfoHisto : public StackObj {
#endif // INCLUDE_SERVICES
// These declarations are needed since teh declaration of KlassInfoTable and
// These declarations are needed since the declaration of KlassInfoTable and
// KlassInfoClosure are guarded by #if INLCUDE_SERVICES
class KlassInfoTable;
class KlassInfoClosure;


@ -2399,7 +2399,7 @@ bool SpaceManager::contains(const void *ptr) {
void SpaceManager::verify() {
// If there are blocks in the dictionary, then
// verfication of chunks does not work since
// verification of chunks does not work since
// being in the dictionary alters a chunk.
if (block_freelists()->total_size() == 0) {
for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
@ -2868,7 +2868,7 @@ void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address
uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes;
// If compressed class space fits in lower 32G, we don't need a base.
if (higher_address <= (address)klass_encoding_max) {
lower_base = 0; // effectively lower base is zero.
lower_base = 0; // Effectively lower base is zero.
}
}
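
The branch above is choosing the parameters of the usual base-plus-shift compression scheme: if the whole class space fits below the unscaled 32 GB limit, the base can simply be zero. A rough sketch of what such an encoding does with its base and shift; the constants here are illustrative, not the values computed in this function.

#include <cstdint>

// Compress a 64-bit metadata address into 32 bits by subtracting a base and
// shifting out alignment bits; decode reverses the two steps.
static const int kLogKlassAlignment = 3;  // illustrative alignment shift
static uint64_t  kNarrowKlassBase   = 0;  // 0 when everything fits low enough

static uint32_t encode_klass(uint64_t addr) {
  return (uint32_t)((addr - kNarrowKlassBase) >> kLogKlassAlignment);
}

static uint64_t decode_klass(uint32_t narrow) {
  return kNarrowKlassBase + ((uint64_t)narrow << kLogKlassAlignment);
}
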


@ -487,7 +487,7 @@ void VM_PopulateDumpSharedSpace::doit() {
NOT_PRODUCT(SystemDictionary::verify();)
// Copy the the symbol table, and the system dictionary to the shared
// space in usable form. Copy the hastable
// space in usable form. Copy the hashtable
// buckets first [read-write], then copy the linked lists of entries
// [read-only].
@ -953,7 +953,7 @@ void MetaspaceShared::initialize_shared_spaces() {
// The following data in the shared misc data region are the linked
// list elements (HashtableEntry objects) for the symbol table, string
// table, and shared dictionary. The heap objects refered to by the
// table, and shared dictionary. The heap objects referred to by the
// symbol table, string table, and shared dictionary are permanent and
// unmovable. Since new entries added to the string and symbol tables
// are always added at the beginning of the linked lists, THESE LINKED


@ -72,7 +72,7 @@ public:
bool has_read_region_opt() { return false; }
// These operations should assert false unless the correponding operation
// These operations should assert false unless the corresponding operation
// above returns true.
void read_ref_array(MemRegion mr) {
assert(false, "can't call");


@ -45,7 +45,7 @@ void referenceProcessor_init() {
}
void ReferenceProcessor::init_statics() {
// We need a monotonically non-deccreasing time in ms but
// We need a monotonically non-decreasing time in ms but
// os::javaTimeMillis() does not guarantee monotonicity.
jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
@ -152,7 +152,7 @@ void ReferenceProcessor::update_soft_ref_master_clock() {
// Update (advance) the soft ref master clock field. This must be done
// after processing the soft ref list.
// We need a monotonically non-deccreasing time in ms but
// We need a monotonically non-decreasing time in ms but
// os::javaTimeMillis() does not guarantee monotonicity.
jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
jlong soft_ref_clock = java_lang_ref_SoftReference::clock();
@ -168,7 +168,7 @@ void ReferenceProcessor::update_soft_ref_master_clock() {
// javaTimeNanos(), which is guaranteed to be monotonically
// non-decreasing provided the underlying platform provides such
// a time source (and it is bug free).
// In product mode, however, protect ourselves from non-monotonicty.
// In product mode, however, protect ourselves from non-monotonicity.
if (now > _soft_ref_timestamp_clock) {
_soft_ref_timestamp_clock = now;
java_lang_ref_SoftReference::set_clock(now);
@ -349,7 +349,7 @@ void ReferenceProcessor::enqueue_discovered_reflist(DiscoveredList& refs_list,
oop obj = NULL;
oop next_d = refs_list.head();
if (pending_list_uses_discovered_field()) { // New behaviour
if (pending_list_uses_discovered_field()) { // New behavior
// Walk down the list, self-looping the next field
// so that the References are not considered active.
while (obj != next_d) {
@ -366,7 +366,7 @@ void ReferenceProcessor::enqueue_discovered_reflist(DiscoveredList& refs_list,
// Post-barrier not needed when looping to self.
java_lang_ref_Reference::set_next_raw(obj, obj);
if (next_d == obj) { // obj is last
// Swap refs_list into pendling_list_addr and
// Swap refs_list into pending_list_addr and
// set obj's discovered to what we read from pending_list_addr.
oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr);
// Need post-barrier on pending_list_addr above;
@ -376,7 +376,7 @@ void ReferenceProcessor::enqueue_discovered_reflist(DiscoveredList& refs_list,
oopDesc::bs()->write_ref_field(java_lang_ref_Reference::discovered_addr(obj), old);
}
}
} else { // Old behaviour
} else { // Old behavior
// Walk down the list, copying the discovered field into
// the next field and clearing the discovered field.
while (obj != next_d) {
@ -390,7 +390,7 @@ void ReferenceProcessor::enqueue_discovered_reflist(DiscoveredList& refs_list,
assert(java_lang_ref_Reference::next(obj) == NULL,
"The reference should not be enqueued");
if (next_d == obj) { // obj is last
// Swap refs_list into pendling_list_addr and
// Swap refs_list into pending_list_addr and
// set obj's next to what we read from pending_list_addr.
oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr);
// Need oop_check on pending_list_addr above;
@ -1341,7 +1341,7 @@ void ReferenceProcessor::preclean_discovered_references(
// whose referents are still alive, whose referents are NULL or which
// are not active (have a non-NULL next field). NOTE: When we are
// thus precleaning the ref lists (which happens single-threaded today),
// we do not disable refs discovery to honour the correct semantics of
// we do not disable refs discovery to honor the correct semantics of
// java.lang.Reference. As a result, we need to be careful below
// that ref removal steps interleave safely with ref discovery steps
// (in this thread).
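
The enqueue_discovered_reflist hunks above describe one list-splicing walk: follow the discovered links, self-loop each Reference's next field so it no longer looks active, and when the last element is reached swap the whole list onto the pending list head. A toy version with plain pointers follows; the Node type and field names are only an analogy, and the real code operates on Reference oops with GC barriers.

// Toy analogue of enqueue_discovered_reflist. The discovered list is linked
// through "discovered", and by convention the last node's discovered field
// points to itself.
struct Node {
  Node* discovered;  // analogue of Reference.discovered
  Node* next;        // analogue of Reference.next
};

static void enqueue_discovered(Node* head, Node*& pending_list) {
  Node* obj    = 0;
  Node* next_d = head;
  while (obj != next_d) {
    obj    = next_d;
    next_d = obj->discovered;  // read the link before we start rewriting fields
    obj->next = obj;           // self-loop: the Reference no longer looks active
    if (next_d == obj) {       // obj is last
      Node* old = pending_list;  // swap the list into the pending-list head...
      pending_list = head;
      obj->discovered = old;     // ...and chain the old pending list behind us
    }
  }
}
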


@ -474,7 +474,7 @@ class ReferenceProcessor : public CHeapObj<mtGC> {
bool processing_is_mt() const { return _processing_is_mt; }
void set_mt_processing(bool mt) { _processing_is_mt = mt; }
// whether all enqueuing of weak references is complete
// whether all enqueueing of weak references is complete
bool enqueuing_is_done() { return _enqueuing_is_done; }
void set_enqueuing_is_done(bool v) { _enqueuing_is_done = v; }


@ -196,7 +196,7 @@ protected:
// leveraging existing data structures if we simply create a way to manage this one
// special need for a ResourceMark. If ResourceMark simply inherited from CHeapObj
// then existing ResourceMarks would work fine since no one use new to allocate them
// and they would be stack allocated. This leaves open the possibilty of accidental
// and they would be stack allocated. This leaves open the possibility of accidental
// misuse so we simple duplicate the ResourceMark functionality here.
class DeoptResourceMark: public CHeapObj<mtInternal> {


@ -92,7 +92,7 @@ class KlassClosure;
// 0 is a "special" value in set_n_threads() which translates to
// setting _n_threads to 1.
//
// Some code uses _n_terminiation to decide if work should be done in
// Some code uses _n_termination to decide if work should be done in
// parallel. The notorious possibly_parallel_oops_do() in threads.cpp
// is an example of such code. Look for variable "is_par" for other
// examples.


@ -112,7 +112,7 @@ void DirtyCardToOopClosure::do_MemRegion(MemRegion mr) {
// cards are processed. For instance, CMS must remember mutator updates
// (i.e. dirty cards) so as to re-scan mutated objects.
// Such work can be piggy-backed here on dirty card scanning, so as to make
// it slightly more efficient than doing a complete non-detructive pre-scan
// it slightly more efficient than doing a complete non-destructive pre-scan
// of the card table.
MemRegionClosure* pCl = _sp->preconsumptionDirtyCardClosure();
if (pCl != NULL) {
@ -324,8 +324,8 @@ void OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
}
void OffsetTableContigSpace::set_end(HeapWord* new_end) {
// Space should not advertize an increase in size
// until after the underlying offest table has been enlarged.
// Space should not advertise an increase in size
// until after the underlying offset table has been enlarged.
_offsets.resize(pointer_delta(new_end, bottom()));
Space::set_end(new_end);
}
@ -729,7 +729,7 @@ void ContiguousSpace::object_iterate(ObjectClosure* blk) {
object_iterate_from(bm, blk);
}
// For a continguous space object_iterate() and safe_object_iterate()
// For a ContiguousSpace object_iterate() and safe_object_iterate()
// are the same.
void ContiguousSpace::safe_object_iterate(ObjectClosure* blk) {
object_iterate(blk);


@ -56,7 +56,7 @@
// Here's the Space hierarchy:
//
// - Space -- an asbtract base class describing a heap area
// - Space -- an abstract base class describing a heap area
// - CompactibleSpace -- a space supporting compaction
// - CompactibleFreeListSpace -- (used for CMS generation)
// - ContiguousSpace -- a compactible space in which all free space
@ -159,7 +159,7 @@ class Space: public CHeapObj<mtGC> {
// (that is, if the space is contiguous), then this region must contain only
// such objects: the memregion will be from the bottom of the region to the
// saved mark. Otherwise, the "obj_allocated_since_save_marks" method of
// the space must distiguish between objects in the region allocated before
// the space must distinguish between objects in the region allocated before
// and after the call to save marks.
virtual MemRegion used_region_at_save_marks() const {
return MemRegion(bottom(), saved_mark_word());
@ -190,7 +190,7 @@ class Space: public CHeapObj<mtGC> {
// Returns true iff the given the space contains the
// given address as part of an allocated object. For
// ceratin kinds of spaces, this might be a potentially
// certain kinds of spaces, this might be a potentially
// expensive operation. To prevent performance problems
// on account of its inadvertent use in product jvm's,
// we restrict its use to assertion checks only.
@ -244,13 +244,13 @@ class Space: public CHeapObj<mtGC> {
// Return an address indicating the extent of the iteration in the
// event that the iteration had to return because of finding an
// uninitialized object in the space, or if the closure "cl"
// signalled early termination.
// signaled early termination.
virtual HeapWord* object_iterate_careful(ObjectClosureCareful* cl);
virtual HeapWord* object_iterate_careful_m(MemRegion mr,
ObjectClosureCareful* cl);
// Create and return a new dirty card to oop closure. Can be
// overriden to return the appropriate type of closure
// overridden to return the appropriate type of closure
// depending on the type of space in which the closure will
// operate. ResourceArea allocated.
virtual DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
@ -474,13 +474,13 @@ public:
// be one, since compaction must succeed -- we go to the first space of
// the previous generation if necessary, updating "cp"), reset compact_top
// and then forward. In either case, returns the new value of "compact_top".
// If the forwarding crosses "cp->threshold", invokes the "cross_threhold"
// If the forwarding crosses "cp->threshold", invokes the "cross_threshold"
// function of the then-current compaction space, and updates "cp->threshold
// accordingly".
virtual HeapWord* forward(oop q, size_t size, CompactPoint* cp,
HeapWord* compact_top);
// Return a size with adjusments as required of the space.
// Return a size with adjustments as required of the space.
virtual size_t adjust_object_size_v(size_t size) const { return size; }
protected:
@ -500,7 +500,7 @@ protected:
// Requires "allowed_deadspace_words > 0", that "q" is the start of a
// free block of the given "word_len", and that "q", were it an object,
// would not move if forwared. If the size allows, fill the free
// would not move if forwarded. If the size allows, fill the free
// block with an object, to prevent excessive compaction. Returns "true"
// iff the free region was made deadspace, and modifies
// "allowed_deadspace_words" to reflect the number of available deadspace


@ -135,7 +135,7 @@ bool TenuredGeneration::should_collect(bool full,
free());
}
}
// If we had to expand to accomodate promotions from younger generations
// If we had to expand to accommodate promotions from younger generations
if (!result && _capacity_at_prologue < capacity()) {
result = true;
if (PrintGC && Verbose) {


@ -1136,7 +1136,7 @@ bool universe_post_init() {
SystemDictionary::ProtectionDomain_klass(), m);;
}
// The folowing is initializing converter functions for serialization in
// The following is initializing converter functions for serialization in
// JVM.cpp. If we clean up the StrictMath code above we may want to find
// a better solution for this as well.
initialize_converter_functions();
@ -1178,7 +1178,7 @@ void Universe::flush_dependents_on(instanceKlassHandle dependee) {
if (CodeCache::number_of_nmethods_with_dependencies() == 0) return;
// CodeCache can only be updated by a thread_in_VM and they will all be
// stopped dring the safepoint so CodeCache will be safe to update without
// stopped during the safepoint so CodeCache will be safe to update without
// holding the CodeCache_lock.
KlassDepChange changes(dependee);
@ -1199,7 +1199,7 @@ void Universe::flush_dependents_on(Handle call_site, Handle method_handle) {
if (CodeCache::number_of_nmethods_with_dependencies() == 0) return;
// CodeCache can only be updated by a thread_in_VM and they will all be
// stopped dring the safepoint so CodeCache will be safe to update without
// stopped during the safepoint so CodeCache will be safe to update without
// holding the CodeCache_lock.
CallSiteDepChange changes(call_site(), method_handle());
@ -1230,7 +1230,7 @@ void Universe::flush_evol_dependents_on(instanceKlassHandle ev_k_h) {
if (CodeCache::number_of_nmethods_with_dependencies() == 0) return;
// CodeCache can only be updated by a thread_in_VM and they will all be
// stopped dring the safepoint so CodeCache will be safe to update without
// stopped during the safepoint so CodeCache will be safe to update without
// holding the CodeCache_lock.
// Compute the dependent nmethods


@ -38,13 +38,11 @@
#include "utilities/accessFlags.hpp"
#include "utilities/growableArray.hpp"
// A Method* represents a Java method.
// A Method represents a Java method.
//
// Memory layout (each line represents a word). Note that most applications load thousands of methods,
// so keeping the size of this structure small has a big impact on footprint.
//
// We put all oops and method_size first for better gc cache locality.
//
// The actual bytecodes are inlined after the end of the Method struct.
//
// There are bits in the access_flags telling whether inlined tables are present.
@ -64,17 +62,17 @@
// | header |
// | klass |
// |------------------------------------------------------|
// | ConstMethod* (oop) |
// | ConstMethod* (metadata) |
// |------------------------------------------------------|
// | methodData (oop) |
// | methodCounters |
// | MethodData* (metadata) |
// | MethodCounters |
// |------------------------------------------------------|
// | access_flags |
// | vtable_index |
// |------------------------------------------------------|
// | result_index (C++ interpreter only) |
// |------------------------------------------------------|
// | method_size | intrinsic_id| flags |
// | method_size | intrinsic_id | flags |
// |------------------------------------------------------|
// | code (pointer) |
// | i2i (pointer) |

Some files were not shown because too many files have changed in this diff.