From 4a02eb3afe5945166d0d2df1b827d82af4d8d4e8 Mon Sep 17 00:00:00 2001 From: Jiangli Zhou Date: Thu, 29 Mar 2012 22:18:56 -0400 Subject: [PATCH 01/15] 7154670: The instanceKlass _implementors[] and _nof_implementors are not needed for non-interface klass Change implementor to embedded instanceKlass field. Reviewed-by: sspitsyn, minqi, coleenp --- .../jvm/hotspot/jdi/VirtualMachineImpl.java | 14 +--- .../sun/jvm/hotspot/oops/InstanceKlass.java | 18 +---- hotspot/src/share/vm/c1/c1_GraphBuilder.cpp | 4 +- hotspot/src/share/vm/ci/ciInstanceKlass.cpp | 45 +++-------- hotspot/src/share/vm/ci/ciInstanceKlass.hpp | 26 +++--- .../share/vm/classfile/classFileParser.cpp | 4 +- hotspot/src/share/vm/code/dependencies.cpp | 25 +++--- hotspot/src/share/vm/memory/oopFactory.cpp | 7 +- hotspot/src/share/vm/memory/oopFactory.hpp | 3 +- hotspot/src/share/vm/oops/instanceKlass.cpp | 62 ++++++++------- hotspot/src/share/vm/oops/instanceKlass.hpp | 79 +++++++++++++------ .../src/share/vm/oops/instanceKlassKlass.cpp | 55 +++++++------ .../src/share/vm/oops/instanceKlassKlass.hpp | 3 +- hotspot/src/share/vm/runtime/vmStructs.cpp | 3 - 14 files changed, 183 insertions(+), 165 deletions(-) diff --git a/hotspot/agent/src/share/classes/sun/jvm/hotspot/jdi/VirtualMachineImpl.java b/hotspot/agent/src/share/classes/sun/jvm/hotspot/jdi/VirtualMachineImpl.java index 4cbc1447ea9..a7d7d4ee768 100644 --- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/jdi/VirtualMachineImpl.java +++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/jdi/VirtualMachineImpl.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -889,15 +889,9 @@ public class VirtualMachineImpl extends MirrorImpl implements PathSearchingVirtu Klass kls = ((ReferenceTypeImpl)type).ref(); if (kls instanceof InstanceKlass) { InstanceKlass ik = (InstanceKlass) kls; - if (ik.isInterface()) { - if (ik.nofImplementors() == 0L) { - return new ArrayList(0); - } - } else { - // if the Klass is final or if there are no subklasses loaded yet - if (ik.getAccessFlagsObj().isFinal() || ik.getSubklassKlass() == null) { - includeSubtypes = false; - } + // if the Klass is final or if there are no subklasses loaded yet + if (ik.getAccessFlagsObj().isFinal() || ik.getSubklassKlass() == null) { + includeSubtypes = false; } } else { // no subtypes for primitive array types diff --git a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/InstanceKlass.java b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/InstanceKlass.java index d75d890485c..f342eb72882 100644 --- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/InstanceKlass.java +++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/InstanceKlass.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -52,7 +52,6 @@ public class InstanceKlass extends Klass { private static int HIGH_OFFSET; private static int GENERIC_SIGNATURE_INDEX_OFFSET; private static int FIELD_SLOTS; - public static int IMPLEMENTORS_LIMIT; // ClassState constants private static int CLASS_STATE_UNPARSABLE_BY_GC; @@ -70,13 +69,6 @@ public class InstanceKlass extends Klass { methodOrdering = new OopField(type.getOopField("_method_ordering"), Oop.getHeaderSize()); localInterfaces = new OopField(type.getOopField("_local_interfaces"), Oop.getHeaderSize()); transitiveInterfaces = new OopField(type.getOopField("_transitive_interfaces"), Oop.getHeaderSize()); - nofImplementors = new CIntField(type.getCIntegerField("_nof_implementors"), Oop.getHeaderSize()); - IMPLEMENTORS_LIMIT = db.lookupIntConstant("instanceKlass::implementors_limit").intValue(); - implementors = new OopField[IMPLEMENTORS_LIMIT]; - for (int i = 0; i < IMPLEMENTORS_LIMIT; i++) { - long arrayOffset = Oop.getHeaderSize() + (i * db.getAddressSize()); - implementors[i] = new OopField(type.getOopField("_implementors[0]"), arrayOffset); - } fields = new OopField(type.getOopField("_fields"), Oop.getHeaderSize()); javaFieldsCount = new CIntField(type.getCIntegerField("_java_fields_count"), Oop.getHeaderSize()); constants = new OopField(type.getOopField("_constants"), Oop.getHeaderSize()); @@ -136,8 +128,6 @@ public class InstanceKlass extends Klass { private static OopField methodOrdering; private static OopField localInterfaces; private static OopField transitiveInterfaces; - private static CIntField nofImplementors; - private static OopField[] implementors; private static OopField fields; private static CIntField javaFieldsCount; private static OopField constants; @@ -317,9 +307,6 @@ public class InstanceKlass extends Klass { public TypeArray getMethodOrdering() { return (TypeArray) methodOrdering.getValue(this); } public ObjArray getLocalInterfaces() { return (ObjArray) localInterfaces.getValue(this); } public ObjArray getTransitiveInterfaces() { return (ObjArray) transitiveInterfaces.getValue(this); } - public long nofImplementors() { return nofImplementors.getValue(this); } - public Klass getImplementor() { return (Klass) implementors[0].getValue(this); } - public Klass getImplementor(int i) { return (Klass) implementors[i].getValue(this); } public TypeArray getFields() { return (TypeArray) fields.getValue(this); } public int getJavaFieldsCount() { return (int) javaFieldsCount.getValue(this); } public int getAllFieldsCount() { return (int)getFields().getLength() / FIELD_SLOTS; } @@ -527,9 +514,6 @@ public class InstanceKlass extends Klass { visitor.doOop(methodOrdering, true); visitor.doOop(localInterfaces, true); visitor.doOop(transitiveInterfaces, true); - visitor.doCInt(nofImplementors, true); - for (int i = 0; i < IMPLEMENTORS_LIMIT; i++) - visitor.doOop(implementors[i], true); visitor.doOop(fields, true); visitor.doOop(constants, true); visitor.doOop(classLoader, true); diff --git a/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp b/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp index b4659d97261..34fdb591c2e 100644 --- a/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp +++ b/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp @@ -1694,7 +1694,9 @@ void GraphBuilder::invoke(Bytecodes::Code code) { // they are roughly equivalent to Object. 
ciInstanceKlass* singleton = NULL; if (target->holder()->nof_implementors() == 1) { - singleton = target->holder()->implementor(0); + singleton = target->holder()->implementor(); + assert(singleton != NULL && singleton != target->holder(), + "just checking"); assert(holder->is_interface(), "invokeinterface to non interface?"); ciInstanceKlass* decl_interface = (ciInstanceKlass*)holder; diff --git a/hotspot/src/share/vm/ci/ciInstanceKlass.cpp b/hotspot/src/share/vm/ci/ciInstanceKlass.cpp index b7c2ab75850..be730a00ce9 100644 --- a/hotspot/src/share/vm/ci/ciInstanceKlass.cpp +++ b/hotspot/src/share/vm/ci/ciInstanceKlass.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -59,10 +59,7 @@ ciInstanceKlass::ciInstanceKlass(KlassHandle h_k) : _has_nonstatic_fields = ik->has_nonstatic_fields(); _nonstatic_fields = NULL; // initialized lazily by compute_nonstatic_fields: - _nof_implementors = ik->nof_implementors(); - for (int i = 0; i < implementors_limit; i++) { - _implementors[i] = NULL; // we will fill these lazily - } + _implementor = NULL; // we will fill these lazily Thread *thread = Thread::current(); if (ciObjectFactory::is_initialized()) { @@ -102,7 +99,6 @@ ciInstanceKlass::ciInstanceKlass(ciSymbol* name, _nonstatic_field_size = -1; _has_nonstatic_fields = false; _nonstatic_fields = NULL; - _nof_implementors = -1; _loader = loader; _protection_domain = protection_domain; _is_shared = false; @@ -132,17 +128,6 @@ bool ciInstanceKlass::compute_shared_has_subklass() { ) } -// ------------------------------------------------------------------ -// ciInstanceKlass::compute_shared_nof_implementors -int ciInstanceKlass::compute_shared_nof_implementors() { - // We requery this property, since it is a very old ciObject. - GUARDED_VM_ENTRY( - instanceKlass* ik = get_instanceKlass(); - _nof_implementors = ik->nof_implementors(); - return _nof_implementors; - ) -} - // ------------------------------------------------------------------ // ciInstanceKlass::loader oop ciInstanceKlass::loader() { @@ -540,7 +525,7 @@ bool ciInstanceKlass::is_leaf_type() { if (is_shared()) { return is_final(); // approximately correct } else { - return !_has_subklass && (_nof_implementors == 0); + return !_has_subklass && (nof_implementors() == 0); } } @@ -548,35 +533,31 @@ bool ciInstanceKlass::is_leaf_type() { // ciInstanceKlass::implementor // // Report an implementor of this interface. -// Returns NULL if exact information is not available. // Note that there are various races here, since my copy // of _nof_implementors might be out of date with respect // to results returned by instanceKlass::implementor. // This is OK, since any dependencies we decide to assert // will be checked later under the Compile_lock. -ciInstanceKlass* ciInstanceKlass::implementor(int n) { - if (n >= implementors_limit) { - return NULL; - } - ciInstanceKlass* impl = _implementors[n]; +ciInstanceKlass* ciInstanceKlass::implementor() { + ciInstanceKlass* impl = _implementor; if (impl == NULL) { - if (_nof_implementors > implementors_limit) { - return NULL; - } // Go into the VM to fetch the implementor. 
{ VM_ENTRY_MARK; - klassOop k = get_instanceKlass()->implementor(n); + klassOop k = get_instanceKlass()->implementor(); if (k != NULL) { - impl = CURRENT_THREAD_ENV->get_object(k)->as_instance_klass(); + if (k == get_instanceKlass()->as_klassOop()) { + // More than one implementor. Use 'this' in this case. + impl = this; + } else { + impl = CURRENT_THREAD_ENV->get_object(k)->as_instance_klass(); + } } } // Memoize this result. if (!is_shared()) { - _implementors[n] = (impl == NULL)? this: impl; + _implementor = impl; } - } else if (impl == this) { - impl = NULL; // memoized null result from a VM query } return impl; } diff --git a/hotspot/src/share/vm/ci/ciInstanceKlass.hpp b/hotspot/src/share/vm/ci/ciInstanceKlass.hpp index 05ecf87a1e2..f8d0a7bd9eb 100644 --- a/hotspot/src/share/vm/ci/ciInstanceKlass.hpp +++ b/hotspot/src/share/vm/ci/ciInstanceKlass.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -65,9 +65,11 @@ private: ciConstantPoolCache* _field_cache; // cached map index->field GrowableArray<ciField*>* _nonstatic_fields; - enum { implementors_limit = instanceKlass::implementors_limit }; - ciInstanceKlass* _implementors[implementors_limit]; - jint _nof_implementors; + // The possible values of the _implementor fall into the following three cases: + // NULL: no implementor. + // A ciInstanceKlass that's not itself: one implementor. + // Itself: more than one implementor. + ciInstanceKlass* _implementor; GrowableArray<ciField*>* _non_static_fields; @@ -97,7 +99,6 @@ protected: void compute_shared_init_state(); bool compute_shared_has_subklass(); - int compute_shared_nof_implementors(); int compute_nonstatic_fields(); GrowableArray<ciField*>* compute_nonstatic_fields_impl(GrowableArray<ciField*>* super_fields); @@ -158,10 +159,17 @@ public: assert(is_loaded(), "must be loaded"); return _nonstatic_oop_map_size; } ciInstanceKlass* super(); - jint nof_implementors() { + jint nof_implementors() { + ciInstanceKlass* impl; assert(is_loaded(), "must be loaded"); - if (_is_shared) return compute_shared_nof_implementors(); - return _nof_implementors; + impl = implementor(); + if (impl == NULL) { + return 0; + } else if (impl != this) { + return 1; + } else { + return 2; + } } ciInstanceKlass* get_canonical_holder(int offset); @@ -207,7 +215,7 @@ public: // but consider adding to vmSymbols.hpp instead. bool is_leaf_type(); - ciInstanceKlass* implementor(int n); + ciInstanceKlass* implementor(); // Is the defining class loader of this class the default loader? bool uses_default_loader(); diff --git a/hotspot/src/share/vm/classfile/classFileParser.cpp b/hotspot/src/share/vm/classfile/classFileParser.cpp index 7ace834a086..218f2c986bb 100644 --- a/hotspot/src/share/vm/classfile/classFileParser.cpp +++ b/hotspot/src/share/vm/classfile/classFileParser.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* * This code is free software; you can redistribute it and/or modify it @@ -3354,6 +3354,7 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name, klassOop ik = oopFactory::new_instanceKlass(name, vtable_size, itable_size, static_field_size, total_oop_map_count, + access_flags, rt, CHECK_(nullHandle)); instanceKlassHandle this_klass (THREAD, ik); @@ -3362,7 +3363,6 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name, "sanity"); // Fill in information already parsed - this_klass->set_access_flags(access_flags); this_klass->set_should_verify_class(verify); jint lh = Klass::instance_layout_helper(instance_size, false); this_klass->set_layout_helper(lh); diff --git a/hotspot/src/share/vm/code/dependencies.cpp b/hotspot/src/share/vm/code/dependencies.cpp index a3fd99ecd45..3d6705680bf 100644 --- a/hotspot/src/share/vm/code/dependencies.cpp +++ b/hotspot/src/share/vm/code/dependencies.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1033,21 +1033,25 @@ klassOop ClassHierarchyWalker::find_witness_anywhere(klassOop context_type, // (Old CHA had the same limitation.) return context_type; } - for (int i = 0; i < nof_impls; i++) { - klassOop impl = instanceKlass::cast(context_type)->implementor(i); - if (impl == NULL) { - // implementors array overflowed => no exact info. + if (nof_impls > 0) { + klassOop impl = instanceKlass::cast(context_type)->implementor(); + assert(impl != NULL, "just checking"); + // If impl is the same as the context_type, then more than one + // implementor has been seen. No exact info in this case. + if (impl == context_type) { return context_type; // report an inexact witness to this sad affair } if (do_counts) { NOT_PRODUCT(deps_find_witness_steps++); } if (is_participant(impl)) { - if (participants_hide_witnesses) continue; - // else fall through to process this guy's subclasses + if (!participants_hide_witnesses) { + ADD_SUBCLASS_CHAIN(impl); + } } else if (is_witness(impl) && !ignore_witness(impl)) { return impl; + } else { + ADD_SUBCLASS_CHAIN(impl); } - ADD_SUBCLASS_CHAIN(impl); } // Recursively process each non-trivial sibling chain. @@ -1174,8 +1178,9 @@ klassOop Dependencies::check_leaf_type(klassOop ctxk) { } else if (ctx->nof_implementors() != 0) { // if it is an interface, it must be unimplemented // (if it is not an interface, nof_implementors is always zero) - klassOop impl = ctx->implementor(0); - return (impl != NULL)? impl: ctxk; + klassOop impl = ctx->implementor(); + assert(impl != NULL, "must be set"); + return impl; } else { return NULL; } diff --git a/hotspot/src/share/vm/memory/oopFactory.cpp b/hotspot/src/share/vm/memory/oopFactory.cpp index c6e644c96d4..fcac438bbe3 100644 --- a/hotspot/src/share/vm/memory/oopFactory.cpp +++ b/hotspot/src/share/vm/memory/oopFactory.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* * This code is free software; you can redistribute it and/or modify it @@ -127,9 +127,12 @@ constantPoolCacheOop oopFactory::new_constantPoolCache(int length, klassOop oopFactory::new_instanceKlass(Symbol* name, int vtable_len, int itable_len, int static_field_size, unsigned int nonstatic_oop_map_count, + AccessFlags access_flags, ReferenceType rt, TRAPS) { instanceKlassKlass* ikk = instanceKlassKlass::cast(Universe::instanceKlassKlassObj()); - return ikk->allocate_instance_klass(name, vtable_len, itable_len, static_field_size, nonstatic_oop_map_count, rt, CHECK_NULL); + return ikk->allocate_instance_klass(name, vtable_len, itable_len, + static_field_size, nonstatic_oop_map_count, + access_flags, rt, CHECK_NULL); } diff --git a/hotspot/src/share/vm/memory/oopFactory.hpp b/hotspot/src/share/vm/memory/oopFactory.hpp index dbb42f05332..61faf8e3db7 100644 --- a/hotspot/src/share/vm/memory/oopFactory.hpp +++ b/hotspot/src/share/vm/memory/oopFactory.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -77,6 +77,7 @@ class oopFactory: AllStatic { int vtable_len, int itable_len, int static_field_size, unsigned int nonstatic_oop_map_count, + AccessFlags access_flags, ReferenceType rt, TRAPS); // Methods diff --git a/hotspot/src/share/vm/oops/instanceKlass.cpp b/hotspot/src/share/vm/oops/instanceKlass.cpp index 81edd80946f..aa5f7765a79 100644 --- a/hotspot/src/share/vm/oops/instanceKlass.cpp +++ b/hotspot/src/share/vm/oops/instanceKlass.cpp @@ -567,8 +567,18 @@ void instanceKlass::set_initialization_state_and_notify_impl(instanceKlassHandle ol.notify_all(CHECK); } +// The embedded _implementor field can only record one implementor. +// When there is more than one implementor, the _implementor field +// is set to the interface klassOop itself. The following are the possible +// values for the _implementor field: +// NULL - no implementor +// implementor klassOop - one implementor +// self - more than one implementor +// +// The _implementor field only exists for interfaces. void instanceKlass::add_implementor(klassOop k) { assert(Compile_lock->owned_by_self(), ""); + assert(is_interface(), "not interface"); // Filter out my subinterfaces. // (Note: Interfaces are never on the subklass list.) if (instanceKlass::cast(k)->is_interface()) return; @@ -583,17 +593,13 @@ void instanceKlass::add_implementor(klassOop k) { // Any supers of the super have the same (or fewer) transitive_interfaces. return; - // Update number of implementors - int i = _nof_implementors++; - - // Record this implementor, if there are not too many already - if (i < implementors_limit) { - assert(_implementors[i] == NULL, "should be exactly one implementor"); - oop_store_without_check((oop*)&_implementors[i], k); - } else if (i == implementors_limit) { - // clear out the list on first overflow - for (int i2 = 0; i2 < implementors_limit; i2++) - oop_store_without_check((oop*)&_implementors[i2], NULL); + klassOop ik = implementor(); + if (ik == NULL) { + set_implementor(k); + } else if (ik != this->as_klassOop()) { + // There is already an implementor. Use the interface itself as an + // indicator of more than one implementor.
+ set_implementor(this->as_klassOop()); } // The implementor also implements the transitive_interfaces @@ -603,9 +609,9 @@ void instanceKlass::add_implementor(klassOop k) { } void instanceKlass::init_implementor() { - for (int i = 0; i < implementors_limit; i++) - oop_store_without_check((oop*)&_implementors[i], NULL); - _nof_implementors = 0; + if (is_interface()) { + set_implementor(NULL); + } } @@ -1849,24 +1855,22 @@ int instanceKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) { void instanceKlass::follow_weak_klass_links( BoolObjectClosure* is_alive, OopClosure* keep_alive) { assert(is_alive->do_object_b(as_klassOop()), "this oop should be live"); - if (ClassUnloading) { - for (int i = 0; i < implementors_limit; i++) { - klassOop impl = _implementors[i]; - if (impl == NULL) break; // no more in the list - if (!is_alive->do_object_b(impl)) { - // remove this guy from the list by overwriting him with the tail - int lasti = --_nof_implementors; - assert(lasti >= i && lasti < implementors_limit, "just checking"); - _implementors[i] = _implementors[lasti]; - _implementors[lasti] = NULL; - --i; // rerun the loop at this index + + if (is_interface()) { + if (ClassUnloading) { + klassOop impl = implementor(); + if (impl != NULL) { + if (!is_alive->do_object_b(impl)) { + // remove this guy + *start_of_implementor() = NULL; + } } - } - } else { - for (int i = 0; i < implementors_limit; i++) { - keep_alive->do_oop(&adr_implementors()[i]); + } else { + assert(adr_implementor() != NULL, "just checking"); + keep_alive->do_oop(adr_implementor()); } } + Klass::follow_weak_klass_links(is_alive, keep_alive); } diff --git a/hotspot/src/share/vm/oops/instanceKlass.hpp b/hotspot/src/share/vm/oops/instanceKlass.hpp index 9cbfa0fbfb6..32a00f5c6b0 100644 --- a/hotspot/src/share/vm/oops/instanceKlass.hpp +++ b/hotspot/src/share/vm/oops/instanceKlass.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -56,8 +56,6 @@ // [methods ] // [local interfaces ] // [transitive interfaces ] -// [number of implementors ] -// [implementors ] klassOop[2] // [fields ] // [constants ] // [class loader ] @@ -77,9 +75,9 @@ // [oop map cache (stack maps) ] // [EMBEDDED Java vtable ] size in words = vtable_len // [EMBEDDED nonstatic oop-map blocks] size in words = nonstatic_oop_map_size -// -// The embedded nonstatic oop-map blocks are short pairs (offset, length) indicating -// where oops are located in instances of this klass. +// The embedded nonstatic oop-map blocks are short pairs (offset, length) +// indicating where oops are located in instances of this klass. +// [EMBEDDED implementor of the interface] only exists for interfaces // forward declaration for class -- see below for definition @@ -153,10 +151,6 @@ class instanceKlass: public Klass { oop* oop_block_beg() const { return adr_array_klasses(); } oop* oop_block_end() const { return adr_methods_default_annotations() + 1; } - enum { - implementors_limit = 2 // how many implems can we track? - }; - protected: // // The oop block. See comment in klass.hpp before making changes. @@ -200,8 +194,6 @@ class instanceKlass: public Klass { // and EnclosingMethod attributes the _inner_classes array length is // number_of_inner_classes * 4 + enclosing_method_attribute_size.
typeArrayOop _inner_classes; - // Implementors of this interface (not valid if it overflows) - klassOop _implementors[implementors_limit]; // Annotations for this class, or null if none. typeArrayOop _class_annotations; // Annotation objects (byte arrays) for fields, or null if no annotations. @@ -257,7 +249,6 @@ class instanceKlass: public Klass { nmethodBucket* _dependencies; // list of dependent nmethods nmethod* _osr_nmethods_head; // Head of list of on-stack replacement nmethods for this class BreakpointInfo* _breakpoints; // bpt lists, managed by methodOop - int _nof_implementors; // No of implementors of this interface (zero if not an interface) // Array of interesting part(s) of the previous version(s) of this // instanceKlass. See PreviousVersionWalker below. GrowableArray<PreviousVersionNode *>* _previous_versions; @@ -278,6 +269,13 @@ class instanceKlass: public Klass { // embedded Java itables follows here // embedded static fields follows here // embedded nonstatic oop-map blocks follows here + // embedded implementor of this interface follows here + // The embedded implementor only exists if the current klass is an + // interface. The possible values of the implementor fall into the following + // three cases: + // NULL: no implementor. + // A klassOop that's not itself: one implementor. + // Itself: more than one implementor. friend class instanceKlassKlass; friend class SystemDictionary; @@ -649,14 +647,34 @@ class instanceKlass: public Klass { // subclass/subinterface checks bool implements_interface(klassOop k) const; - // Access to implementors of an interface. We only store the count - // of implementors, and in case, there are only a few - // implementors, we store them in a short list. - // This accessor returns NULL if we walk off the end of the list. - klassOop implementor(int i) const { - return (i < implementors_limit)? _implementors[i]: (klassOop) NULL; + // Access to the implementor of an interface. + klassOop implementor() const + { + klassOop* k = start_of_implementor(); + if (k == NULL) { + return NULL; + } else { + return *k; + } } - int nof_implementors() const { return _nof_implementors; } + + void set_implementor(klassOop k) { + assert(is_interface(), "not interface"); + oop* addr = (oop*)start_of_implementor(); + oop_store_without_check(addr, k); + } + + int nof_implementors() const { + klassOop k = implementor(); + if (k == NULL) { + return 0; + } else if (k != this->as_klassOop()) { + return 1; + } else { + return 2; + } + } + void add_implementor(klassOop k); // k is a new class that implements this interface void init_implementor(); // initialize @@ -693,7 +711,15 @@ class instanceKlass: public Klass { // Sizing (in words) static int header_size() { return align_object_offset(oopDesc::header_size() + sizeof(instanceKlass)/HeapWordSize); } - int object_size() const { return object_size(align_object_offset(vtable_length()) + align_object_offset(itable_length()) + nonstatic_oop_map_size()); } + + int object_size() const + { + return object_size(align_object_offset(vtable_length()) + + align_object_offset(itable_length()) + + (is_interface() ?
+ (align_object_offset(nonstatic_oop_map_size()) + (int)sizeof(klassOop)/HeapWordSize) : + nonstatic_oop_map_size())); + } static int vtable_start_offset() { return header_size(); } static int vtable_length_offset() { return oopDesc::header_size() + offset_of(instanceKlass, _vtable_len) / HeapWordSize; } static int object_size(int extra) { return align_object_size(header_size() + extra); } @@ -710,6 +736,15 @@ class instanceKlass: public Klass { return (OopMapBlock*)(start_of_itable() + align_object_offset(itable_length())); } + klassOop* start_of_implementor() const { + if (is_interface()) { + return (klassOop*)(start_of_nonstatic_oop_maps() + + nonstatic_oop_map_count()); + } else { + return NULL; + } + }; + // Allocation profiling support juint alloc_size() const { return _alloc_count * size_helper(); } void set_alloc_size(juint n) {} @@ -819,7 +854,7 @@ private: oop* adr_host_klass() const { return (oop*)&this->_host_klass;} oop* adr_signers() const { return (oop*)&this->_signers;} oop* adr_inner_classes() const { return (oop*)&this->_inner_classes;} - oop* adr_implementors() const { return (oop*)&this->_implementors[0];} + oop* adr_implementor() const { return (oop*)start_of_implementor(); } oop* adr_methods_jmethod_ids() const { return (oop*)&this->_methods_jmethod_ids;} oop* adr_methods_cached_itable_indices() const { return (oop*)&this->_methods_cached_itable_indices;} oop* adr_class_annotations() const { return (oop*)&this->_class_annotations;} diff --git a/hotspot/src/share/vm/oops/instanceKlassKlass.cpp b/hotspot/src/share/vm/oops/instanceKlassKlass.cpp index 2ceef2a9107..808b6e56999 100644 --- a/hotspot/src/share/vm/oops/instanceKlassKlass.cpp +++ b/hotspot/src/share/vm/oops/instanceKlassKlass.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -111,7 +111,7 @@ void instanceKlassKlass::oop_follow_contents(oop obj) { MarkSweep::mark_and_push(ik->adr_methods_parameter_annotations()); MarkSweep::mark_and_push(ik->adr_methods_default_annotations()); - // We do not follow adr_implementors() here. It is followed later + // We do not follow adr_implementor() here. 
It is followed later // in instanceKlass::follow_weak_klass_links() klassKlass::oop_follow_contents(obj); @@ -180,8 +180,8 @@ int instanceKlassKlass::oop_oop_iterate(oop obj, OopClosure* blk) { blk->do_oop(ik->adr_host_klass()); blk->do_oop(ik->adr_signers()); blk->do_oop(ik->adr_inner_classes()); - for (int i = 0; i < instanceKlass::implementors_limit; i++) { - blk->do_oop(&ik->adr_implementors()[i]); + if (ik->is_interface()) { + blk->do_oop(ik->adr_implementor()); } blk->do_oop(ik->adr_class_annotations()); blk->do_oop(ik->adr_fields_annotations()); @@ -232,9 +232,9 @@ int instanceKlassKlass::oop_oop_iterate_m(oop obj, OopClosure* blk, if (mr.contains(adr)) blk->do_oop(adr); adr = ik->adr_inner_classes(); if (mr.contains(adr)) blk->do_oop(adr); - adr = ik->adr_implementors(); - for (int i = 0; i < instanceKlass::implementors_limit; i++) { - if (mr.contains(&adr[i])) blk->do_oop(&adr[i]); + if (ik->is_interface()) { + adr = ik->adr_implementor(); + if (mr.contains(adr)) blk->do_oop(adr); } adr = ik->adr_class_annotations(); if (mr.contains(adr)) blk->do_oop(adr); @@ -273,8 +273,8 @@ int instanceKlassKlass::oop_adjust_pointers(oop obj) { MarkSweep::adjust_pointer(ik->adr_host_klass()); MarkSweep::adjust_pointer(ik->adr_signers()); MarkSweep::adjust_pointer(ik->adr_inner_classes()); - for (int i = 0; i < instanceKlass::implementors_limit; i++) { - MarkSweep::adjust_pointer(&ik->adr_implementors()[i]); + if (ik->is_interface()) { + MarkSweep::adjust_pointer(ik->adr_implementor()); } MarkSweep::adjust_pointer(ik->adr_class_annotations()); MarkSweep::adjust_pointer(ik->adr_fields_annotations()); @@ -328,6 +328,9 @@ int instanceKlassKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) { for (oop* cur_oop = beg_oop; cur_oop < end_oop; ++cur_oop) { PSParallelCompact::adjust_pointer(cur_oop); } + if (ik->is_interface()) { + PSParallelCompact::adjust_pointer(ik->adr_implementor()); + } OopClosure* closure = PSParallelCompact::adjust_root_pointer_closure(); iterate_c_heap_oops(ik, closure); @@ -342,11 +345,18 @@ klassOop instanceKlassKlass::allocate_instance_klass(Symbol* name, int vtable_len, int itable_len, int static_field_size, unsigned nonstatic_oop_map_count, + AccessFlags access_flags, ReferenceType rt, TRAPS) { const int nonstatic_oop_map_size = instanceKlass::nonstatic_oop_map_size(nonstatic_oop_map_count); - int size = instanceKlass::object_size(align_object_offset(vtable_len) + align_object_offset(itable_len) + nonstatic_oop_map_size); + int size = align_object_offset(vtable_len) + align_object_offset(itable_len); + if (access_flags.is_interface()) { + size += align_object_offset(nonstatic_oop_map_size) + (int)sizeof(klassOop)/HeapWordSize; + } else { + size += nonstatic_oop_map_size; + } + size = instanceKlass::object_size(size); // Allocation KlassHandle h_this_klass(THREAD, as_klassOop()); @@ -378,6 +388,7 @@ instanceKlassKlass::allocate_instance_klass(Symbol* name, int vtable_len, int it ik->set_itable_length(itable_len); ik->set_static_field_size(static_field_size); ik->set_nonstatic_oop_map_size(nonstatic_oop_map_size); + ik->set_access_flags(access_flags); assert(k()->size() == size, "wrong size for object"); ik->set_array_klasses(NULL); @@ -470,16 +481,12 @@ void instanceKlassKlass::oop_print_on(oop obj, outputStream* st) { if (ik->is_interface()) { st->print_cr(BULLET"nof implementors: %d", ik->nof_implementors()); - int print_impl = 0; - for (int i = 0; i < instanceKlass::implementors_limit; i++) { - if (ik->implementor(i) != NULL) { - if (++print_impl == 1) - 
st->print_cr(BULLET"implementor: "); - st->print(" "); - ik->implementor(i)->print_value_on(st); - } + if (ik->nof_implementors() == 1) { + st->print_cr(BULLET"implementor: "); + st->print(" "); + ik->implementor()->print_value_on(st); + st->cr(); } - if (print_impl > 0) st->cr(); } st->print(BULLET"arrays: "); ik->array_klasses()->print_value_on(st); st->cr(); @@ -640,16 +647,12 @@ void instanceKlassKlass::oop_verify_on(oop obj, outputStream* st) { } // Verify implementor fields - bool saw_null_impl = false; - for (int i = 0; i < instanceKlass::implementors_limit; i++) { - klassOop im = ik->implementor(i); - if (im == NULL) { saw_null_impl = true; continue; } - guarantee(!saw_null_impl, "non-nulls must preceded all nulls"); + klassOop im = ik->implementor(); + if (im != NULL) { guarantee(ik->is_interface(), "only interfaces should have implementor set"); - guarantee(i < ik->nof_implementors(), "should only have one implementor"); guarantee(im->is_perm(), "should be in permspace"); guarantee(im->is_klass(), "should be klass"); - guarantee(!Klass::cast(klassOop(im))->is_interface(), "implementors cannot be interfaces"); + guarantee(!Klass::cast(klassOop(im))->is_interface() || im == ik->as_klassOop(), "implementors cannot be interfaces"); } // Verify local interfaces diff --git a/hotspot/src/share/vm/oops/instanceKlassKlass.hpp b/hotspot/src/share/vm/oops/instanceKlassKlass.hpp index 9cbabe24644..ced9451da58 100644 --- a/hotspot/src/share/vm/oops/instanceKlassKlass.hpp +++ b/hotspot/src/share/vm/oops/instanceKlassKlass.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -46,6 +46,7 @@ class instanceKlassKlass : public klassKlass { int itable_len, int static_field_size, unsigned int nonstatic_oop_map_count, + AccessFlags access_flags, ReferenceType rt, TRAPS); diff --git a/hotspot/src/share/vm/runtime/vmStructs.cpp b/hotspot/src/share/vm/runtime/vmStructs.cpp index 924cde15404..25d3b5b4179 100644 --- a/hotspot/src/share/vm/runtime/vmStructs.cpp +++ b/hotspot/src/share/vm/runtime/vmStructs.cpp @@ -292,8 +292,6 @@ static inline uint64_t cast_uint64_t(size_t x) nonstatic_field(instanceKlass, _method_ordering, typeArrayOop) \ nonstatic_field(instanceKlass, _local_interfaces, objArrayOop) \ nonstatic_field(instanceKlass, _transitive_interfaces, objArrayOop) \ - nonstatic_field(instanceKlass, _nof_implementors, int) \ - nonstatic_field(instanceKlass, _implementors[0], klassOop) \ nonstatic_field(instanceKlass, _fields, typeArrayOop) \ nonstatic_field(instanceKlass, _java_fields_count, u2) \ nonstatic_field(instanceKlass, _constants, constantPoolOop) \ @@ -2343,7 +2341,6 @@ static inline uint64_t cast_uint64_t(size_t x) /* instanceKlass enum */ \ /*************************************/ \ \ - declare_constant(instanceKlass::implementors_limit) \ \ /*************************************/ \ /* FieldInfo FieldOffset enum */ \ From beaeca558b2c0c6a74b668e0356dbc3c4a3dc5dd Mon Sep 17 00:00:00 2001 From: Axel Siebenborn Date: Thu, 5 Apr 2012 12:17:52 -0400 Subject: [PATCH 02/15] 7158988: jvm crashes while debugging on x86_32 and x86_64 Object pointer is pushed more than once on stack, where GC doesn't expect it. 
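[Editorial note, not part of the original commit message: the old jvmti_post_fast_field_mod() sequence reserved a raw, untyped jvalue scratch area with subptr(rsp, sizeof(jvalue)) and copied the object pointer into it, so at the call_VM() safepoint that oop sat in a stack slot the interpreter's oop map does not describe. The rewritten sequence instead saves the tos value with the interpreter's typed push_*()/pop_*() primitives and lets those tagged slots double as the jvalue. Condensed here from the x86_32 diff below; wording ours, not the author's:

  __ push_ptr(rax);   // typed save of tos for _fast_aputfield; doubles as the jvalue
                      // (push_i/push_f/push_d/push_l for the other put bytecodes)
  __ mov(rcx, rsp);   // rcx points at the jvalue on the stack
  __ get_cache_entry_pointer_at_bcp(rax, rdx, 1);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address,
             InterpreterRuntime::post_field_modification), rbx, rax, rcx);
  __ pop_ptr(rax);    // typed restore of tos

Every oop now lives only in slots the GC already knows how to walk.]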
Reviewed-by: coleenp, kvn --- .../src/cpu/x86/vm/templateTable_x86_32.cpp | 87 +++--- .../src/cpu/x86/vm/templateTable_x86_64.cpp | 40 +-- .../test/runtime/7158988/FieldMonitor.java | 249 ++++++++++++++++++ .../test/runtime/7158988/TestFieldMonitor.sh | 94 +++++++ .../7158988/TestPostFieldModification.java | 249 ++++++++++++++++++ 5 files changed, 655 insertions(+), 64 deletions(-) create mode 100644 hotspot/test/runtime/7158988/FieldMonitor.java create mode 100644 hotspot/test/runtime/7158988/TestFieldMonitor.sh create mode 100644 hotspot/test/runtime/7158988/TestPostFieldModification.java diff --git a/hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp b/hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp index 1cbc67e6060..6a5fb90c050 100644 --- a/hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp +++ b/hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp @@ -2651,56 +2651,49 @@ void TemplateTable::jvmti_post_fast_field_mod() { // Check to see if a field modification watch has been set before we take // the time to call into the VM. Label L2; - __ mov32(rcx, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr())); - __ testl(rcx,rcx); - __ jcc(Assembler::zero, L2); - __ pop_ptr(rbx); // copy the object pointer from tos - __ verify_oop(rbx); - __ push_ptr(rbx); // put the object pointer back on tos - __ subptr(rsp, sizeof(jvalue)); // add space for a jvalue object - __ mov(rcx, rsp); - __ push_ptr(rbx); // save object pointer so we can steal rbx, - __ xorptr(rbx, rbx); - const Address lo_value(rcx, rbx, Address::times_1, 0*wordSize); - const Address hi_value(rcx, rbx, Address::times_1, 1*wordSize); - switch (bytecode()) { // load values into the jvalue object - case Bytecodes::_fast_bputfield: __ movb(lo_value, rax); break; - case Bytecodes::_fast_sputfield: __ movw(lo_value, rax); break; - case Bytecodes::_fast_cputfield: __ movw(lo_value, rax); break; - case Bytecodes::_fast_iputfield: __ movl(lo_value, rax); break; - case Bytecodes::_fast_lputfield: - NOT_LP64(__ movptr(hi_value, rdx)); - __ movptr(lo_value, rax); - break; + __ mov32(rcx, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr())); + __ testl(rcx,rcx); + __ jcc(Assembler::zero, L2); + __ pop_ptr(rbx); // copy the object pointer from tos + __ verify_oop(rbx); + __ push_ptr(rbx); // put the object pointer back on tos - // need to call fld_s() after fstp_s() to restore the value for below - case Bytecodes::_fast_fputfield: __ fstp_s(lo_value); __ fld_s(lo_value); break; + // Save tos values before call_VM() clobbers them. Since we have + // to do it for every data type, we use the saved values as the + // jvalue object. 
+ switch (bytecode()) { // load values into the jvalue object + case Bytecodes::_fast_aputfield: __ push_ptr(rax); break; + case Bytecodes::_fast_bputfield: // fall through + case Bytecodes::_fast_sputfield: // fall through + case Bytecodes::_fast_cputfield: // fall through + case Bytecodes::_fast_iputfield: __ push_i(rax); break; + case Bytecodes::_fast_dputfield: __ push_d(); break; + case Bytecodes::_fast_fputfield: __ push_f(); break; + case Bytecodes::_fast_lputfield: __ push_l(rax); break; - // need to call fld_d() after fstp_d() to restore the value for below - case Bytecodes::_fast_dputfield: __ fstp_d(lo_value); __ fld_d(lo_value); break; + default: + ShouldNotReachHere(); + } + __ mov(rcx, rsp); // points to jvalue on the stack + // access constant pool cache entry + __ get_cache_entry_pointer_at_bcp(rax, rdx, 1); + __ verify_oop(rbx); + // rbx,: object pointer copied above + // rax,: cache entry pointer + // rcx: jvalue object on the stack + __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), rbx, rax, rcx); - // since rcx is not an object we don't call store_check() here - case Bytecodes::_fast_aputfield: __ movptr(lo_value, rax); break; - - default: ShouldNotReachHere(); - } - __ pop_ptr(rbx); // restore copy of object pointer - - // Save rax, and sometimes rdx because call_VM() will clobber them, - // then use them for JVM/DI purposes - __ push(rax); - if (bytecode() == Bytecodes::_fast_lputfield) __ push(rdx); - // access constant pool cache entry - __ get_cache_entry_pointer_at_bcp(rax, rdx, 1); - __ verify_oop(rbx); - // rbx,: object pointer copied above - // rax,: cache entry pointer - // rcx: jvalue object on the stack - __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), rbx, rax, rcx); - if (bytecode() == Bytecodes::_fast_lputfield) __ pop(rdx); // restore high value - __ pop(rax); // restore lower value - __ addptr(rsp, sizeof(jvalue)); // release jvalue object space - __ bind(L2); + switch (bytecode()) { // restore tos values + case Bytecodes::_fast_aputfield: __ pop_ptr(rax); break; + case Bytecodes::_fast_bputfield: // fall through + case Bytecodes::_fast_sputfield: // fall through + case Bytecodes::_fast_cputfield: // fall through + case Bytecodes::_fast_iputfield: __ pop_i(rax); break; + case Bytecodes::_fast_dputfield: __ pop_d(); break; + case Bytecodes::_fast_fputfield: __ pop_f(); break; + case Bytecodes::_fast_lputfield: __ pop_l(rax); break; + } + __ bind(L2); } } diff --git a/hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp b/hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp index 0e5ac274f36..6bb302f2992 100644 --- a/hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp +++ b/hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp @@ -2685,26 +2685,23 @@ void TemplateTable::jvmti_post_fast_field_mod() { __ pop_ptr(rbx); // copy the object pointer from tos __ verify_oop(rbx); __ push_ptr(rbx); // put the object pointer back on tos - __ subptr(rsp, sizeof(jvalue)); // add space for a jvalue object - __ mov(c_rarg3, rsp); - const Address field(c_rarg3, 0); - + // Save tos values before call_VM() clobbers them. Since we have + // to do it for every data type, we use the saved values as the + // jvalue object. 
switch (bytecode()) { // load values into the jvalue object - case Bytecodes::_fast_aputfield: __ movq(field, rax); break; - case Bytecodes::_fast_lputfield: __ movq(field, rax); break; - case Bytecodes::_fast_iputfield: __ movl(field, rax); break; - case Bytecodes::_fast_bputfield: __ movb(field, rax); break; + case Bytecodes::_fast_aputfield: __ push_ptr(rax); break; + case Bytecodes::_fast_bputfield: // fall through case Bytecodes::_fast_sputfield: // fall through - case Bytecodes::_fast_cputfield: __ movw(field, rax); break; - case Bytecodes::_fast_fputfield: __ movflt(field, xmm0); break; - case Bytecodes::_fast_dputfield: __ movdbl(field, xmm0); break; + case Bytecodes::_fast_cputfield: // fall through + case Bytecodes::_fast_iputfield: __ push_i(rax); break; + case Bytecodes::_fast_dputfield: __ push_d(); break; + case Bytecodes::_fast_fputfield: __ push_f(); break; + case Bytecodes::_fast_lputfield: __ push_l(rax); break; + default: ShouldNotReachHere(); } - - // Save rax because call_VM() will clobber it, then use it for - // JVMTI purposes - __ push(rax); + __ mov(c_rarg3, rsp); // points to jvalue on the stack // access constant pool cache entry __ get_cache_entry_pointer_at_bcp(c_rarg2, rax, 1); __ verify_oop(rbx); @@ -2715,8 +2712,17 @@ void TemplateTable::jvmti_post_fast_field_mod() { CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), rbx, c_rarg2, c_rarg3); - __ pop(rax); // restore lower value - __ addptr(rsp, sizeof(jvalue)); // release jvalue object space + + switch (bytecode()) { // restore tos values + case Bytecodes::_fast_aputfield: __ pop_ptr(rax); break; + case Bytecodes::_fast_bputfield: // fall through + case Bytecodes::_fast_sputfield: // fall through + case Bytecodes::_fast_cputfield: // fall through + case Bytecodes::_fast_iputfield: __ pop_i(rax); break; + case Bytecodes::_fast_dputfield: __ pop_d(); break; + case Bytecodes::_fast_fputfield: __ pop_f(); break; + case Bytecodes::_fast_lputfield: __ pop_l(rax); break; + } __ bind(L2); } } diff --git a/hotspot/test/runtime/7158988/FieldMonitor.java b/hotspot/test/runtime/7158988/FieldMonitor.java new file mode 100644 index 00000000000..584d39d20ca --- /dev/null +++ b/hotspot/test/runtime/7158988/FieldMonitor.java @@ -0,0 +1,249 @@ +/* + * Copyright 2012 SAP AG. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/* + * @test FieldMonitor.java + * @bug 7158988 + * @summary verify jvm does not crash while debugging + * @run shell TestFieldMonitor.sh + * @author axel.siebenborn@sap.com + */ +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.io.OutputStream; +import java.io.OutputStreamWriter; +import java.io.Reader; +import java.io.Writer; +import java.util.Iterator; +import java.util.List; +import java.util.Map; + +import com.sun.jdi.Bootstrap; +import com.sun.jdi.Field; +import com.sun.jdi.ReferenceType; +import com.sun.jdi.VirtualMachine; +import com.sun.jdi.connect.Connector; +import com.sun.jdi.connect.IllegalConnectorArgumentsException; +import com.sun.jdi.connect.LaunchingConnector; +import com.sun.jdi.connect.VMStartException; +import com.sun.jdi.event.ClassPrepareEvent; +import com.sun.jdi.event.Event; +import com.sun.jdi.event.EventQueue; +import com.sun.jdi.event.EventSet; +import com.sun.jdi.event.ModificationWatchpointEvent; +import com.sun.jdi.event.VMDeathEvent; +import com.sun.jdi.event.VMDisconnectEvent; +import com.sun.jdi.request.ClassPrepareRequest; +import com.sun.jdi.request.EventRequest; +import com.sun.jdi.request.EventRequestManager; +import com.sun.jdi.request.ModificationWatchpointRequest; + +public class FieldMonitor { + + public static final String CLASS_NAME = "TestPostFieldModification"; + public static final String FIELD_NAME = "value"; + public static final String ARGUMENTS = "-Xshare:off -XX:+PrintGC"; + + public static void main(String[] args) + throws IOException, InterruptedException { + + StringBuffer sb = new StringBuffer(); + + for (int i=0; i < args.length; i++) { + sb.append(' '); + sb.append(args[i]); + } + //VirtualMachine vm = launchTarget(sb.toString()); + VirtualMachine vm = launchTarget(CLASS_NAME); + + System.out.println("Vm launched"); + // set watch field on already loaded classes + List referenceTypes = vm + .classesByName(CLASS_NAME); + for (ReferenceType refType : referenceTypes) { + addFieldWatch(vm, refType); + } + // watch for loaded classes + addClassWatch(vm); + + // process events + EventQueue eventQueue = vm.eventQueue(); + // resume the vm + + Process process = vm.process(); + + + // Copy target's output and error to our output and error. 
+ Thread outThread = new StreamRedirectThread("out reader", process.getInputStream()); + Thread errThread = new StreamRedirectThread("error reader", process.getErrorStream()); + + errThread.start(); + outThread.start(); + + + vm.resume(); + boolean connected = true; + while (connected) { + EventSet eventSet = eventQueue.remove(); + for (Event event : eventSet) { + if (event instanceof VMDeathEvent + || event instanceof VMDisconnectEvent) { + // exit + connected = false; + } else if (event instanceof ClassPrepareEvent) { + // watch field on loaded class + System.out.println("ClassPrepareEvent"); + ClassPrepareEvent classPrepEvent = (ClassPrepareEvent) event; + ReferenceType refType = classPrepEvent + .referenceType(); + addFieldWatch(vm, refType); + } else if (event instanceof ModificationWatchpointEvent) { + System.out.println("sleep for 500 ms"); + Thread.sleep(500); + System.out.println("resume..."); + + ModificationWatchpointEvent modEvent = (ModificationWatchpointEvent) event; + System.out.println("old=" + + modEvent.valueCurrent()); + System.out.println("new=" + modEvent.valueToBe()); + System.out.println(); + } + } + eventSet.resume(); + } + // Shutdown begins when event thread terminates + try { + errThread.join(); // Make sure output is forwarded + outThread.join(); + } catch (InterruptedException exc) { + // we don't interrupt + } + } + + /** + * Find a com.sun.jdi.CommandLineLaunch connector + */ + static LaunchingConnector findLaunchingConnector() { + List<Connector> connectors = Bootstrap.virtualMachineManager().allConnectors(); + Iterator<Connector> iter = connectors.iterator(); + while (iter.hasNext()) { + Connector connector = iter.next(); + if (connector.name().equals("com.sun.jdi.CommandLineLaunch")) { + return (LaunchingConnector)connector; + } + } + throw new Error("No launching connector"); + } + /** + * Return the launching connector's arguments.
+ */ + static Map connectorArguments(LaunchingConnector connector, String mainArgs) { + Map arguments = connector.defaultArguments(); + for (String key : arguments.keySet()) { + System.out.println(key); + } + + Connector.Argument mainArg = (Connector.Argument)arguments.get("main"); + if (mainArg == null) { + throw new Error("Bad launching connector"); + } + mainArg.setValue(mainArgs); + + Connector.Argument optionsArg = (Connector.Argument)arguments.get("options"); + if (optionsArg == null) { + throw new Error("Bad launching connector"); + } + optionsArg.setValue(ARGUMENTS); + return arguments; + } + + static VirtualMachine launchTarget(String mainArgs) { + LaunchingConnector connector = findLaunchingConnector(); + Map arguments = connectorArguments(connector, mainArgs); + try { + return (VirtualMachine) connector.launch(arguments); + } catch (IOException exc) { + throw new Error("Unable to launch target VM: " + exc); + } catch (IllegalConnectorArgumentsException exc) { + throw new Error("Internal error: " + exc); + } catch (VMStartException exc) { + throw new Error("Target VM failed to initialize: " + + exc.getMessage()); + } +} + + + private static void addClassWatch(VirtualMachine vm) { + EventRequestManager erm = vm.eventRequestManager(); + ClassPrepareRequest classPrepareRequest = erm + .createClassPrepareRequest(); + classPrepareRequest.addClassFilter(CLASS_NAME); + classPrepareRequest.setEnabled(true); + } + + + private static void addFieldWatch(VirtualMachine vm, + ReferenceType refType) { + EventRequestManager erm = vm.eventRequestManager(); + Field field = refType.fieldByName(FIELD_NAME); + ModificationWatchpointRequest modificationWatchpointRequest = erm + .createModificationWatchpointRequest(field); + modificationWatchpointRequest.setSuspendPolicy(EventRequest.SUSPEND_EVENT_THREAD); + modificationWatchpointRequest.setEnabled(true); + } +} + +class StreamRedirectThread extends Thread { + + private final BufferedReader in; + + private static final int BUFFER_SIZE = 2048; + + /** + * Set up for copy. + * @param name Name of the thread + * @param in Stream to copy from + * @param out Stream to copy to + */ + StreamRedirectThread(String name, InputStream in) { + super(name); + this.in = new BufferedReader(new InputStreamReader(in)); + } + + /** + * Copy. + */ + public void run() { + try { + String line; + while ((line = in.readLine ()) != null) { + System.out.println ("testvm: " + line); + } + System.out.flush(); + } catch(IOException exc) { + System.err.println("Child I/O Transfer - " + exc); + } + } +} diff --git a/hotspot/test/runtime/7158988/TestFieldMonitor.sh b/hotspot/test/runtime/7158988/TestFieldMonitor.sh new file mode 100644 index 00000000000..8715bd546d8 --- /dev/null +++ b/hotspot/test/runtime/7158988/TestFieldMonitor.sh @@ -0,0 +1,94 @@ +#!/bin/sh + +if [ "${TESTSRC}" = "" ] +then TESTSRC=. +fi + +if [ "${TESTJAVA}" = "" ] +then + PARENT=`dirname \`which java\`` + TESTJAVA=`dirname ${PARENT}` + echo "TESTJAVA not set, selecting " ${TESTJAVA} + echo "If this is incorrect, try setting the variable manually." +fi + +if [ "${TESTCLASSES}" = "" ] +then + echo "TESTCLASSES not set. Test cannot execute. Failed." 
+ exit 1 +fi + +BIT_FLAG="" + +# set platform-dependent variables +OS=`uname -s` +case "$OS" in + SunOS | Linux ) + NULL=/dev/null + PS=":" + FS="/" + ## for solaris, linux it's HOME + FILE_LOCATION=$HOME + if [ -f ${FILE_LOCATION}${FS}JDK64BIT -a ${OS} = "SunOS" -a `uname -p`='sparc' ] + then + BIT_FLAG="-d64" + fi + ;; + Windows_95 | Windows_98 | Windows_ME ) + NULL=NUL + PS=";" + FS="\\" + echo "Test skipped, only for WinNT" + exit 0 + ;; + Windows_NT ) + NULL=NUL + PS=";" + FS="\\" + ;; + * ) + echo "Unrecognized system!" + exit 1; + ;; +esac + +#CLASSPATH=.${PS}${TESTCLASSES} ; export CLASSPATH + +cp ${TESTSRC}${FS}*.java . + +${TESTJAVA}${FS}bin${FS}java ${BIT_FLAG} -fullversion + +${TESTJAVA}${FS}bin${FS}javac -classpath .${PS}$TESTJAVA${FS}lib${FS}tools.jar *.java + +${TESTJAVA}${FS}bin${FS}java ${BIT_FLAG} -classpath .${PS}$TESTJAVA${FS}lib${FS}tools.jar FieldMonitor > test.out 2>&1 & + +P_PID=$! + +sleep 60 +STATUS=0 + +case "$OS" in + SunOS | Linux ) + ps -ef | grep $P_PID | grep -v grep > ${NULL} + if [ $? = 0 ]; then + kill -9 $P_PID + STATUS=1 + fi + ;; + * ) + ps | grep -i "FieldMonitor" | grep -v grep > ${NULL} + if [ $? = 0 ]; then + C_PID=`ps | grep -i "FieldMonitor" | awk '{print $1}'` + kill -s 9 $C_PID + STATUS=1 + fi + ;; +esac + +grep "A fatal error has been detected" test.out > ${NULL} +if [ $? = 0 ]; then + cat test.out + STATUS=1 +fi + +exit $STATUS diff --git a/hotspot/test/runtime/7158988/TestPostFieldModification.java b/hotspot/test/runtime/7158988/TestPostFieldModification.java new file mode 100644 index 00000000000..584d39d20ca --- /dev/null +++ b/hotspot/test/runtime/7158988/TestPostFieldModification.java @@ -0,0 +1,249 @@ +/* + * Copyright 2012 SAP AG. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/* + * @test FieldMonitor.java + * @bug 7158988 + * @summary verify jvm does not crash while debugging + * @run shell TestFieldMonitor.sh + * @author axel.siebenborn@sap.com + */ +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.io.OutputStream; +import java.io.OutputStreamWriter; +import java.io.Reader; +import java.io.Writer; +import java.util.Iterator; +import java.util.List; +import java.util.Map; + +import com.sun.jdi.Bootstrap; +import com.sun.jdi.Field; +import com.sun.jdi.ReferenceType; +import com.sun.jdi.VirtualMachine; +import com.sun.jdi.connect.Connector; +import com.sun.jdi.connect.IllegalConnectorArgumentsException; +import com.sun.jdi.connect.LaunchingConnector; +import com.sun.jdi.connect.VMStartException; +import com.sun.jdi.event.ClassPrepareEvent; +import com.sun.jdi.event.Event; +import com.sun.jdi.event.EventQueue; +import com.sun.jdi.event.EventSet; +import com.sun.jdi.event.ModificationWatchpointEvent; +import com.sun.jdi.event.VMDeathEvent; +import com.sun.jdi.event.VMDisconnectEvent; +import com.sun.jdi.request.ClassPrepareRequest; +import com.sun.jdi.request.EventRequest; +import com.sun.jdi.request.EventRequestManager; +import com.sun.jdi.request.ModificationWatchpointRequest; + +public class FieldMonitor { + + public static final String CLASS_NAME = "TestPostFieldModification"; + public static final String FIELD_NAME = "value"; + public static final String ARGUMENTS = "-Xshare:off -XX:+PrintGC"; + + public static void main(String[] args) + throws IOException, InterruptedException { + + StringBuffer sb = new StringBuffer(); + + for (int i=0; i < args.length; i++) { + sb.append(' '); + sb.append(args[i]); + } + //VirtualMachine vm = launchTarget(sb.toString()); + VirtualMachine vm = launchTarget(CLASS_NAME); + + System.out.println("Vm launched"); + // set watch field on already loaded classes + List referenceTypes = vm + .classesByName(CLASS_NAME); + for (ReferenceType refType : referenceTypes) { + addFieldWatch(vm, refType); + } + // watch for loaded classes + addClassWatch(vm); + + // process events + EventQueue eventQueue = vm.eventQueue(); + // resume the vm + + Process process = vm.process(); + + + // Copy target's output and error to our output and error. 
+ Thread outThread = new StreamRedirectThread("out reader", process.getInputStream()); + Thread errThread = new StreamRedirectThread("error reader", process.getErrorStream()); + + errThread.start(); + outThread.start(); + + + vm.resume(); + boolean connected = true; + while (connected) { + EventSet eventSet = eventQueue.remove(); + for (Event event : eventSet) { + if (event instanceof VMDeathEvent + || event instanceof VMDisconnectEvent) { + // exit + connected = false; + } else if (event instanceof ClassPrepareEvent) { + // watch field on loaded class + System.out.println("ClassPrepareEvent"); + ClassPrepareEvent classPrepEvent = (ClassPrepareEvent) event; + ReferenceType refType = classPrepEvent + .referenceType(); + addFieldWatch(vm, refType); + } else if (event instanceof ModificationWatchpointEvent) { + System.out.println("sleep for 500 ms"); + Thread.sleep(500); + System.out.println("resume..."); + + ModificationWatchpointEvent modEvent = (ModificationWatchpointEvent) event; + System.out.println("old=" + + modEvent.valueCurrent()); + System.out.println("new=" + modEvent.valueToBe()); + System.out.println(); + } + } + eventSet.resume(); + } + // Shutdown begins when event thread terminates + try { + errThread.join(); // Make sure output is forwarded + outThread.join(); + } catch (InterruptedException exc) { + // we don't interrupt + } + } + + /** + * Find a com.sun.jdi.CommandLineLaunch connector + */ + static LaunchingConnector findLaunchingConnector() { + List connectors = Bootstrap.virtualMachineManager().allConnectors(); + Iterator iter = connectors.iterator(); + while (iter.hasNext()) { + Connector connector = iter.next(); + if (connector.name().equals("com.sun.jdi.CommandLineLaunch")) { + return (LaunchingConnector)connector; + } + } + throw new Error("No launching connector"); + } + /** + * Return the launching connector's arguments. 
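+   * Only the "main" and "options" arguments are overridden below; all
+   * other connector defaults are left untouched.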
+ */ + static Map connectorArguments(LaunchingConnector connector, String mainArgs) { + Map arguments = connector.defaultArguments(); + for (String key : arguments.keySet()) { + System.out.println(key); + } + + Connector.Argument mainArg = (Connector.Argument)arguments.get("main"); + if (mainArg == null) { + throw new Error("Bad launching connector"); + } + mainArg.setValue(mainArgs); + + Connector.Argument optionsArg = (Connector.Argument)arguments.get("options"); + if (optionsArg == null) { + throw new Error("Bad launching connector"); + } + optionsArg.setValue(ARGUMENTS); + return arguments; + } + + static VirtualMachine launchTarget(String mainArgs) { + LaunchingConnector connector = findLaunchingConnector(); + Map arguments = connectorArguments(connector, mainArgs); + try { + return (VirtualMachine) connector.launch(arguments); + } catch (IOException exc) { + throw new Error("Unable to launch target VM: " + exc); + } catch (IllegalConnectorArgumentsException exc) { + throw new Error("Internal error: " + exc); + } catch (VMStartException exc) { + throw new Error("Target VM failed to initialize: " + + exc.getMessage()); + } +} + + + private static void addClassWatch(VirtualMachine vm) { + EventRequestManager erm = vm.eventRequestManager(); + ClassPrepareRequest classPrepareRequest = erm + .createClassPrepareRequest(); + classPrepareRequest.addClassFilter(CLASS_NAME); + classPrepareRequest.setEnabled(true); + } + + + private static void addFieldWatch(VirtualMachine vm, + ReferenceType refType) { + EventRequestManager erm = vm.eventRequestManager(); + Field field = refType.fieldByName(FIELD_NAME); + ModificationWatchpointRequest modificationWatchpointRequest = erm + .createModificationWatchpointRequest(field); + modificationWatchpointRequest.setSuspendPolicy(EventRequest.SUSPEND_EVENT_THREAD); + modificationWatchpointRequest.setEnabled(true); + } +} + +class StreamRedirectThread extends Thread { + + private final BufferedReader in; + + private static final int BUFFER_SIZE = 2048; + + /** + * Set up for copy. + * @param name Name of the thread + * @param in Stream to copy from + * @param out Stream to copy to + */ + StreamRedirectThread(String name, InputStream in) { + super(name); + this.in = new BufferedReader(new InputStreamReader(in)); + } + + /** + * Copy. 
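+   * Each line read from the child VM is echoed with a "testvm: " prefix
+   * so that its output can be told apart from the debugger's own.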
+ */ + public void run() { + try { + String line; + while ((line = in.readLine ()) != null) { + System.out.println ("testvm: " + line); + } + System.out.flush(); + } catch(IOException exc) { + System.err.println("Child I/O Transfer - " + exc); + } + } +} From 83806487dceaae2ece97579b224e3001bd90ff84 Mon Sep 17 00:00:00 2001 From: Staffan Larsen Date: Thu, 5 Apr 2012 14:16:23 +0200 Subject: [PATCH 03/15] 7133111: libsaproc debug print should be printed as unsigned long to fit large numbers on 64bit platform Reviewed-by: dcubed, mgronlun, dsamersoff --- hotspot/agent/src/os/linux/ps_core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hotspot/agent/src/os/linux/ps_core.c b/hotspot/agent/src/os/linux/ps_core.c index 9739e161471..6c229da7b79 100644 --- a/hotspot/agent/src/os/linux/ps_core.c +++ b/hotspot/agent/src/os/linux/ps_core.c @@ -440,7 +440,7 @@ static bool sort_map_array(struct ps_prochandle* ph) { int j = 0; print_debug("---- sorted virtual address map ----\n"); for (j = 0; j < ph->core->num_maps; j++) { - print_debug("base = 0x%lx\tsize = %d\n", ph->core->map_array[j]->vaddr, + print_debug("base = 0x%lx\tsize = %zd\n", ph->core->map_array[j]->vaddr, ph->core->map_array[j]->memsz); } } From 2ea955a6420d910f8fc5110c42fdc7ed90616d47 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rickard=20B=C3=A4ckman?= Date: Thu, 12 Apr 2012 13:24:08 +0200 Subject: [PATCH 04/15] 7160924: jvmti: GetPhase returns incorrect phase before VMInit event is issued Reviewed-by: acorn, dcubed --- hotspot/src/share/vm/runtime/thread.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/hotspot/src/share/vm/runtime/thread.cpp b/hotspot/src/share/vm/runtime/thread.cpp index 767668289ce..76865f6ff2c 100644 --- a/hotspot/src/share/vm/runtime/thread.cpp +++ b/hotspot/src/share/vm/runtime/thread.cpp @@ -3468,13 +3468,13 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) { create_vm_init_libraries(); } + // Notify JVMTI agents that VM initialization is complete - nop if no agents. + JvmtiExport::post_vm_initialized(); + if (!TRACE_START()) { vm_exit_during_initialization(Handle(THREAD, PENDING_EXCEPTION)); } - // Notify JVMTI agents that VM initialization is complete - nop if no agents. - JvmtiExport::post_vm_initialized(); - if (CleanChunkPoolAsync) { Chunk::start_chunk_pool_cleaner_task(); } From 7359b908135829a7225c8004dae477b8ea345a41 Mon Sep 17 00:00:00 2001 From: Coleen Phillimore Date: Thu, 12 Apr 2012 22:03:05 -0400 Subject: [PATCH 05/15] 7160467: Fix test for 7158988 Ended up checking in FieldMonitor.java as TestPostFieldModification.java Reviewed-by: kamg --- .../7158988/TestPostFieldModification.java | 243 ++---------------- 1 file changed, 26 insertions(+), 217 deletions(-) diff --git a/hotspot/test/runtime/7158988/TestPostFieldModification.java b/hotspot/test/runtime/7158988/TestPostFieldModification.java index 584d39d20ca..d730003b267 100644 --- a/hotspot/test/runtime/7158988/TestPostFieldModification.java +++ b/hotspot/test/runtime/7158988/TestPostFieldModification.java @@ -21,229 +21,38 @@ * questions. 
*/ -/* - * @test FieldMonitor.java - * @bug 7158988 - * @summary verify jvm does not crash while debugging - * @run shell TestFieldMonitor.sh - * @author axel.siebenborn@sap.com - */ -import java.io.BufferedReader; -import java.io.IOException; -import java.io.InputStream; -import java.io.InputStreamReader; -import java.io.OutputStream; -import java.io.OutputStreamWriter; -import java.io.Reader; -import java.io.Writer; -import java.util.Iterator; -import java.util.List; -import java.util.Map; +public class TestPostFieldModification { -import com.sun.jdi.Bootstrap; -import com.sun.jdi.Field; -import com.sun.jdi.ReferenceType; -import com.sun.jdi.VirtualMachine; -import com.sun.jdi.connect.Connector; -import com.sun.jdi.connect.IllegalConnectorArgumentsException; -import com.sun.jdi.connect.LaunchingConnector; -import com.sun.jdi.connect.VMStartException; -import com.sun.jdi.event.ClassPrepareEvent; -import com.sun.jdi.event.Event; -import com.sun.jdi.event.EventQueue; -import com.sun.jdi.event.EventSet; -import com.sun.jdi.event.ModificationWatchpointEvent; -import com.sun.jdi.event.VMDeathEvent; -import com.sun.jdi.event.VMDisconnectEvent; -import com.sun.jdi.request.ClassPrepareRequest; -import com.sun.jdi.request.EventRequest; -import com.sun.jdi.request.EventRequestManager; -import com.sun.jdi.request.ModificationWatchpointRequest; + public String value; // watch modification of value -public class FieldMonitor { + public static void main(String[] args){ - public static final String CLASS_NAME = "TestPostFieldModification"; - public static final String FIELD_NAME = "value"; - public static final String ARGUMENTS = "-Xshare:off -XX:+PrintGC"; - - public static void main(String[] args) - throws IOException, InterruptedException { - - StringBuffer sb = new StringBuffer(); - - for (int i=0; i < args.length; i++) { - sb.append(' '); - sb.append(args[i]); - } - //VirtualMachine vm = launchTarget(sb.toString()); - VirtualMachine vm = launchTarget(CLASS_NAME); - - System.out.println("Vm launched"); - // set watch field on already loaded classes - List referenceTypes = vm - .classesByName(CLASS_NAME); - for (ReferenceType refType : referenceTypes) { - addFieldWatch(vm, refType); - } - // watch for loaded classes - addClassWatch(vm); - - // process events - EventQueue eventQueue = vm.eventQueue(); - // resume the vm - - Process process = vm.process(); - - - // Copy target's output and error to our output and error. 
- Thread outThread = new StreamRedirectThread("out reader", process.getInputStream()); - Thread errThread = new StreamRedirectThread("error reader", process.getErrorStream()); - - errThread.start(); - outThread.start(); - - - vm.resume(); - boolean connected = true; - while (connected) { - EventSet eventSet = eventQueue.remove(); - for (Event event : eventSet) { - if (event instanceof VMDeathEvent - || event instanceof VMDisconnectEvent) { - // exit - connected = false; - } else if (event instanceof ClassPrepareEvent) { - // watch field on loaded class - System.out.println("ClassPrepareEvent"); - ClassPrepareEvent classPrepEvent = (ClassPrepareEvent) event; - ReferenceType refType = classPrepEvent - .referenceType(); - addFieldWatch(vm, refType); - } else if (event instanceof ModificationWatchpointEvent) { - System.out.println("sleep for 500 ms"); - Thread.sleep(500); - System.out.println("resume..."); - - ModificationWatchpointEvent modEvent = (ModificationWatchpointEvent) event; - System.out.println("old=" - + modEvent.valueCurrent()); - System.out.println("new=" + modEvent.valueToBe()); - System.out.println(); + System.out.println("Start threads"); + // this thread modifies the field 'value' + new Thread() { + TestPostFieldModification test = new TestPostFieldModification(); + public void run() { + test.value="test"; + for(int i = 0; i < 10; i++) { + test.value += new String("_test"); } } - eventSet.resume(); - } - // Shutdown begins when event thread terminates - try { - errThread.join(); // Make sure output is forwarded - outThread.join(); - } catch (InterruptedException exc) { - // we don't interrupt - } - } + }.start(); - /** - * Find a com.sun.jdi.CommandLineLaunch connector - */ - static LaunchingConnector findLaunchingConnector() { - List connectors = Bootstrap.virtualMachineManager().allConnectors(); - Iterator iter = connectors.iterator(); - while (iter.hasNext()) { - Connector connector = iter.next(); - if (connector.name().equals("com.sun.jdi.CommandLineLaunch")) { - return (LaunchingConnector)connector; + // this thread is used to trigger a gc + Thread d = new Thread() { + public void run() { + while(true) { + try { + Thread.sleep(100); + } catch (InterruptedException e) { + + } + System.gc(); + } } - } - throw new Error("No launching connector"); - } - /** - * Return the launching connector's arguments. 
- */ - static Map connectorArguments(LaunchingConnector connector, String mainArgs) { - Map arguments = connector.defaultArguments(); - for (String key : arguments.keySet()) { - System.out.println(key); - } - - Connector.Argument mainArg = (Connector.Argument)arguments.get("main"); - if (mainArg == null) { - throw new Error("Bad launching connector"); - } - mainArg.setValue(mainArgs); - - Connector.Argument optionsArg = (Connector.Argument)arguments.get("options"); - if (optionsArg == null) { - throw new Error("Bad launching connector"); - } - optionsArg.setValue(ARGUMENTS); - return arguments; - } - - static VirtualMachine launchTarget(String mainArgs) { - LaunchingConnector connector = findLaunchingConnector(); - Map arguments = connectorArguments(connector, mainArgs); - try { - return (VirtualMachine) connector.launch(arguments); - } catch (IOException exc) { - throw new Error("Unable to launch target VM: " + exc); - } catch (IllegalConnectorArgumentsException exc) { - throw new Error("Internal error: " + exc); - } catch (VMStartException exc) { - throw new Error("Target VM failed to initialize: " + - exc.getMessage()); - } -} - - - private static void addClassWatch(VirtualMachine vm) { - EventRequestManager erm = vm.eventRequestManager(); - ClassPrepareRequest classPrepareRequest = erm - .createClassPrepareRequest(); - classPrepareRequest.addClassFilter(CLASS_NAME); - classPrepareRequest.setEnabled(true); - } - - - private static void addFieldWatch(VirtualMachine vm, - ReferenceType refType) { - EventRequestManager erm = vm.eventRequestManager(); - Field field = refType.fieldByName(FIELD_NAME); - ModificationWatchpointRequest modificationWatchpointRequest = erm - .createModificationWatchpointRequest(field); - modificationWatchpointRequest.setSuspendPolicy(EventRequest.SUSPEND_EVENT_THREAD); - modificationWatchpointRequest.setEnabled(true); - } -} - -class StreamRedirectThread extends Thread { - - private final BufferedReader in; - - private static final int BUFFER_SIZE = 2048; - - /** - * Set up for copy. - * @param name Name of the thread - * @param in Stream to copy from - * @param out Stream to copy to - */ - StreamRedirectThread(String name, InputStream in) { - super(name); - this.in = new BufferedReader(new InputStreamReader(in)); - } - - /** - * Copy. 
- */ - public void run() { - try { - String line; - while ((line = in.readLine ()) != null) { - System.out.println ("testvm: " + line); - } - System.out.flush(); - } catch(IOException exc) { - System.err.println("Child I/O Transfer - " + exc); - } + }; + d.setDaemon(true); + d.start(); } } From cf406f37e35049316a6b761f4856f6300d3e1da6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rickard=20B=C3=A4ckman?= Date: Tue, 6 Mar 2012 12:36:59 +0100 Subject: [PATCH 06/15] 7160570: Intrinsification support for tracing framework Reviewed-by: sla, never --- hotspot/src/os/bsd/vm/osThread_bsd.hpp | 3 + hotspot/src/os/linux/vm/osThread_linux.hpp | 4 +- .../src/os/solaris/vm/osThread_solaris.hpp | 1 + .../src/os/windows/vm/osThread_windows.hpp | 4 +- hotspot/src/share/vm/c1/c1_GraphBuilder.cpp | 15 +++- hotspot/src/share/vm/c1/c1_LIRGenerator.cpp | 74 +++++++++++++---- hotspot/src/share/vm/c1/c1_LIRGenerator.hpp | 8 +- hotspot/src/share/vm/c1/c1_Runtime1.cpp | 3 + hotspot/src/share/vm/classfile/vmSymbols.hpp | 10 ++- hotspot/src/share/vm/oops/instanceKlass.hpp | 1 + hotspot/src/share/vm/opto/library_call.cpp | 79 ++++++++++++++++--- hotspot/src/share/vm/opto/runtime.cpp | 6 +- hotspot/src/share/vm/opto/runtime.hpp | 4 +- hotspot/src/share/vm/runtime/osThread.hpp | 3 +- hotspot/src/share/vm/trace/traceMacros.hpp | 6 +- 15 files changed, 182 insertions(+), 39 deletions(-) diff --git a/hotspot/src/os/bsd/vm/osThread_bsd.hpp b/hotspot/src/os/bsd/vm/osThread_bsd.hpp index 0e60cc3eefd..914a0439c02 100644 --- a/hotspot/src/os/bsd/vm/osThread_bsd.hpp +++ b/hotspot/src/os/bsd/vm/osThread_bsd.hpp @@ -72,15 +72,18 @@ #ifdef _ALLBSD_SOURCE #ifdef __APPLE__ + static size_t thread_id_size() { return sizeof(thread_t); } thread_t thread_id() const { return _thread_id; } #else + static size_t thread_id_size() { return sizeof(pthread_t); } pthread_t thread_id() const { return _thread_id; } #endif #else + static size_t thread_id_size() { return sizeof(pid_t); } pid_t thread_id() const { return _thread_id; } diff --git a/hotspot/src/os/linux/vm/osThread_linux.hpp b/hotspot/src/os/linux/vm/osThread_linux.hpp index 22945135b78..fe9fe6188fa 100644 --- a/hotspot/src/os/linux/vm/osThread_linux.hpp +++ b/hotspot/src/os/linux/vm/osThread_linux.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -56,6 +56,8 @@ sigset_t caller_sigmask() const { return _caller_sigmask; } void set_caller_sigmask(sigset_t sigmask) { _caller_sigmask = sigmask; } + static size_t thread_id_size() { return sizeof(pid_t); } + pid_t thread_id() const { return _thread_id; } diff --git a/hotspot/src/os/solaris/vm/osThread_solaris.hpp b/hotspot/src/os/solaris/vm/osThread_solaris.hpp index 7fe1417e342..6e79e1855db 100644 --- a/hotspot/src/os/solaris/vm/osThread_solaris.hpp +++ b/hotspot/src/os/solaris/vm/osThread_solaris.hpp @@ -36,6 +36,7 @@ bool _vm_created_thread; // true if the VM created this thread, // false if primary thread or attached thread public: + static size_t thread_id_size() { return sizeof(thread_t); } thread_t thread_id() const { return _thread_id; } uint lwp_id() const { return _lwp_id; } int native_priority() const { return _native_priority; } diff --git a/hotspot/src/os/windows/vm/osThread_windows.hpp b/hotspot/src/os/windows/vm/osThread_windows.hpp index 1df8925c7a7..28cd45c5c2f 100644 --- a/hotspot/src/os/windows/vm/osThread_windows.hpp +++ b/hotspot/src/os/windows/vm/osThread_windows.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -42,6 +42,8 @@ typedef void* HANDLE; HANDLE interrupt_event() const { return _interrupt_event; } void set_interrupt_event(HANDLE interrupt_event) { _interrupt_event = interrupt_event; } + + static size_t thread_id_size() { return sizeof(unsigned long); } unsigned long thread_id() const { return _thread_id; } #ifndef PRODUCT // Used for debugging, return a unique integer for each thread. diff --git a/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp b/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp index 34fdb591c2e..c11a2a2c177 100644 --- a/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp +++ b/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp @@ -3132,10 +3132,23 @@ bool GraphBuilder::try_inline_intrinsics(ciMethod* callee) { bool cantrap = true; vmIntrinsics::ID id = callee->intrinsic_id(); switch (id) { - case vmIntrinsics::_arraycopy : + case vmIntrinsics::_arraycopy: if (!InlineArrayCopy) return false; break; +#ifdef TRACE_HAVE_INTRINSICS + case vmIntrinsics::_classID: + case vmIntrinsics::_threadID: + preserves_state = true; + cantrap = true; + break; + + case vmIntrinsics::_counterTime: + preserves_state = true; + cantrap = false; + break; +#endif + case vmIntrinsics::_currentTimeMillis: case vmIntrinsics::_nanoTime: preserves_state = true; diff --git a/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp b/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp index 3c7f305222a..6ed6edf47c3 100644 --- a/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp +++ b/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -2879,6 +2879,50 @@ void LIRGenerator::do_IfOp(IfOp* x) { __ cmove(lir_cond(x->cond()), t_val.result(), f_val.result(), reg, as_BasicType(x->x()->type())); } +void LIRGenerator::do_RuntimeCall(address routine, int expected_arguments, Intrinsic* x) { + assert(x->number_of_arguments() == expected_arguments, "wrong type"); + LIR_Opr reg = result_register_for(x->type()); + __ call_runtime_leaf(routine, getThreadTemp(), + reg, new LIR_OprList()); + LIR_Opr result = rlock_result(x); + __ move(reg, result); +} + +#ifdef TRACE_HAVE_INTRINSICS +void LIRGenerator::do_ThreadIDIntrinsic(Intrinsic* x) { + LIR_Opr thread = getThreadPointer(); + LIR_Opr osthread = new_pointer_register(); + __ move(new LIR_Address(thread, in_bytes(JavaThread::osthread_offset()), osthread->type()), osthread); + size_t thread_id_size = OSThread::thread_id_size(); + if (thread_id_size == (size_t) BytesPerLong) { + LIR_Opr id = new_register(T_LONG); + __ move(new LIR_Address(osthread, in_bytes(OSThread::thread_id_offset()), T_LONG), id); + __ convert(Bytecodes::_l2i, id, rlock_result(x)); + } else if (thread_id_size == (size_t) BytesPerInt) { + __ move(new LIR_Address(osthread, in_bytes(OSThread::thread_id_offset()), T_INT), rlock_result(x)); + } else { + ShouldNotReachHere(); + } +} + +void LIRGenerator::do_ClassIDIntrinsic(Intrinsic* x) { + CodeEmitInfo* info = state_for(x); + CodeEmitInfo* info2 = new CodeEmitInfo(info); // Clone for the second null check + assert(info != NULL, "must have info"); + LIRItem arg(x->argument_at(1), this); + arg.load_item(); + LIR_Opr klass = new_register(T_OBJECT); + __ move(new LIR_Address(arg.result(), java_lang_Class::klass_offset_in_bytes(), T_OBJECT), klass, info); + LIR_Opr id = new_register(T_LONG); + ByteSize offset = TRACE_ID_OFFSET; + LIR_Address* trace_id_addr = new LIR_Address(klass, in_bytes(offset), T_LONG); + __ move(trace_id_addr, id); + __ logical_or(id, LIR_OprFact::longConst(0x01l), id); + __ store(id, trace_id_addr); + __ logical_and(id, LIR_OprFact::longConst(~0x3l), id); + __ move(id, rlock_result(x)); +} +#endif void LIRGenerator::do_Intrinsic(Intrinsic* x) { switch (x->id()) { @@ -2890,25 +2934,21 @@ void LIRGenerator::do_Intrinsic(Intrinsic* x) { break; } - case vmIntrinsics::_currentTimeMillis: { - assert(x->number_of_arguments() == 0, "wrong type"); - LIR_Opr reg = result_register_for(x->type()); - __ call_runtime_leaf(CAST_FROM_FN_PTR(address, os::javaTimeMillis), getThreadTemp(), - reg, new LIR_OprList()); - LIR_Opr result = rlock_result(x); - __ move(reg, result); +#ifdef TRACE_HAVE_INTRINSICS + case vmIntrinsics::_threadID: do_ThreadIDIntrinsic(x); break; + case vmIntrinsics::_classID: do_ClassIDIntrinsic(x); break; + case vmIntrinsics::_counterTime: + do_RuntimeCall(CAST_FROM_FN_PTR(address, TRACE_TIME_METHOD), 0, x); break; - } +#endif - case vmIntrinsics::_nanoTime: { - assert(x->number_of_arguments() == 0, "wrong type"); - LIR_Opr reg = result_register_for(x->type()); - __ call_runtime_leaf(CAST_FROM_FN_PTR(address, os::javaTimeNanos), getThreadTemp(), - reg, new LIR_OprList()); - LIR_Opr result = rlock_result(x); - __ move(reg, result); + case vmIntrinsics::_currentTimeMillis: + do_RuntimeCall(CAST_FROM_FN_PTR(address, os::javaTimeMillis), 0, x); + break; + + case vmIntrinsics::_nanoTime: + do_RuntimeCall(CAST_FROM_FN_PTR(address, os::javaTimeNanos), 0, x); break; - } case vmIntrinsics::_Object_init: do_RegisterFinalizer(x); break; case vmIntrinsics::_getClass: do_getClass(x); 
break; diff --git a/hotspot/src/share/vm/c1/c1_LIRGenerator.hpp b/hotspot/src/share/vm/c1/c1_LIRGenerator.hpp index 56b28e4eb8e..67127df04d8 100644 --- a/hotspot/src/share/vm/c1/c1_LIRGenerator.hpp +++ b/hotspot/src/share/vm/c1/c1_LIRGenerator.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -426,6 +426,12 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure { SwitchRangeArray* create_lookup_ranges(LookupSwitch* x); void do_SwitchRanges(SwitchRangeArray* x, LIR_Opr value, BlockBegin* default_sux); + void do_RuntimeCall(address routine, int expected_arguments, Intrinsic* x); +#ifdef TRACE_HAVE_INTRINSICS + void do_ThreadIDIntrinsic(Intrinsic* x); + void do_ClassIDIntrinsic(Intrinsic* x); +#endif + public: Compilation* compilation() const { return _compilation; } FrameMap* frame_map() const { return _compilation->frame_map(); } diff --git a/hotspot/src/share/vm/c1/c1_Runtime1.cpp b/hotspot/src/share/vm/c1/c1_Runtime1.cpp index 765dec48098..47703492d1e 100644 --- a/hotspot/src/share/vm/c1/c1_Runtime1.cpp +++ b/hotspot/src/share/vm/c1/c1_Runtime1.cpp @@ -295,6 +295,9 @@ const char* Runtime1::name_for_address(address entry) { FUNCTION_CASE(entry, SharedRuntime::dtrace_method_entry); FUNCTION_CASE(entry, SharedRuntime::dtrace_method_exit); FUNCTION_CASE(entry, trace_block_entry); +#ifdef TRACE_HAVE_INTRINSICS + FUNCTION_CASE(entry, TRACE_TIME_METHOD); +#endif #undef FUNCTION_CASE diff --git a/hotspot/src/share/vm/classfile/vmSymbols.hpp b/hotspot/src/share/vm/classfile/vmSymbols.hpp index 92c06342dad..9cee5ac0490 100644 --- a/hotspot/src/share/vm/classfile/vmSymbols.hpp +++ b/hotspot/src/share/vm/classfile/vmSymbols.hpp @@ -27,6 +27,7 @@ #include "oops/symbol.hpp" #include "memory/iterator.hpp" +#include "trace/traceMacros.hpp" // The class vmSymbols is a name space for fast lookup of // symbols commonly used in the VM. @@ -424,6 +425,7 @@ template(throwable_throwable_signature, "(Ljava/lang/Throwable;)Ljava/lang/Throwable;") \ template(class_void_signature, "(Ljava/lang/Class;)V") \ template(class_int_signature, "(Ljava/lang/Class;)I") \ + template(class_long_signature, "(Ljava/lang/Class;)J") \ template(class_boolean_signature, "(Ljava/lang/Class;)Z") \ template(throwable_string_void_signature, "(Ljava/lang/Throwable;Ljava/lang/String;)V") \ template(string_array_void_signature, "([Ljava/lang/String;)V") \ @@ -539,10 +541,12 @@ template(serializePropertiesToByteArray_signature, "()[B") \ template(serializeAgentPropertiesToByteArray_name, "serializeAgentPropertiesToByteArray") \ template(classRedefinedCount_name, "classRedefinedCount") \ + \ + /* trace signatures */ \ + TRACE_TEMPLATES(template) \ + \ /*end*/ - - // Here are all the intrinsics known to the runtime and the CI. 
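// (The TRACE_TEMPLATES hook above and the TRACE_INTRINSICS hook further
// down are defined as no-ops in traceMacros.hpp when no tracing backend
// is present; a tracing-enabled build is expected to use them to inject
// the _classID, _threadID and _counterTime entries handled elsewhere in
// this patch.)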
// Each intrinsic consists of a public enum name (like _hashCode), // followed by a specification of its klass, name, and signature: @@ -648,6 +652,8 @@ do_intrinsic(_nanoTime, java_lang_System, nanoTime_name, void_long_signature, F_S) \ do_name( nanoTime_name, "nanoTime") \ \ + TRACE_INTRINSICS(do_intrinsic, do_class, do_name, do_signature, do_alias) \ + \ do_intrinsic(_arraycopy, java_lang_System, arraycopy_name, arraycopy_signature, F_S) \ do_name( arraycopy_name, "arraycopy") \ do_signature(arraycopy_signature, "(Ljava/lang/Object;ILjava/lang/Object;II)V") \ diff --git a/hotspot/src/share/vm/oops/instanceKlass.hpp b/hotspot/src/share/vm/oops/instanceKlass.hpp index 32a00f5c6b0..31266375115 100644 --- a/hotspot/src/share/vm/oops/instanceKlass.hpp +++ b/hotspot/src/share/vm/oops/instanceKlass.hpp @@ -642,6 +642,7 @@ class instanceKlass: public Klass { // support for stub routines static ByteSize init_state_offset() { return in_ByteSize(sizeof(klassOopDesc) + offset_of(instanceKlass, _init_state)); } + TRACE_DEFINE_OFFSET; static ByteSize init_thread_offset() { return in_ByteSize(sizeof(klassOopDesc) + offset_of(instanceKlass, _init_thread)); } // subclass/subinterface checks diff --git a/hotspot/src/share/vm/opto/library_call.cpp b/hotspot/src/share/vm/opto/library_call.cpp index 1a38fb6131b..13c3a4327ce 100644 --- a/hotspot/src/share/vm/opto/library_call.cpp +++ b/hotspot/src/share/vm/opto/library_call.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -175,7 +175,11 @@ class LibraryCallKit : public GraphKit { bool inline_unsafe_allocate(); bool inline_unsafe_copyMemory(); bool inline_native_currentThread(); - bool inline_native_time_funcs(bool isNano); +#ifdef TRACE_HAVE_INTRINSICS + bool inline_native_classID(); + bool inline_native_threadID(); +#endif + bool inline_native_time_funcs(address method, const char* funcName); bool inline_native_isInterrupted(); bool inline_native_Class_query(vmIntrinsics::ID id); bool inline_native_subtype_check(); @@ -638,10 +642,18 @@ bool LibraryCallKit::try_to_inline() { case vmIntrinsics::_isInterrupted: return inline_native_isInterrupted(); +#ifdef TRACE_HAVE_INTRINSICS + case vmIntrinsics::_classID: + return inline_native_classID(); + case vmIntrinsics::_threadID: + return inline_native_threadID(); + case vmIntrinsics::_counterTime: + return inline_native_time_funcs(CAST_FROM_FN_PTR(address, TRACE_TIME_METHOD), "counterTime"); +#endif case vmIntrinsics::_currentTimeMillis: - return inline_native_time_funcs(false); + return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeMillis), "currentTimeMillis"); case vmIntrinsics::_nanoTime: - return inline_native_time_funcs(true); + return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeNanos), "nanoTime"); case vmIntrinsics::_allocateInstance: return inline_unsafe_allocate(); case vmIntrinsics::_copyMemory: @@ -2840,14 +2852,63 @@ bool LibraryCallKit::inline_unsafe_allocate() { return true; } +#ifdef TRACE_HAVE_INTRINSICS +/* + * oop -> myklass + * myklass->trace_id |= USED + * return myklass->trace_id & ~0x3 + */ +bool LibraryCallKit::inline_native_classID() { + int nargs = 1 + 1; + null_check_receiver(callee()); // check then ignore argument(0) + _sp += nargs; + Node* cls = do_null_check(argument(1), 
T_OBJECT); + _sp -= nargs; + Node* kls = load_klass_from_mirror(cls, false, nargs, NULL, 0); + _sp += nargs; + kls = do_null_check(kls, T_OBJECT); + _sp -= nargs; + ByteSize offset = TRACE_ID_OFFSET; + Node* insp = basic_plus_adr(kls, in_bytes(offset)); + Node* tvalue = make_load(NULL, insp, TypeLong::LONG, T_LONG); + Node* bits = longcon(~0x03l); // ignore bit 0 & 1 + Node* andl = _gvn.transform(new (C, 3) AndLNode(tvalue, bits)); + Node* clsused = longcon(0x01l); // set the class bit + Node* orl = _gvn.transform(new (C, 3) OrLNode(tvalue, clsused)); + + const TypePtr *adr_type = _gvn.type(insp)->isa_ptr(); + store_to_memory(control(), insp, orl, T_LONG, adr_type); + push_pair(andl); + return true; +} + +bool LibraryCallKit::inline_native_threadID() { + Node* tls_ptr = NULL; + Node* cur_thr = generate_current_thread(tls_ptr); + Node* p = basic_plus_adr(top()/*!oop*/, tls_ptr, in_bytes(JavaThread::osthread_offset())); + Node* osthread = make_load(NULL, p, TypeRawPtr::NOTNULL, T_ADDRESS); + p = basic_plus_adr(top()/*!oop*/, osthread, in_bytes(OSThread::thread_id_offset())); + + Node* threadid = NULL; + size_t thread_id_size = OSThread::thread_id_size(); + if (thread_id_size == (size_t) BytesPerLong) { + threadid = ConvL2I(make_load(control(), p, TypeLong::LONG, T_LONG)); + push(threadid); + } else if (thread_id_size == (size_t) BytesPerInt) { + threadid = make_load(control(), p, TypeInt::INT, T_INT); + push(threadid); + } else { + ShouldNotReachHere(); + } + return true; +} +#endif + //------------------------inline_native_time_funcs-------------- // inline code for System.currentTimeMillis() and System.nanoTime() // these have the same type and signature -bool LibraryCallKit::inline_native_time_funcs(bool isNano) { - address funcAddr = isNano ? CAST_FROM_FN_PTR(address, os::javaTimeNanos) : - CAST_FROM_FN_PTR(address, os::javaTimeMillis); - const char * funcName = isNano ? "nanoTime" : "currentTimeMillis"; - const TypeFunc *tf = OptoRuntime::current_time_millis_Type(); +bool LibraryCallKit::inline_native_time_funcs(address funcAddr, const char* funcName) { + const TypeFunc *tf = OptoRuntime::void_long_Type(); const TypePtr* no_memory_effects = NULL; Node* time = make_runtime_call(RC_LEAF, tf, funcAddr, funcName, no_memory_effects); Node* value = _gvn.transform(new (C, 1) ProjNode(time, TypeFunc::Parms+0)); diff --git a/hotspot/src/share/vm/opto/runtime.cpp b/hotspot/src/share/vm/opto/runtime.cpp index b97f06a024f..b03a3943a0e 100644 --- a/hotspot/src/share/vm/opto/runtime.cpp +++ b/hotspot/src/share/vm/opto/runtime.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -709,9 +709,9 @@ const TypeFunc* OptoRuntime::Math_DD_D_Type() { return TypeFunc::make(domain, range); } -//-------------- currentTimeMillis +//-------------- currentTimeMillis, currentTimeNanos, etc -const TypeFunc* OptoRuntime::current_time_millis_Type() { +const TypeFunc* OptoRuntime::void_long_Type() { // create input type (domain) const Type **fields = TypeTuple::fields(0); const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+0, fields); diff --git a/hotspot/src/share/vm/opto/runtime.hpp b/hotspot/src/share/vm/opto/runtime.hpp index 39702980597..11b5434a54d 100644 --- a/hotspot/src/share/vm/opto/runtime.hpp +++ b/hotspot/src/share/vm/opto/runtime.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -268,7 +268,7 @@ private: static const TypeFunc* Math_DD_D_Type(); // mod,pow & friends static const TypeFunc* modf_Type(); static const TypeFunc* l2f_Type(); - static const TypeFunc* current_time_millis_Type(); + static const TypeFunc* void_long_Type(); static const TypeFunc* flush_windows_Type(); diff --git a/hotspot/src/share/vm/runtime/osThread.hpp b/hotspot/src/share/vm/runtime/osThread.hpp index 984bc9b49f1..bb3fd79637b 100644 --- a/hotspot/src/share/vm/runtime/osThread.hpp +++ b/hotspot/src/share/vm/runtime/osThread.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -98,6 +98,7 @@ class OSThread: public CHeapObj { // For java intrinsics: static ByteSize interrupted_offset() { return byte_offset_of(OSThread, _interrupted); } + static ByteSize thread_id_offset() { return byte_offset_of(OSThread, _thread_id); } // Platform dependent stuff #ifdef TARGET_OS_FAMILY_linux diff --git a/hotspot/src/share/vm/trace/traceMacros.hpp b/hotspot/src/share/vm/trace/traceMacros.hpp index 221f4d0f889..44103192083 100644 --- a/hotspot/src/share/vm/trace/traceMacros.hpp +++ b/hotspot/src/share/vm/trace/traceMacros.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -43,5 +43,9 @@ #define TRACE_SET_KLASS_TRACE_ID(x1, x2) do { } while (0) #define TRACE_DEFINE_KLASS_METHODS typedef int ___IGNORED_hs_trace_type1 #define TRACE_DEFINE_KLASS_TRACE_ID typedef int ___IGNORED_hs_trace_type2 +#define TRACE_DEFINE_OFFSET typedef int ___IGNORED_hs_trace_type3 +#define TRACE_ID_OFFSET in_ByteSize(0); ShouldNotReachHere() +#define TRACE_TEMPLATES(template) +#define TRACE_INTRINSICS(do_intrinsic, do_class, do_name, do_signature, do_alias) #endif From 023ad06e46f1126728c230b4330075f854975a89 Mon Sep 17 00:00:00 2001 From: Alejandro Murillo Date: Sat, 7 Apr 2012 09:06:55 -0700 Subject: [PATCH 07/15] 7159842: new hotspot build - hs24-b08 Reviewed-by: jcoomes --- hotspot/make/hotspot_version | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hotspot/make/hotspot_version b/hotspot/make/hotspot_version index 27ff9dd9a6a..00601747489 100644 --- a/hotspot/make/hotspot_version +++ b/hotspot/make/hotspot_version @@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2011 HS_MAJOR_VER=24 HS_MINOR_VER=0 -HS_BUILD_NUMBER=07 +HS_BUILD_NUMBER=08 JDK_MAJOR_VER=1 JDK_MINOR_VER=8 From bb59715dae61ee192abe837741e85344261105d8 Mon Sep 17 00:00:00 2001 From: Bengt Rutisson Date: Fri, 13 Apr 2012 01:59:38 +0200 Subject: [PATCH 08/15] 7160728: Introduce an extra logging level for G1 logging Added log levels "fine", "finer" and "finest". Let PrintGC map to "fine" and PrintGCDetails map to "finer". Separated out the per worker information in the G1 logging to the "finest" level. Reviewed-by: stefank, jwilhelm, tonyp, johnc --- .../gc_implementation/g1/concurrentMark.cpp | 10 ++-- .../g1/concurrentMarkThread.cpp | 17 +++--- .../gc_implementation/g1/g1CollectedHeap.cpp | 27 +++++---- .../g1/g1CollectorPolicy.cpp | 35 ++++++++---- .../share/vm/gc_implementation/g1/g1Log.cpp | 56 +++++++++++++++++++ .../share/vm/gc_implementation/g1/g1Log.hpp | 56 +++++++++++++++++++ .../vm/gc_implementation/g1/g1MarkSweep.cpp | 9 +-- .../vm/gc_implementation/g1/g1_globals.hpp | 6 +- .../gc_implementation/g1/vm_operations_g1.cpp | 7 ++- 9 files changed, 177 insertions(+), 46 deletions(-) create mode 100644 hotspot/src/share/vm/gc_implementation/g1/g1Log.cpp create mode 100644 hotspot/src/share/vm/gc_implementation/g1/g1Log.hpp diff --git a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp index 015d99dd5ad..11616929792 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp +++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp @@ -29,6 +29,7 @@ #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" #include "gc_implementation/g1/g1CollectorPolicy.hpp" #include "gc_implementation/g1/g1ErgoVerbose.hpp" +#include "gc_implementation/g1/g1Log.hpp" #include "gc_implementation/g1/g1OopClosures.inline.hpp" #include "gc_implementation/g1/g1RemSet.hpp" #include "gc_implementation/g1/heapRegion.inline.hpp" @@ -846,7 +847,7 @@ void ConcurrentMark::enter_first_sync_barrier(int task_num) { clear_marking_state(concurrent() /* clear_overflow */); force_overflow()->update(); - if (PrintGC) { + if (G1Log::fine()) { gclog_or_tty->date_stamp(PrintGCDateStamps); gclog_or_tty->stamp(PrintGCTimeStamps); gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]"); @@ -2105,7 +2106,7 @@ void ConcurrentMark::cleanup() { double end = os::elapsedTime(); _cleanup_times.add((end - start) * 1000.0); - if (PrintGC || PrintGCDetails) { + if 
(G1Log::fine()) { g1h->print_size_transition(gclog_or_tty, start_used_bytes, g1h->used(), @@ -2446,11 +2447,10 @@ void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) { // Inner scope to exclude the cleaning of the string and symbol // tables from the displayed time. { - bool verbose = PrintGC && PrintGCDetails; - if (verbose) { + if (G1Log::finer()) { gclog_or_tty->put(' '); } - TraceTime t("GC ref-proc", verbose, false, gclog_or_tty); + TraceTime t("GC ref-proc", G1Log::finer(), false, gclog_or_tty); ReferenceProcessor* rp = g1h->ref_processor_cm(); diff --git a/hotspot/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp b/hotspot/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp index 9dcb124ceaa..9959260d51c 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp +++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp @@ -26,6 +26,7 @@ #include "gc_implementation/g1/concurrentMarkThread.inline.hpp" #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" #include "gc_implementation/g1/g1CollectorPolicy.hpp" +#include "gc_implementation/g1/g1Log.hpp" #include "gc_implementation/g1/g1MMUTracker.hpp" #include "gc_implementation/g1/vm_operations_g1.hpp" #include "memory/resourceArea.hpp" @@ -104,7 +105,7 @@ void ConcurrentMarkThread::run() { double scan_start = os::elapsedTime(); if (!cm()->has_aborted()) { - if (PrintGC) { + if (G1Log::fine()) { gclog_or_tty->date_stamp(PrintGCDateStamps); gclog_or_tty->stamp(PrintGCTimeStamps); gclog_or_tty->print_cr("[GC concurrent-root-region-scan-start]"); @@ -113,7 +114,7 @@ void ConcurrentMarkThread::run() { _cm->scanRootRegions(); double scan_end = os::elapsedTime(); - if (PrintGC) { + if (G1Log::fine()) { gclog_or_tty->date_stamp(PrintGCDateStamps); gclog_or_tty->stamp(PrintGCTimeStamps); gclog_or_tty->print_cr("[GC concurrent-root-region-scan-end, %1.7lf]", @@ -122,7 +123,7 @@ void ConcurrentMarkThread::run() { } double mark_start_sec = os::elapsedTime(); - if (PrintGC) { + if (G1Log::fine()) { gclog_or_tty->date_stamp(PrintGCDateStamps); gclog_or_tty->stamp(PrintGCTimeStamps); gclog_or_tty->print_cr("[GC concurrent-mark-start]"); @@ -146,7 +147,7 @@ void ConcurrentMarkThread::run() { os::sleep(current_thread, sleep_time_ms, false); } - if (PrintGC) { + if (G1Log::fine()) { gclog_or_tty->date_stamp(PrintGCDateStamps); gclog_or_tty->stamp(PrintGCTimeStamps); gclog_or_tty->print_cr("[GC concurrent-mark-end, %1.7lf sec]", @@ -165,7 +166,7 @@ void ConcurrentMarkThread::run() { } if (cm()->restart_for_overflow()) { - if (PrintGC) { + if (G1Log::fine()) { gclog_or_tty->date_stamp(PrintGCDateStamps); gclog_or_tty->stamp(PrintGCTimeStamps); gclog_or_tty->print_cr("[GC concurrent-mark-restart-for-overflow]"); @@ -211,7 +212,7 @@ void ConcurrentMarkThread::run() { // reclaimed by cleanup. 
double cleanup_start_sec = os::elapsedTime(); - if (PrintGC) { + if (G1Log::fine()) { gclog_or_tty->date_stamp(PrintGCDateStamps); gclog_or_tty->stamp(PrintGCTimeStamps); gclog_or_tty->print_cr("[GC concurrent-cleanup-start]"); @@ -232,7 +233,7 @@ void ConcurrentMarkThread::run() { g1h->reset_free_regions_coming(); double cleanup_end_sec = os::elapsedTime(); - if (PrintGC) { + if (G1Log::fine()) { gclog_or_tty->date_stamp(PrintGCDateStamps); gclog_or_tty->stamp(PrintGCTimeStamps); gclog_or_tty->print_cr("[GC concurrent-cleanup-end, %1.7lf]", @@ -273,7 +274,7 @@ void ConcurrentMarkThread::run() { _sts.leave(); if (cm()->has_aborted()) { - if (PrintGC) { + if (G1Log::fine()) { gclog_or_tty->date_stamp(PrintGCDateStamps); gclog_or_tty->stamp(PrintGCTimeStamps); gclog_or_tty->print_cr("[GC concurrent-mark-abort]"); diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp index 1e1f70f3443..b585218e4a0 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp +++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @@ -33,6 +33,7 @@ #include "gc_implementation/g1/g1CollectorPolicy.hpp" #include "gc_implementation/g1/g1ErgoVerbose.hpp" #include "gc_implementation/g1/g1EvacFailure.hpp" +#include "gc_implementation/g1/g1Log.hpp" #include "gc_implementation/g1/g1MarkSweep.hpp" #include "gc_implementation/g1/g1OopClosures.inline.hpp" #include "gc_implementation/g1/g1RemSet.inline.hpp" @@ -1255,10 +1256,10 @@ bool G1CollectedHeap::do_collection(bool explicit_gc, // Timing bool system_gc = (gc_cause() == GCCause::_java_lang_system_gc); assert(!system_gc || explicit_gc, "invariant"); - gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); - TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); + gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps); + TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty); TraceTime t(system_gc ? "Full GC (System.gc())" : "Full GC", - PrintGC, true, gclog_or_tty); + G1Log::fine(), true, gclog_or_tty); TraceCollectorStats tcs(g1mm()->full_collection_counters()); TraceMemoryManagerStats tms(true /* fullGC */, gc_cause()); @@ -1444,7 +1445,7 @@ bool G1CollectedHeap::do_collection(bool explicit_gc, heap_region_iterate(&rebuild_rs); } - if (PrintGC) { + if (G1Log::fine()) { print_size_transition(gclog_or_tty, g1h_prev_used, used(), capacity()); } @@ -1917,6 +1918,8 @@ jint G1CollectedHeap::initialize() { CollectedHeap::pre_initialize(); os::enable_vtime(); + G1Log::init(); + // Necessary to satisfy locking discipline assertions. MutexLocker x(Heap_lock); @@ -3609,12 +3612,12 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) { increment_total_full_collections(); } - // if PrintGCDetails is on, we'll print long statistics information + // if the log level is "finer" is on, we'll print long statistics information // in the collector policy code, so let's not print this as the output // is messy if we do. 
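+  // (When G1LogLevel is not set, G1Log::init(), added later in this patch,
+  // maps PrintGC to "fine" and PrintGCDetails to "finer", so the old flags
+  // keep their effect.)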
- gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); - TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); - TraceTime t(verbose_str, PrintGC && !PrintGCDetails, true, gclog_or_tty); + gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps); + TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty); + TraceTime t(verbose_str, G1Log::fine() && !G1Log::finer(), true, gclog_or_tty); TraceCollectorStats tcs(g1mm()->incremental_collection_counters()); TraceMemoryManagerStats tms(false /* fullGC */, gc_cause()); @@ -3931,8 +3934,8 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) { } // The closing of the inner scope, immediately above, will complete - // the PrintGC logging output. The record_collection_pause_end() call - // above will complete the logging output of PrintGCDetails. + // logging at the "fine" level. The record_collection_pause_end() call + // above will complete logging at the "finer" level. // // It is not yet to safe, however, to tell the concurrent mark to // start as we have some optional output below. We don't want the @@ -5514,9 +5517,9 @@ void G1CollectedHeap::evacuate_collection_set() { if (evacuation_failed()) { remove_self_forwarding_pointers(); - if (PrintGCDetails) { + if (G1Log::finer()) { gclog_or_tty->print(" (to-space overflow)"); - } else if (PrintGC) { + } else if (G1Log::fine()) { gclog_or_tty->print("--"); } } diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp index 8b4cc1360cb..fc0e4f92de3 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp +++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp @@ -29,6 +29,7 @@ #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" #include "gc_implementation/g1/g1CollectorPolicy.hpp" #include "gc_implementation/g1/g1ErgoVerbose.hpp" +#include "gc_implementation/g1/g1Log.hpp" #include "gc_implementation/g1/heapRegionRemSet.hpp" #include "gc_implementation/shared/gcPolicyCounters.hpp" #include "runtime/arguments.hpp" @@ -885,7 +886,7 @@ void G1CollectorPolicy::record_stop_world_start() { void G1CollectorPolicy::record_collection_pause_start(double start_time_sec, size_t start_used) { - if (PrintGCDetails) { + if (G1Log::finer()) { gclog_or_tty->stamp(PrintGCTimeStamps); gclog_or_tty->print("[GC pause"); gclog_or_tty->print(" (%s)", gcs_are_young() ? "young" : "mixed"); @@ -1022,11 +1023,16 @@ void G1CollectorPolicy::print_par_stats(int level, if (val > max) max = val; total += val; - buf.append(" %3.1lf", val); + if (G1Log::finest()) { + buf.append(" %.1lf", val); + } + } + + if (G1Log::finest()) { + buf.append_and_print_cr(""); } - buf.append_and_print_cr(""); double avg = total / (double) no_of_gc_threads(); - buf.append_and_print_cr(" Avg: %5.1lf, Min: %5.1lf, Max: %5.1lf, Diff: %5.1lf]", + buf.append_and_print_cr(" Avg: %.1lf Min: %.1lf Max: %.1lf Diff: %.1lf]", avg, min, max, max - min); } @@ -1223,7 +1229,7 @@ void G1CollectorPolicy::record_collection_pause_end(int no_of_gc_threads) { // These values are used to update the summary information that is // displayed when TraceGen0Time is enabled, and are output as part - // of the PrintGCDetails output, in the non-parallel case. + // of the "finer" output, in the non-parallel case. 
double ext_root_scan_time = avg_value(_par_last_ext_root_scan_times_ms); double satb_filtering_time = avg_value(_par_last_satb_filtering_times_ms); @@ -1356,8 +1362,7 @@ void G1CollectorPolicy::record_collection_pause_end(int no_of_gc_threads) { } } - // PrintGCDetails output - if (PrintGCDetails) { + if (G1Log::finer()) { bool print_marking_info = _g1->mark_in_progress() && !last_pause_included_initial_mark; @@ -1376,11 +1381,15 @@ void G1CollectorPolicy::record_collection_pause_end(int no_of_gc_threads) { print_par_stats(2, "SATB Filtering", _par_last_satb_filtering_times_ms); } print_par_stats(2, "Update RS", _par_last_update_rs_times_ms); - print_par_sizes(3, "Processed Buffers", _par_last_update_rs_processed_buffers); + if (G1Log::finest()) { + print_par_sizes(3, "Processed Buffers", _par_last_update_rs_processed_buffers); + } print_par_stats(2, "Scan RS", _par_last_scan_rs_times_ms); print_par_stats(2, "Object Copy", _par_last_obj_copy_times_ms); print_par_stats(2, "Termination", _par_last_termination_times_ms); - print_par_sizes(3, "Termination Attempts", _par_last_termination_attempts); + if (G1Log::finest()) { + print_par_sizes(3, "Termination Attempts", _par_last_termination_attempts); + } for (int i = 0; i < _parallel_gc_threads; i++) { _par_last_gc_worker_times_ms[i] = _par_last_gc_worker_end_times_ms[i] - @@ -1406,7 +1415,9 @@ void G1CollectorPolicy::record_collection_pause_end(int no_of_gc_threads) { print_stats(1, "SATB Filtering", satb_filtering_time); } print_stats(1, "Update RS", update_rs_time); - print_stats(2, "Processed Buffers", (int)update_rs_processed_buffers); + if (G1Log::finest()) { + print_stats(2, "Processed Buffers", (int)update_rs_processed_buffers); + } print_stats(1, "Scan RS", scan_rs_time); print_stats(1, "Object Copying", obj_copy_time); } @@ -1610,7 +1621,7 @@ void G1CollectorPolicy::record_collection_pause_end(int no_of_gc_threads) { proper_unit_for_byte_size((bytes)) void G1CollectorPolicy::print_heap_transition() { - if (PrintGCDetails) { + if (G1Log::finer()) { YoungList* young_list = _g1->young_list(); size_t eden_bytes = young_list->eden_used_bytes(); size_t survivor_bytes = young_list->survivor_used_bytes(); @@ -1637,7 +1648,7 @@ void G1CollectorPolicy::print_heap_transition() { EXT_SIZE_PARAMS(capacity)); _prev_eden_capacity = eden_capacity; - } else if (PrintGC) { + } else if (G1Log::fine()) { _g1->print_size_transition(gclog_or_tty, _cur_collection_pause_used_at_start_bytes, _g1->used(), _g1->capacity()); diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1Log.cpp b/hotspot/src/share/vm/gc_implementation/g1/g1Log.cpp new file mode 100644 index 00000000000..56d957f76b4 --- /dev/null +++ b/hotspot/src/share/vm/gc_implementation/g1/g1Log.cpp @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "gc_implementation/g1/g1_globals.hpp" +#include "gc_implementation/g1/g1Log.hpp" +#include "runtime/globals.hpp" + +G1Log::LogLevel G1Log::_level = G1Log::LevelNone; + +// If G1LogLevel has not been set up we will use the values of PrintGC +// and PrintGCDetails for the logging level. +// - PrintGC maps to "fine". +// - PrintGCDetails maps to "finer". +void G1Log::init() { + if (G1LogLevel != NULL && G1LogLevel[0] != '\0') { + if (strncmp("none", G1LogLevel, 4) == 0 && G1LogLevel[4] == '\0') { + _level = LevelNone; + } else if (strncmp("fine", G1LogLevel, 4) == 0 && G1LogLevel[4] == '\0') { + _level = LevelFine; + } else if (strncmp("finer", G1LogLevel, 5) == 0 && G1LogLevel[5] == '\0') { + _level = LevelFiner; + } else if (strncmp("finest", G1LogLevel, 6) == 0 && G1LogLevel[6] == '\0') { + _level = LevelFinest; + } else { + warning("Unknown logging level '%s', should be one of 'fine', 'finer' or 'finest'.", G1LogLevel); + } + } else { + if (PrintGCDetails) { + _level = LevelFiner; + } else if (PrintGC) { + _level = LevelFine; + } + } +} diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1Log.hpp b/hotspot/src/share/vm/gc_implementation/g1/g1Log.hpp new file mode 100644 index 00000000000..b8da001cfd6 --- /dev/null +++ b/hotspot/src/share/vm/gc_implementation/g1/g1Log.hpp @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1LOG_HPP +#define SHARE_VM_GC_IMPLEMENTATION_G1_G1LOG_HPP + +#include "memory/allocation.hpp" + +class G1Log : public AllStatic { + typedef enum { + LevelNone, + LevelFine, + LevelFiner, + LevelFinest + } LogLevel; + + static LogLevel _level; + + public: + inline static bool fine() { + return _level >= LevelFine; + } + + inline static bool finer() { + return _level >= LevelFiner; + } + + inline static bool finest() { + return _level == LevelFinest; + } + + static void init(); +}; + +#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1LOG_HPP diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp b/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp index f32030b4524..b6b27cbbb5d 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp +++ b/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp @@ -29,6 +29,7 @@ #include "classfile/vmSymbols.hpp" #include "code/codeCache.hpp" #include "code/icBuffer.hpp" +#include "gc_implementation/g1/g1Log.hpp" #include "gc_implementation/g1/g1MarkSweep.hpp" #include "memory/gcLocker.hpp" #include "memory/genCollectedHeap.hpp" @@ -126,7 +127,7 @@ void G1MarkSweep::allocate_stacks() { void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading, bool clear_all_softrefs) { // Recursively traverse all live objects and mark them - TraceTime tm("phase 1", PrintGC && Verbose, true, gclog_or_tty); + TraceTime tm("phase 1", G1Log::fine() && Verbose, true, gclog_or_tty); GenMarkSweep::trace(" 1"); SharedHeap* sh = SharedHeap::heap(); @@ -291,7 +292,7 @@ void G1MarkSweep::mark_sweep_phase2() { G1CollectedHeap* g1h = G1CollectedHeap::heap(); Generation* pg = g1h->perm_gen(); - TraceTime tm("phase 2", PrintGC && Verbose, true, gclog_or_tty); + TraceTime tm("phase 2", G1Log::fine() && Verbose, true, gclog_or_tty); GenMarkSweep::trace("2"); FindFirstRegionClosure cl; @@ -335,7 +336,7 @@ void G1MarkSweep::mark_sweep_phase3() { Generation* pg = g1h->perm_gen(); // Adjust the pointers to reflect the new locations - TraceTime tm("phase 3", PrintGC && Verbose, true, gclog_or_tty); + TraceTime tm("phase 3", G1Log::fine() && Verbose, true, gclog_or_tty); GenMarkSweep::trace("3"); SharedHeap* sh = SharedHeap::heap(); @@ -399,7 +400,7 @@ void G1MarkSweep::mark_sweep_phase4() { G1CollectedHeap* g1h = G1CollectedHeap::heap(); Generation* pg = g1h->perm_gen(); - TraceTime tm("phase 4", PrintGC && Verbose, true, gclog_or_tty); + TraceTime tm("phase 4", G1Log::fine() && Verbose, true, gclog_or_tty); GenMarkSweep::trace("4"); pg->compact(); diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp b/hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp index 57e977e0691..d25e5b94722 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp +++ b/hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp @@ -26,7 +26,6 @@ #define SHARE_VM_GC_IMPLEMENTATION_G1_G1_GLOBALS_HPP #include "runtime/globals.hpp" - // // Defines all globals flags used by the garbage-first compiler. 
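// G1LogLevel, added below, is an experimental flag; assuming the usual
// handling of experimental flags, it has to be unlocked first, e.g.
//   java -XX:+UnlockExperimentalVMOptions -XX:G1LogLevel=finest ...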
// @@ -309,7 +308,10 @@ \ develop(uintx, G1OldCSetRegionThresholdPercent, 10, \ "An upper bound for the number of old CSet regions expressed " \ - "as a percentage of the heap size.") + "as a percentage of the heap size.") \ + \ + experimental(ccstr, G1LogLevel, NULL, \ + "Log level for G1 logging: fine, finer, finest") G1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG, DECLARE_MANAGEABLE_FLAG, DECLARE_PRODUCT_RW_FLAG) diff --git a/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp b/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp index 05e7f35e1a0..1a330ba6483 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp +++ b/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp @@ -26,6 +26,7 @@ #include "gc_implementation/g1/concurrentMarkThread.inline.hpp" #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" #include "gc_implementation/g1/g1CollectorPolicy.hpp" +#include "gc_implementation/g1/g1Log.hpp" #include "gc_implementation/g1/vm_operations_g1.hpp" #include "gc_implementation/shared/isGCActiveMark.hpp" #include "gc_implementation/g1/vm_operations_g1.hpp" @@ -223,9 +224,9 @@ void VM_CGC_Operation::release_and_notify_pending_list_lock() { } void VM_CGC_Operation::doit() { - gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); - TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); - TraceTime t(_printGCMessage, PrintGC, true, gclog_or_tty); + gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps); + TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty); + TraceTime t(_printGCMessage, G1Log::fine(), true, gclog_or_tty); SharedHeap* sh = SharedHeap::heap(); // This could go away if CollectedHeap gave access to _gc_is_active... if (sh != NULL) { From f695b75d78a0968cc91e8f71ef2435bf91d4b75e Mon Sep 17 00:00:00 2001 From: Bengt Rutisson Date: Mon, 16 Apr 2012 08:57:18 +0200 Subject: [PATCH 09/15] 4988100: oop_verify_old_oop appears to be dead Removed oop_verify_old_oop and allow_dirty. 
Also reviewed by: alexlamsl@gmail.com Reviewed-by: jmasa, jwilhelm --- .../compactibleFreeListSpace.cpp | 4 +- .../compactibleFreeListSpace.hpp | 4 +- .../concurrentMarkSweepGeneration.cpp | 12 +++--- .../concurrentMarkSweepGeneration.hpp | 6 +-- .../gc_implementation/g1/concurrentMark.cpp | 12 ++---- .../gc_implementation/g1/g1CollectedHeap.cpp | 37 +++++++------------ .../gc_implementation/g1/g1CollectedHeap.hpp | 4 +- .../vm/gc_implementation/g1/g1MarkSweep.cpp | 3 +- .../vm/gc_implementation/g1/heapRegion.cpp | 7 ++-- .../vm/gc_implementation/g1/heapRegion.hpp | 4 +- .../parallelScavenge/parallelScavengeHeap.cpp | 8 ++-- .../parallelScavenge/parallelScavengeHeap.hpp | 2 +- .../parallelScavenge/psOldGen.cpp | 6 +-- .../parallelScavenge/psOldGen.hpp | 4 +- .../parallelScavenge/psYoungGen.cpp | 10 ++--- .../parallelScavenge/psYoungGen.hpp | 4 +- .../shared/immutableSpace.cpp | 4 +- .../shared/immutableSpace.hpp | 4 +- .../shared/mutableNUMASpace.cpp | 4 +- .../shared/mutableNUMASpace.hpp | 4 +- .../gc_implementation/shared/mutableSpace.cpp | 4 +- .../gc_implementation/shared/mutableSpace.hpp | 4 +- .../share/vm/gc_interface/collectedHeap.hpp | 2 +- .../share/vm/memory/compactingPermGenGen.cpp | 8 ++-- .../share/vm/memory/compactingPermGenGen.hpp | 4 +- .../src/share/vm/memory/defNewGeneration.cpp | 8 ++-- .../src/share/vm/memory/defNewGeneration.hpp | 4 +- .../src/share/vm/memory/genCollectedHeap.cpp | 6 +-- .../src/share/vm/memory/genCollectedHeap.hpp | 4 +- hotspot/src/share/vm/memory/generation.cpp | 6 +-- hotspot/src/share/vm/memory/generation.hpp | 6 +-- hotspot/src/share/vm/memory/space.cpp | 23 ++---------- hotspot/src/share/vm/memory/space.hpp | 8 ++-- hotspot/src/share/vm/memory/universe.cpp | 6 +-- hotspot/src/share/vm/memory/universe.hpp | 4 +- .../src/share/vm/oops/instanceRefKlass.cpp | 26 +------------ hotspot/src/share/vm/oops/klass.cpp | 8 ---- hotspot/src/share/vm/oops/klass.hpp | 2 - hotspot/src/share/vm/oops/objArrayKlass.cpp | 7 ---- hotspot/src/share/vm/oops/objArrayKlass.hpp | 4 +- hotspot/src/share/vm/oops/oop.cpp | 12 +----- hotspot/src/share/vm/oops/oop.hpp | 4 +- hotspot/src/share/vm/runtime/vmThread.cpp | 4 +- 43 files changed, 110 insertions(+), 197 deletions(-) diff --git a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp index 2cb5e2f3cef..4f78ce5dcf7 100644 --- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp +++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -2444,7 +2444,7 @@ class VerifyAllOopsClosure: public OopClosure { virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); } }; -void CompactibleFreeListSpace::verify(bool ignored) const { +void CompactibleFreeListSpace::verify() const { assert_lock_strong(&_freelistLock); verify_objects_initialized(); MemRegion span = _collector->_span; diff --git a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp index 90d2f5f3918..c8ffba5265f 100644 --- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp +++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -492,7 +492,7 @@ class CompactibleFreeListSpace: public CompactibleSpace { void print() const; void print_on(outputStream* st) const; void prepare_for_verify(); - void verify(bool allow_dirty) const; + void verify() const; void verifyFreeLists() const PRODUCT_RETURN; void verifyIndexedFreeLists() const; void verifyIndexedFreeList(size_t size) const; diff --git a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp index ac8ac93ff9e..db8da2846ee 100644 --- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp +++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp @@ -3109,21 +3109,21 @@ ConcurrentMarkSweepGeneration::prepare_for_verify() { } void -ConcurrentMarkSweepGeneration::verify(bool allow_dirty /* ignored */) { +ConcurrentMarkSweepGeneration::verify() { // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those // are not called when the heap is verified during universe initialization and // at vm shutdown. if (freelistLock()->owned_by_self()) { - cmsSpace()->verify(false /* ignored */); + cmsSpace()->verify(); } else { MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag); - cmsSpace()->verify(false /* ignored */); + cmsSpace()->verify(); } } -void CMSCollector::verify(bool allow_dirty /* ignored */) { - _cmsGen->verify(allow_dirty); - _permGen->verify(allow_dirty); +void CMSCollector::verify() { + _cmsGen->verify(); + _permGen->verify(); } #ifndef PRODUCT diff --git a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp index 440677403fb..a097c5bb3de 100644 --- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp +++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -988,7 +988,7 @@ class CMSCollector: public CHeapObj { CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters(); // debugging - void verify(bool); + void verify(); bool verify_after_remark(); void verify_ok_to_terminate() const PRODUCT_RETURN; void verify_work_stacks_empty() const PRODUCT_RETURN; @@ -1279,7 +1279,7 @@ class ConcurrentMarkSweepGeneration: public CardGeneration { // Debugging void prepare_for_verify(); - void verify(bool allow_dirty); + void verify(); void print_statistics() PRODUCT_RETURN; // Performance Counters support diff --git a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp index 11616929792..04bd4783a08 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp +++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp @@ -1120,8 +1120,7 @@ void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) { HandleMark hm; // handle scope gclog_or_tty->print(" VerifyDuringGC:(before)"); Universe::heap()->prepare_for_verify(); - Universe::verify(/* allow dirty */ true, - /* silent */ false, + Universe::verify(/* silent */ false, /* option */ VerifyOption_G1UsePrevMarking); } @@ -1160,8 +1159,7 @@ void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) { HandleMark hm; // handle scope gclog_or_tty->print(" VerifyDuringGC:(after)"); Universe::heap()->prepare_for_verify(); - Universe::verify(/* allow dirty */ true, - /* silent */ false, + Universe::verify(/* silent */ false, /* option */ VerifyOption_G1UseNextMarking); } assert(!restart_for_overflow(), "sanity"); @@ -1950,8 +1948,7 @@ void ConcurrentMark::cleanup() { HandleMark hm; // handle scope gclog_or_tty->print(" VerifyDuringGC:(before)"); Universe::heap()->prepare_for_verify(); - Universe::verify(/* allow dirty */ true, - /* silent */ false, + Universe::verify(/* silent */ false, /* option */ VerifyOption_G1UsePrevMarking); } @@ -2132,8 +2129,7 @@ void ConcurrentMark::cleanup() { HandleMark hm; // handle scope gclog_or_tty->print(" VerifyDuringGC:(after)"); Universe::heap()->prepare_for_verify(); - Universe::verify(/* allow dirty */ true, - /* silent */ false, + Universe::verify(/* silent */ false, /* option */ VerifyOption_G1UsePrevMarking); } diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp index b585218e4a0..bcdf53f486a 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp +++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @@ -1291,8 +1291,7 @@ bool G1CollectedHeap::do_collection(bool explicit_gc, HandleMark hm; // Discard invalid handles created during verification gclog_or_tty->print(" VerifyBeforeGC:"); prepare_for_verify(); - Universe::verify(/* allow dirty */ true, - /* silent */ false, + Universe::verify(/* silent */ false, /* option */ VerifyOption_G1UsePrevMarking); } @@ -1366,8 +1365,7 @@ bool G1CollectedHeap::do_collection(bool explicit_gc, HandleMark hm; // Discard invalid handles created during verification gclog_or_tty->print(" VerifyAfterGC:"); prepare_for_verify(); - Universe::verify(/* allow dirty */ false, - /* silent */ false, + Universe::verify(/* silent */ false, /* option */ VerifyOption_G1UsePrevMarking); } @@ -3036,7 +3034,6 @@ public: class VerifyRegionClosure: public HeapRegionClosure { private: - bool _allow_dirty; bool _par; VerifyOption _vo; bool _failures; @@ -3044,9 
+3041,8 @@ public: // _vo == UsePrevMarking -> use "prev" marking information, // _vo == UseNextMarking -> use "next" marking information, // _vo == UseMarkWord -> use mark word from object header. - VerifyRegionClosure(bool allow_dirty, bool par, VerifyOption vo) - : _allow_dirty(allow_dirty), - _par(par), + VerifyRegionClosure(bool par, VerifyOption vo) + : _par(par), _vo(vo), _failures(false) {} @@ -3059,7 +3055,7 @@ public: "Should be unclaimed at verify points."); if (!r->continuesHumongous()) { bool failures = false; - r->verify(_allow_dirty, _vo, &failures); + r->verify(_vo, &failures); if (failures) { _failures = true; } else { @@ -3127,7 +3123,6 @@ public: class G1ParVerifyTask: public AbstractGangTask { private: G1CollectedHeap* _g1h; - bool _allow_dirty; VerifyOption _vo; bool _failures; @@ -3135,10 +3130,9 @@ public: // _vo == UsePrevMarking -> use "prev" marking information, // _vo == UseNextMarking -> use "next" marking information, // _vo == UseMarkWord -> use mark word from object header. - G1ParVerifyTask(G1CollectedHeap* g1h, bool allow_dirty, VerifyOption vo) : + G1ParVerifyTask(G1CollectedHeap* g1h, VerifyOption vo) : AbstractGangTask("Parallel verify task"), _g1h(g1h), - _allow_dirty(allow_dirty), _vo(vo), _failures(false) { } @@ -3148,7 +3142,7 @@ public: void work(uint worker_id) { HandleMark hm; - VerifyRegionClosure blk(_allow_dirty, true, _vo); + VerifyRegionClosure blk(true, _vo); _g1h->heap_region_par_iterate_chunked(&blk, worker_id, _g1h->workers()->active_workers(), HeapRegion::ParVerifyClaimValue); @@ -3158,12 +3152,11 @@ public: } }; -void G1CollectedHeap::verify(bool allow_dirty, bool silent) { - verify(allow_dirty, silent, VerifyOption_G1UsePrevMarking); +void G1CollectedHeap::verify(bool silent) { + verify(silent, VerifyOption_G1UsePrevMarking); } -void G1CollectedHeap::verify(bool allow_dirty, - bool silent, +void G1CollectedHeap::verify(bool silent, VerifyOption vo) { if (SafepointSynchronize::is_at_safepoint() || ! 
UseTLAB) { if (!silent) { gclog_or_tty->print("Roots (excluding permgen) "); } @@ -3215,7 +3208,7 @@ void G1CollectedHeap::verify(bool allow_dirty, assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity check"); - G1ParVerifyTask task(this, allow_dirty, vo); + G1ParVerifyTask task(this, vo); assert(UseDynamicNumberOfGCThreads || workers()->active_workers() == workers()->total_workers(), "If not dynamic should be using all the workers"); @@ -3237,7 +3230,7 @@ void G1CollectedHeap::verify(bool allow_dirty, assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity check"); } else { - VerifyRegionClosure blk(allow_dirty, false, vo); + VerifyRegionClosure blk(false, vo); heap_region_iterate(&blk); if (blk.failures()) { failures = true; @@ -3650,8 +3643,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) { HandleMark hm; // Discard invalid handles created during verification gclog_or_tty->print(" VerifyBeforeGC:"); prepare_for_verify(); - Universe::verify(/* allow dirty */ false, - /* silent */ false, + Universe::verify(/* silent */ false, /* option */ VerifyOption_G1UsePrevMarking); } @@ -3895,8 +3887,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) { HandleMark hm; // Discard invalid handles created during verification gclog_or_tty->print(" VerifyAfterGC:"); prepare_for_verify(); - Universe::verify(/* allow dirty */ true, - /* silent */ false, + Universe::verify(/* silent */ false, /* option */ VerifyOption_G1UsePrevMarking); } diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp index ad13c52e399..5303196b454 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp +++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp @@ -1504,10 +1504,10 @@ public: // Currently there is only one place where this is called with // vo == UseMarkWord, which is to verify the marking during a // full GC. - void verify(bool allow_dirty, bool silent, VerifyOption vo); + void verify(bool silent, VerifyOption vo); // Override; it uses the "prev" marking information - virtual void verify(bool allow_dirty, bool silent); + virtual void verify(bool silent); virtual void print_on(outputStream* st) const; virtual void print_extended_on(outputStream* st) const; diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp b/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp index b6b27cbbb5d..02d254b6703 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp +++ b/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp @@ -193,8 +193,7 @@ void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading, // fail. At the end of the GC, the original mark word values // (including hash values) are restored to the appropriate // objects.
- Universe::heap()->verify(/* allow dirty */ true, - /* silent */ false, + Universe::heap()->verify(/* silent */ false, /* option */ VerifyOption_G1UseMarkWord); G1CollectedHeap* g1h = G1CollectedHeap::heap(); diff --git a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp index 0fc499ebb78..1ac7e9eb43e 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp +++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp @@ -779,16 +779,15 @@ void HeapRegion::print_on(outputStream* st) const { G1OffsetTableContigSpace::print_on(st); } -void HeapRegion::verify(bool allow_dirty) const { +void HeapRegion::verify() const { bool dummy = false; - verify(allow_dirty, VerifyOption_G1UsePrevMarking, /* failures */ &dummy); + verify(VerifyOption_G1UsePrevMarking, /* failures */ &dummy); } // This really ought to be commoned up into OffsetTableContigSpace somehow. // We would need a mechanism to make that code skip dead objects. -void HeapRegion::verify(bool allow_dirty, - VerifyOption vo, +void HeapRegion::verify(VerifyOption vo, bool* failures) const { G1CollectedHeap* g1 = G1CollectedHeap::heap(); *failures = false; diff --git a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp index 2e0b75200c1..b462389a652 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp +++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp @@ -823,10 +823,10 @@ class HeapRegion: public G1OffsetTableContigSpace { // Currently there is only one place where this is called with // vo == UseMarkWord, which is to verify the marking during a // full GC. - void verify(bool allow_dirty, VerifyOption vo, bool *failures) const; + void verify(VerifyOption vo, bool *failures) const; // Override; it uses the "prev" marking information - virtual void verify(bool allow_dirty) const; + virtual void verify() const; }; // HeapRegionClosure is used for iterating over regions. diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp index 8fef37eba32..9a8848d5399 100644 --- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp +++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp @@ -911,23 +911,23 @@ void ParallelScavengeHeap::print_tracing_info() const { } -void ParallelScavengeHeap::verify(bool allow_dirty, bool silent, VerifyOption option /* ignored */) { +void ParallelScavengeHeap::verify(bool silent, VerifyOption option /* ignored */) { // Why do we need the total_collections()-filter below? 
if (total_collections() > 0) { if (!silent) { gclog_or_tty->print("permanent "); } - perm_gen()->verify(allow_dirty); + perm_gen()->verify(); if (!silent) { gclog_or_tty->print("tenured "); } - old_gen()->verify(allow_dirty); + old_gen()->verify(); if (!silent) { gclog_or_tty->print("eden "); } - young_gen()->verify(allow_dirty); + young_gen()->verify(); } } diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp index 5934cdfb888..e118997169d 100644 --- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp +++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp @@ -257,7 +257,7 @@ CollectorPolicy* collector_policy() const { return (CollectorPolicy*) _collector virtual void gc_threads_do(ThreadClosure* tc) const; virtual void print_tracing_info() const; - void verify(bool allow_dirty, bool silent, VerifyOption option /* ignored */); + void verify(bool silent, VerifyOption option /* ignored */); void print_heap_change(size_t prev_used); diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp index a46ac632904..63df001b319 100644 --- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp +++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -477,8 +477,8 @@ void PSOldGen::space_invariants() { } #endif -void PSOldGen::verify(bool allow_dirty) { - object_space()->verify(allow_dirty); +void PSOldGen::verify() { + object_space()->verify(); } class VerifyObjectStartArrayClosure : public ObjectClosure { PSOldGen* _gen; diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp index 174db29aebb..ce45376f01e 100644 --- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp +++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -174,7 +174,7 @@ class PSOldGen : public CHeapObj { virtual void print_on(outputStream* st) const; void print_used_change(size_t prev_used) const; - void verify(bool allow_dirty); + void verify(); void verify_object_start_array(); // These should not be used diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.cpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.cpp index 5355abe0a73..70c071dfe49 100644 --- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.cpp +++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -937,10 +937,10 @@ void PSYoungGen::update_counters() { } } -void PSYoungGen::verify(bool allow_dirty) { - eden_space()->verify(allow_dirty); - from_space()->verify(allow_dirty); - to_space()->verify(allow_dirty); +void PSYoungGen::verify() { + eden_space()->verify(); + from_space()->verify(); + to_space()->verify(); } #ifndef PRODUCT diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.hpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.hpp index 640c7614c10..b5a2a14bbbf 100644 --- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.hpp +++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -181,7 +181,7 @@ class PSYoungGen : public CHeapObj { void print_used_change(size_t prev_used) const; virtual const char* name() const { return "PSYoungGen"; } - void verify(bool allow_dirty); + void verify(); // Space boundary invariant checker void space_invariants() PRODUCT_RETURN; diff --git a/hotspot/src/share/vm/gc_implementation/shared/immutableSpace.cpp b/hotspot/src/share/vm/gc_implementation/shared/immutableSpace.cpp index de081655685..68af9ebb6ac 100644 --- a/hotspot/src/share/vm/gc_implementation/shared/immutableSpace.cpp +++ b/hotspot/src/share/vm/gc_implementation/shared/immutableSpace.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -70,7 +70,7 @@ void ImmutableSpace::print() const { #endif -void ImmutableSpace::verify(bool allow_dirty) { +void ImmutableSpace::verify() { HeapWord* p = bottom(); HeapWord* t = end(); HeapWord* prev_p = NULL; diff --git a/hotspot/src/share/vm/gc_implementation/shared/immutableSpace.hpp b/hotspot/src/share/vm/gc_implementation/shared/immutableSpace.hpp index bc5c1bd13b0..7d6be0d9f85 100644 --- a/hotspot/src/share/vm/gc_implementation/shared/immutableSpace.hpp +++ b/hotspot/src/share/vm/gc_implementation/shared/immutableSpace.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -65,7 +65,7 @@ class ImmutableSpace: public CHeapObj { // Debugging virtual void print() const PRODUCT_RETURN; virtual void print_short() const PRODUCT_RETURN; - virtual void verify(bool allow_dirty); + virtual void verify(); }; #endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_IMMUTABLESPACE_HPP diff --git a/hotspot/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp b/hotspot/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp index 706fd3733eb..f0f6a49d958 100644 --- a/hotspot/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp +++ b/hotspot/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp @@ -891,12 +891,12 @@ void MutableNUMASpace::print_on(outputStream* st) const { } } -void MutableNUMASpace::verify(bool allow_dirty) { +void MutableNUMASpace::verify() { // This can be called after setting an arbitrary value to the space's top, // so an object can cross the chunk boundary. We ensure the parsability // of the space and just walk the objects in linear fashion. ensure_parsability(); - MutableSpace::verify(allow_dirty); + MutableSpace::verify(); } // Scan pages and gather stats about page placement and size. diff --git a/hotspot/src/share/vm/gc_implementation/shared/mutableNUMASpace.hpp b/hotspot/src/share/vm/gc_implementation/shared/mutableNUMASpace.hpp index 7b70e6e29d9..db7207cc24b 100644 --- a/hotspot/src/share/vm/gc_implementation/shared/mutableNUMASpace.hpp +++ b/hotspot/src/share/vm/gc_implementation/shared/mutableNUMASpace.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -225,7 +225,7 @@ class MutableNUMASpace : public MutableSpace { // Debugging virtual void print_on(outputStream* st) const; virtual void print_short_on(outputStream* st) const; - virtual void verify(bool allow_dirty); + virtual void verify(); virtual void set_top(HeapWord* value); }; diff --git a/hotspot/src/share/vm/gc_implementation/shared/mutableSpace.cpp b/hotspot/src/share/vm/gc_implementation/shared/mutableSpace.cpp index 9725c4a1ebd..c47fbecab84 100644 --- a/hotspot/src/share/vm/gc_implementation/shared/mutableSpace.cpp +++ b/hotspot/src/share/vm/gc_implementation/shared/mutableSpace.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -246,7 +246,7 @@ void MutableSpace::print_on(outputStream* st) const { bottom(), top(), end()); } -void MutableSpace::verify(bool allow_dirty) { +void MutableSpace::verify() { HeapWord* p = bottom(); HeapWord* t = top(); HeapWord* prev_p = NULL; diff --git a/hotspot/src/share/vm/gc_implementation/shared/mutableSpace.hpp b/hotspot/src/share/vm/gc_implementation/shared/mutableSpace.hpp index 01fb23f050f..9ef8922848a 100644 --- a/hotspot/src/share/vm/gc_implementation/shared/mutableSpace.hpp +++ b/hotspot/src/share/vm/gc_implementation/shared/mutableSpace.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -141,7 +141,7 @@ class MutableSpace: public ImmutableSpace { virtual void print_on(outputStream* st) const; virtual void print_short() const; virtual void print_short_on(outputStream* st) const; - virtual void verify(bool allow_dirty); + virtual void verify(); }; #endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_MUTABLESPACE_HPP diff --git a/hotspot/src/share/vm/gc_interface/collectedHeap.hpp b/hotspot/src/share/vm/gc_interface/collectedHeap.hpp index 2ae92076e24..105e1ea8f24 100644 --- a/hotspot/src/share/vm/gc_interface/collectedHeap.hpp +++ b/hotspot/src/share/vm/gc_interface/collectedHeap.hpp @@ -659,7 +659,7 @@ class CollectedHeap : public CHeapObj { } // Heap verification - virtual void verify(bool allow_dirty, bool silent, VerifyOption option) = 0; + virtual void verify(bool silent, VerifyOption option) = 0; // Non product verification and debugging. #ifndef PRODUCT diff --git a/hotspot/src/share/vm/memory/compactingPermGenGen.cpp b/hotspot/src/share/vm/memory/compactingPermGenGen.cpp index c903bf46d27..7d1515b5e67 100644 --- a/hotspot/src/share/vm/memory/compactingPermGenGen.cpp +++ b/hotspot/src/share/vm/memory/compactingPermGenGen.cpp @@ -444,11 +444,11 @@ void CompactingPermGenGen::invalidate_remembered_set() { } -void CompactingPermGenGen::verify(bool allow_dirty) { - the_space()->verify(allow_dirty); +void CompactingPermGenGen::verify() { + the_space()->verify(); if (!SharedSkipVerify && spec()->enable_shared_spaces()) { - ro_space()->verify(allow_dirty); - rw_space()->verify(allow_dirty); + ro_space()->verify(); + rw_space()->verify(); } } diff --git a/hotspot/src/share/vm/memory/compactingPermGenGen.hpp b/hotspot/src/share/vm/memory/compactingPermGenGen.hpp index e3428d72f18..3cab19cf930 100644 --- a/hotspot/src/share/vm/memory/compactingPermGenGen.hpp +++ b/hotspot/src/share/vm/memory/compactingPermGenGen.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -230,7 +230,7 @@ public: void* new_vtable_start, void* obj); - void verify(bool allow_dirty); + void verify(); // Serialization static void initialize_oops() KERNEL_RETURN; diff --git a/hotspot/src/share/vm/memory/defNewGeneration.cpp b/hotspot/src/share/vm/memory/defNewGeneration.cpp index 69ae3624a25..315a38248c9 100644 --- a/hotspot/src/share/vm/memory/defNewGeneration.cpp +++ b/hotspot/src/share/vm/memory/defNewGeneration.cpp @@ -939,10 +939,10 @@ void DefNewGeneration::update_counters() { } } -void DefNewGeneration::verify(bool allow_dirty) { - eden()->verify(allow_dirty); - from()->verify(allow_dirty); - to()->verify(allow_dirty); +void DefNewGeneration::verify() { + eden()->verify(); + from()->verify(); + to()->verify(); } void DefNewGeneration::print_on(outputStream* st) const { diff --git a/hotspot/src/share/vm/memory/defNewGeneration.hpp b/hotspot/src/share/vm/memory/defNewGeneration.hpp index e7b85285775..1d5a4859041 100644 --- a/hotspot/src/share/vm/memory/defNewGeneration.hpp +++ b/hotspot/src/share/vm/memory/defNewGeneration.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -340,7 +340,7 @@ protected: // PrintHeapAtGC support. void print_on(outputStream* st) const; - void verify(bool allow_dirty); + void verify(); bool promo_failure_scan_is_complete() const { return _promo_failure_scan_stack.is_empty(); diff --git a/hotspot/src/share/vm/memory/genCollectedHeap.cpp b/hotspot/src/share/vm/memory/genCollectedHeap.cpp index 3cd791d35a2..10c3274548d 100644 --- a/hotspot/src/share/vm/memory/genCollectedHeap.cpp +++ b/hotspot/src/share/vm/memory/genCollectedHeap.cpp @@ -1247,18 +1247,18 @@ GCStats* GenCollectedHeap::gc_stats(int level) const { return _gens[level]->gc_stats(); } -void GenCollectedHeap::verify(bool allow_dirty, bool silent, VerifyOption option /* ignored */) { +void GenCollectedHeap::verify(bool silent, VerifyOption option /* ignored */) { if (!silent) { gclog_or_tty->print("permgen "); } - perm_gen()->verify(allow_dirty); + perm_gen()->verify(); for (int i = _n_gens-1; i >= 0; i--) { Generation* g = _gens[i]; if (!silent) { gclog_or_tty->print(g->name()); gclog_or_tty->print(" "); } - g->verify(allow_dirty); + g->verify(); } if (!silent) { gclog_or_tty->print("remset "); diff --git a/hotspot/src/share/vm/memory/genCollectedHeap.hpp b/hotspot/src/share/vm/memory/genCollectedHeap.hpp index 5f35dec41e2..557d0a96095 100644 --- a/hotspot/src/share/vm/memory/genCollectedHeap.hpp +++ b/hotspot/src/share/vm/memory/genCollectedHeap.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -357,7 +357,7 @@ public: void prepare_for_verify(); // Override. - void verify(bool allow_dirty, bool silent, VerifyOption option); + void verify(bool silent, VerifyOption option); // Override. virtual void print_on(outputStream* st) const; diff --git a/hotspot/src/share/vm/memory/generation.cpp b/hotspot/src/share/vm/memory/generation.cpp index 75a373285d9..13e08586ba5 100644 --- a/hotspot/src/share/vm/memory/generation.cpp +++ b/hotspot/src/share/vm/memory/generation.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -696,8 +696,8 @@ void OneContigSpaceCardGeneration::record_spaces_top() { the_space()->set_top_for_allocations(); } -void OneContigSpaceCardGeneration::verify(bool allow_dirty) { - the_space()->verify(allow_dirty); +void OneContigSpaceCardGeneration::verify() { + the_space()->verify(); } void OneContigSpaceCardGeneration::print_on(outputStream* st) const { diff --git a/hotspot/src/share/vm/memory/generation.hpp b/hotspot/src/share/vm/memory/generation.hpp index 61fcf187cfd..5c62e8bf2de 100644 --- a/hotspot/src/share/vm/memory/generation.hpp +++ b/hotspot/src/share/vm/memory/generation.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -599,7 +599,7 @@ class Generation: public CHeapObj { virtual void print() const; virtual void print_on(outputStream* st) const; - virtual void verify(bool allow_dirty) = 0; + virtual void verify() = 0; struct StatRecord { int invocations; @@ -753,7 +753,7 @@ class OneContigSpaceCardGeneration: public CardGeneration { virtual void record_spaces_top(); - virtual void verify(bool allow_dirty); + virtual void verify(); virtual void print_on(outputStream* st) const; }; diff --git a/hotspot/src/share/vm/memory/space.cpp b/hotspot/src/share/vm/memory/space.cpp index 7f3aceb3224..b096775cfcb 100644 --- a/hotspot/src/share/vm/memory/space.cpp +++ b/hotspot/src/share/vm/memory/space.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -531,7 +531,7 @@ void OffsetTableContigSpace::print_on(outputStream* st) const { bottom(), top(), _offsets.threshold(), end()); } -void ContiguousSpace::verify(bool allow_dirty) const { +void ContiguousSpace::verify() const { HeapWord* p = bottom(); HeapWord* t = top(); HeapWord* prev_p = NULL; @@ -965,27 +965,12 @@ OffsetTableContigSpace::OffsetTableContigSpace(BlockOffsetSharedArray* sharedOff initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle); } - -class VerifyOldOopClosure : public OopClosure { - public: - oop _the_obj; - bool _allow_dirty; - void do_oop(oop* p) { - _the_obj->verify_old_oop(p, _allow_dirty); - } - void do_oop(narrowOop* p) { - _the_obj->verify_old_oop(p, _allow_dirty); - } -}; - #define OBJ_SAMPLE_INTERVAL 0 #define BLOCK_SAMPLE_INTERVAL 100 -void OffsetTableContigSpace::verify(bool allow_dirty) const { +void OffsetTableContigSpace::verify() const { HeapWord* p = bottom(); HeapWord* prev_p = NULL; - VerifyOldOopClosure blk; // Does this do anything? - blk._allow_dirty = allow_dirty; int objs = 0; int blocks = 0; @@ -1007,8 +992,6 @@ void OffsetTableContigSpace::verify(bool allow_dirty) const { if (objs == OBJ_SAMPLE_INTERVAL) { oop(p)->verify(); - blk._the_obj = oop(p); - oop(p)->oop_iterate(&blk); objs = 0; } else { objs++; diff --git a/hotspot/src/share/vm/memory/space.hpp b/hotspot/src/share/vm/memory/space.hpp index 2d718c2a5c8..33b56cf0651 100644 --- a/hotspot/src/share/vm/memory/space.hpp +++ b/hotspot/src/share/vm/memory/space.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -306,7 +306,7 @@ class Space: public CHeapObj { } // Debugging - virtual void verify(bool allow_dirty) const = 0; + virtual void verify() const = 0; }; // A MemRegionClosure (ResourceObj) whose "do_MemRegion" function applies an @@ -948,7 +948,7 @@ class ContiguousSpace: public CompactibleSpace { } // Debugging - virtual void verify(bool allow_dirty) const; + virtual void verify() const; // Used to increase collection frequency. "factor" of 0 means entire // space. 
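With VerifyOldOopClosure removed, the space verifiers that remain all reduce to the same linear object walk. A condensed sketch of that shared loop, pieced together from the ContiguousSpace::verify fragments above (simplified; the guarantee message is illustrative):

  void ContiguousSpace::verify() const {
    HeapWord* p = bottom();
    HeapWord* t = top();
    HeapWord* prev_p = NULL;
    while (p < t) {
      oop(p)->verify();      // per-object checks; the dead old-oop hook is gone
      prev_p = p;
      p += oop(p)->size();   // advance by the object's size in words
    }
    guarantee(p == t, "end of last object must match end of space");
  }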
@@ -1100,7 +1100,7 @@ class OffsetTableContigSpace: public ContiguousSpace { virtual void print_on(outputStream* st) const; // Debugging - void verify(bool allow_dirty) const; + void verify() const; // Shared space support void serialize_block_offset_array_offsets(SerializeOopClosure* soc); diff --git a/hotspot/src/share/vm/memory/universe.cpp b/hotspot/src/share/vm/memory/universe.cpp index 2651f4d8e30..9282828a7e9 100644 --- a/hotspot/src/share/vm/memory/universe.cpp +++ b/hotspot/src/share/vm/memory/universe.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1326,7 +1326,7 @@ void Universe::print_heap_after_gc(outputStream* st, bool ignore_extended) { st->print_cr("}"); } -void Universe::verify(bool allow_dirty, bool silent, VerifyOption option) { +void Universe::verify(bool silent, VerifyOption option) { if (SharedSkipVerify) { return; } @@ -1350,7 +1350,7 @@ void Universe::verify(bool allow_dirty, bool silent, VerifyOption option) { if (!silent) gclog_or_tty->print("[Verifying "); if (!silent) gclog_or_tty->print("threads "); Threads::verify(); - heap()->verify(allow_dirty, silent, option); + heap()->verify(silent, option); if (!silent) gclog_or_tty->print("syms "); SymbolTable::verify(); diff --git a/hotspot/src/share/vm/memory/universe.hpp b/hotspot/src/share/vm/memory/universe.hpp index 5c1587ee44a..073a5c2ca75 100644 --- a/hotspot/src/share/vm/memory/universe.hpp +++ b/hotspot/src/share/vm/memory/universe.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -412,7 +412,7 @@ class Universe: AllStatic { // Debugging static bool verify_in_progress() { return _verify_in_progress; } - static void verify(bool allow_dirty = true, bool silent = false, + static void verify(bool silent = false, VerifyOption option = VerifyOption_Default ); static int verify_count() { return _verify_count; } // The default behavior is to call print_on() on gclog_or_tty. diff --git a/hotspot/src/share/vm/oops/instanceRefKlass.cpp b/hotspot/src/share/vm/oops/instanceRefKlass.cpp index 71a7a1fcff3..1cabe507eae 100644 --- a/hotspot/src/share/vm/oops/instanceRefKlass.cpp +++ b/hotspot/src/share/vm/oops/instanceRefKlass.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -497,36 +497,12 @@ void instanceRefKlass::oop_verify_on(oop obj, outputStream* st) { if (referent != NULL) { guarantee(referent->is_oop(), "referent field heap failed"); - if (gch != NULL && !gch->is_in_young(obj)) { - // We do a specific remembered set check here since the referent - // field is not part of the oop mask and therefore skipped by the - // regular verify code. 
- if (UseCompressedOops) { - narrowOop* referent_addr = (narrowOop*)java_lang_ref_Reference::referent_addr(obj); - obj->verify_old_oop(referent_addr, true); - } else { - oop* referent_addr = (oop*)java_lang_ref_Reference::referent_addr(obj); - obj->verify_old_oop(referent_addr, true); - } - } } // Verify next field oop next = java_lang_ref_Reference::next(obj); if (next != NULL) { guarantee(next->is_oop(), "next field verify failed"); guarantee(next->is_instanceRef(), "next field verify failed"); - if (gch != NULL && !gch->is_in_young(obj)) { - // We do a specific remembered set check here since the next field is - // not part of the oop mask and therefore skipped by the regular - // verify code. - if (UseCompressedOops) { - narrowOop* next_addr = (narrowOop*)java_lang_ref_Reference::next_addr(obj); - obj->verify_old_oop(next_addr, true); - } else { - oop* next_addr = (oop*)java_lang_ref_Reference::next_addr(obj); - obj->verify_old_oop(next_addr, true); - } - } } } diff --git a/hotspot/src/share/vm/oops/klass.cpp b/hotspot/src/share/vm/oops/klass.cpp index 84abea68a80..8b21fdd99ed 100644 --- a/hotspot/src/share/vm/oops/klass.cpp +++ b/hotspot/src/share/vm/oops/klass.cpp @@ -581,14 +581,6 @@ void Klass::oop_verify_on(oop obj, outputStream* st) { guarantee(obj->klass()->is_klass(), "klass field is not a klass"); } - -void Klass::oop_verify_old_oop(oop obj, oop* p, bool allow_dirty) { - /* $$$ I think this functionality should be handled by verification of - RememberedSet::verify_old_oop(obj, p, allow_dirty, false); - the card table. */ -} -void Klass::oop_verify_old_oop(oop obj, narrowOop* p, bool allow_dirty) { } - #ifndef PRODUCT void Klass::verify_vtable_index(int i) { diff --git a/hotspot/src/share/vm/oops/klass.hpp b/hotspot/src/share/vm/oops/klass.hpp index 1b26932e8fa..bcbd4e736f4 100644 --- a/hotspot/src/share/vm/oops/klass.hpp +++ b/hotspot/src/share/vm/oops/klass.hpp @@ -805,8 +805,6 @@ class Klass : public Klass_vtbl { // Verification virtual const char* internal_name() const = 0; virtual void oop_verify_on(oop obj, outputStream* st); - virtual void oop_verify_old_oop(oop obj, oop* p, bool allow_dirty); - virtual void oop_verify_old_oop(oop obj, narrowOop* p, bool allow_dirty); // tells whether obj is partially constructed (gc during class loading) virtual bool oop_partially_loaded(oop obj) const { return false; } virtual void oop_set_partially_loaded(oop obj) {}; diff --git a/hotspot/src/share/vm/oops/objArrayKlass.cpp b/hotspot/src/share/vm/oops/objArrayKlass.cpp index 79b1df24ff4..c152664bf58 100644 --- a/hotspot/src/share/vm/oops/objArrayKlass.cpp +++ b/hotspot/src/share/vm/oops/objArrayKlass.cpp @@ -545,10 +545,3 @@ void objArrayKlass::oop_verify_on(oop obj, outputStream* st) { guarantee(oa->obj_at(index)->is_oop_or_null(), "should be oop"); } } - -void objArrayKlass::oop_verify_old_oop(oop obj, oop* p, bool allow_dirty) { - /* $$$ move into remembered set verification? - RememberedSet::verify_old_oop(obj, p, allow_dirty, true); - */ -} -void objArrayKlass::oop_verify_old_oop(oop obj, narrowOop* p, bool allow_dirty) {} diff --git a/hotspot/src/share/vm/oops/objArrayKlass.hpp b/hotspot/src/share/vm/oops/objArrayKlass.hpp index 44717ec6954..ebf6a9e4187 100644 --- a/hotspot/src/share/vm/oops/objArrayKlass.hpp +++ b/hotspot/src/share/vm/oops/objArrayKlass.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -144,8 +144,6 @@ class objArrayKlass : public arrayKlass { // Verification const char* internal_name() const; void oop_verify_on(oop obj, outputStream* st); - void oop_verify_old_oop(oop obj, oop* p, bool allow_dirty); - void oop_verify_old_oop(oop obj, narrowOop* p, bool allow_dirty); }; #endif // SHARE_VM_OOPS_OBJARRAYKLASS_HPP diff --git a/hotspot/src/share/vm/oops/oop.cpp b/hotspot/src/share/vm/oops/oop.cpp index f836fb792d8..61cf38d313f 100644 --- a/hotspot/src/share/vm/oops/oop.cpp +++ b/hotspot/src/share/vm/oops/oop.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -107,16 +107,6 @@ void oopDesc::verify() { verify_on(tty); } - -// XXX verify_old_oop doesn't do anything (should we remove?) -void oopDesc::verify_old_oop(oop* p, bool allow_dirty) { - blueprint()->oop_verify_old_oop(this, p, allow_dirty); -} - -void oopDesc::verify_old_oop(narrowOop* p, bool allow_dirty) { - blueprint()->oop_verify_old_oop(this, p, allow_dirty); -} - bool oopDesc::partially_loaded() { return blueprint()->oop_partially_loaded(this); } diff --git a/hotspot/src/share/vm/oops/oop.hpp b/hotspot/src/share/vm/oops/oop.hpp index 4d2f4537024..694d92113a1 100644 --- a/hotspot/src/share/vm/oops/oop.hpp +++ b/hotspot/src/share/vm/oops/oop.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -293,8 +293,6 @@ class oopDesc { // verification operations void verify_on(outputStream* st); void verify(); - void verify_old_oop(oop* p, bool allow_dirty); - void verify_old_oop(narrowOop* p, bool allow_dirty); // tells whether this oop is partially constructed (gc during class loading) bool partially_loaded(); diff --git a/hotspot/src/share/vm/runtime/vmThread.cpp b/hotspot/src/share/vm/runtime/vmThread.cpp index d4fced26dcb..9e421488b26 100644 --- a/hotspot/src/share/vm/runtime/vmThread.cpp +++ b/hotspot/src/share/vm/runtime/vmThread.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -304,7 +304,7 @@ void VMThread::run() { os::check_heap(); // Silent verification so as not to pollute normal output, // unless we really asked for it. 
- Universe::verify(true, !(PrintGCDetails || Verbose)); + Universe::verify(!(PrintGCDetails || Verbose)); } CompileBroker::set_should_block(); From 99802ac63d8b65e717654a83cce42b7c9433122c Mon Sep 17 00:00:00 2001 From: Stefan Karlsson Date: Wed, 11 Apr 2012 16:18:45 +0200 Subject: [PATCH 10/15] 7160613: VerifyRememberedSets doesn't work with CompressedOops Use load_decode_heap_oop instead of load_decode_heap_oop_not_null Reviewed-by: tonyp, brutisso --- .../gc_implementation/parallelScavenge/cardTableExtension.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp index 6e42facf29e..b5ee675b17b 100644 --- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp +++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp @@ -42,7 +42,7 @@ class CheckForUnmarkedOops : public OopClosure { protected: template void do_oop_work(T* p) { - oop obj = oopDesc::load_decode_heap_oop_not_null(p); + oop obj = oopDesc::load_decode_heap_oop(p); if (_young_gen->is_in_reserved(obj) && !_card_table->addr_is_marked_imprecise(p)) { // Don't overwrite the first missing card mark From 01ea4199c71e6da26cedf2ec1fd954980ce09419 Mon Sep 17 00:00:00 2001 From: Antonios Printezis Date: Wed, 18 Apr 2012 07:21:15 -0400 Subject: [PATCH 11/15] 7157073: G1: type change size_t -> uint for region counts / indexes Change the type of fields / variables / etc. that represent region counts and indices from size_t to uint. Reviewed-by: iveresov, brutisso, jmasa, jwilhelm --- .../gc_implementation/g1/HeapRegionSeq.java | 4 +- .../g1/HeapRegionSetBase.java | 4 +- .../g1/collectionSetChooser.cpp | 23 ++- .../g1/collectionSetChooser.hpp | 8 +- .../gc_implementation/g1/concurrentMark.cpp | 66 ++++---- .../gc_implementation/g1/concurrentMark.hpp | 2 +- .../g1/concurrentMark.inline.hpp | 2 +- .../vm/gc_implementation/g1/g1AllocRegion.cpp | 6 +- .../vm/gc_implementation/g1/g1AllocRegion.hpp | 6 +- .../gc_implementation/g1/g1CollectedHeap.cpp | 152 +++++++++--------- .../gc_implementation/g1/g1CollectedHeap.hpp | 46 +++--- .../g1/g1CollectorPolicy.cpp | 127 ++++++++------- .../g1/g1CollectorPolicy.hpp | 93 ++++++----- .../vm/gc_implementation/g1/g1ErgoVerbose.hpp | 4 +- .../g1/g1MonitoringSupport.cpp | 24 +-- .../g1/g1MonitoringSupport.hpp | 4 +- .../vm/gc_implementation/g1/heapRegion.cpp | 10 +- .../vm/gc_implementation/g1/heapRegion.hpp | 11 +- .../gc_implementation/g1/heapRegionRemSet.cpp | 45 +++--- .../gc_implementation/g1/heapRegionRemSet.hpp | 8 +- .../vm/gc_implementation/g1/heapRegionSeq.cpp | 87 +++++----- .../vm/gc_implementation/g1/heapRegionSeq.hpp | 36 ++--- .../g1/heapRegionSeq.inline.hpp | 10 +- .../vm/gc_implementation/g1/heapRegionSet.cpp | 51 +++--- .../vm/gc_implementation/g1/heapRegionSet.hpp | 22 +-- .../g1/heapRegionSet.inline.hpp | 8 +- .../vm/gc_implementation/g1/sparsePRT.cpp | 7 +- .../vm/gc_implementation/g1/vmStructs_g1.hpp | 6 +- 28 files changed, 432 insertions(+), 440 deletions(-) diff --git a/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/HeapRegionSeq.java b/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/HeapRegionSeq.java index d0da28bdd8a..5bd7f443de6 100644 --- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/HeapRegionSeq.java +++
b/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/HeapRegionSeq.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -42,7 +42,7 @@ import sun.jvm.hotspot.types.TypeDataBase; public class HeapRegionSeq extends VMObject { // HeapRegion** _regions; static private AddressField regionsField; - // size_t _length; + // uint _length; static private CIntegerField lengthField; static { diff --git a/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/HeapRegionSetBase.java b/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/HeapRegionSetBase.java index 2fbdce7f06c..4ac8f72c25f 100644 --- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/HeapRegionSetBase.java +++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/HeapRegionSetBase.java @@ -40,9 +40,9 @@ import sun.jvm.hotspot.types.TypeDataBase; // Mirror class for HeapRegionSetBase. Represents a group of regions. public class HeapRegionSetBase extends VMObject { - // size_t _length; + // uint _length; static private CIntegerField lengthField; - // size_t _region_num; + // uint _region_num; static private CIntegerField regionNumField; // size_t _total_used_bytes; static private CIntegerField totalUsedBytesField; diff --git a/hotspot/src/share/vm/gc_implementation/g1/collectionSetChooser.cpp b/hotspot/src/share/vm/gc_implementation/g1/collectionSetChooser.cpp index 3a383267b08..29a03275c61 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/collectionSetChooser.cpp +++ b/hotspot/src/share/vm/gc_implementation/g1/collectionSetChooser.cpp @@ -273,7 +273,7 @@ void CollectionSetChooser::sortMarkedHeapRegions() { assert(verify(), "CSet chooser verification"); } -size_t CollectionSetChooser::calcMinOldCSetLength() { +uint CollectionSetChooser::calcMinOldCSetLength() { // The min old CSet region bound is based on the maximum desired // number of mixed GCs after a cycle. I.e., even if some old regions // look expensive, we should add them to the CSet anyway to make @@ -291,10 +291,10 @@ size_t CollectionSetChooser::calcMinOldCSetLength() { if (result * gc_num < region_num) { result += 1; } - return result; + return (uint) result; } -size_t CollectionSetChooser::calcMaxOldCSetLength() { +uint CollectionSetChooser::calcMaxOldCSetLength() { // The max old CSet region bound is based on the threshold expressed // as a percentage of the heap size. 
I.e., it should bound the // number of old regions added to the CSet irrespective of how many @@ -308,7 +308,7 @@ size_t CollectionSetChooser::calcMaxOldCSetLength() { if (100 * result < region_num * perc) { result += 1; } - return result; + return (uint) result; } void CollectionSetChooser::addMarkedHeapRegion(HeapRegion* hr) { @@ -321,10 +321,10 @@ void CollectionSetChooser::addMarkedHeapRegion(HeapRegion* hr) { hr->calc_gc_efficiency(); } -void CollectionSetChooser::prepareForAddMarkedHeapRegionsPar(size_t n_regions, - size_t chunkSize) { +void CollectionSetChooser::prepareForAddMarkedHeapRegionsPar(uint n_regions, + uint chunkSize) { _first_par_unreserved_idx = 0; - int n_threads = ParallelGCThreads; + uint n_threads = (uint) ParallelGCThreads; if (UseDynamicNumberOfGCThreads) { assert(G1CollectedHeap::heap()->workers()->active_workers() > 0, "Should have been set earlier"); @@ -335,12 +335,11 @@ void CollectionSetChooser::prepareForAddMarkedHeapRegionsPar(size_t n_regions, n_threads = MAX2(G1CollectedHeap::heap()->workers()->active_workers(), 1U); } - size_t max_waste = n_threads * chunkSize; + uint max_waste = n_threads * chunkSize; // it should be aligned with respect to chunkSize - size_t aligned_n_regions = - (n_regions + (chunkSize - 1)) / chunkSize * chunkSize; - assert( aligned_n_regions % chunkSize == 0, "should be aligned" ); - _markedRegions.at_put_grow((int)(aligned_n_regions + max_waste - 1), NULL); + uint aligned_n_regions = (n_regions + chunkSize - 1) / chunkSize * chunkSize; + assert(aligned_n_regions % chunkSize == 0, "should be aligned"); + _markedRegions.at_put_grow((int) (aligned_n_regions + max_waste - 1), NULL); } jint CollectionSetChooser::getParMarkedHeapRegionChunk(jint n_regions) { diff --git a/hotspot/src/share/vm/gc_implementation/g1/collectionSetChooser.hpp b/hotspot/src/share/vm/gc_implementation/g1/collectionSetChooser.hpp index 3bf90ebff30..caf18206066 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/collectionSetChooser.hpp +++ b/hotspot/src/share/vm/gc_implementation/g1/collectionSetChooser.hpp @@ -150,18 +150,18 @@ public: // Calculate the minimum number of old regions we'll add to the CSet // during a mixed GC. - size_t calcMinOldCSetLength(); + uint calcMinOldCSetLength(); // Calculate the maximum number of old regions we'll add to the CSet // during a mixed GC. - size_t calcMaxOldCSetLength(); + uint calcMaxOldCSetLength(); // Serial version. void addMarkedHeapRegion(HeapRegion *hr); // Must be called before calls to getParMarkedHeapRegionChunk. // "n_regions" is the number of regions, "chunkSize" the chunk size. - void prepareForAddMarkedHeapRegionsPar(size_t n_regions, size_t chunkSize); + void prepareForAddMarkedHeapRegionsPar(uint n_regions, uint chunkSize); // Returns the first index in a contiguous chunk of "n_regions" indexes // that the calling thread has reserved. These must be set by the // calling thread using "setMarkedHeapRegion" (to NULL if necessary). @@ -176,7 +176,7 @@ public: void clearMarkedHeapRegions(); // Return the number of candidate regions that remain to be collected. - size_t remainingRegions() { return _length - _curr_index; } + uint remainingRegions() { return (uint) (_length - _curr_index); } // Determine whether the CSet chooser has more candidate regions or not. 
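  // A worked instance of the two ceiling divisions in
  // calcMinOldCSetLength() / calcMaxOldCSetLength() above (the numbers
  // are illustrative, not defaults): with 1000 candidate regions and a
  // target of 8 mixed GCs, calcMinOldCSetLength() gives
  // ceil(1000 / 8) = 125, so each mixed CSet takes at least 125 old
  // regions; with 2048 heap regions and a 10% threshold,
  // calcMaxOldCSetLength() gives ceil(2048 * 10 / 100) = 205 regions as
  // the upper bound.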
bool isEmpty() { return remainingRegions() == 0; } diff --git a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp index 04bd4783a08..15eb1b6ea56 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp +++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp @@ -403,8 +403,7 @@ uint ConcurrentMark::scale_parallel_threads(uint n_par_threads) { return MAX2((n_par_threads + 2) / 4, 1U); } -ConcurrentMark::ConcurrentMark(ReservedSpace rs, - int max_regions) : +ConcurrentMark::ConcurrentMark(ReservedSpace rs, uint max_regions) : _markBitMap1(rs, MinObjAlignment - 1), _markBitMap2(rs, MinObjAlignment - 1), @@ -415,7 +414,7 @@ ConcurrentMark::ConcurrentMark(ReservedSpace rs, _cleanup_sleep_factor(0.0), _cleanup_task_overhead(1.0), _cleanup_list("Cleanup List"), - _region_bm(max_regions, false /* in_resource_area*/), + _region_bm((BitMap::idx_t) max_regions, false /* in_resource_area*/), _card_bm((rs.size() + CardTableModRefBS::card_size - 1) >> CardTableModRefBS::card_shift, false /* in_resource_area*/), @@ -497,7 +496,7 @@ ConcurrentMark::ConcurrentMark(ReservedSpace rs, _task_queues->register_queue(i, task_queue); _count_card_bitmaps[i] = BitMap(card_bm_size, false); - _count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, max_regions); + _count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, (size_t) max_regions); _tasks[i] = new CMTask(i, this, _count_marked_bytes[i], @@ -1228,18 +1227,17 @@ public: void set_bit_for_region(HeapRegion* hr) { assert(!hr->continuesHumongous(), "should have filtered those out"); - size_t index = hr->hrs_index(); + BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index(); if (!hr->startsHumongous()) { // Normal (non-humongous) case: just set the bit. - _region_bm->par_at_put((BitMap::idx_t) index, true); + _region_bm->par_at_put(index, true); } else { // Starts humongous case: calculate how many regions are part of // this humongous region and then set the bit range. G1CollectedHeap* g1h = G1CollectedHeap::heap(); HeapRegion *last_hr = g1h->heap_region_containing_raw(hr->end() - 1); - size_t end_index = last_hr->hrs_index() + 1; - _region_bm->par_at_put_range((BitMap::idx_t) index, - (BitMap::idx_t) end_index, true); + BitMap::idx_t end_index = (BitMap::idx_t) last_hr->hrs_index() + 1; + _region_bm->par_at_put_range(index, end_index, true); } } @@ -1418,7 +1416,7 @@ public: // Verify that _top_at_conc_count == ntams if (hr->top_at_conc_mark_count() != hr->next_top_at_mark_start()) { if (_verbose) { - gclog_or_tty->print_cr("Region " SIZE_FORMAT ": top at conc count incorrect: " + gclog_or_tty->print_cr("Region %u: top at conc count incorrect: " "expected " PTR_FORMAT ", actual: " PTR_FORMAT, hr->hrs_index(), hr->next_top_at_mark_start(), hr->top_at_conc_mark_count()); @@ -1434,7 +1432,7 @@ public: // we have missed accounting some objects during the actual marking. if (exp_marked_bytes > act_marked_bytes) { if (_verbose) { - gclog_or_tty->print_cr("Region " SIZE_FORMAT ": marked bytes mismatch: " + gclog_or_tty->print_cr("Region %u: marked bytes mismatch: " "expected: " SIZE_FORMAT ", actual: " SIZE_FORMAT, hr->hrs_index(), exp_marked_bytes, act_marked_bytes); } @@ -1445,15 +1443,16 @@ public: // (which was just calculated) region bit maps. // We're not OK if the bit in the calculated expected region // bitmap is set and the bit in the actual region bitmap is not. 
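// A sketch of set_bit_for_region() above, with std::vector<bool> standing in
// for HotSpot's BitMap: an ordinary region sets a single bit, while a
// "starts humongous" region sets the half-open range that also covers its
// "continues humongous" tail. Names and types are illustrative.
#include <vector>

static void set_bits_for_region(std::vector<bool>& region_bm,
                                unsigned int index,
                                unsigned int num_continues_regions) {
  if (num_continues_regions == 0) {
    region_bm[index] = true;                         // normal case: one bit
  } else {
    unsigned int end_index = index + num_continues_regions + 1; // exclusive
    for (unsigned int i = index; i < end_index; ++i) {
      region_bm[i] = true;             // the par_at_put_range(...) equivalent
    }
  }
}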
- BitMap::idx_t index = (BitMap::idx_t)hr->hrs_index(); + BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index(); bool expected = _exp_region_bm->at(index); bool actual = _region_bm->at(index); if (expected && !actual) { if (_verbose) { - gclog_or_tty->print_cr("Region " SIZE_FORMAT ": region bitmap mismatch: " - "expected: %d, actual: %d", - hr->hrs_index(), expected, actual); + gclog_or_tty->print_cr("Region %u: region bitmap mismatch: " + "expected: %s, actual: %s", + hr->hrs_index(), + BOOL_TO_STR(expected), BOOL_TO_STR(actual)); } failures += 1; } @@ -1471,9 +1470,10 @@ public: if (expected && !actual) { if (_verbose) { - gclog_or_tty->print_cr("Region " SIZE_FORMAT ": card bitmap mismatch at " SIZE_FORMAT ": " - "expected: %d, actual: %d", - hr->hrs_index(), i, expected, actual); + gclog_or_tty->print_cr("Region %u: card bitmap mismatch at " SIZE_FORMAT ": " + "expected: %s, actual: %s", + hr->hrs_index(), i, + BOOL_TO_STR(expected), BOOL_TO_STR(actual)); } failures += 1; } @@ -1603,18 +1603,17 @@ class FinalCountDataUpdateClosure: public HeapRegionClosure { void set_bit_for_region(HeapRegion* hr) { assert(!hr->continuesHumongous(), "should have filtered those out"); - size_t index = hr->hrs_index(); + BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index(); if (!hr->startsHumongous()) { // Normal (non-humongous) case: just set the bit. - _region_bm->par_set_bit((BitMap::idx_t) index); + _region_bm->par_set_bit(index); } else { // Starts humongous case: calculate how many regions are part of // this humongous region and then set the bit range. G1CollectedHeap* g1h = G1CollectedHeap::heap(); HeapRegion *last_hr = g1h->heap_region_containing_raw(hr->end() - 1); - size_t end_index = last_hr->hrs_index() + 1; - _region_bm->par_at_put_range((BitMap::idx_t) index, - (BitMap::idx_t) end_index, true); + BitMap::idx_t end_index = (BitMap::idx_t) last_hr->hrs_index() + 1; + _region_bm->par_at_put_range(index, end_index, true); } } @@ -1718,8 +1717,8 @@ public: _n_workers = 1; } - _live_bytes = NEW_C_HEAP_ARRAY(size_t, _n_workers); - _used_bytes = NEW_C_HEAP_ARRAY(size_t, _n_workers); + _live_bytes = NEW_C_HEAP_ARRAY(size_t, (size_t) _n_workers); + _used_bytes = NEW_C_HEAP_ARRAY(size_t, (size_t) _n_workers); } ~G1ParFinalCountTask() { @@ -1768,7 +1767,7 @@ class G1NoteEndOfConcMarkClosure : public HeapRegionClosure { G1CollectedHeap* _g1; int _worker_num; size_t _max_live_bytes; - size_t _regions_claimed; + uint _regions_claimed; size_t _freed_bytes; FreeRegionList* _local_cleanup_list; OldRegionSet* _old_proxy_set; @@ -1821,7 +1820,7 @@ public: } size_t max_live_bytes() { return _max_live_bytes; } - size_t regions_claimed() { return _regions_claimed; } + uint regions_claimed() { return _regions_claimed; } double claimed_region_time_sec() { return _claimed_region_time; } double max_region_time_sec() { return _max_region_time; } }; @@ -2146,7 +2145,7 @@ void ConcurrentMark::completeCleanup() { if (G1ConcRegionFreeingVerbose) { gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : " - "cleanup list has "SIZE_FORMAT" entries", + "cleanup list has %u entries", _cleanup_list.length()); } @@ -2168,9 +2167,8 @@ void ConcurrentMark::completeCleanup() { _cleanup_list.is_empty()) { if (G1ConcRegionFreeingVerbose) { gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : " - "appending "SIZE_FORMAT" entries to the " - "secondary_free_list, clean list still has " - SIZE_FORMAT" entries", + "appending %u entries to the secondary_free_list, " + "cleanup list still has %u entries", 
tmp_free_list.length(), _cleanup_list.length()); } @@ -3140,7 +3138,7 @@ class AggregateCountDataHRClosure: public HeapRegionClosure { assert(limit_idx <= end_idx, "or else use atomics"); // Aggregate the "stripe" in the count data associated with hr. - size_t hrs_index = hr->hrs_index(); + uint hrs_index = hr->hrs_index(); size_t marked_bytes = 0; for (int i = 0; (size_t)i < _max_task_num; i += 1) { @@ -3248,7 +3246,7 @@ void ConcurrentMark::clear_all_count_data() { // of the final counting task. _region_bm.clear(); - size_t max_regions = _g1h->max_regions(); + uint max_regions = _g1h->max_regions(); assert(_max_task_num != 0, "uninitialized"); for (int i = 0; (size_t) i < _max_task_num; i += 1) { @@ -3258,7 +3256,7 @@ void ConcurrentMark::clear_all_count_data() { assert(task_card_bm->size() == _card_bm.size(), "size mismatch"); assert(marked_bytes_array != NULL, "uninitialized"); - memset(marked_bytes_array, 0, (max_regions * sizeof(size_t))); + memset(marked_bytes_array, 0, (size_t) max_regions * sizeof(size_t)); task_card_bm->clear(); } } diff --git a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp index deb471adbec..ac1eff7aa40 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp +++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp @@ -636,7 +636,7 @@ public: return _task_queues->steal(task_num, hash_seed, obj); } - ConcurrentMark(ReservedSpace rs, int max_regions); + ConcurrentMark(ReservedSpace rs, uint max_regions); ~ConcurrentMark(); ConcurrentMarkThread* cmThread() { return _cmThread; } diff --git a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp index 27c3411051d..aca12a885e1 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp +++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp @@ -49,7 +49,7 @@ inline void ConcurrentMark::count_region(MemRegion mr, HeapRegion* hr, HeapWord* start = mr.start(); HeapWord* last = mr.last(); size_t region_size_bytes = mr.byte_size(); - size_t index = hr->hrs_index(); + uint index = hr->hrs_index(); assert(!hr->continuesHumongous(), "should not be HC region"); assert(hr == g1h->heap_region_containing(start), "sanity"); diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1AllocRegion.cpp b/hotspot/src/share/vm/gc_implementation/g1/g1AllocRegion.cpp index ca31817197b..bb02d6acead 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/g1AllocRegion.cpp +++ b/hotspot/src/share/vm/gc_implementation/g1/g1AllocRegion.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* * This code is free software; you can redistribute it and/or modify it @@ -140,7 +140,7 @@ HeapWord* G1AllocRegion::new_alloc_region_and_allocate(size_t word_size, } void G1AllocRegion::fill_in_ext_msg(ar_ext_msg* msg, const char* message) { - msg->append("[%s] %s c: "SIZE_FORMAT" b: %s r: "PTR_FORMAT" u: "SIZE_FORMAT, + msg->append("[%s] %s c: %u b: %s r: "PTR_FORMAT" u: "SIZE_FORMAT, _name, message, _count, BOOL_TO_STR(_bot_updates), _alloc_region, _used_bytes_before); } @@ -215,7 +215,7 @@ void G1AllocRegion::trace(const char* str, size_t word_size, HeapWord* result) { jio_snprintf(rest_buffer, buffer_length, ""); } - tty->print_cr("[%s] "SIZE_FORMAT" %s : %s %s", + tty->print_cr("[%s] %u %s : %s %s", _name, _count, hr_buffer, str, rest_buffer); } } diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1AllocRegion.hpp b/hotspot/src/share/vm/gc_implementation/g1/g1AllocRegion.hpp index caf7ff9888f..1f2c6cbdc2f 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/g1AllocRegion.hpp +++ b/hotspot/src/share/vm/gc_implementation/g1/g1AllocRegion.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -64,7 +64,7 @@ private: // the region that is re-used using the set() method. This count can // be used in any heuristics that might want to bound how many // distinct regions this object can use during an active interval. - size_t _count; + uint _count; // When we set up a new active region we save its used bytes in this // field so that, when we retire it, we can calculate how much space @@ -136,7 +136,7 @@ public: return (_alloc_region == _dummy_region) ? NULL : _alloc_region; } - size_t count() { return _count; } + uint count() { return _count; } // The following two are the building blocks for the allocation method.
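// A sketch of why the format strings above change together with the field:
// once _count narrows from size_t to uint, %u is the matching printf
// conversion; leaving SIZE_FORMAT in place would read the wrong width off
// the va_list on LP64. %zu stands in for SIZE_FORMAT; the values are made up.
#include <cstdio>
#include <cstddef>

static int print_count_demo() {
  unsigned int count = 7;        // plays the role of _count after this patch
  size_t used_bytes = 123456;    // stays size_t and keeps a size_t conversion
  return std::printf("c: %u u: %zu\n", count, used_bytes);
}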
diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp index bcdf53f486a..924296718e8 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp +++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @@ -234,7 +234,7 @@ void YoungList::empty_list() { bool YoungList::check_list_well_formed() { bool ret = true; - size_t length = 0; + uint length = 0; HeapRegion* curr = _head; HeapRegion* last = NULL; while (curr != NULL) { @@ -253,7 +253,7 @@ bool YoungList::check_list_well_formed() { if (!ret) { gclog_or_tty->print_cr("### YOUNG LIST seems not well formed!"); - gclog_or_tty->print_cr("### list has %d entries, _length is %d", + gclog_or_tty->print_cr("### list has %u entries, _length is %u", length, _length); } @@ -264,7 +264,7 @@ bool YoungList::check_list_empty(bool check_sample) { bool ret = true; if (_length != 0) { - gclog_or_tty->print_cr("### YOUNG LIST should have 0 length, not %d", + gclog_or_tty->print_cr("### YOUNG LIST should have 0 length, not %u", _length); ret = false; } @@ -337,8 +337,7 @@ YoungList::reset_auxilary_lists() { _g1h->g1_policy()->add_region_to_incremental_cset_rhs(curr); young_index_in_cset += 1; } - assert((size_t) young_index_in_cset == _survivor_length, - "post-condition"); + assert((uint) young_index_in_cset == _survivor_length, "post-condition"); _g1h->g1_policy()->note_stop_adding_survivor_regions(); _head = _survivor_head; @@ -533,7 +532,7 @@ G1CollectedHeap::new_region_try_secondary_free_list() { if (!_secondary_free_list.is_empty()) { if (G1ConcRegionFreeingVerbose) { gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : " - "secondary_free_list has "SIZE_FORMAT" entries", + "secondary_free_list has %u entries", _secondary_free_list.length()); } // It looks as if there are free regions available on the @@ -619,12 +618,12 @@ HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool do_expand) { return res; } -size_t G1CollectedHeap::humongous_obj_allocate_find_first(size_t num_regions, - size_t word_size) { +uint G1CollectedHeap::humongous_obj_allocate_find_first(uint num_regions, + size_t word_size) { assert(isHumongous(word_size), "word_size should be humongous"); assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition"); - size_t first = G1_NULL_HRS_INDEX; + uint first = G1_NULL_HRS_INDEX; if (num_regions == 1) { // Only one region to allocate, no need to go through the slower // path. The caller will attempt the expansion if this fails, so @@ -650,7 +649,7 @@ size_t G1CollectedHeap::humongous_obj_allocate_find_first(size_t num_regions, if (free_regions() >= num_regions) { first = _hrs.find_contiguous(num_regions); if (first != G1_NULL_HRS_INDEX) { - for (size_t i = first; i < first + num_regions; ++i) { + for (uint i = first; i < first + num_regions; ++i) { HeapRegion* hr = region_at(i); assert(hr->is_empty(), "sanity"); assert(is_on_master_free_list(hr), "sanity"); @@ -664,15 +663,15 @@ size_t G1CollectedHeap::humongous_obj_allocate_find_first(size_t num_regions, } HeapWord* -G1CollectedHeap::humongous_obj_allocate_initialize_regions(size_t first, - size_t num_regions, +G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first, + uint num_regions, size_t word_size) { assert(first != G1_NULL_HRS_INDEX, "pre-condition"); assert(isHumongous(word_size), "word_size should be humongous"); assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition"); // Index of last region in the series + 1.
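// A sketch of the two paths in humongous_obj_allocate_find_first() above: a
// one-region request can take any free region (the caller retries with
// expansion on failure), while a larger request needs a contiguous run and
// is only attempted when enough free regions exist. All names here are
// illustrative stand-ins, not HotSpot calls.
typedef unsigned int uint;

static const uint kNullIndex = (uint) -1;  // stand-in for G1_NULL_HRS_INDEX

// find_contiguous is assumed to return kNullIndex when no run is available.
static uint find_first_demo(uint num_regions, uint free_regions,
                            uint (*find_contiguous)(uint)) {
  if (num_regions == 1) {
    return find_contiguous(1);             // fast path: any free region works
  }
  if (free_regions >= num_regions) {
    return find_contiguous(num_regions);   // slow path: need a contiguous run
  }
  return kNullIndex;                       // not enough free regions at all
}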
- size_t last = first + num_regions; + uint last = first + num_regions; // We need to initialize the region(s) we just discovered. This is // a bit tricky given that it can happen concurrently with @@ -683,7 +682,7 @@ G1CollectedHeap::humongous_obj_allocate_initialize_regions(size_t first, // a specific order. // The word size sum of all the regions we will allocate. - size_t word_size_sum = num_regions * HeapRegion::GrainWords; + size_t word_size_sum = (size_t) num_regions * HeapRegion::GrainWords; assert(word_size <= word_size_sum, "sanity"); // This will be the "starts humongous" region. @@ -722,7 +721,7 @@ G1CollectedHeap::humongous_obj_allocate_initialize_regions(size_t first, // Then, if there are any, we will set up the "continues // humongous" regions. HeapRegion* hr = NULL; - for (size_t i = first + 1; i < last; ++i) { + for (uint i = first + 1; i < last; ++i) { hr = region_at(i); hr->set_continuesHumongous(first_hr); } @@ -768,7 +767,7 @@ G1CollectedHeap::humongous_obj_allocate_initialize_regions(size_t first, // last one) is actually used when we will free up the humongous // region in free_humongous_region(). hr = NULL; - for (size_t i = first + 1; i < last; ++i) { + for (uint i = first + 1; i < last; ++i) { hr = region_at(i); if ((i + 1) == last) { // last continues humongous region @@ -804,14 +803,14 @@ HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) { verify_region_sets_optional(); - size_t num_regions = - round_to(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords; - size_t x_size = expansion_regions(); - size_t fs = _hrs.free_suffix(); - size_t first = humongous_obj_allocate_find_first(num_regions, word_size); + size_t word_size_rounded = round_to(word_size, HeapRegion::GrainWords); + uint num_regions = (uint) (word_size_rounded / HeapRegion::GrainWords); + uint x_num = expansion_regions(); + uint fs = _hrs.free_suffix(); + uint first = humongous_obj_allocate_find_first(num_regions, word_size); if (first == G1_NULL_HRS_INDEX) { // The only thing we can do now is attempt expansion. - if (fs + x_size >= num_regions) { + if (fs + x_num >= num_regions) { // If the number of regions we're trying to allocate for this // object is at most the number of regions in the free suffix, // then the call to humongous_obj_allocate_find_first() above @@ -1781,7 +1780,7 @@ void G1CollectedHeap::shrink_helper(size_t shrink_bytes) { ReservedSpace::page_align_size_down(shrink_bytes); aligned_shrink_bytes = align_size_down(aligned_shrink_bytes, HeapRegion::GrainBytes); - size_t num_regions_deleted = 0; + uint num_regions_deleted = 0; MemRegion mr = _hrs.shrink_by(aligned_shrink_bytes, &num_regions_deleted); HeapWord* old_end = (HeapWord*) _g1_storage.high(); assert(mr.end() == old_end, "post-condition"); @@ -2004,7 +2003,7 @@ jint G1CollectedHeap::initialize() { _reserved.set_start((HeapWord*)heap_rs.base()); _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size())); - _expansion_regions = max_byte_size/HeapRegion::GrainBytes; + _expansion_regions = (uint) (max_byte_size / HeapRegion::GrainBytes); // Create the gen rem set (and barrier set) for the entire reserved region. _rem_set = collector_policy()->create_rem_set(_reserved, 2); @@ -2041,7 +2040,7 @@ jint G1CollectedHeap::initialize() { // 6843694 - ensure that the maximum region index can fit // in the remembered set structures. 
- const size_t max_region_idx = ((size_t)1 << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1; + const uint max_region_idx = (1U << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1; guarantee((max_regions() - 1) <= max_region_idx, "too many regions"); size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1; @@ -2057,13 +2056,14 @@ jint G1CollectedHeap::initialize() { _g1h = this; _in_cset_fast_test_length = max_regions(); - _in_cset_fast_test_base = NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length); + _in_cset_fast_test_base = + NEW_C_HEAP_ARRAY(bool, (size_t) _in_cset_fast_test_length); // We're biasing _in_cset_fast_test to avoid subtracting the // beginning of the heap every time we want to index; basically // it's the same with what we do with the card table. _in_cset_fast_test = _in_cset_fast_test_base - - ((size_t) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes); + ((uintx) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes); // Clear the _cset_fast_test bitmap in anticipation of adding // regions to the incremental collection set for the first @@ -2072,7 +2072,7 @@ jint G1CollectedHeap::initialize() { // Create the ConcurrentMark data structure and thread. // (Must do this late, so that "max_regions" is defined.) - _cm = new ConcurrentMark(heap_rs, (int) max_regions()); + _cm = new ConcurrentMark(heap_rs, max_regions()); _cmThread = _cm->cmThread(); // Initialize the from_card cache structure of HeapRegionRemSet. @@ -2581,7 +2581,7 @@ G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl, uint worker, uint no_of_par_workers, jint claim_value) { - const size_t regions = n_regions(); + const uint regions = n_regions(); const uint max_workers = (G1CollectedHeap::use_parallel_gc_threads() ? no_of_par_workers : 1); @@ -2589,11 +2589,11 @@ G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl, no_of_par_workers == workers()->total_workers(), "Non dynamic should use fixed number of workers"); // try to spread out the starting points of the workers - const size_t start_index = regions / max_workers * (size_t) worker; + const uint start_index = regions / max_workers * worker; // each worker will actually look at all regions - for (size_t count = 0; count < regions; ++count) { - const size_t index = (start_index + count) % regions; + for (uint count = 0; count < regions; ++count) { + const uint index = (start_index + count) % regions; assert(0 <= index && index < regions, "sanity"); HeapRegion* r = region_at(index); // we'll ignore "continues humongous" regions (we'll process them @@ -2615,7 +2615,7 @@ G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl, // result, we might end up processing them twice. So, we'll do // them first (notice: most closures will ignore them anyway) and // then we'll do the "starts humongous" region. 
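// A sketch of the biased-array trick used for _in_cset_fast_test above: the
// base pointer is shifted once by the heap bottom's region number, so each
// query is a single shift of the raw object address with no per-query
// subtraction. This mirrors the VM's flat-address assumption; standard C++
// would call the intermediate pointer arithmetic out of bounds.
#include <cstdint>

static bool* bias_fast_test_base(bool* base, const void* heap_bottom,
                                 unsigned int log_region_bytes) {
  // afterwards, biased[(uintptr_t) obj >> log_region_bytes] is valid for any
  // obj address inside the heap
  return base - ((uintptr_t) heap_bottom >> log_region_bytes);
}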
- for (size_t ch_index = index + 1; ch_index < regions; ++ch_index) { + for (uint ch_index = index + 1; ch_index < regions; ++ch_index) { HeapRegion* chr = region_at(ch_index); // if the region has already been claimed or it's not @@ -2683,8 +2683,9 @@ void G1CollectedHeap::reset_cset_heap_region_claim_values() { class CheckClaimValuesClosure : public HeapRegionClosure { private: jint _claim_value; - size_t _failures; + uint _failures; HeapRegion* _sh_region; + public: CheckClaimValuesClosure(jint claim_value) : _claim_value(claim_value), _failures(0), _sh_region(NULL) { } @@ -2712,9 +2713,7 @@ public: } return false; } - size_t failures() { - return _failures; - } + uint failures() { return _failures; } }; bool G1CollectedHeap::check_heap_region_claim_values(jint claim_value) { @@ -2724,17 +2723,15 @@ bool G1CollectedHeap::check_heap_region_claim_values(jint claim_value) { } class CheckClaimValuesInCSetHRClosure: public HeapRegionClosure { - jint _claim_value; - size_t _failures; +private: + jint _claim_value; + uint _failures; public: CheckClaimValuesInCSetHRClosure(jint claim_value) : - _claim_value(claim_value), - _failures(0) { } + _claim_value(claim_value), _failures(0) { } - size_t failures() { - return _failures; - } + uint failures() { return _failures; } bool doHeapRegion(HeapRegion* hr) { assert(hr->in_collection_set(), "how?"); @@ -2801,14 +2798,14 @@ HeapRegion* G1CollectedHeap::start_cset_region_for_worker(int worker_i) { result = g1_policy()->collection_set(); if (G1CollectedHeap::use_parallel_gc_threads()) { - size_t cs_size = g1_policy()->cset_region_length(); + uint cs_size = g1_policy()->cset_region_length(); uint active_workers = workers()->active_workers(); assert(UseDynamicNumberOfGCThreads || active_workers == workers()->total_workers(), "Unless dynamic should use total workers"); - size_t end_ind = (cs_size * worker_i) / active_workers; - size_t start_ind = 0; + uint end_ind = (cs_size * worker_i) / active_workers; + uint start_ind = 0; if (worker_i > 0 && _worker_cset_start_region_time_stamp[worker_i - 1] == gc_time_stamp) { @@ -2818,7 +2815,7 @@ HeapRegion* G1CollectedHeap::start_cset_region_for_worker(int worker_i) { result = _worker_cset_start_region[worker_i - 1]; } - for (size_t i = start_ind; i < end_ind; i++) { + for (uint i = start_ind; i < end_ind; i++) { result = result->next_in_collection_set(); } } @@ -3280,12 +3277,12 @@ void G1CollectedHeap::print_on(outputStream* st) const { _g1_storage.high_boundary()); st->cr(); st->print(" region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K); - size_t young_regions = _young_list->length(); - st->print(SIZE_FORMAT " young (" SIZE_FORMAT "K), ", - young_regions, young_regions * HeapRegion::GrainBytes / K); - size_t survivor_regions = g1_policy()->recorded_survivor_regions(); - st->print(SIZE_FORMAT " survivors (" SIZE_FORMAT "K)", - survivor_regions, survivor_regions * HeapRegion::GrainBytes / K); + uint young_regions = _young_list->length(); + st->print("%u young (" SIZE_FORMAT "K), ", young_regions, + (size_t) young_regions * HeapRegion::GrainBytes / K); + uint survivor_regions = g1_policy()->recorded_survivor_regions(); + st->print("%u survivors (" SIZE_FORMAT "K)", survivor_regions, + (size_t) survivor_regions * HeapRegion::GrainBytes / K); st->cr(); perm()->as_gen()->print_on(st); } @@ -3295,7 +3292,11 @@ void G1CollectedHeap::print_extended_on(outputStream* st) const { // Print the per-region information. 
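// A sketch of the proportional split used by start_cset_region_for_worker()
// above: worker i of K starts at collection-set index cs_size * i / K, so
// the workers carve the set into nearly equal slices with no explicit
// remainder handling. Region counts are small, so the multiply cannot
// realistically overflow. Names are illustrative.
typedef unsigned int uint;

static uint worker_start_index(uint cs_size, uint worker_i,
                               uint active_workers) {
  // monotone in worker_i: 0 for the first worker, always < cs_size
  return (cs_size * worker_i) / active_workers;
}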
st->cr(); - st->print_cr("Heap Regions: (Y=young(eden), SU=young(survivor), HS=humongous(starts), HC=humongous(continues), CS=collection set, F=free, TS=gc time stamp, PTAMS=previous top-at-mark-start, NTAMS=next top-at-mark-start)"); + st->print_cr("Heap Regions: (Y=young(eden), SU=young(survivor), " + "HS=humongous(starts), HC=humongous(continues), " + "CS=collection set, F=free, TS=gc time stamp, " + "PTAMS=previous top-at-mark-start, " + "NTAMS=next top-at-mark-start)"); PrintRegionClosure blk(st); heap_region_iterate(&blk); } @@ -3473,16 +3474,16 @@ size_t G1CollectedHeap::cards_scanned() { void G1CollectedHeap::setup_surviving_young_words() { - guarantee( _surviving_young_words == NULL, "pre-condition" ); - size_t array_length = g1_policy()->young_cset_region_length(); - _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, array_length); + assert(_surviving_young_words == NULL, "pre-condition"); + uint array_length = g1_policy()->young_cset_region_length(); + _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, (size_t) array_length); if (_surviving_young_words == NULL) { vm_exit_out_of_memory(sizeof(size_t) * array_length, "Not enough space for young surv words summary."); } - memset(_surviving_young_words, 0, array_length * sizeof(size_t)); + memset(_surviving_young_words, 0, (size_t) array_length * sizeof(size_t)); #ifdef ASSERT - for (size_t i = 0; i < array_length; ++i) { + for (uint i = 0; i < array_length; ++i) { assert( _surviving_young_words[i] == 0, "memset above" ); } #endif // !ASSERT @@ -3491,9 +3492,10 @@ G1CollectedHeap::setup_surviving_young_words() { void G1CollectedHeap::update_surviving_young_words(size_t* surv_young_words) { MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); - size_t array_length = g1_policy()->young_cset_region_length(); - for (size_t i = 0; i < array_length; ++i) + uint array_length = g1_policy()->young_cset_region_length(); + for (uint i = 0; i < array_length; ++i) { _surviving_young_words[i] += surv_young_words[i]; + } } void @@ -4242,16 +4244,16 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num) // non-young regions (where the age is -1) // We also add a few elements at the beginning and at the end in // an attempt to eliminate cache contention - size_t real_length = 1 + _g1h->g1_policy()->young_cset_region_length(); - size_t array_length = PADDING_ELEM_NUM + - real_length + - PADDING_ELEM_NUM; + uint real_length = 1 + _g1h->g1_policy()->young_cset_region_length(); + uint array_length = PADDING_ELEM_NUM + + real_length + + PADDING_ELEM_NUM; _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length); if (_surviving_young_words_base == NULL) vm_exit_out_of_memory(array_length * sizeof(size_t), "Not enough space for young surv histo."); _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM; - memset(_surviving_young_words, 0, real_length * sizeof(size_t)); + memset(_surviving_young_words, 0, (size_t) real_length * sizeof(size_t)); _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer; _alloc_buffers[GCAllocForTenured] = &_tenured_alloc_buffer; @@ -4388,7 +4390,7 @@ void G1ParCopyClosure template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object> oop G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object> ::copy_to_survivor_space(oop old) { - size_t word_sz = old->size(); + size_t word_sz = old->size(); HeapRegion* from_region = _g1->heap_region_containing_raw(old); // +1 to make the -1 indexes valid...
int young_index = from_region->young_index_in_cset()+1; @@ -5585,8 +5587,8 @@ void G1CollectedHeap::free_humongous_region(HeapRegion* hr, hr->set_notHumongous(); free_region(hr, &hr_pre_used, free_list, par); - size_t i = hr->hrs_index() + 1; - size_t num = 1; + uint i = hr->hrs_index() + 1; + uint num = 1; while (i < n_regions()) { HeapRegion* curr_hr = region_at(i); if (!curr_hr->continuesHumongous()) { @@ -5795,7 +5797,7 @@ void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) { if (cur->is_young()) { int index = cur->young_index_in_cset(); assert(index != -1, "invariant"); - assert((size_t) index < policy->young_cset_region_length(), "invariant"); + assert((uint) index < policy->young_cset_region_length(), "invariant"); size_t words_survived = _surviving_young_words[index]; cur->record_surv_words_in_group(words_survived); @@ -6135,7 +6137,7 @@ void MutatorAllocRegion::retire_region(HeapRegion* alloc_region, // Methods for the GC alloc regions HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size, - size_t count, + uint count, GCAllocPurpose ap) { assert(FreeList_lock->owned_by_self(), "pre-condition"); @@ -6207,7 +6209,7 @@ private: FreeRegionList* _free_list; OldRegionSet* _old_set; HumongousRegionSet* _humongous_set; - size_t _region_count; + uint _region_count; public: VerifyRegionListsClosure(OldRegionSet* old_set, @@ -6216,7 +6218,7 @@ public: _old_set(old_set), _humongous_set(humongous_set), _free_list(free_list), _region_count(0) { } - size_t region_count() { return _region_count; } + uint region_count() { return _region_count; } bool doHeapRegion(HeapRegion* hr) { _region_count += 1; @@ -6238,7 +6240,7 @@ public: } }; -HeapRegion* G1CollectedHeap::new_heap_region(size_t hrs_index, +HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index, HeapWord* bottom) { HeapWord* end = bottom + HeapRegion::GrainWords; MemRegion mr(bottom, end); diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp index 5303196b454..9b8e795c024 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp +++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp @@ -85,8 +85,8 @@ private: HeapRegion* _curr; - size_t _length; - size_t _survivor_length; + uint _length; + uint _survivor_length; size_t _last_sampled_rs_lengths; size_t _sampled_rs_lengths; @@ -101,8 +101,8 @@ public: void empty_list(); bool is_empty() { return _length == 0; } - size_t length() { return _length; } - size_t survivor_length() { return _survivor_length; } + uint length() { return _length; } + uint survivor_length() { return _survivor_length; } // Currently we do not keep track of the used byte sum for the // young list and the survivors and it'd be quite a lot of work to @@ -111,10 +111,10 @@ public: // we'll report the more accurate information then. size_t eden_used_bytes() { assert(length() >= survivor_length(), "invariant"); - return (length() - survivor_length()) * HeapRegion::GrainBytes; + return (size_t) (length() - survivor_length()) * HeapRegion::GrainBytes; } size_t survivor_used_bytes() { - return survivor_length() * HeapRegion::GrainBytes; + return (size_t) survivor_length() * HeapRegion::GrainBytes; } void rs_length_sampling_init(); @@ -247,7 +247,7 @@ private: MasterHumongousRegionSet _humongous_set; // The number of regions we could create by expansion. - size_t _expansion_regions; + uint _expansion_regions; // The block offset table for the G1 heap. 
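// A sketch of the widening pattern that recurs throughout this patch: region
// counts are 32-bit after the change, so byte-size math must widen the count
// back to size_t before multiplying by the region size, otherwise the
// product wraps at 2^32 on LP64. The grain value below is illustrative.
#include <cstddef>
typedef unsigned int uint;

static const size_t kGrainBytesDemo = 32 * 1024 * 1024; // stand-in for HeapRegion::GrainBytes

static size_t eden_used_bytes_demo(uint length, uint survivor_length) {
  // the subtraction stays 32-bit; the cast widens it before the multiply
  return (size_t) (length - survivor_length) * kGrainBytesDemo;
}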
G1BlockOffsetSharedArray* _bot_shared; @@ -339,7 +339,7 @@ private: bool* _in_cset_fast_test_base; // The length of the _in_cset_fast_test_base array. - size_t _in_cset_fast_test_length; + uint _in_cset_fast_test_length; volatile unsigned _gc_time_stamp; @@ -458,14 +458,14 @@ protected: // length and remove them from the master free list. Return the // index of the first region or G1_NULL_HRS_INDEX if the search // was unsuccessful. - size_t humongous_obj_allocate_find_first(size_t num_regions, - size_t word_size); + uint humongous_obj_allocate_find_first(uint num_regions, + size_t word_size); // Initialize a contiguous set of free regions of length num_regions // and starting at index first so that they appear as a single // humongous region. - HeapWord* humongous_obj_allocate_initialize_regions(size_t first, - size_t num_regions, + HeapWord* humongous_obj_allocate_initialize_regions(uint first, + uint num_regions, size_t word_size); // Attempt to allocate a humongous object of the given size. Return @@ -574,7 +574,7 @@ protected: size_t allocated_bytes); // For GC alloc regions. - HeapRegion* new_gc_alloc_region(size_t word_size, size_t count, + HeapRegion* new_gc_alloc_region(size_t word_size, uint count, GCAllocPurpose ap); void retire_gc_alloc_region(HeapRegion* alloc_region, size_t allocated_bytes, GCAllocPurpose ap); @@ -641,7 +641,7 @@ public: void register_region_with_in_cset_fast_test(HeapRegion* r) { assert(_in_cset_fast_test_base != NULL, "sanity"); assert(r->in_collection_set(), "invariant"); - size_t index = r->hrs_index(); + uint index = r->hrs_index(); assert(index < _in_cset_fast_test_length, "invariant"); assert(!_in_cset_fast_test_base[index], "invariant"); _in_cset_fast_test_base[index] = true; @@ -655,7 +655,7 @@ public: if (_g1_committed.contains((HeapWord*) obj)) { // no need to subtract the bottom of the heap from obj, // _in_cset_fast_test is biased - size_t index = ((size_t) obj) >> HeapRegion::LogOfHRGrainBytes; + uintx index = (uintx) obj >> HeapRegion::LogOfHRGrainBytes; bool ret = _in_cset_fast_test[index]; // let's make sure the result is consistent with what the slower // test returns @@ -670,7 +670,7 @@ public: void clear_cset_fast_test() { assert(_in_cset_fast_test_base != NULL, "sanity"); memset(_in_cset_fast_test_base, false, - _in_cset_fast_test_length * sizeof(bool)); + (size_t) _in_cset_fast_test_length * sizeof(bool)); } // This is called at the end of either a concurrent cycle or a Full @@ -1101,23 +1101,23 @@ public: } // The total number of regions in the heap. - size_t n_regions() { return _hrs.length(); } + uint n_regions() { return _hrs.length(); } // The max number of regions in the heap. - size_t max_regions() { return _hrs.max_length(); } + uint max_regions() { return _hrs.max_length(); } // The number of regions that are completely free. - size_t free_regions() { return _free_list.length(); } + uint free_regions() { return _free_list.length(); } // The number of regions that are not completely free. - size_t used_regions() { return n_regions() - free_regions(); } + uint used_regions() { return n_regions() - free_regions(); } // The number of regions available for "regular" expansion. - size_t expansion_regions() { return _expansion_regions; } + uint expansion_regions() { return _expansion_regions; } // Factory method for HeapRegion instances. It will return NULL if // the allocation fails. 
- HeapRegion* new_heap_region(size_t hrs_index, HeapWord* bottom); + HeapRegion* new_heap_region(uint hrs_index, HeapWord* bottom); void verify_not_dirty_region(HeapRegion* hr) PRODUCT_RETURN; void verify_dirty_region(HeapRegion* hr) PRODUCT_RETURN; @@ -1301,7 +1301,7 @@ public: void heap_region_iterate_from(HeapRegion* r, HeapRegionClosure* blk) const; // Return the region with the given index. It assumes the index is valid. - HeapRegion* region_at(size_t index) const { return _hrs.at(index); } + HeapRegion* region_at(uint index) const { return _hrs.at(index); } // Divide the heap region sequence into "chunks" of some size (the number // of regions divided by the number of parallel threads times some diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp index fc0e4f92de3..44409fee471 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp +++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp @@ -431,31 +431,36 @@ G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults), _adaptive_size( } if (FLAG_IS_CMDLINE(NewSize)) { - _min_desired_young_length = MAX2((size_t) 1, NewSize / HeapRegion::GrainBytes); + _min_desired_young_length = MAX2((uint) (NewSize / HeapRegion::GrainBytes), + 1U); if (FLAG_IS_CMDLINE(MaxNewSize)) { - _max_desired_young_length = MAX2((size_t) 1, MaxNewSize / HeapRegion::GrainBytes); + _max_desired_young_length = + MAX2((uint) (MaxNewSize / HeapRegion::GrainBytes), + 1U); _sizer_kind = SizerMaxAndNewSize; _adaptive_size = _min_desired_young_length == _max_desired_young_length; } else { _sizer_kind = SizerNewSizeOnly; } } else if (FLAG_IS_CMDLINE(MaxNewSize)) { - _max_desired_young_length = MAX2((size_t) 1, MaxNewSize / HeapRegion::GrainBytes); + _max_desired_young_length = + MAX2((uint) (MaxNewSize / HeapRegion::GrainBytes), + 1U); _sizer_kind = SizerMaxNewSizeOnly; } } -size_t G1YoungGenSizer::calculate_default_min_length(size_t new_number_of_heap_regions) { - size_t default_value = (new_number_of_heap_regions * G1DefaultMinNewGenPercent) / 100; - return MAX2((size_t)1, default_value); +uint G1YoungGenSizer::calculate_default_min_length(uint new_number_of_heap_regions) { + uint default_value = (new_number_of_heap_regions * G1DefaultMinNewGenPercent) / 100; + return MAX2(1U, default_value); } -size_t G1YoungGenSizer::calculate_default_max_length(size_t new_number_of_heap_regions) { - size_t default_value = (new_number_of_heap_regions * G1DefaultMaxNewGenPercent) / 100; - return MAX2((size_t)1, default_value); +uint G1YoungGenSizer::calculate_default_max_length(uint new_number_of_heap_regions) { + uint default_value = (new_number_of_heap_regions * G1DefaultMaxNewGenPercent) / 100; + return MAX2(1U, default_value); } -void G1YoungGenSizer::heap_size_changed(size_t new_number_of_heap_regions) { +void G1YoungGenSizer::heap_size_changed(uint new_number_of_heap_regions) { assert(new_number_of_heap_regions > 0, "Heap must be initialized"); switch (_sizer_kind) { @@ -512,16 +517,16 @@ void G1CollectorPolicy::initialize_gc_policy_counters() { _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 3); } -bool G1CollectorPolicy::predict_will_fit(size_t young_length, +bool G1CollectorPolicy::predict_will_fit(uint young_length, double base_time_ms, - size_t base_free_regions, + uint base_free_regions, double target_pause_time_ms) { if (young_length >= base_free_regions) { // end condition 1: not enough space for the young regions return false; } - 
double accum_surv_rate = accum_yg_surv_rate_pred((int)(young_length - 1)); + double accum_surv_rate = accum_yg_surv_rate_pred((int) young_length - 1); size_t bytes_to_copy = (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes); double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy); @@ -533,7 +538,7 @@ bool G1CollectorPolicy::predict_will_fit(size_t young_length, } size_t free_bytes = - (base_free_regions - young_length) * HeapRegion::GrainBytes; + (base_free_regions - young_length) * HeapRegion::GrainBytes; if ((2.0 * sigma()) * (double) bytes_to_copy > (double) free_bytes) { // end condition 3: out-of-space (conservatively!) return false; @@ -543,25 +548,25 @@ bool G1CollectorPolicy::predict_will_fit(size_t young_length, return true; } -void G1CollectorPolicy::record_new_heap_size(size_t new_number_of_regions) { +void G1CollectorPolicy::record_new_heap_size(uint new_number_of_regions) { // re-calculate the necessary reserve double reserve_regions_d = (double) new_number_of_regions * _reserve_factor; // We use ceiling so that if reserve_regions_d is > 0.0 (but // smaller than 1.0) we'll get 1. - _reserve_regions = (size_t) ceil(reserve_regions_d); + _reserve_regions = (uint) ceil(reserve_regions_d); _young_gen_sizer->heap_size_changed(new_number_of_regions); } -size_t G1CollectorPolicy::calculate_young_list_desired_min_length( - size_t base_min_length) { - size_t desired_min_length = 0; +uint G1CollectorPolicy::calculate_young_list_desired_min_length( + uint base_min_length) { + uint desired_min_length = 0; if (adaptive_young_list_length()) { if (_alloc_rate_ms_seq->num() > 3) { double now_sec = os::elapsedTime(); double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0; double alloc_rate_ms = predict_alloc_rate_ms(); - desired_min_length = (size_t) ceil(alloc_rate_ms * when_ms); + desired_min_length = (uint) ceil(alloc_rate_ms * when_ms); } else { // otherwise we don't have enough info to make the prediction } @@ -571,7 +576,7 @@ size_t G1CollectorPolicy::calculate_young_list_desired_min_length( return MAX2(_young_gen_sizer->min_desired_young_length(), desired_min_length); } -size_t G1CollectorPolicy::calculate_young_list_desired_max_length() { +uint G1CollectorPolicy::calculate_young_list_desired_max_length() { // Here, we might want to also take into account any additional // constraints (i.e., user-defined minimum bound). Currently, we // effectively don't set this bound. @@ -588,11 +593,11 @@ void G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) { // Calculate the absolute and desired min bounds. // This is how many young regions we already have (currently: the survivors). - size_t base_min_length = recorded_survivor_regions(); + uint base_min_length = recorded_survivor_regions(); // This is the absolute minimum young length, which ensures that we // can allocate one eden region in the worst-case. - size_t absolute_min_length = base_min_length + 1; - size_t desired_min_length = + uint absolute_min_length = base_min_length + 1; + uint desired_min_length = calculate_young_list_desired_min_length(base_min_length); if (desired_min_length < absolute_min_length) { desired_min_length = absolute_min_length; @@ -601,16 +606,16 @@ void G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) { // Calculate the absolute and desired max bounds. // We will try our best not to "eat" into the reserve. 
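// A sketch of the ceiling idiom record_new_heap_size() above relies on:
// taking ceil() guarantees at least one reserve region whenever the factor
// is positive, even when regions * factor is smaller than 1.0.
#include <cmath>
typedef unsigned int uint;

static uint reserve_regions_demo(uint new_number_of_regions,
                                 double reserve_factor) {
  double reserve_regions_d = (double) new_number_of_regions * reserve_factor;
  return (uint) ceil(reserve_regions_d); // 0.0 stays 0, anything > 0 rounds up
}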
- size_t absolute_max_length = 0; + uint absolute_max_length = 0; if (_free_regions_at_end_of_collection > _reserve_regions) { absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions; } - size_t desired_max_length = calculate_young_list_desired_max_length(); + uint desired_max_length = calculate_young_list_desired_max_length(); if (desired_max_length > absolute_max_length) { desired_max_length = absolute_max_length; } - size_t young_list_target_length = 0; + uint young_list_target_length = 0; if (adaptive_young_list_length()) { if (gcs_are_young()) { young_list_target_length = @@ -648,11 +653,11 @@ void G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) { update_max_gc_locker_expansion(); } -size_t +uint G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths, - size_t base_min_length, - size_t desired_min_length, - size_t desired_max_length) { + uint base_min_length, + uint desired_min_length, + uint desired_max_length) { assert(adaptive_young_list_length(), "pre-condition"); assert(gcs_are_young(), "only call this for young GCs"); @@ -667,9 +672,9 @@ G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths, // will be reflected in the predictions by the // survivor_regions_evac_time prediction. assert(desired_min_length > base_min_length, "invariant"); - size_t min_young_length = desired_min_length - base_min_length; + uint min_young_length = desired_min_length - base_min_length; assert(desired_max_length > base_min_length, "invariant"); - size_t max_young_length = desired_max_length - base_min_length; + uint max_young_length = desired_max_length - base_min_length; double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0; double survivor_regions_evac_time = predict_survivor_regions_evac_time(); @@ -679,8 +684,8 @@ G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths, double base_time_ms = predict_base_elapsed_time_ms(pending_cards, scanned_cards) + survivor_regions_evac_time; - size_t available_free_regions = _free_regions_at_end_of_collection; - size_t base_free_regions = 0; + uint available_free_regions = _free_regions_at_end_of_collection; + uint base_free_regions = 0; if (available_free_regions > _reserve_regions) { base_free_regions = available_free_regions - _reserve_regions; } @@ -717,9 +722,9 @@ G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths, // the new max. This way we maintain the loop invariants. assert(min_young_length < max_young_length, "invariant"); - size_t diff = (max_young_length - min_young_length) / 2; + uint diff = (max_young_length - min_young_length) / 2; while (diff > 0) { - size_t young_length = min_young_length + diff; + uint young_length = min_young_length + diff; if (predict_will_fit(young_length, base_time_ms, base_free_regions, target_pause_time_ms)) { min_young_length = young_length; @@ -1322,7 +1327,7 @@ void G1CollectorPolicy::record_collection_pause_end(int no_of_gc_threads) { // given that humongous object allocations do not really affect // either the pause's duration nor when the next pause will take // place we can safely ignore them here. 
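// A sketch of the search loop in calculate_young_list_target_length() above;
// fits() stands in for predict_will_fit(). The loop maintains the invariant
// "min fits, max does not", halves the gap each round, and returns the
// largest young length known to fit the pause goal.
typedef unsigned int uint;

static uint search_target_length(uint min_young_length, uint max_young_length,
                                 bool (*fits)(uint)) {
  uint diff = (max_young_length - min_young_length) / 2;
  while (diff > 0) {
    uint young_length = min_young_length + diff;
    if (fits(young_length)) {
      min_young_length = young_length;   // still fits: raise the lower bound
    } else {
      max_young_length = young_length;   // too long: lower the upper bound
    }
    diff = (max_young_length - min_young_length) / 2;
  }
  return min_young_length;               // invariant: this length fits
}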
- size_t regions_allocated = eden_cset_region_length(); + uint regions_allocated = eden_cset_region_length(); double alloc_rate_ms = (double) regions_allocated / app_time_ms; _alloc_rate_ms_seq->add(alloc_rate_ms); @@ -1506,8 +1511,9 @@ void G1CollectorPolicy::record_collection_pause_end(int no_of_gc_threads) { double pause_time_ms = elapsed_ms; size_t diff = 0; - if (_max_pending_cards >= _pending_cards) + if (_max_pending_cards >= _pending_cards) { diff = _max_pending_cards - _pending_cards; + } _pending_card_diff_seq->add((double) diff); double cost_per_card_ms = 0.0; @@ -1741,8 +1747,7 @@ G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr, return region_elapsed_time_ms; } -size_t -G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) { +size_t G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) { size_t bytes_to_copy; if (hr->is_marked()) bytes_to_copy = hr->max_live_bytes(); @@ -1756,8 +1761,8 @@ G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) { } void -G1CollectorPolicy::init_cset_region_lengths(size_t eden_cset_region_length, - size_t survivor_cset_region_length) { +G1CollectorPolicy::init_cset_region_lengths(uint eden_cset_region_length, + uint survivor_cset_region_length) { _eden_cset_region_length = eden_cset_region_length; _survivor_cset_region_length = survivor_cset_region_length; _old_cset_region_length = 0; @@ -2021,7 +2026,7 @@ region_num_to_mbs(int length) { } #endif // PRODUCT -size_t G1CollectorPolicy::max_regions(int purpose) { +uint G1CollectorPolicy::max_regions(int purpose) { switch (purpose) { case GCAllocForSurvived: return _max_survivor_regions; @@ -2034,13 +2039,13 @@ size_t G1CollectorPolicy::max_regions(int purpose) { } void G1CollectorPolicy::update_max_gc_locker_expansion() { - size_t expansion_region_num = 0; + uint expansion_region_num = 0; if (GCLockerEdenExpansionPercent > 0) { double perc = (double) GCLockerEdenExpansionPercent / 100.0; double expansion_region_num_d = perc * (double) _young_list_target_length; // We use ceiling so that if expansion_region_num_d is > 0.0 (but // less than 1.0) we'll get 1. - expansion_region_num = (size_t) ceil(expansion_region_num_d); + expansion_region_num = (uint) ceil(expansion_region_num_d); } else { assert(expansion_region_num == 0, "sanity"); } @@ -2054,7 +2059,7 @@ void G1CollectorPolicy::update_survivors_policy() { (double) _young_list_target_length / (double) SurvivorRatio; // We use ceiling so that if max_survivor_regions_d is > 0.0 (but // smaller than 1.0) we'll get 1. - _max_survivor_regions = (size_t) ceil(max_survivor_regions_d); + _max_survivor_regions = (uint) ceil(max_survivor_regions_d); _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold( HeapRegion::GrainWords * _max_survivor_regions); @@ -2288,27 +2293,25 @@ G1CollectorPolicy::record_concurrent_mark_cleanup_end(int no_of_gc_threads) { (clear_marked_end_sec - start_sec) * 1000.0); } + uint region_num = _g1->n_regions(); if (G1CollectedHeap::use_parallel_gc_threads()) { - const size_t OverpartitionFactor = 4; - size_t WorkUnit; + const uint OverpartitionFactor = 4; + uint WorkUnit; // The use of MinChunkSize = 8 in the original code // causes some assertion failures when the total number of // regions is less than 8. The code here tries to fix that. // Should the original code also be fixed?
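// A sketch of the guarded subtraction that the pause-end code above wraps in
// braces: with unsigned operands, a - b wraps around when b > a, so the
// branch is the clamp itself, not a micro-optimization.
#include <cstddef>

static size_t clamped_diff(size_t a, size_t b) {
  size_t diff = 0;
  if (a >= b) {
    diff = a - b;        // only subtract when the result cannot wrap
  }
  return diff;
}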
if (no_of_gc_threads > 0) { - const size_t MinWorkUnit = - MAX2(_g1->n_regions() / no_of_gc_threads, (size_t) 1U); - WorkUnit = - MAX2(_g1->n_regions() / (no_of_gc_threads * OverpartitionFactor), - MinWorkUnit); + const uint MinWorkUnit = MAX2(region_num / no_of_gc_threads, 1U); + WorkUnit = MAX2(region_num / (no_of_gc_threads * OverpartitionFactor), + MinWorkUnit); } else { assert(no_of_gc_threads > 0, "The active gc workers should be greater than 0"); // In a product build do something reasonable to avoid a crash. - const size_t MinWorkUnit = - MAX2(_g1->n_regions() / ParallelGCThreads, (size_t) 1U); + const uint MinWorkUnit = MAX2(region_num / (uint) ParallelGCThreads, 1U); WorkUnit = - MAX2(_g1->n_regions() / (ParallelGCThreads * OverpartitionFactor), + MAX2(region_num / (uint) (ParallelGCThreads * OverpartitionFactor), MinWorkUnit); } _collectionSetChooser->prepareForAddMarkedHeapRegionsPar(_g1->n_regions(), @@ -2624,8 +2627,8 @@ void G1CollectorPolicy::finalize_cset(double target_pause_time_ms) { // pause are appended to the RHS of the young list, i.e. // [Newly Young Regions ++ Survivors from last pause]. - size_t survivor_region_length = young_list->survivor_length(); - size_t eden_region_length = young_list->length() - survivor_region_length; + uint survivor_region_length = young_list->survivor_length(); + uint eden_region_length = young_list->length() - survivor_region_length; init_cset_region_lengths(eden_region_length, survivor_region_length); hr = young_list->first_survivor_region(); while (hr != NULL) { @@ -2664,10 +2667,10 @@ void G1CollectorPolicy::finalize_cset(double target_pause_time_ms) { if (!gcs_are_young()) { CollectionSetChooser* cset_chooser = _collectionSetChooser; assert(cset_chooser->verify(), "CSet Chooser verification - pre"); - const size_t min_old_cset_length = cset_chooser->calcMinOldCSetLength(); - const size_t max_old_cset_length = cset_chooser->calcMaxOldCSetLength(); + const uint min_old_cset_length = cset_chooser->calcMinOldCSetLength(); + const uint max_old_cset_length = cset_chooser->calcMaxOldCSetLength(); - size_t expensive_region_num = 0; + uint expensive_region_num = 0; bool check_time_remaining = adaptive_young_list_length(); HeapRegion* hr = cset_chooser->peek(); while (hr != NULL) { diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp index 1a51e4c757c..f897ffdb644 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp +++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp @@ -128,19 +128,19 @@ private: SizerNewRatio }; SizerKind _sizer_kind; - size_t _min_desired_young_length; - size_t _max_desired_young_length; + uint _min_desired_young_length; + uint _max_desired_young_length; bool _adaptive_size; - size_t calculate_default_min_length(size_t new_number_of_heap_regions); - size_t calculate_default_max_length(size_t new_number_of_heap_regions); + uint calculate_default_min_length(uint new_number_of_heap_regions); + uint calculate_default_max_length(uint new_number_of_heap_regions); public: G1YoungGenSizer(); - void heap_size_changed(size_t new_number_of_heap_regions); - size_t min_desired_young_length() { + void heap_size_changed(uint new_number_of_heap_regions); + uint min_desired_young_length() { return _min_desired_young_length; } - size_t max_desired_young_length() { + uint max_desired_young_length() { return _max_desired_young_length; } bool adaptive_young_list_length() { @@ -175,7 +175,7 @@ private: double 
_cur_collection_start_sec; size_t _cur_collection_pause_used_at_start_bytes; - size_t _cur_collection_pause_used_regions_at_start; + uint _cur_collection_pause_used_regions_at_start; double _cur_collection_par_time_ms; double _cur_collection_code_root_fixup_time_ms; @@ -233,13 +233,13 @@ private: // indicates whether we are in young or mixed GC mode bool _gcs_are_young; - size_t _young_list_target_length; - size_t _young_list_fixed_length; + uint _young_list_target_length; + uint _young_list_fixed_length; size_t _prev_eden_capacity; // used for logging // The max number of regions we can extend the eden by while the GC // locker is active. This should be >= _young_list_target_length; - size_t _young_list_max_length; + uint _young_list_max_length; bool _last_gc_was_young; @@ -257,7 +257,7 @@ private: double _gc_overhead_perc; double _reserve_factor; - size_t _reserve_regions; + uint _reserve_regions; bool during_marking() { return _during_marking; @@ -292,18 +292,18 @@ private: G1YoungGenSizer* _young_gen_sizer; - size_t _eden_cset_region_length; - size_t _survivor_cset_region_length; - size_t _old_cset_region_length; + uint _eden_cset_region_length; + uint _survivor_cset_region_length; + uint _old_cset_region_length; - void init_cset_region_lengths(size_t eden_cset_region_length, - size_t survivor_cset_region_length); + void init_cset_region_lengths(uint eden_cset_region_length, + uint survivor_cset_region_length); - size_t eden_cset_region_length() { return _eden_cset_region_length; } - size_t survivor_cset_region_length() { return _survivor_cset_region_length; } - size_t old_cset_region_length() { return _old_cset_region_length; } + uint eden_cset_region_length() { return _eden_cset_region_length; } + uint survivor_cset_region_length() { return _survivor_cset_region_length; } + uint old_cset_region_length() { return _old_cset_region_length; } - size_t _free_regions_at_end_of_collection; + uint _free_regions_at_end_of_collection; size_t _recorded_rs_lengths; size_t _max_rs_lengths; @@ -496,10 +496,10 @@ public: void set_recorded_rs_lengths(size_t rs_lengths); - size_t cset_region_length() { return young_cset_region_length() + - old_cset_region_length(); } - size_t young_cset_region_length() { return eden_cset_region_length() + - survivor_cset_region_length(); } + uint cset_region_length() { return young_cset_region_length() + + old_cset_region_length(); } + uint young_cset_region_length() { return eden_cset_region_length() + + survivor_cset_region_length(); } void record_young_free_cset_time_ms(double time_ms) { _recorded_young_free_cset_time_ms = time_ms; @@ -720,12 +720,12 @@ private: // Calculate and return the minimum desired young list target // length. This is the minimum desired young list length according // to the user's inputs. - size_t calculate_young_list_desired_min_length(size_t base_min_length); + uint calculate_young_list_desired_min_length(uint base_min_length); // Calculate and return the maximum desired young list target // length. This is the maximum desired young list length according // to the user's inputs. - size_t calculate_young_list_desired_max_length(); + uint calculate_young_list_desired_max_length(); // Calculate and return the maximum young list target length that // can fit into the pause time goal. The parameters are: rs_lengths // is the prediction of how large the young RSet lengths will // be, base_min_length is the already existing number of regions in // the young list, min_length and max_length are the desired min and // max young list length according to the user's inputs.
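// A sketch of the adaptive minimum declared above: the predicted allocation
// rate (regions per ms) times the time until the next GC is allowed (ms)
// gives the eden regions expected to be consumed, rounded up so a partial
// region still counts.
#include <cmath>
typedef unsigned int uint;

static uint desired_min_length_demo(double alloc_rate_ms, double when_ms) {
  return (uint) ceil(alloc_rate_ms * when_ms);
}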
- size_t calculate_young_list_target_length(size_t rs_lengths, - size_t base_min_length, - size_t desired_min_length, - size_t desired_max_length); + uint calculate_young_list_target_length(size_t rs_lengths, + uint base_min_length, + uint desired_min_length, + uint desired_max_length); // Check whether a given young length (young_length) fits into the // given target pause time and whether the prediction for the amount // of objects to be copied for the given length will fit into the // given free space (expressed by base_free_regions). It is used by // calculate_young_list_target_length(). - bool predict_will_fit(size_t young_length, double base_time_ms, - size_t base_free_regions, double target_pause_time_ms); + bool predict_will_fit(uint young_length, double base_time_ms, + uint base_free_regions, double target_pause_time_ms); // Count the number of bytes used in the CS. void count_CS_bytes_used(); @@ -773,7 +773,7 @@ public: } // This should be called after the heap is resized. - void record_new_heap_size(size_t new_number_of_regions); + void record_new_heap_size(uint new_number_of_regions); void init(); @@ -1048,18 +1048,18 @@ public: } bool is_young_list_full() { - size_t young_list_length = _g1->young_list()->length(); - size_t young_list_target_length = _young_list_target_length; + uint young_list_length = _g1->young_list()->length(); + uint young_list_target_length = _young_list_target_length; return young_list_length >= young_list_target_length; } bool can_expand_young_list() { - size_t young_list_length = _g1->young_list()->length(); - size_t young_list_max_length = _young_list_max_length; + uint young_list_length = _g1->young_list()->length(); + uint young_list_max_length = _young_list_max_length; return young_list_length < young_list_max_length; } - size_t young_list_max_length() { + uint young_list_max_length() { return _young_list_max_length; } @@ -1097,7 +1097,7 @@ private: int _tenuring_threshold; // The limit on the number of regions allocated for survivors. - size_t _max_survivor_regions; + uint _max_survivor_regions; // For reporting purposes. size_t _eden_bytes_before_gc; @@ -1105,7 +1105,7 @@ private: size_t _capacity_before_gc; // The number of survivor regions after a collection. - size_t _recorded_survivor_regions; + uint _recorded_survivor_regions; // List of survivor regions. HeapRegion* _recorded_survivor_head; HeapRegion* _recorded_survivor_tail; @@ -1127,9 +1127,9 @@ public: return purpose == GCAllocForSurvived; } - static const size_t REGIONS_UNLIMITED = ~(size_t)0; + static const uint REGIONS_UNLIMITED = (uint) -1; - size_t max_regions(int purpose); + uint max_regions(int purpose); // The limit on regions for a particular purpose is reached.
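// A sketch of the REGIONS_UNLIMITED change above: (uint) -1 converts to the
// all-ones bit pattern, i.e. UINT_MAX, giving the narrower type the same
// "no limit" sentinel that ~(size_t)0 provided for size_t.
#include <climits>
typedef unsigned int uint;

static const uint kRegionsUnlimitedDemo = (uint) -1;

static bool is_unlimited(uint limit) {
  return limit == kRegionsUnlimitedDemo;   // equivalently, limit == UINT_MAX
}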
void note_alloc_region_limit_reached(int purpose) { @@ -1146,7 +1146,7 @@ public: _survivor_surv_rate_group->stop_adding_regions(); } - void record_survivor_regions(size_t regions, + void record_survivor_regions(uint regions, HeapRegion* head, HeapRegion* tail) { _recorded_survivor_regions = regions; @@ -1154,12 +1154,11 @@ public: _recorded_survivor_tail = tail; } - size_t recorded_survivor_regions() { + uint recorded_survivor_regions() { return _recorded_survivor_regions; } - void record_thread_age_table(ageTable* age_table) - { + void record_thread_age_table(ageTable* age_table) { _survivors_age_table.merge_par(age_table); } diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1ErgoVerbose.hpp b/hotspot/src/share/vm/gc_implementation/g1/g1ErgoVerbose.hpp index 1e738fd9af0..20d34ddb7ff 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/g1ErgoVerbose.hpp +++ b/hotspot/src/share/vm/gc_implementation/g1/g1ErgoVerbose.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -120,7 +120,7 @@ public: // Single parameter format strings #define ergo_format_str(_name_) ", " _name_ ": %s" -#define ergo_format_region(_name_) ", " _name_ ": "SIZE_FORMAT" regions" +#define ergo_format_region(_name_) ", " _name_ ": %u regions" #define ergo_format_byte(_name_) ", " _name_ ": "SIZE_FORMAT" bytes" #define ergo_format_double(_name_) ", " _name_ ": %1.2f" #define ergo_format_perc(_name_) ", " _name_ ": %1.2f %%" diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1MonitoringSupport.cpp b/hotspot/src/share/vm/gc_implementation/g1/g1MonitoringSupport.cpp index 34542618f22..55627cb5100 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/g1MonitoringSupport.cpp +++ b/hotspot/src/share/vm/gc_implementation/g1/g1MonitoringSupport.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2012 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -177,19 +177,19 @@ void G1MonitoringSupport::recalculate_sizes() { // values we read here are possible (i.e., at a STW phase at the end // of a GC). - size_t young_list_length = g1->young_list()->length(); - size_t survivor_list_length = g1->g1_policy()->recorded_survivor_regions(); + uint young_list_length = g1->young_list()->length(); + uint survivor_list_length = g1->g1_policy()->recorded_survivor_regions(); assert(young_list_length >= survivor_list_length, "invariant"); - size_t eden_list_length = young_list_length - survivor_list_length; + uint eden_list_length = young_list_length - survivor_list_length; // Max length includes any potential extensions to the young gen // we'll do when the GC locker is active. 
- size_t young_list_max_length = g1->g1_policy()->young_list_max_length(); + uint young_list_max_length = g1->g1_policy()->young_list_max_length(); assert(young_list_max_length >= survivor_list_length, "invariant"); - size_t eden_list_max_length = young_list_max_length - survivor_list_length; + uint eden_list_max_length = young_list_max_length - survivor_list_length; _overall_used = g1->used_unlocked(); - _eden_used = eden_list_length * HeapRegion::GrainBytes; - _survivor_used = survivor_list_length * HeapRegion::GrainBytes; + _eden_used = (size_t) eden_list_length * HeapRegion::GrainBytes; + _survivor_used = (size_t) survivor_list_length * HeapRegion::GrainBytes; _young_region_num = young_list_length; _old_used = subtract_up_to_zero(_overall_used, _eden_used + _survivor_used); @@ -207,7 +207,7 @@ void G1MonitoringSupport::recalculate_sizes() { committed -= _survivor_committed + _old_committed; // Next, calculate and remove the committed size for the eden. - _eden_committed = eden_list_max_length * HeapRegion::GrainBytes; + _eden_committed = (size_t) eden_list_max_length * HeapRegion::GrainBytes; // Somewhat defensive: be robust in case there are inaccuracies in // the calculations _eden_committed = MIN2(_eden_committed, committed); @@ -237,10 +237,10 @@ void G1MonitoringSupport::recalculate_eden_size() { // When a new eden region is allocated, only the eden_used size is // affected (since we have recalculated everything else at the last GC). - size_t young_region_num = g1h()->young_list()->length(); + uint young_region_num = g1h()->young_list()->length(); if (young_region_num > _young_region_num) { - size_t diff = young_region_num - _young_region_num; - _eden_used += diff * HeapRegion::GrainBytes; + uint diff = young_region_num - _young_region_num; + _eden_used += (size_t) diff * HeapRegion::GrainBytes; // Somewhat defensive: cap the eden used size to make sure it // never exceeds the committed size. _eden_used = MIN2(_eden_used, _eden_committed); diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1MonitoringSupport.hpp b/hotspot/src/share/vm/gc_implementation/g1/g1MonitoringSupport.hpp index a428b10378d..61e278a7ff7 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/g1MonitoringSupport.hpp +++ b/hotspot/src/share/vm/gc_implementation/g1/g1MonitoringSupport.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -147,7 +147,7 @@ class G1MonitoringSupport : public CHeapObj { size_t _overall_committed; size_t _overall_used; - size_t _young_region_num; + uint _young_region_num; size_t _young_gen_committed; size_t _eden_committed; size_t _eden_used; diff --git a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp index 1ac7e9eb43e..44e3ac83346 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp +++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp @@ -334,7 +334,7 @@ void HeapRegion::setup_heap_region_size(uintx min_heap_size) { guarantee(GrainWords == 0, "we should only set it once"); GrainWords = GrainBytes >> LogHeapWordSize; - guarantee((size_t)(1 << LogOfHRGrainWords) == GrainWords, "sanity"); + guarantee((size_t) 1 << LogOfHRGrainWords == GrainWords, "sanity"); guarantee(CardsPerRegion == 0, "we should only set it once"); CardsPerRegion = GrainBytes >> CardTableModRefBS::card_shift; @@ -482,10 +482,10 @@ void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) { #endif // _MSC_VER -HeapRegion:: -HeapRegion(size_t hrs_index, G1BlockOffsetSharedArray* sharedOffsetArray, - MemRegion mr, bool is_zeroed) - : G1OffsetTableContigSpace(sharedOffsetArray, mr, is_zeroed), +HeapRegion::HeapRegion(uint hrs_index, + G1BlockOffsetSharedArray* sharedOffsetArray, + MemRegion mr, bool is_zeroed) : + G1OffsetTableContigSpace(sharedOffsetArray, mr, is_zeroed), _hrs_index(hrs_index), _humongous_type(NotHumongous), _humongous_start_region(NULL), _in_collection_set(false), diff --git a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp index b462389a652..8a99d2d3f7b 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp +++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp @@ -52,12 +52,15 @@ class HeapRegionRemSetIterator; class HeapRegion; class HeapRegionSetBase; -#define HR_FORMAT SIZE_FORMAT":(%s)["PTR_FORMAT","PTR_FORMAT","PTR_FORMAT"]" +#define HR_FORMAT "%u:(%s)["PTR_FORMAT","PTR_FORMAT","PTR_FORMAT"]" #define HR_FORMAT_PARAMS(_hr_) \ (_hr_)->hrs_index(), \ (_hr_)->is_survivor() ? "S" : (_hr_)->is_young() ? "E" : "-", \ (_hr_)->bottom(), (_hr_)->top(), (_hr_)->end() +// sentinel value for hrs_index +#define G1_NULL_HRS_INDEX ((uint) -1) + // A dirty card to oop closure for heap regions. It // knows how to get the G1 heap and how to use the bitmap // in the concurrent marker used by G1 to filter remembered @@ -235,7 +238,7 @@ class HeapRegion: public G1OffsetTableContigSpace { protected: // The index of this region in the heap region sequence. - size_t _hrs_index; + uint _hrs_index; HumongousType _humongous_type; // For a humongous region, region in which it starts. @@ -342,7 +345,7 @@ class HeapRegion: public G1OffsetTableContigSpace { public: // If "is_zeroed" is "true", the region "mr" can be assumed to contain zeros. - HeapRegion(size_t hrs_index, + HeapRegion(uint hrs_index, G1BlockOffsetSharedArray* sharedOffsetArray, MemRegion mr, bool is_zeroed); @@ -389,7 +392,7 @@ class HeapRegion: public G1OffsetTableContigSpace { // If this region is a member of a HeapRegionSeq, the index in that // sequence, otherwise -1. - size_t hrs_index() const { return _hrs_index; } + uint hrs_index() const { return _hrs_index; } // The number of bytes marked live in the region in the last marking phase. 
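The (size_t) casts this patch adds in G1MonitoringSupport::recalculate_sizes() are what keep the byte computations safe now that region counts are 32-bit uints: if both operands of a multiplication are only 32 bits wide, the product wraps modulo 2^32 before it is ever stored. A minimal standalone illustration (plain C++, not HotSpot code; the values are made up, and the widening only helps where size_t is 64-bit):

#include <cstddef>
#include <cstdio>

int main() {
  // Hypothetical heap: 8192 regions of 1 MB each, i.e. 8 GB in total.
  unsigned int region_num  = 8192;
  unsigned int grain_bytes = 1024 * 1024;  // pretend GrainBytes were 32-bit

  // 32-bit multiply wraps: 2^13 * 2^20 = 2^33, which is 0 mod 2^32.
  unsigned int wrapped = region_num * grain_bytes;

  // Widening one operand first performs the multiply in size_t, mirroring:
  //   _eden_used = (size_t) eden_list_length * HeapRegion::GrainBytes;
  size_t widened = (size_t) region_num * grain_bytes;

  printf("wrapped: %u  widened: %zu\n", wrapped, widened);
  return 0;
}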
size_t marked_bytes() { return _prev_marked_bytes; } diff --git a/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp b/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp index 47c41553838..a23bd79a7b3 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp +++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -577,7 +577,7 @@ void OtherRegionsTable::print_from_card_cache() { #endif void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, int tid) { - size_t cur_hrs_ind = hr()->hrs_index(); + size_t cur_hrs_ind = (size_t) hr()->hrs_index(); #if HRRS_VERBOSE gclog_or_tty->print_cr("ORT::add_reference_work(" PTR_FORMAT "->" PTR_FORMAT ").", @@ -841,7 +841,7 @@ PosParPRT* OtherRegionsTable::delete_region_table() { #endif // Set the corresponding coarse bit. - size_t max_hrs_index = max->hr()->hrs_index(); + size_t max_hrs_index = (size_t) max->hr()->hrs_index(); if (!_coarse_map.at(max_hrs_index)) { _coarse_map.at_put(max_hrs_index, true); _n_coarse_entries++; @@ -866,17 +866,20 @@ PosParPRT* OtherRegionsTable::delete_region_table() { void OtherRegionsTable::scrub(CardTableModRefBS* ctbs, BitMap* region_bm, BitMap* card_bm) { // First eliminated garbage regions from the coarse map. - if (G1RSScrubVerbose) - gclog_or_tty->print_cr("Scrubbing region "SIZE_FORMAT":", - hr()->hrs_index()); + if (G1RSScrubVerbose) { + gclog_or_tty->print_cr("Scrubbing region %u:", hr()->hrs_index()); + } assert(_coarse_map.size() == region_bm->size(), "Precondition"); - if (G1RSScrubVerbose) - gclog_or_tty->print(" Coarse map: before = %d...", _n_coarse_entries); + if (G1RSScrubVerbose) { + gclog_or_tty->print(" Coarse map: before = "SIZE_FORMAT"...", + _n_coarse_entries); + } _coarse_map.set_intersection(*region_bm); _n_coarse_entries = _coarse_map.count_one_bits(); - if (G1RSScrubVerbose) - gclog_or_tty->print_cr(" after = %d.", _n_coarse_entries); + if (G1RSScrubVerbose) { + gclog_or_tty->print_cr(" after = "SIZE_FORMAT".", _n_coarse_entries); + } // Now do the fine-grained maps. for (size_t i = 0; i < _max_fine_entries; i++) { @@ -885,23 +888,27 @@ void OtherRegionsTable::scrub(CardTableModRefBS* ctbs, while (cur != NULL) { PosParPRT* nxt = cur->next(); // If the entire region is dead, eliminate. - if (G1RSScrubVerbose) - gclog_or_tty->print_cr(" For other region "SIZE_FORMAT":", + if (G1RSScrubVerbose) { + gclog_or_tty->print_cr(" For other region %u:", cur->hr()->hrs_index()); - if (!region_bm->at(cur->hr()->hrs_index())) { + } + if (!region_bm->at((size_t) cur->hr()->hrs_index())) { *prev = nxt; cur->set_next(NULL); _n_fine_entries--; - if (G1RSScrubVerbose) + if (G1RSScrubVerbose) { gclog_or_tty->print_cr(" deleted via region map."); + } PosParPRT::free(cur); } else { // Do fine-grain elimination. - if (G1RSScrubVerbose) + if (G1RSScrubVerbose) { gclog_or_tty->print(" occ: before = %4d.", cur->occupied()); + } cur->scrub(ctbs, card_bm); - if (G1RSScrubVerbose) + if (G1RSScrubVerbose) { gclog_or_tty->print_cr(" after = %4d.", cur->occupied()); + } // Did that empty the table completely? 
if (cur->occupied() == 0) { *prev = nxt; @@ -1003,7 +1010,7 @@ void OtherRegionsTable::clear() { void OtherRegionsTable::clear_incoming_entry(HeapRegion* from_hr) { MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag); - size_t hrs_ind = from_hr->hrs_index(); + size_t hrs_ind = (size_t) from_hr->hrs_index(); size_t ind = hrs_ind & _mod_max_fine_entries_mask; if (del_single_region_table(ind, from_hr)) { assert(!_coarse_map.at(hrs_ind), "Inv"); @@ -1011,7 +1018,7 @@ void OtherRegionsTable::clear_incoming_entry(HeapRegion* from_hr) { _coarse_map.par_at_put(hrs_ind, 0); } // Check to see if any of the fcc entries come from here. - size_t hr_ind = hr()->hrs_index(); + size_t hr_ind = (size_t) hr()->hrs_index(); for (int tid = 0; tid < HeapRegionRemSet::num_par_rem_sets(); tid++) { int fcc_ent = _from_card_cache[tid][hr_ind]; if (fcc_ent != -1) { @@ -1223,7 +1230,7 @@ bool HeapRegionRemSetIterator::coarse_has_next(size_t& card_index) { if ((size_t)_coarse_cur_region_index < _coarse_map->size()) { _coarse_cur_region_cur_card = 0; HeapWord* r_bot = - _g1h->region_at(_coarse_cur_region_index)->bottom(); + _g1h->region_at((uint) _coarse_cur_region_index)->bottom(); _cur_region_card_offset = _bosa->index_for(r_bot); } else { return false; diff --git a/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp b/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp index 453435098bb..504afa2ef04 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp +++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp @@ -329,13 +329,13 @@ public: // Declare the heap size (in # of regions) to the HeapRegionRemSet(s). // (Uses it to initialize from_card_cache). - static void init_heap(size_t max_regions) { - OtherRegionsTable::init_from_card_cache(max_regions); + static void init_heap(uint max_regions) { + OtherRegionsTable::init_from_card_cache((size_t) max_regions); } // Declares that only regions i s.t. 0 <= i < new_n_regs are in use. - static void shrink_heap(size_t new_n_regs) { - OtherRegionsTable::shrink_from_card_cache(new_n_regs); + static void shrink_heap(uint new_n_regs) { + OtherRegionsTable::shrink_from_card_cache((size_t) new_n_regs); } #ifndef PRODUCT diff --git a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp index fecdca15515..dfac7d47d29 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp +++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -31,16 +31,15 @@ // Private -size_t HeapRegionSeq::find_contiguous_from(size_t from, size_t num) { - size_t len = length(); +uint HeapRegionSeq::find_contiguous_from(uint from, uint num) { + uint len = length(); assert(num > 1, "use this only for sequences of length 2 or greater"); assert(from <= len, - err_msg("from: "SIZE_FORMAT" should be valid and <= than "SIZE_FORMAT, - from, len)); + err_msg("from: %u should be valid and <= than %u", from, len)); - size_t curr = from; - size_t first = G1_NULL_HRS_INDEX; - size_t num_so_far = 0; + uint curr = from; + uint first = G1_NULL_HRS_INDEX; + uint num_so_far = 0; while (curr < len && num_so_far < num) { if (at(curr)->is_empty()) { if (first == G1_NULL_HRS_INDEX) { @@ -60,7 +59,7 @@ size_t HeapRegionSeq::find_contiguous_from(size_t from, size_t num) { // we found enough space for the humongous object assert(from <= first && first < len, "post-condition"); assert(first < curr && (curr - first) == num, "post-condition"); - for (size_t i = first; i < first + num; ++i) { + for (uint i = first; i < first + num; ++i) { assert(at(i)->is_empty(), "post-condition"); } return first; @@ -73,10 +72,10 @@ size_t HeapRegionSeq::find_contiguous_from(size_t from, size_t num) { // Public void HeapRegionSeq::initialize(HeapWord* bottom, HeapWord* end, - size_t max_length) { - assert((size_t) bottom % HeapRegion::GrainBytes == 0, + uint max_length) { + assert((uintptr_t) bottom % HeapRegion::GrainBytes == 0, "bottom should be heap region aligned"); - assert((size_t) end % HeapRegion::GrainBytes == 0, + assert((uintptr_t) end % HeapRegion::GrainBytes == 0, "end should be heap region aligned"); _length = 0; @@ -88,8 +87,8 @@ void HeapRegionSeq::initialize(HeapWord* bottom, HeapWord* end, _max_length = max_length; _regions = NEW_C_HEAP_ARRAY(HeapRegion*, max_length); - memset(_regions, 0, max_length * sizeof(HeapRegion*)); - _regions_biased = _regions - ((size_t) bottom >> _region_shift); + memset(_regions, 0, (size_t) max_length * sizeof(HeapRegion*)); + _regions_biased = _regions - ((uintx) bottom >> _region_shift); assert(&_regions[0] == &_regions_biased[addr_to_index_biased(bottom)], "bottom should be included in the region with index 0"); @@ -105,7 +104,7 @@ MemRegion HeapRegionSeq::expand_by(HeapWord* old_end, assert(_heap_bottom <= next_bottom, "invariant"); while (next_bottom < new_end) { assert(next_bottom < _heap_end, "invariant"); - size_t index = length(); + uint index = length(); assert(index < _max_length, "otherwise we cannot expand further"); if (index == 0) { @@ -139,9 +138,9 @@ MemRegion HeapRegionSeq::expand_by(HeapWord* old_end, return MemRegion(old_end, next_bottom); } -size_t HeapRegionSeq::free_suffix() { - size_t res = 0; - size_t index = length(); +uint HeapRegionSeq::free_suffix() { + uint res = 0; + uint index = length(); while (index > 0) { index -= 1; if (!at(index)->is_empty()) { @@ -152,27 +151,24 @@ size_t HeapRegionSeq::free_suffix() { return res; } -size_t HeapRegionSeq::find_contiguous(size_t num) { +uint HeapRegionSeq::find_contiguous(uint num) { assert(num > 1, "use this only for sequences of length 2 or greater"); assert(_next_search_index <= length(), - err_msg("_next_search_indeex: "SIZE_FORMAT" " - "should be valid and <= than "SIZE_FORMAT, + err_msg("_next_search_index: %u should be valid and <= than %u", _next_search_index, length())); - size_t start = _next_search_index; - size_t res = find_contiguous_from(start, num); + uint start = 
_next_search_index; + uint res = find_contiguous_from(start, num); if (res == G1_NULL_HRS_INDEX && start > 0) { // Try starting from the beginning. If _next_search_index was 0, // no point in doing this again. res = find_contiguous_from(0, num); } if (res != G1_NULL_HRS_INDEX) { - assert(res < length(), - err_msg("res: "SIZE_FORMAT" should be valid", res)); + assert(res < length(), err_msg("res: %u should be valid", res)); _next_search_index = res + num; assert(_next_search_index <= length(), - err_msg("_next_search_indeex: "SIZE_FORMAT" " - "should be valid and <= than "SIZE_FORMAT, + err_msg("_next_search_index: %u should be valid and <= than %u", _next_search_index, length())); } return res; @@ -183,20 +179,20 @@ void HeapRegionSeq::iterate(HeapRegionClosure* blk) const { } void HeapRegionSeq::iterate_from(HeapRegion* hr, HeapRegionClosure* blk) const { - size_t hr_index = 0; + uint hr_index = 0; if (hr != NULL) { - hr_index = (size_t) hr->hrs_index(); + hr_index = hr->hrs_index(); } - size_t len = length(); - for (size_t i = hr_index; i < len; i += 1) { + uint len = length(); + for (uint i = hr_index; i < len; i += 1) { bool res = blk->doHeapRegion(at(i)); if (res) { blk->incomplete(); return; } } - for (size_t i = 0; i < hr_index; i += 1) { + for (uint i = 0; i < hr_index; i += 1) { bool res = blk->doHeapRegion(at(i)); if (res) { blk->incomplete(); @@ -206,7 +202,7 @@ void HeapRegionSeq::iterate_from(HeapRegion* hr, HeapRegionClosure* blk) const { } MemRegion HeapRegionSeq::shrink_by(size_t shrink_bytes, - size_t* num_regions_deleted) { + uint* num_regions_deleted) { // Reset this in case it's currently pointing into the regions that // we just removed. _next_search_index = 0; @@ -218,7 +214,7 @@ MemRegion HeapRegionSeq::shrink_by(size_t shrink_bytes, assert(_allocated_length > 0, "we should have at least one region committed"); // around the loop, i will be the next region to be removed - size_t i = length() - 1; + uint i = length() - 1; assert(i > 0, "we should never remove all regions"); // [last_start, end) is the MemRegion that covers the regions we will remove. 
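For reference, the scan that find_contiguous_from() performs (the hunk above shows it only in part) is a single left-to-right pass that tracks the current run of empty regions. A self-contained sketch of the same logic in plain C++, with a vector<bool> standing in for the region array; the run-reset branch is inferred from the loop's post-conditions, not copied from the patch:

#include <cstdint>
#include <vector>

const uint32_t NULL_INDEX = (uint32_t) -1;  // plays the role of G1_NULL_HRS_INDEX

uint32_t find_contiguous_from(const std::vector<bool>& is_empty,
                              uint32_t from, uint32_t num) {
  uint32_t len = (uint32_t) is_empty.size();
  uint32_t curr = from;
  uint32_t first = NULL_INDEX;
  uint32_t num_so_far = 0;
  while (curr < len && num_so_far < num) {
    if (is_empty[curr]) {
      if (first == NULL_INDEX) {
        first = curr;        // start a new candidate run here
        num_so_far = 1;
      } else {
        num_so_far += 1;     // extend the current run
      }
    } else {
      first = NULL_INDEX;    // a non-empty region breaks the run
      num_so_far = 0;
    }
    curr += 1;
  }
  // Either we found num contiguous empty regions starting at first,
  // or no such run exists at or after from.
  return (num_so_far == num) ? first : NULL_INDEX;
}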
HeapWord* end = at(i)->end(); @@ -249,29 +245,24 @@ MemRegion HeapRegionSeq::shrink_by(size_t shrink_bytes, #ifndef PRODUCT void HeapRegionSeq::verify_optional() { guarantee(_length <= _allocated_length, - err_msg("invariant: _length: "SIZE_FORMAT" " - "_allocated_length: "SIZE_FORMAT, + err_msg("invariant: _length: %u _allocated_length: %u", _length, _allocated_length)); guarantee(_allocated_length <= _max_length, - err_msg("invariant: _allocated_length: "SIZE_FORMAT" " - "_max_length: "SIZE_FORMAT, + err_msg("invariant: _allocated_length: %u _max_length: %u", _allocated_length, _max_length)); guarantee(_next_search_index <= _length, - err_msg("invariant: _next_search_index: "SIZE_FORMAT" " - "_length: "SIZE_FORMAT, + err_msg("invariant: _next_search_index: %u _length: %u", _next_search_index, _length)); HeapWord* prev_end = _heap_bottom; - for (size_t i = 0; i < _allocated_length; i += 1) { + for (uint i = 0; i < _allocated_length; i += 1) { HeapRegion* hr = _regions[i]; - guarantee(hr != NULL, err_msg("invariant: i: "SIZE_FORMAT, i)); + guarantee(hr != NULL, err_msg("invariant: i: %u", i)); guarantee(hr->bottom() == prev_end, - err_msg("invariant i: "SIZE_FORMAT" "HR_FORMAT" " - "prev_end: "PTR_FORMAT, + err_msg("invariant i: %u "HR_FORMAT" prev_end: "PTR_FORMAT, i, HR_FORMAT_PARAMS(hr), prev_end)); guarantee(hr->hrs_index() == i, - err_msg("invariant: i: "SIZE_FORMAT" hrs_index(): "SIZE_FORMAT, - i, hr->hrs_index())); + err_msg("invariant: i: %u hrs_index(): %u", i, hr->hrs_index())); if (i < _length) { // Asserts will fire if i is >= _length HeapWord* addr = hr->bottom(); @@ -290,8 +281,8 @@ void HeapRegionSeq::verify_optional() { prev_end = hr->end(); } } - for (size_t i = _allocated_length; i < _max_length; i += 1) { - guarantee(_regions[i] == NULL, err_msg("invariant i: "SIZE_FORMAT, i)); + for (uint i = _allocated_length; i < _max_length; i += 1) { + guarantee(_regions[i] == NULL, err_msg("invariant i: %u", i)); } } #endif // PRODUCT diff --git a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp index 3df8d738bdc..94f4c0f7699 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp +++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,8 +29,6 @@ class HeapRegion; class HeapRegionClosure; class FreeRegionList; -#define G1_NULL_HRS_INDEX ((size_t) -1) - // This class keeps track of the region metadata (i.e., HeapRegion // instances). They are kept in the _regions array in address // order. A region's index in the array corresponds to its index in @@ -65,7 +63,7 @@ class HeapRegionSeq: public CHeapObj { HeapRegion** _regions_biased; // The number of regions committed in the heap. - size_t _length; + uint _length; // The address of the first reserved word in the heap. HeapWord* _heap_bottom; @@ -74,32 +72,32 @@ class HeapRegionSeq: public CHeapObj { HeapWord* _heap_end; // The log of the region byte size. - size_t _region_shift; + uint _region_shift; // A hint for which index to start searching from for humongous // allocations. - size_t _next_search_index; + uint _next_search_index; // The number of regions for which we have allocated HeapRegions for. 
- size_t _allocated_length; + uint _allocated_length; // The maximum number of regions in the heap. - size_t _max_length; + uint _max_length; // Find a contiguous set of empty regions of length num, starting // from the given index. - size_t find_contiguous_from(size_t from, size_t num); + uint find_contiguous_from(uint from, uint num); // Map a heap address to a biased region index. Assume that the // address is valid. - inline size_t addr_to_index_biased(HeapWord* addr) const; + inline uintx addr_to_index_biased(HeapWord* addr) const; - void increment_length(size_t* length) { + void increment_length(uint* length) { assert(*length < _max_length, "pre-condition"); *length += 1; } - void decrement_length(size_t* length) { + void decrement_length(uint* length) { assert(*length > 0, "pre-condition"); *length -= 1; } @@ -108,11 +106,11 @@ class HeapRegionSeq: public CHeapObj { // Empty contructor, we'll initialize it with the initialize() method. HeapRegionSeq() { } - void initialize(HeapWord* bottom, HeapWord* end, size_t max_length); + void initialize(HeapWord* bottom, HeapWord* end, uint max_length); // Return the HeapRegion at the given index. Assume that the index // is valid. - inline HeapRegion* at(size_t index) const; + inline HeapRegion* at(uint index) const; // If addr is within the committed space return its corresponding // HeapRegion, otherwise return NULL. @@ -123,10 +121,10 @@ class HeapRegionSeq: public CHeapObj { inline HeapRegion* addr_to_region_unsafe(HeapWord* addr) const; // Return the number of regions that have been committed in the heap. - size_t length() const { return _length; } + uint length() const { return _length; } // Return the maximum number of regions in the heap. - size_t max_length() const { return _max_length; } + uint max_length() const { return _max_length; } // Expand the sequence to reflect that the heap has grown from // old_end to new_end. Either create new HeapRegions, or re-use @@ -139,12 +137,12 @@ class HeapRegionSeq: public CHeapObj { // Return the number of contiguous regions at the end of the sequence // that are available for allocation. - size_t free_suffix(); + uint free_suffix(); // Find a contiguous set of empty regions of length num and return // the index of the first region or G1_NULL_HRS_INDEX if the // search was unsuccessful. - size_t find_contiguous(size_t num); + uint find_contiguous(uint num); // Apply blk->doHeapRegion() on all committed regions in address order, // terminating the iteration early if doHeapRegion() returns true. @@ -159,7 +157,7 @@ class HeapRegionSeq: public CHeapObj { // sequence. Return a MemRegion that corresponds to the address // range of the uncommitted regions. Assume shrink_bytes is page and // heap region aligned. - MemRegion shrink_by(size_t shrink_bytes, size_t* num_regions_deleted); + MemRegion shrink_by(size_t shrink_bytes, uint* num_regions_deleted); // Do some sanity checking. void verify_optional() PRODUCT_RETURN; diff --git a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.inline.hpp b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.inline.hpp index 3cc5aa8a619..e840287edc7 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.inline.hpp +++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -28,11 +28,11 @@ #include "gc_implementation/g1/heapRegion.hpp" #include "gc_implementation/g1/heapRegionSeq.hpp" -inline size_t HeapRegionSeq::addr_to_index_biased(HeapWord* addr) const { +inline uintx HeapRegionSeq::addr_to_index_biased(HeapWord* addr) const { assert(_heap_bottom <= addr && addr < _heap_end, err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT" end: "PTR_FORMAT, addr, _heap_bottom, _heap_end)); - size_t index = (size_t) addr >> _region_shift; + uintx index = (uintx) addr >> _region_shift; return index; } @@ -40,7 +40,7 @@ inline HeapRegion* HeapRegionSeq::addr_to_region_unsafe(HeapWord* addr) const { assert(_heap_bottom <= addr && addr < _heap_end, err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT" end: "PTR_FORMAT, addr, _heap_bottom, _heap_end)); - size_t index_biased = addr_to_index_biased(addr); + uintx index_biased = addr_to_index_biased(addr); HeapRegion* hr = _regions_biased[index_biased]; assert(hr != NULL, "invariant"); return hr; @@ -55,7 +55,7 @@ inline HeapRegion* HeapRegionSeq::addr_to_region(HeapWord* addr) const { return NULL; } -inline HeapRegion* HeapRegionSeq::at(size_t index) const { +inline HeapRegion* HeapRegionSeq::at(uint index) const { assert(index < length(), "pre-condition"); HeapRegion* hr = _regions[index]; assert(hr != NULL, "sanity"); diff --git a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.cpp b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.cpp index e21cdd74d85..ac5f96b9093 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.cpp +++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -25,28 +25,26 @@ #include "precompiled.hpp" #include "gc_implementation/g1/heapRegionSet.inline.hpp" -size_t HeapRegionSetBase::_unrealistically_long_length = 0; +uint HeapRegionSetBase::_unrealistically_long_length = 0; HRSPhase HeapRegionSetBase::_phase = HRSPhaseNone; //////////////////// HeapRegionSetBase //////////////////// -void HeapRegionSetBase::set_unrealistically_long_length(size_t len) { +void HeapRegionSetBase::set_unrealistically_long_length(uint len) { guarantee(_unrealistically_long_length == 0, "should only be set once"); _unrealistically_long_length = len; } -size_t HeapRegionSetBase::calculate_region_num(HeapRegion* hr) { +uint HeapRegionSetBase::calculate_region_num(HeapRegion* hr) { assert(hr->startsHumongous(), "pre-condition"); assert(hr->capacity() % HeapRegion::GrainBytes == 0, "invariant"); - size_t region_num = hr->capacity() >> HeapRegion::LogOfHRGrainBytes; + uint region_num = (uint) (hr->capacity() >> HeapRegion::LogOfHRGrainBytes); assert(region_num > 0, "sanity"); return region_num; } void HeapRegionSetBase::fill_in_ext_msg(hrs_ext_msg* msg, const char* message) { - msg->append("[%s] %s " - "ln: "SIZE_FORMAT" rn: "SIZE_FORMAT" " - "cy: "SIZE_FORMAT" ud: "SIZE_FORMAT, + msg->append("[%s] %s ln: %u rn: %u cy: "SIZE_FORMAT" ud: "SIZE_FORMAT, name(), message, length(), region_num(), total_capacity_bytes(), total_used_bytes()); fill_in_ext_msg_extra(msg); @@ -170,13 +168,11 @@ void HeapRegionSetBase::verify_end() { hrs_ext_msg(this, "verification should be in progress")); guarantee(length() == _calc_length, - hrs_err_msg("[%s] length: "SIZE_FORMAT" should be == " - "calc length: "SIZE_FORMAT, + hrs_err_msg("[%s] length: %u should be == calc length: %u", name(), length(), _calc_length)); guarantee(region_num() == _calc_region_num, - hrs_err_msg("[%s] region num: "SIZE_FORMAT" should be == " - "calc region num: "SIZE_FORMAT, + hrs_err_msg("[%s] region num: %u should be == calc region num: %u", name(), region_num(), _calc_region_num)); guarantee(total_capacity_bytes() == _calc_total_capacity_bytes, @@ -211,8 +207,8 @@ void HeapRegionSetBase::print_on(outputStream* out, bool print_contents) { out->print_cr(" humongous : %s", BOOL_TO_STR(regions_humongous())); out->print_cr(" empty : %s", BOOL_TO_STR(regions_empty())); out->print_cr(" Attributes"); - out->print_cr(" length : "SIZE_FORMAT_W(14), length()); - out->print_cr(" region num : "SIZE_FORMAT_W(14), region_num()); + out->print_cr(" length : %14u", length()); + out->print_cr(" region num : %14u", region_num()); out->print_cr(" total capacity : "SIZE_FORMAT_W(14)" bytes", total_capacity_bytes()); out->print_cr(" total used : "SIZE_FORMAT_W(14)" bytes", @@ -243,14 +239,12 @@ void HeapRegionSet::update_from_proxy(HeapRegionSet* proxy_set) { if (proxy_set->is_empty()) return; assert(proxy_set->length() <= _length, - hrs_err_msg("[%s] proxy set length: "SIZE_FORMAT" " - "should be <= length: "SIZE_FORMAT, + hrs_err_msg("[%s] proxy set length: %u should be <= length: %u", name(), proxy_set->length(), _length)); _length -= proxy_set->length(); assert(proxy_set->region_num() <= _region_num, - hrs_err_msg("[%s] proxy set region num: "SIZE_FORMAT" " - "should be <= region num: "SIZE_FORMAT, + hrs_err_msg("[%s] proxy set region num: %u should be <= region num: %u", name(), proxy_set->region_num(), _region_num)); _region_num -= proxy_set->region_num(); @@ -369,17 +363,17 @@ void HeapRegionLinkedList::remove_all() { verify_optional(); } -void 
HeapRegionLinkedList::remove_all_pending(size_t target_count) { +void HeapRegionLinkedList::remove_all_pending(uint target_count) { hrs_assert_mt_safety_ok(this); assert(target_count > 1, hrs_ext_msg(this, "pre-condition")); assert(!is_empty(), hrs_ext_msg(this, "pre-condition")); verify_optional(); - DEBUG_ONLY(size_t old_length = length();) + DEBUG_ONLY(uint old_length = length();) HeapRegion* curr = _head; HeapRegion* prev = NULL; - size_t count = 0; + uint count = 0; while (curr != NULL) { hrs_assert_region_ok(this, curr, this); HeapRegion* next = curr->next(); @@ -387,7 +381,7 @@ void HeapRegionLinkedList::remove_all_pending(size_t target_count) { if (curr->pending_removal()) { assert(count < target_count, hrs_err_msg("[%s] should not come across more regions " - "pending for removal than target_count: "SIZE_FORMAT, + "pending for removal than target_count: %u", name(), target_count)); if (prev == NULL) { @@ -422,12 +416,11 @@ void HeapRegionLinkedList::remove_all_pending(size_t target_count) { } assert(count == target_count, - hrs_err_msg("[%s] count: "SIZE_FORMAT" should be == " - "target_count: "SIZE_FORMAT, name(), count, target_count)); + hrs_err_msg("[%s] count: %u should be == target_count: %u", + name(), count, target_count)); assert(length() + target_count == old_length, hrs_err_msg("[%s] new length should be consistent " - "new length: "SIZE_FORMAT" old length: "SIZE_FORMAT" " - "target_count: "SIZE_FORMAT, + "new length: %u old length: %u target_count: %u", name(), length(), old_length, target_count)); verify_optional(); @@ -444,16 +437,16 @@ void HeapRegionLinkedList::verify() { HeapRegion* curr = _head; HeapRegion* prev1 = NULL; HeapRegion* prev0 = NULL; - size_t count = 0; + uint count = 0; while (curr != NULL) { verify_next_region(curr); count += 1; guarantee(count < _unrealistically_long_length, - hrs_err_msg("[%s] the calculated length: "SIZE_FORMAT" " + hrs_err_msg("[%s] the calculated length: %u " "seems very long, is there maybe a cycle? " "curr: "PTR_FORMAT" prev0: "PTR_FORMAT" " - "prev1: "PTR_FORMAT" length: "SIZE_FORMAT, + "prev1: "PTR_FORMAT" length: %u", name(), count, curr, prev0, prev1, length())); prev1 = prev0; diff --git a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.hpp b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.hpp index 8231c772d3b..1f0ffe1670c 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.hpp +++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.hpp @@ -62,20 +62,20 @@ class HeapRegionSetBase VALUE_OBJ_CLASS_SPEC { friend class VMStructs; protected: - static size_t calculate_region_num(HeapRegion* hr); + static uint calculate_region_num(HeapRegion* hr); - static size_t _unrealistically_long_length; + static uint _unrealistically_long_length; // The number of regions added to the set. If the set contains // only humongous regions, this reflects only 'starts humongous' // regions and does not include 'continues humongous' ones. - size_t _length; + uint _length; // The total number of regions represented by the set. If the set // does not contain humongous regions, this should be the same as // _length. If the set contains only humongous regions, this will // include the 'continues humongous' regions. - size_t _region_num; + uint _region_num; // We don't keep track of the total capacity explicitly, we instead // recalculate it based on _region_num and the heap region size. 
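The remove_all_pending() loop above follows the classic unlink-while-traversing pattern for a singly-linked list: remember the predecessor, and only advance it past nodes that survive. A stripped-down sketch with a toy node type (Node and its pending flag are stand-ins for HeapRegion and pending_removal(), not HotSpot code):

#include <cassert>
#include <cstdint>

struct Node { Node* next; bool pending; };

uint32_t remove_all_pending(Node*& head, uint32_t target_count) {
  Node* curr = head;
  Node* prev = nullptr;
  uint32_t count = 0;
  while (curr != nullptr) {
    Node* next = curr->next;   // save before unlinking
    if (curr->pending) {
      assert(count < target_count && "more pending nodes than target_count");
      if (prev == nullptr) {
        head = next;           // removing the current head
      } else {
        prev->next = next;     // splicing out an interior node
      }
      curr->next = nullptr;
      curr->pending = false;
      count += 1;
    } else {
      prev = curr;             // prev only advances past survivors
    }
    curr = next;
  }
  return count;
}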
@@ -86,8 +86,8 @@ protected: const char* _name; bool _verify_in_progress; - size_t _calc_length; - size_t _calc_region_num; + uint _calc_length; + uint _calc_region_num; size_t _calc_total_capacity_bytes; size_t _calc_total_used_bytes; @@ -153,18 +153,18 @@ protected: HeapRegionSetBase(const char* name); public: - static void set_unrealistically_long_length(size_t len); + static void set_unrealistically_long_length(uint len); const char* name() { return _name; } - size_t length() { return _length; } + uint length() { return _length; } bool is_empty() { return _length == 0; } - size_t region_num() { return _region_num; } + uint region_num() { return _region_num; } size_t total_capacity_bytes() { - return region_num() << HeapRegion::LogOfHRGrainBytes; + return (size_t) region_num() << HeapRegion::LogOfHRGrainBytes; } size_t total_used_bytes() { return _total_used_bytes; } @@ -341,7 +341,7 @@ public: // of regions that are pending for removal in the list, and // target_count should be > 1 (currently, we never need to remove a // single region using this). - void remove_all_pending(size_t target_count); + void remove_all_pending(uint target_count); virtual void verify(); diff --git a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.inline.hpp b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.inline.hpp index 9cb40b52754..8705f40cf95 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.inline.hpp +++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -54,15 +54,15 @@ inline void HeapRegionSetBase::update_for_removal(HeapRegion* hr) { assert(_length > 0, hrs_ext_msg(this, "pre-condition")); _length -= 1; - size_t region_num_diff; + uint region_num_diff; if (!hr->isHumongous()) { region_num_diff = 1; } else { region_num_diff = calculate_region_num(hr); } assert(region_num_diff <= _region_num, - hrs_err_msg("[%s] region's region num: "SIZE_FORMAT" " - "should be <= region num: "SIZE_FORMAT, + hrs_err_msg("[%s] region's region num: %u " + "should be <= region num: %u", name(), region_num_diff, _region_num)); _region_num -= region_num_diff; diff --git a/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.cpp b/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.cpp index 5cdd101404b..64b1be2460f 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.cpp +++ b/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -481,8 +481,7 @@ size_t SparsePRT::mem_size() const { bool SparsePRT::add_card(RegionIdx_t region_id, CardIdx_t card_index) { #if SPARSE_PRT_VERBOSE - gclog_or_tty->print_cr(" Adding card %d from region %d to region " - SIZE_FORMAT" sparse.", + gclog_or_tty->print_cr(" Adding card %d from region %d to region %u sparse.", card_index, region_id, _hr->hrs_index()); #endif if (_next->occupied_entries() * 2 > _next->capacity()) { @@ -534,7 +533,7 @@ void SparsePRT::expand() { _next = new RSHashTable(last->capacity() * 2); #if SPARSE_PRT_VERBOSE - gclog_or_tty->print_cr(" Expanded sparse table for "SIZE_FORMAT" to %d.", + gclog_or_tty->print_cr(" Expanded sparse table for %u to %d.", _hr->hrs_index(), _next->capacity()); #endif for (size_t i = 0; i < last->capacity(); i++) { diff --git a/hotspot/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp b/hotspot/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp index a646b48b0c5..5507dee5f80 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp +++ b/hotspot/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp @@ -34,7 +34,7 @@ static_field(HeapRegion, GrainBytes, size_t) \ \ nonstatic_field(HeapRegionSeq, _regions, HeapRegion**) \ - nonstatic_field(HeapRegionSeq, _length, size_t) \ + nonstatic_field(HeapRegionSeq, _length, uint) \ \ nonstatic_field(G1CollectedHeap, _hrs, HeapRegionSeq) \ nonstatic_field(G1CollectedHeap, _g1_committed, MemRegion) \ @@ -50,8 +50,8 @@ nonstatic_field(G1MonitoringSupport, _old_committed, size_t) \ nonstatic_field(G1MonitoringSupport, _old_used, size_t) \ \ - nonstatic_field(HeapRegionSetBase, _length, size_t) \ - nonstatic_field(HeapRegionSetBase, _region_num, size_t) \ + nonstatic_field(HeapRegionSetBase, _length, uint) \ + nonstatic_field(HeapRegionSetBase, _region_num, uint) \ nonstatic_field(HeapRegionSetBase, _total_used_bytes, size_t) \ From 0afaaf8d367118039c73cf26cde591b882be7c17 Mon Sep 17 00:00:00 2001 From: Antonios Printezis Date: Wed, 18 Apr 2012 13:39:55 -0400 Subject: [PATCH 12/15] 7145441: G1: collection set chooser-related cleanup Cleanup of the CSet chooser class: standardize on uints for region num and indexes (instead of int, jint, etc.), make the method / field naming style more consistent, remove a lot of dead code. 
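The core of the uint standardization is a set of thin wrappers: GrowableArray is int-indexed, so the chooser (see collectionSetChooser.hpp below) funnels every access through uint-typed helpers and keeps the casts in one place. The same pattern in self-contained form, with std::vector standing in for GrowableArray:

#include <cstdint>
#include <vector>

struct Region;   // opaque stand-in for HeapRegion

class RegionArray {
  std::vector<Region*> _regions;  // the underlying int/size_t-indexed store
public:
  // uint-facing wrappers, mirroring regions_length()/regions_at()/
  // regions_at_put()/regions_trunc_to() in the patch:
  uint32_t length() const                 { return (uint32_t) _regions.size(); }
  Region*  at(uint32_t i) const           { return _regions.at((size_t) i); }
  void     at_put(uint32_t i, Region* r)  { _regions.at((size_t) i) = r; }
  void     trunc_to(uint32_t i)           { _regions.resize((size_t) i); }
};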
Reviewed-by: johnc, brutisso --- .../g1/collectionSetChooser.cpp | 255 ++++-------------- .../g1/collectionSetChooser.hpp | 152 ++++------- .../gc_implementation/g1/concurrentMark.cpp | 105 +------- .../gc_implementation/g1/g1CollectedHeap.cpp | 2 - .../g1/g1CollectorPolicy.cpp | 146 +++------- .../g1/g1CollectorPolicy.hpp | 42 --- .../vm/gc_implementation/g1/g1_globals.hpp | 3 - .../vm/gc_implementation/g1/heapRegion.cpp | 4 +- .../vm/gc_implementation/g1/heapRegion.hpp | 16 +- 9 files changed, 147 insertions(+), 578 deletions(-) diff --git a/hotspot/src/share/vm/gc_implementation/g1/collectionSetChooser.cpp b/hotspot/src/share/vm/gc_implementation/g1/collectionSetChooser.cpp index 29a03275c61..663011a77b1 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/collectionSetChooser.cpp +++ b/hotspot/src/share/vm/gc_implementation/g1/collectionSetChooser.cpp @@ -29,102 +29,6 @@ #include "gc_implementation/g1/g1ErgoVerbose.hpp" #include "memory/space.inline.hpp" -CSetChooserCache::CSetChooserCache() { - for (int i = 0; i < CacheLength; ++i) - _cache[i] = NULL; - clear(); -} - -void CSetChooserCache::clear() { - _occupancy = 0; - _first = 0; - for (int i = 0; i < CacheLength; ++i) { - HeapRegion *hr = _cache[i]; - if (hr != NULL) - hr->set_sort_index(-1); - _cache[i] = NULL; - } -} - -#ifndef PRODUCT -bool CSetChooserCache::verify() { - guarantee(false, "CSetChooserCache::verify(): don't call this any more"); - - int index = _first; - HeapRegion *prev = NULL; - for (int i = 0; i < _occupancy; ++i) { - guarantee(_cache[index] != NULL, "cache entry should not be empty"); - HeapRegion *hr = _cache[index]; - guarantee(!hr->is_young(), "should not be young!"); - if (prev != NULL) { - guarantee(prev->gc_efficiency() >= hr->gc_efficiency(), - "cache should be correctly ordered"); - } - guarantee(hr->sort_index() == get_sort_index(index), - "sort index should be correct"); - index = trim_index(index + 1); - prev = hr; - } - - for (int i = 0; i < (CacheLength - _occupancy); ++i) { - guarantee(_cache[index] == NULL, "cache entry should be empty"); - index = trim_index(index + 1); - } - - guarantee(index == _first, "we should have reached where we started from"); - return true; -} -#endif // PRODUCT - -void CSetChooserCache::insert(HeapRegion *hr) { - guarantee(false, "CSetChooserCache::insert(): don't call this any more"); - - assert(!is_full(), "cache should not be empty"); - hr->calc_gc_efficiency(); - - int empty_index; - if (_occupancy == 0) { - empty_index = _first; - } else { - empty_index = trim_index(_first + _occupancy); - assert(_cache[empty_index] == NULL, "last slot should be empty"); - int last_index = trim_index(empty_index - 1); - HeapRegion *last = _cache[last_index]; - assert(last != NULL,"as the cache is not empty, last should not be empty"); - while (empty_index != _first && - last->gc_efficiency() < hr->gc_efficiency()) { - _cache[empty_index] = last; - last->set_sort_index(get_sort_index(empty_index)); - empty_index = last_index; - last_index = trim_index(last_index - 1); - last = _cache[last_index]; - } - } - _cache[empty_index] = hr; - hr->set_sort_index(get_sort_index(empty_index)); - - ++_occupancy; - assert(verify(), "cache should be consistent"); -} - -HeapRegion *CSetChooserCache::remove_first() { - guarantee(false, "CSetChooserCache::remove_first(): " - "don't call this any more"); - - if (_occupancy > 0) { - assert(_cache[_first] != NULL, "cache should have at least one region"); - HeapRegion *ret = _cache[_first]; - _cache[_first] = NULL; - ret->set_sort_index(-1); - 
--_occupancy; - _first = trim_index(_first + 1); - assert(verify(), "cache should be consistent"); - return ret; - } else { - return NULL; - } -} - // Even though we don't use the GC efficiency in our heuristics as // much as we used to, we still order according to GC efficiency. This // will cause regions with a lot of live objects and large RSets to @@ -134,7 +38,7 @@ HeapRegion *CSetChooserCache::remove_first() { // the ones we'll skip are ones with both large RSets and a lot of // live objects, not the ones with just a lot of live objects if we // ordered according to the amount of reclaimable bytes per region. -static int orderRegions(HeapRegion* hr1, HeapRegion* hr2) { +static int order_regions(HeapRegion* hr1, HeapRegion* hr2) { if (hr1 == NULL) { if (hr2 == NULL) { return 0; @@ -156,8 +60,8 @@ static int orderRegions(HeapRegion* hr1, HeapRegion* hr2) { } } -static int orderRegions(HeapRegion** hr1p, HeapRegion** hr2p) { - return orderRegions(*hr1p, *hr2p); +static int order_regions(HeapRegion** hr1p, HeapRegion** hr2p) { + return order_regions(*hr1p, *hr2p); } CollectionSetChooser::CollectionSetChooser() : @@ -175,105 +79,74 @@ CollectionSetChooser::CollectionSetChooser() : // // Note: containing object is allocated on C heap since it is CHeapObj. // - _markedRegions((ResourceObj::set_allocation_type((address)&_markedRegions, + _regions((ResourceObj::set_allocation_type((address) &_regions, ResourceObj::C_HEAP), 100), true /* C_Heap */), - _curr_index(0), _length(0), - _regionLiveThresholdBytes(0), _remainingReclaimableBytes(0), - _first_par_unreserved_idx(0) { - _regionLiveThresholdBytes = + _curr_index(0), _length(0), _first_par_unreserved_idx(0), + _region_live_threshold_bytes(0), _remaining_reclaimable_bytes(0) { + _region_live_threshold_bytes = HeapRegion::GrainBytes * (size_t) G1OldCSetRegionLiveThresholdPercent / 100; } #ifndef PRODUCT -bool CollectionSetChooser::verify() { - guarantee(_length >= 0, err_msg("_length: %d", _length)); - guarantee(0 <= _curr_index && _curr_index <= _length, - err_msg("_curr_index: %d _length: %d", _curr_index, _length)); - int index = 0; +void CollectionSetChooser::verify() { + guarantee(_length <= regions_length(), + err_msg("_length: %u regions length: %u", _length, regions_length())); + guarantee(_curr_index <= _length, + err_msg("_curr_index: %u _length: %u", _curr_index, _length)); + uint index = 0; size_t sum_of_reclaimable_bytes = 0; while (index < _curr_index) { - guarantee(_markedRegions.at(index) == NULL, + guarantee(regions_at(index) == NULL, "all entries before _curr_index should be NULL"); index += 1; } HeapRegion *prev = NULL; while (index < _length) { - HeapRegion *curr = _markedRegions.at(index++); - guarantee(curr != NULL, "Regions in _markedRegions array cannot be NULL"); - int si = curr->sort_index(); + HeapRegion *curr = regions_at(index++); + guarantee(curr != NULL, "Regions in _regions array cannot be NULL"); guarantee(!curr->is_young(), "should not be young!"); guarantee(!curr->isHumongous(), "should not be humongous!"); - guarantee(si > -1 && si == (index-1), "sort index invariant"); if (prev != NULL) { - guarantee(orderRegions(prev, curr) != 1, + guarantee(order_regions(prev, curr) != 1, err_msg("GC eff prev: %1.4f GC eff curr: %1.4f", prev->gc_efficiency(), curr->gc_efficiency())); } sum_of_reclaimable_bytes += curr->reclaimable_bytes(); prev = curr; } - guarantee(sum_of_reclaimable_bytes == _remainingReclaimableBytes, + guarantee(sum_of_reclaimable_bytes == _remaining_reclaimable_bytes, err_msg("reclaimable bytes 
inconsistent, " "remaining: "SIZE_FORMAT" sum: "SIZE_FORMAT, - _remainingReclaimableBytes, sum_of_reclaimable_bytes)); - return true; + _remaining_reclaimable_bytes, sum_of_reclaimable_bytes)); } -#endif +#endif // !PRODUCT -void CollectionSetChooser::fillCache() { - guarantee(false, "fillCache: don't call this any more"); - - while (!_cache.is_full() && (_curr_index < _length)) { - HeapRegion* hr = _markedRegions.at(_curr_index); - assert(hr != NULL, - err_msg("Unexpected NULL hr in _markedRegions at index %d", - _curr_index)); - _curr_index += 1; - assert(!hr->is_young(), "should not be young!"); - assert(hr->sort_index() == _curr_index-1, "sort_index invariant"); - _markedRegions.at_put(hr->sort_index(), NULL); - _cache.insert(hr); - assert(!_cache.is_empty(), "cache should not be empty"); - } - assert(verify(), "cache should be consistent"); -} - -void CollectionSetChooser::sortMarkedHeapRegions() { +void CollectionSetChooser::sort_regions() { // First trim any unused portion of the top in the parallel case. if (_first_par_unreserved_idx > 0) { - if (G1PrintParCleanupStats) { - gclog_or_tty->print(" Truncating _markedRegions from %d to %d.\n", - _markedRegions.length(), _first_par_unreserved_idx); - } - assert(_first_par_unreserved_idx <= _markedRegions.length(), + assert(_first_par_unreserved_idx <= regions_length(), "Or we didn't reserved enough length"); - _markedRegions.trunc_to(_first_par_unreserved_idx); + regions_trunc_to(_first_par_unreserved_idx); } - _markedRegions.sort(orderRegions); - assert(_length <= _markedRegions.length(), "Requirement"); - assert(_length == 0 || _markedRegions.at(_length - 1) != NULL, - "Testing _length"); - assert(_length == _markedRegions.length() || - _markedRegions.at(_length) == NULL, "Testing _length"); - if (G1PrintParCleanupStats) { - gclog_or_tty->print_cr(" Sorted %d marked regions.", _length); - } - for (int i = 0; i < _length; i++) { - assert(_markedRegions.at(i) != NULL, "Should be true by sorting!"); - _markedRegions.at(i)->set_sort_index(i); + _regions.sort(order_regions); + assert(_length <= regions_length(), "Requirement"); +#ifdef ASSERT + for (uint i = 0; i < _length; i++) { + assert(regions_at(i) != NULL, "Should be true by sorting!"); } +#endif // ASSERT if (G1PrintRegionLivenessInfo) { G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Sorting"); - for (int i = 0; i < _length; ++i) { - HeapRegion* r = _markedRegions.at(i); + for (uint i = 0; i < _length; ++i) { + HeapRegion* r = regions_at(i); cl.doHeapRegion(r); } } - assert(verify(), "CSet chooser verification"); + verify(); } -uint CollectionSetChooser::calcMinOldCSetLength() { +uint CollectionSetChooser::calc_min_old_cset_length() { // The min old CSet region bound is based on the maximum desired // number of mixed GCs after a cycle. I.e., even if some old regions // look expensive, we should add them to the CSet anyway to make @@ -294,7 +167,7 @@ uint CollectionSetChooser::calcMinOldCSetLength() { return (uint) result; } -uint CollectionSetChooser::calcMaxOldCSetLength() { +uint CollectionSetChooser::calc_max_old_cset_length() { // The max old CSet region bound is based on the threshold expressed // as a percentage of the heap size. 
I.e., it should bound the // number of old regions added to the CSet irrespective of how many @@ -311,18 +184,18 @@ uint CollectionSetChooser::calcMaxOldCSetLength() { return (uint) result; } -void CollectionSetChooser::addMarkedHeapRegion(HeapRegion* hr) { +void CollectionSetChooser::add_region(HeapRegion* hr) { assert(!hr->isHumongous(), "Humongous regions shouldn't be added to the collection set"); assert(!hr->is_young(), "should not be young!"); - _markedRegions.append(hr); + _regions.append(hr); _length++; - _remainingReclaimableBytes += hr->reclaimable_bytes(); + _remaining_reclaimable_bytes += hr->reclaimable_bytes(); hr->calc_gc_efficiency(); } -void CollectionSetChooser::prepareForAddMarkedHeapRegionsPar(uint n_regions, - uint chunkSize) { +void CollectionSetChooser::prepare_for_par_region_addition(uint n_regions, + uint chunk_size) { _first_par_unreserved_idx = 0; uint n_threads = (uint) ParallelGCThreads; if (UseDynamicNumberOfGCThreads) { @@ -335,56 +208,46 @@ void CollectionSetChooser::prepareForAddMarkedHeapRegionsPar(uint n_regions, n_threads = MAX2(G1CollectedHeap::heap()->workers()->active_workers(), 1U); } - uint max_waste = n_threads * chunkSize; - // it should be aligned with respect to chunkSize - uint aligned_n_regions = (n_regions + chunkSize - 1) / chunkSize * chunkSize; - assert(aligned_n_regions % chunkSize == 0, "should be aligned"); - _markedRegions.at_put_grow((int) (aligned_n_regions + max_waste - 1), NULL); + uint max_waste = n_threads * chunk_size; + // it should be aligned with respect to chunk_size + uint aligned_n_regions = (n_regions + chunk_size - 1) / chunk_size * chunk_size; + assert(aligned_n_regions % chunk_size == 0, "should be aligned"); + regions_at_put_grow(aligned_n_regions + max_waste - 1, NULL); } -jint CollectionSetChooser::getParMarkedHeapRegionChunk(jint n_regions) { - // Don't do this assert because this can be called at a point - // where the loop up stream will not execute again but might - // try to claim more chunks (loop test has not been done yet). - // assert(_markedRegions.length() > _first_par_unreserved_idx, - // "Striding beyond the marked regions"); - jint res = Atomic::add(n_regions, &_first_par_unreserved_idx); - assert(_markedRegions.length() > res + n_regions - 1, +uint CollectionSetChooser::claim_array_chunk(uint chunk_size) { + uint res = (uint) Atomic::add((jint) chunk_size, + (volatile jint*) &_first_par_unreserved_idx); + assert(regions_length() > res + chunk_size - 1, "Should already have been expanded"); - return res - n_regions; + return res - chunk_size; } -void CollectionSetChooser::setMarkedHeapRegion(jint index, HeapRegion* hr) { - assert(_markedRegions.at(index) == NULL, "precondition"); +void CollectionSetChooser::set_region(uint index, HeapRegion* hr) { + assert(regions_at(index) == NULL, "precondition"); assert(!hr->is_young(), "should not be young!"); - _markedRegions.at_put(index, hr); + regions_at_put(index, hr); hr->calc_gc_efficiency(); } -void CollectionSetChooser::updateTotals(jint region_num, - size_t reclaimable_bytes) { +void CollectionSetChooser::update_totals(uint region_num, + size_t reclaimable_bytes) { // Only take the lock if we actually need to update the totals. if (region_num > 0) { assert(reclaimable_bytes > 0, "invariant"); // We could have just used atomics instead of taking the // lock. However, we currently don't have an atomic add for size_t. 
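In standard C++ terms, the two-level scheme used here looks like this: each worker claims a disjoint chunk of the array with an atomic add (claim_array_chunk()), fills its slots privately via set_region(), and only the final totals go through a lock (update_totals()). A sketch under those assumptions, with std::atomic and std::mutex standing in for Atomic::add and ParGCRareEvent_lock:

#include <atomic>
#include <cstdint>
#include <mutex>

std::atomic<uint32_t> first_unreserved_idx{0};
std::mutex            totals_lock;
uint32_t              total_regions = 0;
size_t                total_reclaimable_bytes = 0;

uint32_t claim_array_chunk(uint32_t chunk_size) {
  // fetch_add returns the old value, i.e. the start of our chunk;
  // HotSpot's Atomic::add returns the new value, hence its '- chunk_size'.
  return first_unreserved_idx.fetch_add(chunk_size);
}

void update_totals(uint32_t region_num, size_t reclaimable_bytes) {
  if (region_num == 0) return;          // nothing to publish, skip the lock
  std::lock_guard<std::mutex> x(totals_lock);
  total_regions           += region_num;
  total_reclaimable_bytes += reclaimable_bytes;
}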
MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); - _length += (int) region_num; - _remainingReclaimableBytes += reclaimable_bytes; + _length += region_num; + _remaining_reclaimable_bytes += reclaimable_bytes; } else { assert(reclaimable_bytes == 0, "invariant"); } } -void CollectionSetChooser::clearMarkedHeapRegions() { - for (int i = 0; i < _markedRegions.length(); i++) { - HeapRegion* r = _markedRegions.at(i); - if (r != NULL) { - r->set_sort_index(-1); - } - } - _markedRegions.clear(); +void CollectionSetChooser::clear() { + _regions.clear(); _curr_index = 0; _length = 0; - _remainingReclaimableBytes = 0; + _remaining_reclaimable_bytes = 0; }; diff --git a/hotspot/src/share/vm/gc_implementation/g1/collectionSetChooser.hpp b/hotspot/src/share/vm/gc_implementation/g1/collectionSetChooser.hpp index caf18206066..e52476586c5 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/collectionSetChooser.hpp +++ b/hotspot/src/share/vm/gc_implementation/g1/collectionSetChooser.hpp @@ -28,77 +28,42 @@ #include "gc_implementation/g1/heapRegion.hpp" #include "utilities/growableArray.hpp" -class CSetChooserCache VALUE_OBJ_CLASS_SPEC { -private: - enum { - CacheLength = 16 - } PrivateConstants; - - HeapRegion* _cache[CacheLength]; - int _occupancy; // number of regions in cache - int _first; // (index of) "first" region in the cache - - // adding CacheLength to deal with negative values - inline int trim_index(int index) { - return (index + CacheLength) % CacheLength; - } - - inline int get_sort_index(int index) { - return -index-2; - } - inline int get_index(int sort_index) { - return -sort_index-2; - } - -public: - CSetChooserCache(void); - - inline int occupancy(void) { return _occupancy; } - inline bool is_full() { return _occupancy == CacheLength; } - inline bool is_empty() { return _occupancy == 0; } - - void clear(void); - void insert(HeapRegion *hr); - HeapRegion *remove_first(void); - inline HeapRegion *get_first(void) { - return _cache[_first]; - } - -#ifndef PRODUCT - bool verify (void); - bool region_in_cache(HeapRegion *hr) { - int sort_index = hr->sort_index(); - if (sort_index < -1) { - int index = get_index(sort_index); - guarantee(index < CacheLength, "should be within bounds"); - return _cache[index] == hr; - } else - return 0; - } -#endif // PRODUCT -}; - class CollectionSetChooser: public CHeapObj { - GrowableArray _markedRegions; + GrowableArray _regions; + + // Unfortunately, GrowableArray uses ints for length and indexes. To + // avoid excessive casting in the rest of the class the following + // wrapper methods are provided that use uints. + + uint regions_length() { return (uint) _regions.length(); } + HeapRegion* regions_at(uint i) { return _regions.at((int) i); } + void regions_at_put(uint i, HeapRegion* hr) { + _regions.at_put((int) i, hr); + } + void regions_at_put_grow(uint i, HeapRegion* hr) { + _regions.at_put_grow((int) i, hr); + } + void regions_trunc_to(uint i) { _regions.trunc_to((uint) i); } // The index of the next candidate old region to be considered for // addition to the CSet. - int _curr_index; + uint _curr_index; // The number of candidate old regions added to the CSet chooser. - int _length; + uint _length; - CSetChooserCache _cache; - jint _first_par_unreserved_idx; + // Keeps track of the start of the next array chunk to be claimed by + // parallel GC workers. 
diff --git a/hotspot/src/share/vm/gc_implementation/g1/collectionSetChooser.hpp b/hotspot/src/share/vm/gc_implementation/g1/collectionSetChooser.hpp
index caf18206066..e52476586c5 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/collectionSetChooser.hpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/collectionSetChooser.hpp
@@ -28,77 +28,42 @@
 #include "gc_implementation/g1/heapRegion.hpp"
 #include "utilities/growableArray.hpp"
 
-class CSetChooserCache VALUE_OBJ_CLASS_SPEC {
-private:
-  enum {
-    CacheLength = 16
-  } PrivateConstants;
-
-  HeapRegion*  _cache[CacheLength];
-  int          _occupancy; // number of regions in cache
-  int          _first;     // (index of) "first" region in the cache
-
-  // adding CacheLength to deal with negative values
-  inline int trim_index(int index) {
-    return (index + CacheLength) % CacheLength;
-  }
-
-  inline int get_sort_index(int index) {
-    return -index-2;
-  }
-  inline int get_index(int sort_index) {
-    return -sort_index-2;
-  }
-
-public:
-  CSetChooserCache(void);
-
-  inline int occupancy(void) { return _occupancy; }
-  inline bool is_full()      { return _occupancy == CacheLength; }
-  inline bool is_empty()     { return _occupancy == 0; }
-
-  void clear(void);
-  void insert(HeapRegion *hr);
-  HeapRegion *remove_first(void);
-  inline HeapRegion *get_first(void) {
-    return _cache[_first];
-  }
-
-#ifndef PRODUCT
-  bool verify (void);
-  bool region_in_cache(HeapRegion *hr) {
-    int sort_index = hr->sort_index();
-    if (sort_index < -1) {
-      int index = get_index(sort_index);
-      guarantee(index < CacheLength, "should be within bounds");
-      return _cache[index] == hr;
-    } else
-      return 0;
-  }
-#endif // PRODUCT
-};
-
 class CollectionSetChooser: public CHeapObj {
 
-  GrowableArray<HeapRegion*> _markedRegions;
+  GrowableArray<HeapRegion*> _regions;
+
+  // Unfortunately, GrowableArray uses ints for length and indexes. To
+  // avoid excessive casting in the rest of the class the following
+  // wrapper methods are provided that use uints.
+
+  uint regions_length()          { return (uint) _regions.length(); }
+  HeapRegion* regions_at(uint i) { return _regions.at((int) i); }
+  void regions_at_put(uint i, HeapRegion* hr) {
+    _regions.at_put((int) i, hr);
+  }
+  void regions_at_put_grow(uint i, HeapRegion* hr) {
+    _regions.at_put_grow((int) i, hr);
+  }
+  void regions_trunc_to(uint i)  { _regions.trunc_to((uint) i); }
 
   // The index of the next candidate old region to be considered for
   // addition to the CSet.
-  int _curr_index;
+  uint _curr_index;
 
   // The number of candidate old regions added to the CSet chooser.
-  int _length;
+  uint _length;
 
-  CSetChooserCache _cache;
-  jint _first_par_unreserved_idx;
+  // Keeps track of the start of the next array chunk to be claimed by
+  // parallel GC workers.
+  uint _first_par_unreserved_idx;
 
   // If a region has more live bytes than this threshold, it will not
   // be added to the CSet chooser and will not be a candidate for
   // collection.
-  size_t _regionLiveThresholdBytes;
+  size_t _region_live_threshold_bytes;
 
   // The sum of reclaimable bytes over all the regions in the CSet chooser.
-  size_t _remainingReclaimableBytes;
+  size_t _remaining_reclaimable_bytes;
 
 public:
 
@@ -107,9 +72,9 @@ public:
   HeapRegion* peek() {
     HeapRegion* res = NULL;
     if (_curr_index < _length) {
-      res = _markedRegions.at(_curr_index);
+      res = regions_at(_curr_index);
       assert(res != NULL,
-             err_msg("Unexpected NULL hr in _markedRegions at index %d",
+             err_msg("Unexpected NULL hr in _regions at index %u",
                      _curr_index));
     }
     return res;
@@ -121,90 +86,71 @@ public:
   void remove_and_move_to_next(HeapRegion* hr) {
     assert(hr != NULL, "pre-condition");
     assert(_curr_index < _length, "pre-condition");
-    assert(_markedRegions.at(_curr_index) == hr, "pre-condition");
-    hr->set_sort_index(-1);
-    _markedRegions.at_put(_curr_index, NULL);
-    assert(hr->reclaimable_bytes() <= _remainingReclaimableBytes,
+    assert(regions_at(_curr_index) == hr, "pre-condition");
+    regions_at_put(_curr_index, NULL);
+    assert(hr->reclaimable_bytes() <= _remaining_reclaimable_bytes,
            err_msg("remaining reclaimable bytes inconsistent "
                    "from region: "SIZE_FORMAT" remaining: "SIZE_FORMAT,
-                   hr->reclaimable_bytes(), _remainingReclaimableBytes));
-    _remainingReclaimableBytes -= hr->reclaimable_bytes();
+                   hr->reclaimable_bytes(), _remaining_reclaimable_bytes));
+    _remaining_reclaimable_bytes -= hr->reclaimable_bytes();
     _curr_index += 1;
   }
 
   CollectionSetChooser();
 
-  void sortMarkedHeapRegions();
-  void fillCache();
+  void sort_regions();
 
   // Determine whether to add the given region to the CSet chooser or
   // not. Currently, we skip humongous regions (we never add them to
   // the CSet, we only reclaim them during cleanup) and regions whose
   // live bytes are over the threshold.
-  bool shouldAdd(HeapRegion* hr) {
+  bool should_add(HeapRegion* hr) {
     assert(hr->is_marked(), "pre-condition");
     assert(!hr->is_young(), "should never consider young regions");
    return !hr->isHumongous() &&
-            hr->live_bytes() < _regionLiveThresholdBytes;
+            hr->live_bytes() < _region_live_threshold_bytes;
   }
 
   // Calculate the minimum number of old regions we'll add to the CSet
   // during a mixed GC.
-  uint calcMinOldCSetLength();
+  uint calc_min_old_cset_length();
 
   // Calculate the maximum number of old regions we'll add to the CSet
   // during a mixed GC.
-  uint calcMaxOldCSetLength();
+  uint calc_max_old_cset_length();
 
   // Serial version.
-  void addMarkedHeapRegion(HeapRegion *hr);
+  void add_region(HeapRegion *hr);
 
-  // Must be called before calls to getParMarkedHeapRegionChunk.
-  // "n_regions" is the number of regions, "chunkSize" the chunk size.
-  void prepareForAddMarkedHeapRegionsPar(uint n_regions, uint chunkSize);
-  // Returns the first index in a contiguous chunk of "n_regions" indexes
+  // Must be called before calls to claim_array_chunk().
+  // n_regions is the number of regions, chunk_size the chunk size.
+  void prepare_for_par_region_addition(uint n_regions, uint chunk_size);
+  // Returns the first index in a contiguous chunk of chunk_size indexes
   // that the calling thread has reserved.  These must be set by the
-  // calling thread using "setMarkedHeapRegion" (to NULL if necessary).
-  jint getParMarkedHeapRegionChunk(jint n_regions);
+  // calling thread using set_region() (to NULL if necessary).
+  uint claim_array_chunk(uint chunk_size);
 
   // Set the marked array entry at index to hr.  Careful to claim the index
   // first if in parallel.
-  void setMarkedHeapRegion(jint index, HeapRegion* hr);
+  void set_region(uint index, HeapRegion* hr);
 
   // Atomically increment the number of added regions by region_num
   // and the amount of reclaimable bytes by reclaimable_bytes.
-  void updateTotals(jint region_num, size_t reclaimable_bytes);
+  void update_totals(uint region_num, size_t reclaimable_bytes);
 
-  void clearMarkedHeapRegions();
+  void clear();
 
   // Return the number of candidate regions that remain to be collected.
-  uint remainingRegions() { return (uint) (_length - _curr_index); }
+  uint remaining_regions() { return _length - _curr_index; }
 
   // Determine whether the CSet chooser has more candidate regions or not.
-  bool isEmpty() { return remainingRegions() == 0; }
+  bool is_empty() { return remaining_regions() == 0; }
 
   // Return the reclaimable bytes that remain to be collected on
   // all the candidate regions in the CSet chooser.
-  size_t remainingReclaimableBytes () { return _remainingReclaimableBytes; }
+  size_t remaining_reclaimable_bytes() { return _remaining_reclaimable_bytes; }
 
-  // Returns true if the used portion of "_markedRegions" is properly
+  // Returns true if the used portion of "_regions" is properly
   // sorted, otherwise asserts false.
-#ifndef PRODUCT
-  bool verify(void);
-  bool regionProperlyOrdered(HeapRegion* r) {
-    int si = r->sort_index();
-    if (si > -1) {
-      guarantee(_curr_index <= si && si < _length,
-                err_msg("curr: %d sort index: %d: length: %d",
-                        _curr_index, si, _length));
-      guarantee(_markedRegions.at(si) == r,
-                err_msg("sort index: %d at: "PTR_FORMAT" r: "PTR_FORMAT,
-                        si, _markedRegions.at(si), r));
-    } else {
-      guarantee(si == -1, err_msg("sort index: %d", si));
-    }
-    return true;
-  }
-#endif
-
+  void verify() PRODUCT_RETURN;
 };
 
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_COLLECTIONSETCHOOSER_HPP
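The regions_length()/regions_at()/regions_at_put_grow() wrappers confine the uint-to-int casts to one place so the rest of the chooser can use a single index type. The same boundary-cast pattern in a self-contained sketch, with std::vector playing the role of the int-indexed GrowableArray and a placeholder element type:

#include <cassert>
#include <cstddef>
#include <vector>

// Illustrative stand-in: keep signed/unsigned conversions inside a
// thin wrapper so callers see one consistent index type.
class RegionArray {
  std::vector<void*> _elems;  // placeholder element type
public:
  unsigned length() const { return (unsigned) _elems.size(); }
  void* at(unsigned i) const {
    assert(i < length() && "index out of bounds");
    return _elems[(std::size_t) i];
  }
  void at_put(unsigned i, void* e) { _elems[(std::size_t) i] = e; }
  void at_put_grow(unsigned i, void* e) {
    if (i >= length()) _elems.resize((std::size_t) i + 1, 0);
    _elems[(std::size_t) i] = e;
  }
  void trunc_to(unsigned i) { _elems.resize((std::size_t) i); }
};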
diff --git a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp
index 15eb1b6ea56..ba29b3120db 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp
@@ -1192,11 +1192,6 @@ class CalcLiveObjectsClosure: public HeapRegionClosure {
   BitMap* _region_bm;
   BitMap* _card_bm;
 
-  // Debugging
-  size_t _tot_words_done;
-  size_t _tot_live;
-  size_t _tot_used;
-
   size_t _region_marked_bytes;
 
   intptr_t _bottom_card_num;
@@ -1215,9 +1210,7 @@ public:
   CalcLiveObjectsClosure(CMBitMapRO *bm, ConcurrentMark *cm,
                          BitMap* region_bm, BitMap* card_bm) :
     _bm(bm), _cm(cm), _region_bm(region_bm), _card_bm(card_bm),
-    _region_marked_bytes(0), _tot_words_done(0),
-    _tot_live(0), _tot_used(0),
-    _bottom_card_num(cm->heap_bottom_card_num()) { }
+    _region_marked_bytes(0), _bottom_card_num(cm->heap_bottom_card_num()) { }
 
   // It takes a region that's not empty (i.e., it has at least one
   // live object in it and sets its corresponding bit on the region
@@ -1262,9 +1255,6 @@ public:
            "start: "PTR_FORMAT", nextTop: "PTR_FORMAT", end: "PTR_FORMAT,
            start, nextTop, hr->end()));
 
-    // Record the number of word's we'll examine.
-    size_t words_done = (nextTop - start);
-
     // Find the first marked object at or after "start".
     start = _bm->getNextMarkedWordAddress(start, nextTop);
@@ -1343,19 +1333,10 @@ public:
     // it can be queried by a calling verificiation routine
     _region_marked_bytes = marked_bytes;
 
-    _tot_live += hr->next_live_bytes();
-    _tot_used += hr->used();
-    _tot_words_done = words_done;
-
     return false;
   }
 
   size_t region_marked_bytes() const { return _region_marked_bytes; }
-
-  // Debugging
-  size_t tot_words_done() const { return _tot_words_done; }
-  size_t tot_live() const { return _tot_live; }
-  size_t tot_used() const { return _tot_used; }
 };
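CalcLiveObjectsClosure walks the mark bitmap between a region's bottom and its next top-at-mark-start, summing the sizes of marked objects into _region_marked_bytes. A toy version of that walk, assuming a simple one-bit-per-word bitmap and a caller-supplied object-size function (both are stand-ins for CMBitMapRO and the real object header lookup):

#include <cstddef>
#include <vector>

// Toy mark bitmap: one bit per heap word. find_next_set returns the
// index of the first set bit in [from, limit), or limit if none.
struct MarkBitmap {
  std::vector<bool> bits;
  std::size_t find_next_set(std::size_t from, std::size_t limit) const {
    while (from < limit && !bits[from]) from++;
    return from;
  }
};

// Total the sizes of marked objects in [bottom, ntams): find a marked
// word, add the object's size, continue past it -- the same shape as
// the closure's loop over getNextMarkedWordAddress().
std::size_t count_marked_words(const MarkBitmap& bm,
                               std::size_t bottom, std::size_t ntams,
                               std::size_t (*obj_size_words)(std::size_t)) {
  std::size_t marked = 0;
  std::size_t cur = bm.find_next_set(bottom, ntams);
  while (cur < ntams) {
    std::size_t size = obj_size_words(cur);
    marked += size;
    cur = bm.find_next_set(cur + size, ntams);
  }
  return marked;
}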
 
 // Heap region closure used for verifying the counting data
@@ -1574,10 +1555,6 @@ class FinalCountDataUpdateClosure: public HeapRegionClosure {
   BitMap* _region_bm;
   BitMap* _card_bm;
 
-  size_t _total_live_bytes;
-  size_t _total_used_bytes;
-  size_t _total_words_done;
-
   void set_card_bitmap_range(BitMap::idx_t start_idx, BitMap::idx_t last_idx) {
     assert(start_idx <= last_idx, "sanity");
 
@@ -1621,8 +1598,7 @@ class FinalCountDataUpdateClosure: public HeapRegionClosure {
   FinalCountDataUpdateClosure(ConcurrentMark* cm,
                               BitMap* region_bm,
                               BitMap* card_bm) :
-    _cm(cm), _region_bm(region_bm), _card_bm(card_bm),
-    _total_words_done(0), _total_live_bytes(0), _total_used_bytes(0) { }
+    _cm(cm), _region_bm(region_bm), _card_bm(card_bm) { }
 
   bool doHeapRegion(HeapRegion* hr) {
 
@@ -1644,8 +1620,6 @@ class FinalCountDataUpdateClosure: public HeapRegionClosure {
     assert(hr->bottom() <= start && start <= hr->end() &&
            hr->bottom() <= ntams && ntams <= hr->end(), "Preconditions.");
 
-    size_t words_done = ntams - hr->bottom();
-
     if (start < ntams) {
       // Region was changed between remark and cleanup pauses
       // We need to add (ntams - start) to the marked bytes
@@ -1676,16 +1650,8 @@ class FinalCountDataUpdateClosure: public HeapRegionClosure {
       set_bit_for_region(hr);
     }
 
-    _total_words_done += words_done;
-    _total_used_bytes += hr->used();
-    _total_live_bytes += hr->next_marked_bytes();
-
     return false;
   }
-
-  size_t total_words_done() const { return _total_words_done; }
-  size_t total_live_bytes() const { return _total_live_bytes; }
-  size_t total_used_bytes() const { return _total_used_bytes; }
 };
 
 class G1ParFinalCountTask: public AbstractGangTask {
@@ -1697,9 +1663,6 @@ protected:
 
   uint    _n_workers;
 
-  size_t *_live_bytes;
-  size_t *_used_bytes;
-
 public:
   G1ParFinalCountTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm)
     : AbstractGangTask("G1 final counting"),
@@ -1707,8 +1670,7 @@ public:
       _actual_region_bm(region_bm), _actual_card_bm(card_bm),
       _n_workers(0) {
     // Use the value already set as the number of active threads
-    // in the call to run_task().  Needed for the allocation of
-    // _live_bytes and _used_bytes.
+    // in the call to run_task().
     if (G1CollectedHeap::use_parallel_gc_threads()) {
       assert( _g1h->workers()->active_workers() > 0,
         "Should have been previously set");
@@ -1716,14 +1678,6 @@ public:
     } else {
       _n_workers = 1;
     }
-
-    _live_bytes = NEW_C_HEAP_ARRAY(size_t, (size_t) _n_workers);
-    _used_bytes = NEW_C_HEAP_ARRAY(size_t, (size_t) _n_workers);
-  }
-
-  ~G1ParFinalCountTask() {
-    FREE_C_HEAP_ARRAY(size_t, _live_bytes);
-    FREE_C_HEAP_ARRAY(size_t, _used_bytes);
   }
 
   void work(uint worker_id) {
@@ -1741,23 +1695,6 @@ public:
     } else {
       _g1h->heap_region_iterate(&final_update_cl);
     }
-
-    _live_bytes[worker_id] = final_update_cl.total_live_bytes();
-    _used_bytes[worker_id] = final_update_cl.total_used_bytes();
-  }
-
-  size_t live_bytes()  {
-    size_t live_bytes = 0;
-    for (uint i = 0; i < _n_workers; ++i)
-      live_bytes += _live_bytes[i];
-    return live_bytes;
-  }
-
-  size_t used_bytes()  {
-    size_t used_bytes = 0;
-    for (uint i = 0; i < _n_workers; ++i)
-      used_bytes += _used_bytes[i];
-    return used_bytes;
   }
 };
 
@@ -1892,15 +1829,6 @@ public:
 
       HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
     }
-    double end = os::elapsedTime();
-    if (G1PrintParCleanupStats) {
-      gclog_or_tty->print("     Worker thread %d [%8.3f..%8.3f = %8.3f ms] "
-                          "claimed %u regions (tot = %8.3f ms, max = %8.3f ms).\n",
-                          worker_id, start, end, (end-start)*1000.0,
-                          g1_note_end.regions_claimed(),
-                          g1_note_end.claimed_region_time_sec()*1000.0,
-                          g1_note_end.max_region_time_sec()*1000.0);
-    }
   }
   size_t max_live_bytes() { return _max_live_bytes; }
   size_t freed_bytes() { return _freed_bytes; }
@@ -2011,29 +1939,11 @@ void ConcurrentMark::cleanup() {
     guarantee(g1_par_verify_task.failures() == 0,
               "Unexpected accounting failures");
   }
 
-  size_t known_garbage_bytes =
-    g1_par_count_task.used_bytes() - g1_par_count_task.live_bytes();
-  g1p->set_known_garbage_bytes(known_garbage_bytes);
-
   size_t start_used_bytes = g1h->used();
   g1h->set_marking_complete();
 
-  ergo_verbose4(ErgoConcCycles,
-           "finish cleanup",
-           ergo_format_byte("occupancy")
-           ergo_format_byte("capacity")
-           ergo_format_byte_perc("known garbage"),
-           start_used_bytes, g1h->capacity(),
-           known_garbage_bytes,
-           ((double) known_garbage_bytes / (double) g1h->capacity()) * 100.0);
-
   double count_end = os::elapsedTime();
   double this_final_counting_time = (count_end - start);
-  if (G1PrintParCleanupStats) {
-    gclog_or_tty->print_cr("Cleanup:");
-    gclog_or_tty->print_cr("  Finalize counting: %8.3f ms",
-                           this_final_counting_time*1000.0);
-  }
   _total_counting_time += this_final_counting_time;
 
   if (G1PrintRegionLivenessInfo) {
@@ -2047,7 +1957,6 @@ void ConcurrentMark::cleanup() {
   g1h->reset_gc_time_stamp();
 
   // Note end of marking in all heap regions.
-  double note_end_start = os::elapsedTime();
   G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list);
   if (G1CollectedHeap::use_parallel_gc_threads()) {
     g1h->set_par_threads((int)n_workers);
@@ -2066,11 +1975,6 @@ void ConcurrentMark::cleanup() {
     // regions that there will be more free regions coming soon.
     g1h->set_free_regions_coming();
   }
-  double note_end_end = os::elapsedTime();
-  if (G1PrintParCleanupStats) {
-    gclog_or_tty->print_cr("  note end of marking: %8.3f ms.",
-                           (note_end_end - note_end_start)*1000.0);
-  }
 
   // call below, since it affects the metric by which we sort the heap
   // regions.
@@ -2109,9 +2013,6 @@ void ConcurrentMark::cleanup() {
                              g1h->capacity());
   }
 
-  size_t cleaned_up_bytes = start_used_bytes - g1h->used();
-  g1p->decrease_known_garbage_bytes(cleaned_up_bytes);
-
   // Clean up will have freed any regions completely full of garbage.
   // Update the soft reference policy with the new heap occupancy.
   Universe::update_heap_info_at_gc();
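The _live_bytes/_used_bytes arrays deleted here were a classic per-worker accumulation: each gang worker wrote only its own slot during work(), and a serial reduction summed the slots afterwards, avoiding any synchronization on the hot path. Since the statistics they fed (G1PrintParCleanupStats output and the known-garbage tracking) are being removed, the arrays go with them. For reference, a generic sketch of the pattern, with illustrative names:

#include <cstddef>
#include <numeric>
#include <vector>

// Each worker writes its own slot (no synchronization needed); a
// reduction sums the slots once all workers have finished.
struct PerWorkerTotals {
  std::vector<std::size_t> live_bytes;
  explicit PerWorkerTotals(unsigned worker_count)
      : live_bytes(worker_count, 0) {}

  // Called from worker threads; each index is touched by one thread.
  void record(unsigned worker_id, std::size_t bytes) {
    live_bytes[worker_id] = bytes;
  }

  // Called serially after the gang task completes.
  std::size_t total() const {
    return std::accumulate(live_bytes.begin(), live_bytes.end(),
                           std::size_t(0));
  }
};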
diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
index 924296718e8..2711a52d01b 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
@@ -4064,7 +4064,6 @@ void G1CollectedHeap::finalize_for_evac_failure() {
 void G1CollectedHeap::remove_self_forwarding_pointers() {
   assert(check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity");
-  assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!");
 
   G1ParRemoveSelfForwardPtrsTask rsfp_task(this);
 
@@ -4082,7 +4081,6 @@ void G1CollectedHeap::remove_self_forwarding_pointers() {
 
   reset_cset_heap_region_claim_values();
   assert(check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity");
-  assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!");
 
   // Now restore saved marks, if any.
   if (_objs_with_preserved_marks != NULL) {
diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp
index 44409fee471..d656f260ba1 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp
@@ -192,11 +192,6 @@ G1CollectorPolicy::G1CollectorPolicy() :
   _in_marking_window(false),
   _in_marking_window_im(false),
 
-  _known_garbage_ratio(0.0),
-  _known_garbage_bytes(0),
-
-  _young_gc_eff_seq(new TruncatedSeq(TruncatedSeqLength)),
-
   _recent_prev_end_times_for_all_gcs_sec(
                                 new TruncatedSeq(NumPrevPausesForHeuristics)),
 
@@ -868,8 +863,6 @@ void G1CollectorPolicy::record_full_collection_end() {
   _last_young_gc = false;
   clear_initiate_conc_mark_if_possible();
   clear_during_initial_mark_pause();
-  _known_garbage_bytes = 0;
-  _known_garbage_ratio = 0.0;
   _in_marking_window = false;
   _in_marking_window_im = false;
 
@@ -882,7 +875,7 @@ void G1CollectorPolicy::record_full_collection_end() {
   // Reset survivors SurvRateGroup.
   _survivor_surv_rate_group->reset();
   update_young_list_target_length();
-  _collectionSetChooser->clearMarkedHeapRegions();
+  _collectionSetChooser->clear();
 }
 
 void G1CollectorPolicy::record_stop_world_start() {
@@ -1456,16 +1449,6 @@ void G1CollectorPolicy::record_collection_pause_end(int no_of_gc_threads) {
     }
   }
 
-  // Update the efficiency-since-mark vars.
-  double proc_ms = elapsed_ms * (double) _parallel_gc_threads;
-  if (elapsed_ms < MIN_TIMER_GRANULARITY) {
-    // This usually happens due to the timer not having the required
-    // granularity. Some Linuxes are the usual culprits.
-    // We'll just set it to something (arbitrarily) small.
-    proc_ms = 1.0;
-  }
-  double cur_efficiency = (double) freed_bytes / proc_ms;
-
   bool new_in_marking_window = _in_marking_window;
   bool new_in_marking_window_im = false;
   if (during_initial_mark_pause()) {
@@ -1500,10 +1483,6 @@ void G1CollectorPolicy::record_collection_pause_end(int no_of_gc_threads) {
     }
   }
 
-  if (_last_gc_was_young && !_during_marking) {
-    _young_gc_eff_seq->add(cur_efficiency);
-  }
-
   _short_lived_surv_rate_group->start_adding_regions();
   // do that for any other surv rate groupsx
 
@@ -1618,7 +1597,7 @@ void G1CollectorPolicy::record_collection_pause_end(int no_of_gc_threads) {
   double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS *
                       G1RSetUpdatingPauseTimePercent / 100.0;
   adjust_concurrent_refinement(update_rs_time, update_rs_processed_buffers, update_rs_time_goal_ms);
-  assert(assertMarkedBytesDataOK(), "Marked regions not OK at pause end.");
+  _collectionSetChooser->verify();
 }
 
 #define EXT_SIZE_FORMAT "%d%s"
@@ -2065,28 +2044,6 @@ void G1CollectorPolicy::update_survivors_policy() {
         HeapRegion::GrainWords * _max_survivor_regions);
 }
 
-#ifndef PRODUCT
-class HRSortIndexIsOKClosure: public HeapRegionClosure {
-  CollectionSetChooser* _chooser;
-public:
-  HRSortIndexIsOKClosure(CollectionSetChooser* chooser) :
-    _chooser(chooser) {}
-
-  bool doHeapRegion(HeapRegion* r) {
-    if (!r->continuesHumongous()) {
-      assert(_chooser->regionProperlyOrdered(r), "Ought to be.");
-    }
-    return false;
-  }
-};
-
-bool G1CollectorPolicy::assertMarkedBytesDataOK() {
-  HRSortIndexIsOKClosure cl(_collectionSetChooser);
-  _g1->heap_region_iterate(&cl);
-  return true;
-}
-#endif
-
 bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(
                                                      GCCause::Cause gc_cause) {
   bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
@@ -2184,8 +2141,8 @@ public:
     // We will skip any region that's currently used as an old GC
     // alloc region (we should not consider those for collection
     // before we fill them up).
-    if (_hrSorted->shouldAdd(r) && !_g1h->is_old_gc_alloc_region(r)) {
-      _hrSorted->addMarkedHeapRegion(r);
+    if (_hrSorted->should_add(r) && !_g1h->is_old_gc_alloc_region(r)) {
+      _hrSorted->add_region(r);
     }
   }
   return false;
@@ -2195,16 +2152,14 @@ public:
 class ParKnownGarbageHRClosure: public HeapRegionClosure {
   G1CollectedHeap* _g1h;
   CollectionSetChooser* _hrSorted;
-  jint _marked_regions_added;
+  uint _marked_regions_added;
   size_t _reclaimable_bytes_added;
-  jint _chunk_size;
-  jint _cur_chunk_idx;
-  jint _cur_chunk_end; // Cur chunk [_cur_chunk_idx, _cur_chunk_end)
-  int _worker;
-  int _invokes;
+  uint _chunk_size;
+  uint _cur_chunk_idx;
+  uint _cur_chunk_end; // Cur chunk [_cur_chunk_idx, _cur_chunk_end)
 
   void get_new_chunk() {
-    _cur_chunk_idx = _hrSorted->getParMarkedHeapRegionChunk(_chunk_size);
+    _cur_chunk_idx = _hrSorted->claim_array_chunk(_chunk_size);
     _cur_chunk_end = _cur_chunk_idx + _chunk_size;
   }
   void add_region(HeapRegion* r) {
@@ -2212,7 +2167,7 @@ class ParKnownGarbageHRClosure: public HeapRegionClosure {
       get_new_chunk();
     }
     assert(_cur_chunk_idx < _cur_chunk_end, "postcondition");
-    _hrSorted->setMarkedHeapRegion(_cur_chunk_idx, r);
+    _hrSorted->set_region(_cur_chunk_idx, r);
     _marked_regions_added++;
     _reclaimable_bytes_added += r->reclaimable_bytes();
     _cur_chunk_idx++;
@@ -2220,78 +2175,55 @@ public:
   ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted,
-                           jint chunk_size,
-                           int worker) :
+                           uint chunk_size) :
     _g1h(G1CollectedHeap::heap()),
-    _hrSorted(hrSorted), _chunk_size(chunk_size), _worker(worker),
+    _hrSorted(hrSorted), _chunk_size(chunk_size),
     _marked_regions_added(0), _reclaimable_bytes_added(0),
-    _cur_chunk_idx(0), _cur_chunk_end(0), _invokes(0) { }
+    _cur_chunk_idx(0), _cur_chunk_end(0) { }
 
   bool doHeapRegion(HeapRegion* r) {
-    // We only include humongous regions in collection
-    // sets when concurrent mark shows that their contained object is
-    // unreachable.
-    _invokes++;
-
     // Do we have any marking information for this region?
     if (r->is_marked()) {
       // We will skip any region that's currently used as an old GC
       // alloc region (we should not consider those for collection
       // before we fill them up).
-      if (_hrSorted->shouldAdd(r) && !_g1h->is_old_gc_alloc_region(r)) {
+      if (_hrSorted->should_add(r) && !_g1h->is_old_gc_alloc_region(r)) {
        add_region(r);
      }
    }
    return false;
  }
-  jint marked_regions_added() { return _marked_regions_added; }
+  uint marked_regions_added() { return _marked_regions_added; }
   size_t reclaimable_bytes_added() { return _reclaimable_bytes_added; }
-  int invokes() { return _invokes; }
 };
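ParKnownGarbageHRClosure::add_region() is the consumer side of the chunk protocol: each worker owns a private [_cur_chunk_idx, _cur_chunk_end) window into the shared array and only touches shared state when the window is exhausted. The same shape in a self-contained sketch (the element type, names, and std::atomic usage are illustrative):

#include <atomic>

// Shared cursor into the pre-grown backing array.
static std::atomic<unsigned> next_unreserved(0);

struct ChunkedAppender {
  void**   slots;        // shared backing array, grown in advance
  unsigned chunk_size;
  unsigned cur, end;     // private window, initially empty

  ChunkedAppender(void** s, unsigned c)
      : slots(s), chunk_size(c), cur(0), end(0) {}

  void append(void* region) {
    if (cur == end) {    // window exhausted: claim a fresh chunk
      cur = next_unreserved.fetch_add(chunk_size);
      end = cur + chunk_size;
    }
    slots[cur++] = region;  // private slot, no further synchronization
  }
};

Unfilled tail slots of a claimed chunk simply stay NULL, which is why the chooser pre-grows the array with NULL entries and why consumers must tolerate NULL holes until sorting compacts them.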
 
 class ParKnownGarbageTask: public AbstractGangTask {
   CollectionSetChooser* _hrSorted;
-  jint _chunk_size;
+  uint _chunk_size;
   G1CollectedHeap* _g1;
 public:
-  ParKnownGarbageTask(CollectionSetChooser* hrSorted, jint chunk_size) :
+  ParKnownGarbageTask(CollectionSetChooser* hrSorted, uint chunk_size) :
     AbstractGangTask("ParKnownGarbageTask"),
     _hrSorted(hrSorted), _chunk_size(chunk_size),
     _g1(G1CollectedHeap::heap()) { }
 
   void work(uint worker_id) {
-    ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted,
-                                               _chunk_size,
-                                               worker_id);
+    ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size);
+
     // Back to zero for the claim value.
     _g1->heap_region_par_iterate_chunked(&parKnownGarbageCl, worker_id,
                                          _g1->workers()->active_workers(),
                                          HeapRegion::InitialClaimValue);
-    jint regions_added = parKnownGarbageCl.marked_regions_added();
+    uint regions_added = parKnownGarbageCl.marked_regions_added();
     size_t reclaimable_bytes_added = parKnownGarbageCl.reclaimable_bytes_added();
-    _hrSorted->updateTotals(regions_added, reclaimable_bytes_added);
-    if (G1PrintParCleanupStats) {
-      gclog_or_tty->print_cr("     Thread %d called %d times, added %d regions to list.",
-                             worker_id, parKnownGarbageCl.invokes(), regions_added);
-    }
+    _hrSorted->update_totals(regions_added, reclaimable_bytes_added);
   }
 };
 
 void
 G1CollectorPolicy::record_concurrent_mark_cleanup_end(int no_of_gc_threads) {
-  double start_sec;
-  if (G1PrintParCleanupStats) {
-    start_sec = os::elapsedTime();
-  }
-
-  _collectionSetChooser->clearMarkedHeapRegions();
-  double clear_marked_end_sec;
-  if (G1PrintParCleanupStats) {
-    clear_marked_end_sec = os::elapsedTime();
-    gclog_or_tty->print_cr("  clear marked regions: %8.3f ms.",
-                           (clear_marked_end_sec - start_sec) * 1000.0);
-  }
+  _collectionSetChooser->clear();
 
   uint region_num = _g1->n_regions();
   if (G1CollectedHeap::use_parallel_gc_threads()) {
@@ -2314,8 +2246,8 @@ G1CollectorPolicy::record_concurrent_mark_cleanup_end(int no_of_gc_threads) {
       MAX2(region_num / (uint) (ParallelGCThreads * OverpartitionFactor),
            MinWorkUnit);
     }
-    _collectionSetChooser->prepareForAddMarkedHeapRegionsPar(_g1->n_regions(),
-                                                             WorkUnit);
+    _collectionSetChooser->prepare_for_par_region_addition(_g1->n_regions(),
+                                                           WorkUnit);
     ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser,
                                             (int) WorkUnit);
     _g1->workers()->run_task(&parKnownGarbageTask);
@@ -2326,20 +2258,10 @@ G1CollectorPolicy::record_concurrent_mark_cleanup_end(int no_of_gc_threads) {
     KnownGarbageClosure knownGarbagecl(_collectionSetChooser);
     _g1->heap_region_iterate(&knownGarbagecl);
   }
-  double known_garbage_end_sec;
-  if (G1PrintParCleanupStats) {
-    known_garbage_end_sec = os::elapsedTime();
-    gclog_or_tty->print_cr("  compute known garbage: %8.3f ms.",
-                           (known_garbage_end_sec - clear_marked_end_sec) * 1000.0);
-  }
 
-  _collectionSetChooser->sortMarkedHeapRegions();
+  _collectionSetChooser->sort_regions();
+
   double end_sec = os::elapsedTime();
-  if (G1PrintParCleanupStats) {
-    gclog_or_tty->print_cr("  sorting: %8.3f ms.",
-                           (end_sec - known_garbage_end_sec) * 1000.0);
-  }
-
   double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
   _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
   _cur_mark_stop_world_time_ms += elapsed_time_ms;
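The WorkUnit computation visible above over-partitions the region array so each worker claims several chunks (smoothing out load imbalance between fast and slow workers), while MinWorkUnit keeps chunks large enough that claiming overhead stays negligible. A sketch of that sizing rule; the default factor and minimum here are placeholders, not HotSpot's actual constants:

#include <algorithm>

// Chunk size = regions per worker divided by an over-partitioning
// factor, clamped below by a minimum useful unit of work. Mirrors the
// MAX2(region_num / (threads * OverpartitionFactor), MinWorkUnit)
// expression in the diff above.
unsigned calc_work_unit(unsigned region_num, unsigned n_threads,
                        unsigned overpartition_factor = 4,
                        unsigned min_work_unit = 8) {
  return std::max(region_num / (n_threads * overpartition_factor),
                  min_work_unit);
}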
ergo_format_perc("threshold"), - cset_chooser->remainingRegions(), + cset_chooser->remaining_regions(), reclaimable_bytes, perc, threshold); return false; } @@ -2583,7 +2505,7 @@ bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str, ergo_format_region("candidate old regions") ergo_format_byte_perc("reclaimable") ergo_format_perc("threshold"), - cset_chooser->remainingRegions(), + cset_chooser->remaining_regions(), reclaimable_bytes, perc, threshold); return true; } @@ -2666,9 +2588,9 @@ void G1CollectorPolicy::finalize_cset(double target_pause_time_ms) { if (!gcs_are_young()) { CollectionSetChooser* cset_chooser = _collectionSetChooser; - assert(cset_chooser->verify(), "CSet Chooser verification - pre"); - const uint min_old_cset_length = cset_chooser->calcMinOldCSetLength(); - const uint max_old_cset_length = cset_chooser->calcMaxOldCSetLength(); + cset_chooser->verify(); + const uint min_old_cset_length = cset_chooser->calc_min_old_cset_length(); + const uint max_old_cset_length = cset_chooser->calc_max_old_cset_length(); uint expensive_region_num = 0; bool check_time_remaining = adaptive_young_list_length(); @@ -2755,7 +2677,7 @@ void G1CollectorPolicy::finalize_cset(double target_pause_time_ms) { time_remaining_ms); } - assert(cset_chooser->verify(), "CSet Chooser verification - post"); + cset_chooser->verify(); } stop_incremental_cset_building(); diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp index f897ffdb644..7cdf79d7feb 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp +++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp @@ -288,8 +288,6 @@ private: TruncatedSeq* _cost_per_byte_ms_during_cm_seq; - TruncatedSeq* _young_gc_eff_seq; - G1YoungGenSizer* _young_gen_sizer; uint _eden_cset_region_length; @@ -315,9 +313,6 @@ private: size_t _rs_lengths_prediction; - size_t _known_garbage_bytes; - double _known_garbage_ratio; - double sigma() { return _sigma; } // A function that prevents us putting too much stock in small sample @@ -509,10 +504,6 @@ public: _recorded_non_young_free_cset_time_ms = time_ms; } - double predict_young_gc_eff() { - return get_new_neg_prediction(_young_gc_eff_seq); - } - double predict_survivor_regions_evac_time(); void cset_regions_freed() { @@ -522,20 +513,6 @@ public: // also call it on any more surv rate groups } - void set_known_garbage_bytes(size_t known_garbage_bytes) { - _known_garbage_bytes = known_garbage_bytes; - size_t heap_bytes = _g1->capacity(); - _known_garbage_ratio = (double) _known_garbage_bytes / (double) heap_bytes; - } - - void decrease_known_garbage_bytes(size_t known_garbage_bytes) { - guarantee( _known_garbage_bytes >= known_garbage_bytes, "invariant" ); - - _known_garbage_bytes -= known_garbage_bytes; - size_t heap_bytes = _g1->capacity(); - _known_garbage_ratio = (double) _known_garbage_bytes / (double) heap_bytes; - } - G1MMUTracker* mmu_tracker() { return _mmu_tracker; } @@ -1026,12 +1003,6 @@ public: // exceeded the desired limit, return an amount to expand by. size_t expansion_amount(); -#ifndef PRODUCT - // Check any appropriate marked bytes info, asserting false if - // something's wrong, else returning "true". - bool assertMarkedBytesDataOK(); -#endif - // Print tracing information. 
diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp
index f897ffdb644..7cdf79d7feb 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp
@@ -288,8 +288,6 @@ private:
 
   TruncatedSeq* _cost_per_byte_ms_during_cm_seq;
 
-  TruncatedSeq* _young_gc_eff_seq;
-
   G1YoungGenSizer* _young_gen_sizer;
 
   uint _eden_cset_region_length;
@@ -315,9 +313,6 @@ private:
 
   size_t _rs_lengths_prediction;
 
-  size_t _known_garbage_bytes;
-  double _known_garbage_ratio;
-
   double sigma() { return _sigma; }
 
   // A function that prevents us putting too much stock in small sample
@@ -509,10 +504,6 @@ public:
     _recorded_non_young_free_cset_time_ms = time_ms;
   }
 
-  double predict_young_gc_eff() {
-    return get_new_neg_prediction(_young_gc_eff_seq);
-  }
-
   double predict_survivor_regions_evac_time();
 
   void cset_regions_freed() {
@@ -522,20 +513,6 @@ public:
     // also call it on any more surv rate groups
   }
 
-  void set_known_garbage_bytes(size_t known_garbage_bytes) {
-    _known_garbage_bytes = known_garbage_bytes;
-    size_t heap_bytes = _g1->capacity();
-    _known_garbage_ratio = (double) _known_garbage_bytes / (double) heap_bytes;
-  }
-
-  void decrease_known_garbage_bytes(size_t known_garbage_bytes) {
-    guarantee( _known_garbage_bytes >= known_garbage_bytes, "invariant" );
-
-    _known_garbage_bytes -= known_garbage_bytes;
-    size_t heap_bytes = _g1->capacity();
-    _known_garbage_ratio = (double) _known_garbage_bytes / (double) heap_bytes;
-  }
-
   G1MMUTracker* mmu_tracker() {
     return _mmu_tracker;
   }
@@ -1026,12 +1003,6 @@ public:
   // exceeded the desired limit, return an amount to expand by.
   size_t expansion_amount();
 
-#ifndef PRODUCT
-  // Check any appropriate marked bytes info, asserting false if
-  // something's wrong, else returning "true".
-  bool assertMarkedBytesDataOK();
-#endif
-
   // Print tracing information.
   void print_tracing_info() const;
 
@@ -1074,19 +1045,6 @@ public:
     return _young_gen_sizer->adaptive_young_list_length();
   }
 
-  inline double get_gc_eff_factor() {
-    double ratio = _known_garbage_ratio;
-
-    double square = ratio * ratio;
-    // square = square * square;
-    double ret = square * 9.0 + 1.0;
-#if 0
-    gclog_or_tty->print_cr("ratio = %1.2lf, ret = %1.2lf", ratio, ret);
-#endif // 0
-    guarantee(0.0 <= ret && ret < 10.0, "invariant!");
-    return ret;
-  }
-
 private:
   //
   // Survivor regions policy.
diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp b/hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp
index d25e5b94722..0378688c015 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp
@@ -127,9 +127,6 @@
           "Prints the liveness information for all regions in the heap "   \
           "at the end of a marking cycle.")                                \
                                                                            \
-  develop(bool, G1PrintParCleanupStats, false,                             \
-          "When true, print extra stats about parallel cleanup.")          \
-                                                                           \
   product(intx, G1UpdateBufferSize, 256,                                   \
           "Size of an update buffer")                                      \
                                                                            \
diff --git a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp
index 44e3ac83346..09b80cce581 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp
@@ -370,7 +370,6 @@ void HeapRegion::hr_clear(bool par, bool clear_space) {
     _claimed = InitialClaimValue;
   }
   zero_marked_bytes();
-  set_sort_index(-1);
 
   _offsets.resize(HeapRegion::GrainWords);
   init_top_at_mark_start();
@@ -491,8 +490,7 @@ HeapRegion::HeapRegion(uint hrs_index,
     _in_collection_set(false),
     _next_in_special_set(NULL), _orig_end(NULL),
     _claimed(InitialClaimValue), _evacuation_failed(false),
-    _prev_marked_bytes(0), _next_marked_bytes(0), _sort_index(-1),
-    _gc_efficiency(0.0),
+    _prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
     _young_type(NotYoung), _next_young_region(NULL),
     _next_dirty_cards_region(NULL), _next(NULL), _pending_removal(false),
 #ifdef ASSERT
diff --git a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp
index 8a99d2d3f7b..cd6e164e136 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp
@@ -281,12 +281,8 @@ class HeapRegion: public G1OffsetTableContigSpace {
   size_t _prev_marked_bytes;    // Bytes known to be live via last completed marking.
   size_t _next_marked_bytes;    // Bytes known to be live via in-progress marking.
 
-  // See "sort_index" method.  -1 means is not in the array.
-  int _sort_index;
-
-  //
+  // The calculated GC efficiency of the region.
   double _gc_efficiency;
-  //
 
   enum YoungType {
     NotYoung,                   // a region is not young
@@ -629,16 +625,6 @@ class HeapRegion: public G1OffsetTableContigSpace {
   // last mark phase ended.
   bool is_marked() { return _prev_top_at_mark_start != bottom(); }
 
-  // If "is_marked()" is true, then this is the index of the region in
-  // an array constructed at the end of marking of the regions in a
-  // "desirability" order.
-  int sort_index() {
-    return _sort_index;
-  }
-  void set_sort_index(int i) {
-    _sort_index = i;
-  }
-
   void init_top_at_conc_mark_count() {
     _top_at_conc_mark_count = bottom();
   }
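The removed sort_index machinery existed to cross-check the chooser's "desirability" order: after cleanup, sort_regions() arranges the candidates so the most GC-efficient regions are handed out first by peek() and remove_and_move_to_next(). A sketch of such an ordering; the efficiency field is a stand-in for whatever calc_gc_efficiency() actually computes per region:

#include <algorithm>
#include <vector>

// Candidate region with a precomputed desirability metric, e.g.
// reclaimable bytes per unit of predicted collection cost.
struct Region {
  double gc_efficiency;
};

// Sort candidates best-first so consumers can walk the array front
// to back and stop when the pause-time budget runs out.
void sort_regions(std::vector<Region*>& candidates) {
  std::sort(candidates.begin(), candidates.end(),
            [](const Region* a, const Region* b) {
              return a->gc_efficiency > b->gc_efficiency;
            });
}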
From d7ad8620589ac0a85618257ebac27333a0c06c3b Mon Sep 17 00:00:00 2001
From: David Katleman
Date: Thu, 19 Apr 2012 12:18:32 -0700
Subject: [PATCH 13/15] Added tag jdk8-b35 for changeset 499f1fc13620

---
 .hgtags-top-repo | 1 +
 1 file changed, 1 insertion(+)

diff --git a/.hgtags-top-repo b/.hgtags-top-repo
index 7314e1ff24a..13d4a6d1f39 100644
--- a/.hgtags-top-repo
+++ b/.hgtags-top-repo
@@ -156,3 +156,4 @@ cc771d92284f71765eca14d6d08703c4af254c04 jdk8-b21
 88176171e940f02916a312c265a34c32552a8376 jdk8-b32
 42f275168fa5d9e7c70b246614dca8cf81f52c2e jdk8-b33
 894a478d2c4819a1a0f230bd7bdd09f3b2de9a8c jdk8-b34
+5285317ebb4e8e4f6d8d52b5616fa801e2ea844d jdk8-b35

From 17f06f1623eefec81f1615f286821ba852753afa Mon Sep 17 00:00:00 2001
From: David Katleman
Date: Thu, 19 Apr 2012 12:18:46 -0700
Subject: [PATCH 14/15] Added tag jdk8-b35 for changeset f81ec813e7ee

---
 hotspot/.hgtags | 1 +
 1 file changed, 1 insertion(+)

diff --git a/hotspot/.hgtags b/hotspot/.hgtags
index 63988b7a690..dcd090c5c36 100644
--- a/hotspot/.hgtags
+++ b/hotspot/.hgtags
@@ -239,3 +239,4 @@ cd47da9383cd932cb2b659064057feafa2a91134 hs24-b06
 785bcf415ead2eaa5f6677aaf528481008140bac jdk8-b33
 7c6aba65acd2c334f1c3512b574f9038cddac24b hs24-b07
 f284b08835584517c1ca3dd67341f569e763841f jdk8-b34
+f621660a297baa48fab9dca28e99d318826e8304 jdk8-b35

From 240c3c4839306d8aee7c174cf4b029f986f9cf3e Mon Sep 17 00:00:00 2001
From: Alejandro Murillo
Date: Fri, 20 Apr 2012 16:23:49 -0700
Subject: [PATCH 15/15] Added tag hs24-b08 for changeset 55ac5f20c7bf

---
 hotspot/.hgtags | 1 +
 1 file changed, 1 insertion(+)

diff --git a/hotspot/.hgtags b/hotspot/.hgtags
index dcd090c5c36..d8da4eb47f9 100644
--- a/hotspot/.hgtags
+++ b/hotspot/.hgtags
@@ -240,3 +240,4 @@ cd47da9383cd932cb2b659064057feafa2a91134 hs24-b06
 7c6aba65acd2c334f1c3512b574f9038cddac24b hs24-b07
 f284b08835584517c1ca3dd67341f569e763841f jdk8-b34
 f621660a297baa48fab9dca28e99d318826e8304 jdk8-b35
+dff6e3459210f8dd0430b9b03ccc99280560da30 hs24-b08