Phil Race 2018-04-20 09:05:05 -07:00
commit 568874f545
169 changed files with 3213 additions and 2410 deletions

@ -481,3 +481,4 @@ f7363de371c9a1f668bd0a01b7df3d1ddb9cc58b jdk-11+7
755e1b55a4dff510f9639cdb5c5e82549a7e09b3 jdk-11+8
0c3e252cea44f06aef570ef464950ab97c669970 jdk-11+9
6fa770f9f8ab296e1ce255ec17ccf6d4e1051886 jdk-10+46
69d7398038c54774d9395b6810e0cca335edc02c jdk-11+10

@ -310,9 +310,13 @@ else # HAS_SPEC=true
ifneq ($(PARALLEL_TARGETS), )
$(call StartGlobalTimer)
$(call PrepareSmartJavac)
# JOBS will only be empty for a bootcycle-images recursive call
# or if specified via a make argument directly. In those cases
# treat it as NOT using jobs at all.
( cd $(TOPDIR) && \
$(NICE) $(MAKE) $(MAKE_ARGS) $(OUTPUT_SYNC_FLAG) \
-j $(JOBS) -f make/Main.gmk $(USER_MAKE_VARS) \
$(if $(JOBS), -j $(JOBS)) \
-f make/Main.gmk $(USER_MAKE_VARS) \
$(PARALLEL_TARGETS) $(COMPARE_BUILD_MAKE) $(BUILD_LOG_PIPE) || \
( exitcode=$$? && \
$(PRINTF) "\nERROR: Build failed for $(TARGET_DESCRIPTION) (exit code $$exitcode) \n" \

@ -1,5 +1,5 @@
#
# Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -326,6 +326,8 @@ charset windows-1252 MS1252
ascii true
alias cp1252 # JDK historical
alias cp5348 # Euro IBM CCSID
alias ibm-1252
alias ibm1252
charset windows-1253 MS1253
package sun.nio.cs
@ -933,11 +935,16 @@ charset x-IBM942 IBM942 # IBM & PC/MSDOS encodings
charset x-IBM942C IBM942C
package sun.nio.cs.ext
type source
type template
alias cp942C # JDK historical
alias ibm942C
alias ibm-942C
alias 942C
alias cp932
alias ibm932
alias ibm-932
alias 932
alias x-ibm932
charset x-IBM943 IBM943
package sun.nio.cs.ext
@ -952,7 +959,7 @@ charset x-IBM943 IBM943
charset x-IBM943C IBM943C
package sun.nio.cs.ext
type source
type template
alias cp943C # JDK historical
alias ibm943C
alias ibm-943C
@ -1519,6 +1526,9 @@ charset x-IBM1383 IBM1383
alias ibm1383
alias ibm-1383
alias 1383
alias ibmeuccn
alias ibm-euccn
alias cpeuccn
charset x-IBM970 IBM970
package sun.nio.cs.ext

@ -1,6 +1,26 @@
#
# generate these charsets into sun.nio.cs
#
Big5
Big5_Solaris
Big5_HKSCS
EUC_CN
EUC_KR
GBK
GB18030
IBM856
IBM921
IBM922
IBM942
IBM942C
IBM943
IBM943C
IBM950
IBM970
IBM1046
IBM1124
IBM1383
ISO_8859_6
ISO_8859_8
MS1252
TIS_620

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -129,7 +129,7 @@ public class SPI {
} else if (line.indexOf("_INCLUDE_ALIASES_MAP_") != -1) {
Hasher.genClass(out, aliasKeys, aliasValues,
null, "Aliases", "String",
11, 3, true, false, false);
12, 3, true, false, false);
} else if (line.indexOf("_INCLUDE_CLASSES_MAP_") != -1) {
Hasher.genClass(out, clzKeys, clzValues,
null, "Classes", "String",

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -86,13 +86,17 @@ public interface MessageType {
NAME("name", "Name", "com.sun.tools.javac.util"),
NUMBER("number", "int", null),
OPTION_NAME("option name", "Option", "com.sun.tools.javac.main"),
PROFILE("profile", "Profile", "com.sun.tools.javac.jvm"),
SOURCE("source", "Source", "com.sun.tools.javac.code"),
SOURCE_VERSION("source version", "SourceVersion", "javax.lang.model"),
STRING("string", "String", null),
SYMBOL("symbol", "Symbol", "com.sun.tools.javac.code"),
SYMBOL_KIND("symbol kind", "Kind", "com.sun.tools.javac.code.Kinds"),
KIND_NAME("kind name", "KindName", "com.sun.tools.javac.code.Kinds"),
TARGET("target", "Target", "com.sun.tools.javac.jvm"),
TOKEN("token", "TokenKind", "com.sun.tools.javac.parser.Tokens"),
TYPE("type", "Type", "com.sun.tools.javac.code"),
URL("url", "URL", "java.net"),
SET("set", "Set", "java.util"),
LIST("list", "List", "java.util"),
OBJECT("object", "Object", null),

@ -224,7 +224,7 @@ $(eval $(call SetupJdkLibrary, BUILD_LIBAWT, \
format-nonliteral parentheses, \
DISABLED_WARNINGS_clang := logical-op-parentheses extern-initializer, \
DISABLED_WARNINGS_solstudio := E_DECLARATION_IN_CODE, \
DISABLED_WARNINGS_microsoft := 4297 4244 4267 4996, \
DISABLED_WARNINGS_microsoft := 4297 4244 4267 4291 4302 4311 4996, \
ASFLAGS := $(LIBAWT_ASFLAGS), \
LDFLAGS := $(LDFLAGS_JDKLIB) $(call SET_SHARED_LIBRARY_ORIGIN), \
LDFLAGS_macosx := -L$(INSTALL_LIBRARIES_HERE), \

@ -44,12 +44,6 @@ void ModRefBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, Decorat
}
}
void ModRefBarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Address dst, Register val, Register tmp1, Register tmp2) {
BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2);
}
void ModRefBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Address dst, Register val, Register tmp1, Register tmp2) {
if (type == T_OBJECT || type == T_ARRAY) {

@ -40,7 +40,7 @@ protected:
Register start, Register end, Register tmp, RegSet saved_regs) {}
virtual void oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Address dst, Register val, Register tmp1, Register tmp2);
Address dst, Register val, Register tmp1, Register tmp2) = 0;
public:
virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,

@ -56,12 +56,6 @@ void ModRefBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, Decorat
}
}
void ModRefBarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Register base, RegisterOrConstant ind_or_offs, Register val,
Register tmp1, Register tmp2, Register tmp3, bool needs_frame) {
BarrierSetAssembler::store_at(masm, decorators, type, base, ind_or_offs, val, tmp1, tmp2, tmp3, needs_frame);
}
void ModRefBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Register base, RegisterOrConstant ind_or_offs, Register val,
Register tmp1, Register tmp2, Register tmp3, bool needs_frame) {

@ -41,7 +41,7 @@ protected:
virtual void oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Register base, RegisterOrConstant ind_or_offs, Register val,
Register tmp1, Register tmp2, Register tmp3, bool needs_frame);
Register tmp1, Register tmp2, Register tmp3, bool needs_frame) = 0;
public:
virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Register src, Register dst, Register count, Register preserve1, Register preserve2);

@ -24,6 +24,7 @@
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/interp_masm.hpp"

@ -50,11 +50,6 @@ void ModRefBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, Decorat
}
}
void ModRefBarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
const Address& dst, Register val, Register tmp1, Register tmp2, Register tmp3) {
BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, tmp3);
}
void ModRefBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
const Address& dst, Register val, Register tmp1, Register tmp2, Register tmp3) {
if (type == T_OBJECT || type == T_ARRAY) {

@ -39,7 +39,7 @@ protected:
virtual void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, Register addr, Register count,
bool do_return);
virtual void oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
const Address& dst, Register val, Register tmp1, Register tmp2, Register tmp3);
const Address& dst, Register val, Register tmp1, Register tmp2, Register tmp3) = 0;
public:
virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Register src, Register dst, Register count);
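Note: the aarch64, ppc, and s390/x86 header hunks above all change ModRefBarrierSetAssembler::oop_store_at from a virtual method with a shared fallback into a pure virtual, so every concrete barrier-set assembler must now supply its own implementation and a missing override fails at compile time. A minimal self-contained sketch of that pattern (toy class names, not HotSpot's real types):

  #include <iostream>

  struct Assembler;   // stand-in for MacroAssembler; only used through a pointer here

  class ModRefBarrierAssemblerBase {
  protected:
    // Pure virtual, as in the hunks above: concrete barrier assemblers are forced to implement it.
    virtual void oop_store_at(Assembler* masm, int decorators) = 0;
  public:
    void store_oop(Assembler* masm, int decorators) { oop_store_at(masm, decorators); }
    virtual ~ModRefBarrierAssemblerBase() {}
  };

  class CardTableLikeAssembler : public ModRefBarrierAssemblerBase {
  protected:
    void oop_store_at(Assembler* /*masm*/, int decorators) override {
      std::cout << "emit pre/post barrier + store, decorators=" << decorators << "\n";
    }
  };

  int main() {
    CardTableLikeAssembler assembler;
    assembler.store_oop(nullptr, 0);   // omitting the override would now be a compile-time error
  }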

@ -445,6 +445,8 @@ void AOTCodeHeap::link_shared_runtime_symbols() {
SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_handle_wrong_method_stub", address, SharedRuntime::get_handle_wrong_method_stub());
SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_exception_handler_for_return_address", address, SharedRuntime::exception_handler_for_return_address);
SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_register_finalizer", address, SharedRuntime::register_finalizer);
SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_object_notify", address, JVMCIRuntime::object_notify);
SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_object_notifyAll", address, JVMCIRuntime::object_notifyAll);
SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_OSR_migration_end", address, SharedRuntime::OSR_migration_end);
SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_resolve_dynamic_invoke", address, CompilerRuntime::resolve_dynamic_invoke);
SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_resolve_string_by_symbol", address, CompilerRuntime::resolve_string_by_symbol);

@ -80,13 +80,13 @@
// Entry points in zip.dll for loading zip/jar file entries
typedef void * * (JNICALL *ZipOpen_t)(const char *name, char **pmsg);
typedef void (JNICALL *ZipClose_t)(jzfile *zip);
typedef jzentry* (JNICALL *FindEntry_t)(jzfile *zip, const char *name, jint *sizeP, jint *nameLen);
typedef jboolean (JNICALL *ReadEntry_t)(jzfile *zip, jzentry *entry, unsigned char *buf, char *namebuf);
typedef jzentry* (JNICALL *GetNextEntry_t)(jzfile *zip, jint n);
typedef jboolean (JNICALL *ZipInflateFully_t)(void *inBuf, jlong inLen, void *outBuf, jlong outLen, char **pmsg);
typedef jint (JNICALL *Crc32_t)(jint crc, const jbyte *buf, jint len);
typedef void * * (*ZipOpen_t)(const char *name, char **pmsg);
typedef void (*ZipClose_t)(jzfile *zip);
typedef jzentry* (*FindEntry_t)(jzfile *zip, const char *name, jint *sizeP, jint *nameLen);
typedef jboolean (*ReadEntry_t)(jzfile *zip, jzentry *entry, unsigned char *buf, char *namebuf);
typedef jzentry* (*GetNextEntry_t)(jzfile *zip, jint n);
typedef jboolean (*ZipInflateFully_t)(void *inBuf, jlong inLen, void *outBuf, jlong outLen, char **pmsg);
typedef jint (*Crc32_t)(jint crc, const jbyte *buf, jint len);
static ZipOpen_t ZipOpen = NULL;
static ZipClose_t ZipClose = NULL;
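Note: the hunk above strips the JNICALL calling-convention macro from the typedefs the AOT code uses for the zip entry points it resolves at run time. As a reminder of the underlying mechanism, here is a small self-contained sketch of a plain function-pointer typedef filled in by a dynamic symbol lookup; dlopen/dlsym and the libm name are only illustrative stand-ins for whatever lookup the loader actually performs:

  #include <dlfcn.h>
  #include <cstdio>

  // Plain function-pointer typedef, with no calling-convention macro attached.
  typedef double (*CosFn_t)(double);

  int main() {
    // Library name is platform specific; "libm.so.6" is just for this illustration.
    void* handle = dlopen("libm.so.6", RTLD_LAZY);
    if (handle == nullptr) return 1;
    CosFn_t my_cos = (CosFn_t)dlsym(handle, "cos");
    if (my_cos != nullptr) {
      std::printf("cos(0) = %f\n", my_cos(0.0));
    }
    dlclose(handle);
    return 0;
  }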

@ -54,6 +54,7 @@
#include "classfile/metadataOnStackMark.hpp"
#include "classfile/moduleEntry.hpp"
#include "classfile/packageEntry.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
@ -102,8 +103,25 @@ void ClassLoaderData::init_null_class_loader_data() {
}
}
// JFR and logging support so that the name and klass are available after the
// class_loader oop is no longer alive, during unloading.
void ClassLoaderData::initialize_name_and_klass(Handle class_loader) {
_class_loader_klass = class_loader->klass();
oop class_loader_name = java_lang_ClassLoader::name(class_loader());
if (class_loader_name != NULL) {
Thread* THREAD = Thread::current();
ResourceMark rm(THREAD);
const char* class_loader_instance_name =
java_lang_String::as_utf8_string(class_loader_name);
if (class_loader_instance_name != NULL && class_loader_instance_name[0] != '\0') {
// Can't throw InternalError and SymbolTable doesn't throw OOM anymore.
_class_loader_name = SymbolTable::new_symbol(class_loader_instance_name, CATCH);
}
}
}
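Note: initialize_name_and_klass() records the loader's Klass* and name Symbol while the java.lang.ClassLoader instance is still reachable, so JFR and unload logging can still print something useful once the oop is dead. A self-contained toy sketch of that idea, capturing a printable name eagerly instead of chasing a reference that may already have been cleared (toy types, not the HotSpot API):

  #include <memory>
  #include <string>
  #include <iostream>

  struct Loader { std::string name; };                 // stands in for the ClassLoader instance

  class LoaderRecord {
    std::weak_ptr<Loader> _loader;                     // may expire, like the weak holder
    std::string _cached_name;                          // captured while the loader is alive
  public:
    explicit LoaderRecord(const std::shared_ptr<Loader>& l)
      : _loader(l), _cached_name(l->name) {}
    std::string name_for_logging() const {
      if (auto l = _loader.lock()) return l->name;     // still alive: use the live object
      return _cached_name;                             // unloading: fall back to the copy
    }
  };

  int main() {
    auto l = std::make_shared<Loader>(Loader{"app"});
    LoaderRecord rec(l);
    l.reset();                                         // "unload" the loader
    std::cout << rec.name_for_logging() << "\n";       // still prints "app"
  }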
ClassLoaderData::ClassLoaderData(Handle h_class_loader, bool is_anonymous) :
_class_loader(h_class_loader()),
_is_anonymous(is_anonymous),
// An anonymous class loader data doesn't have anything to keep
// it from being unloaded during parsing of the anonymous class.
@ -114,9 +132,14 @@ ClassLoaderData::ClassLoaderData(Handle h_class_loader, bool is_anonymous) :
_claimed(0), _modified_oops(true), _accumulated_modified_oops(false),
_jmethod_ids(NULL), _handles(), _deallocate_list(NULL),
_next(NULL),
_class_loader_klass(NULL), _class_loader_name(NULL),
_metaspace_lock(new Mutex(Monitor::leaf+1, "Metaspace allocation lock", true,
Monitor::_safepoint_check_never)) {
if (!h_class_loader.is_null()) {
_class_loader = _handles.add(h_class_loader());
}
if (!is_anonymous) {
// The holder is initialized later for anonymous classes, and before calling anything
// that calls class_loader().
@ -269,7 +292,6 @@ void ClassLoaderData::oops_do(OopClosure* f, bool must_claim, bool clear_mod_oop
clear_modified_oops();
}
f->do_oop(&_class_loader);
_handles.oops_do(f);
}
@ -556,7 +578,7 @@ void ClassLoaderData::unload() {
if (lt.is_enabled()) {
ResourceMark rm;
LogStream ls(lt);
ls.print("unload ");
ls.print("unload");
print_value_on(&ls);
ls.cr();
}
@ -631,7 +653,7 @@ oop ClassLoaderData::holder_phantom() const {
// Unloading support
bool ClassLoaderData::is_alive() const {
bool alive = keep_alive() // null class loader and incomplete anonymous klasses.
|| (_holder.peek() != NULL); // not cleaned by weak reference processing
|| (_holder.peek() != NULL); // and not cleaned by the GC weak handle processing.
return alive;
}
@ -887,13 +909,23 @@ ClassLoaderData* ClassLoaderData::anonymous_class_loader_data(Handle loader) {
}
const char* ClassLoaderData::loader_name() const {
// Handles null class loader
return SystemDictionary::loader_name(class_loader());
if (is_unloading()) {
if (_class_loader_klass == NULL) {
return "<bootloader>";
} else if (_class_loader_name != NULL) {
return _class_loader_name->as_C_string();
} else {
return _class_loader_klass->name()->as_C_string();
}
} else {
// Handles null class loader
return SystemDictionary::loader_name(class_loader());
}
}
void ClassLoaderData::print_value_on(outputStream* out) const {
if (class_loader() != NULL) {
if (!is_unloading() && class_loader() != NULL) {
out->print("loader data: " INTPTR_FORMAT " for instance ", p2i(this));
class_loader()->print_value_on(out); // includes loader_name() and address of class loader instance
} else {
@ -908,7 +940,7 @@ void ClassLoaderData::print_value_on(outputStream* out) const {
#ifndef PRODUCT
void ClassLoaderData::print_on(outputStream* out) const {
out->print("ClassLoaderData CLD: " PTR_FORMAT ", loader: " PTR_FORMAT ", loader_klass: %s {",
p2i(this), p2i((void *)class_loader()), loader_name());
p2i(this), p2i(_class_loader.ptr_raw()), loader_name());
if (is_anonymous()) out->print(" anonymous");
if (claimed()) out->print(" claimed");
if (is_unloading()) out->print(" unloading");
@ -962,10 +994,10 @@ bool ClassLoaderDataGraph::_metaspace_oom = false;
// Add a new class loader data node to the list. Assign the newly created
// ClassLoaderData into the java/lang/ClassLoader object as a hidden field
ClassLoaderData* ClassLoaderDataGraph::add(Handle loader, bool is_anonymous) {
ClassLoaderData* ClassLoaderDataGraph::add_to_graph(Handle loader, bool is_anonymous) {
NoSafepointVerifier no_safepoints; // we mustn't GC until we've installed the
// ClassLoaderData in the graph since the CLD
// contains unhandled oops
// contains oops in _handles that must be walked.
ClassLoaderData* cld = new ClassLoaderData(loader, is_anonymous);
@ -1002,6 +1034,16 @@ ClassLoaderData* ClassLoaderDataGraph::add(Handle loader, bool is_anonymous) {
} while (true);
}
ClassLoaderData* ClassLoaderDataGraph::add(Handle loader, bool is_anonymous) {
ClassLoaderData* loader_data = add_to_graph(loader, is_anonymous);
// Initialize name and class after the loader data is added to the CLDG
// because adding the Symbol for the name might safepoint.
if (loader.not_null()) {
loader_data->initialize_name_and_klass(loader);
}
return loader_data;
}
void ClassLoaderDataGraph::oops_do(OopClosure* f, bool must_claim) {
for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) {
cld->oops_do(f, must_claim);
@ -1237,8 +1279,7 @@ bool ClassLoaderDataGraph::contains_loader_data(ClassLoaderData* loader_data) {
// Move class loader data from main list to the unloaded list for unloading
// and deallocation later.
bool ClassLoaderDataGraph::do_unloading(BoolObjectClosure* is_alive_closure,
bool clean_previous_versions) {
bool ClassLoaderDataGraph::do_unloading(bool clean_previous_versions) {
ClassLoaderData* data = _head;
ClassLoaderData* prev = NULL;
@ -1296,7 +1337,7 @@ bool ClassLoaderDataGraph::do_unloading(BoolObjectClosure* is_alive_closure,
// Remove entries in the dictionary of live class loader that have
// initiated loading classes in a dead class loader.
if (data->dictionary() != NULL) {
data->dictionary()->do_unloading(is_alive_closure);
data->dictionary()->do_unloading();
}
// Walk a ModuleEntry's reads, and a PackageEntry's exports
// lists to determine if there are modules on those lists that are now

@ -28,7 +28,6 @@
#include "memory/allocation.hpp"
#include "memory/memRegion.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspaceCounters.hpp"
#include "oops/oopHandle.hpp"
#include "oops/weakHandle.hpp"
#include "runtime/mutex.hpp"
@ -84,6 +83,7 @@ class ClassLoaderDataGraph : public AllStatic {
static volatile size_t _num_instance_classes;
static volatile size_t _num_array_classes;
static ClassLoaderData* add_to_graph(Handle class_loader, bool anonymous);
static ClassLoaderData* add(Handle class_loader, bool anonymous);
static void post_class_unload_events();
public:
@ -114,7 +114,7 @@ class ClassLoaderDataGraph : public AllStatic {
static void packages_unloading_do(void f(PackageEntry*));
static void loaded_classes_do(KlassClosure* klass_closure);
static void classes_unloading_do(void f(Klass* const));
static bool do_unloading(BoolObjectClosure* is_alive_closure, bool clean_previous_versions);
static bool do_unloading(bool clean_previous_versions);
// dictionary do
// Iterate over all klasses in dictionary, but
@ -221,7 +221,7 @@ class ClassLoaderData : public CHeapObj<mtClass> {
static ClassLoaderData * _the_null_class_loader_data;
WeakHandle<vm_class_loader_data> _holder; // The oop that determines lifetime of this class loader
oop _class_loader; // The instance of java/lang/ClassLoader associated with
OopHandle _class_loader; // The instance of java/lang/ClassLoader associated with
// this ClassLoaderData
ClassLoaderMetaspace * volatile _metaspace; // Meta-space where meta-data defined by the
@ -234,7 +234,7 @@ class ClassLoaderData : public CHeapObj<mtClass> {
bool _modified_oops; // Card Table Equivalent (YC/CMS support)
bool _accumulated_modified_oops; // Mod Union Equivalent (CMS support)
s2 _keep_alive; // if this CLD is kept alive without a keep_alive_object().
s2 _keep_alive; // if this CLD is kept alive.
// Used for anonymous classes and the boot class
// loader. _keep_alive does not need to be volatile or
// atomic since there is one unique CLD per anonymous class.
@ -265,6 +265,9 @@ class ClassLoaderData : public CHeapObj<mtClass> {
// Support for walking class loader data objects
ClassLoaderData* _next; /// Next loader_datas created
// JFR support
Klass* _class_loader_klass;
Symbol* _class_loader_name;
TRACE_DEFINE_TRACE_ID_FIELD;
void set_next(ClassLoaderData* next) { _next = next; }
@ -305,6 +308,8 @@ class ClassLoaderData : public CHeapObj<mtClass> {
MetaWord* allocate(size_t size);
Dictionary* create_dictionary();
void initialize_name_and_klass(Handle class_loader);
public:
// GC interface.
void clear_claimed() { _claimed = 0; }
@ -340,9 +345,7 @@ class ClassLoaderData : public CHeapObj<mtClass> {
// Returns true if this class loader data is for the boot class loader.
// (Note that the class loader data may be anonymous.)
bool is_boot_class_loader_data() const {
return class_loader() == NULL;
}
inline bool is_boot_class_loader_data() const;
bool is_builtin_class_loader_data() const;
bool is_permanent_class_loader_data() const;
@ -351,10 +354,7 @@ class ClassLoaderData : public CHeapObj<mtClass> {
// method will allocate a Metaspace if needed.
ClassLoaderMetaspace* metaspace_non_null();
oop class_loader() const { return _class_loader; }
// The object the GC is using to keep this ClassLoaderData alive.
oop keep_alive_object() const;
inline oop class_loader() const;
// Returns true if this class loader data is for a loader going away.
bool is_unloading() const {
@ -363,7 +363,7 @@ class ClassLoaderData : public CHeapObj<mtClass> {
}
// Used to refcount an anonymous class's CLD in order to
// indicate their aliveness without a keep_alive_object().
// indicate their aliveness.
void inc_keep_alive();
void dec_keep_alive();
@ -407,6 +407,9 @@ class ClassLoaderData : public CHeapObj<mtClass> {
static ClassLoaderData* class_loader_data_or_null(oop loader);
static ClassLoaderData* anonymous_class_loader_data(Handle loader);
Klass* class_loader_klass() const { return _class_loader_klass; }
Symbol* class_loader_name() const { return _class_loader_name; }
TRACE_DEFINE_TRACE_ID_METHODS;
};

@ -28,6 +28,18 @@
#include "classfile/classLoaderData.hpp"
#include "classfile/javaClasses.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oopHandle.inline.hpp"
#include "oops/weakHandle.inline.hpp"
inline oop ClassLoaderData::class_loader() const {
assert(!_unloading, "This oop is not available to unloading class loader data");
assert(_holder.is_null() || _holder.peek() != NULL , "This class loader data holder must be alive");
return _class_loader.resolve();
}
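Note: with _class_loader held as an OopHandle, class_loader() resolves through the handle instead of loading a raw oop field, and the explicit f->do_oop(&_class_loader) visit deleted from oops_do() above becomes unnecessary because the handle lives in the CLD's _handles area, which is already walked. A self-contained toy of that indirection, where owners keep handles into a root area the collector scans (toy types, not HotSpot's OopHandle):

  #include <vector>
  #include <string>
  #include <iostream>

  struct Obj { std::string value; };

  // Root area the collector scans; owners keep handles (indices), not raw fields.
  class HandleArea {
    std::vector<Obj*> _roots;
  public:
    size_t add(Obj* o) { _roots.push_back(o); return _roots.size() - 1; }
    Obj* resolve(size_t handle) const { return _roots[handle]; }
    void oops_do(void (*f)(Obj*&)) { for (Obj*& r : _roots) f(r); }  // one walk covers all handles
  };

  static void visit(Obj*& root) { std::cout << "visiting " << root->value << "\n"; }

  int main() {
    HandleArea handles;
    Obj loader{"app-loader"};
    size_t h = handles.add(&loader);                  // like _handles.add(h_class_loader())
    std::cout << handles.resolve(h)->value << "\n";   // like _class_loader.resolve()
    handles.oops_do(visit);                           // no per-field f->do_oop(&_class_loader) needed
  }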
inline bool ClassLoaderData::is_boot_class_loader_data() const {
return class_loader() == NULL;
}
inline ClassLoaderData* ClassLoaderData::class_loader_data_or_null(oop loader) {
if (loader == NULL) {

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
#include "classfile/classLoaderData.inline.hpp"
#include "classfile/classLoaderStats.hpp"
#include "oops/oop.inline.hpp"
#include "utilities/globalDefinitions.hpp"

@ -214,13 +214,13 @@ void DictionaryEntry::add_protection_domain(Dictionary* dict, Handle protection_
// During class loading we may have cached a protection domain that has
// since been unreferenced, so this entry should be cleared.
void Dictionary::clean_cached_protection_domains(BoolObjectClosure* is_alive, DictionaryEntry* probe) {
void Dictionary::clean_cached_protection_domains(DictionaryEntry* probe) {
assert_locked_or_safepoint(SystemDictionary_lock);
ProtectionDomainEntry* current = probe->pd_set();
ProtectionDomainEntry* prev = NULL;
while (current != NULL) {
if (!is_alive->do_object_b(current->object_no_keepalive())) {
if (current->object_no_keepalive() == NULL) {
LogTarget(Debug, protectiondomain) lt;
if (lt.is_enabled()) {
ResourceMark rm;
@ -228,7 +228,6 @@ void Dictionary::clean_cached_protection_domains(BoolObjectClosure* is_alive, Di
LogStream ls(lt);
ls.print_cr("PD in set is not alive:");
ls.print("class loader: "); loader_data()->class_loader()->print_value_on(&ls);
ls.print(" protection domain: "); current->object_no_keepalive()->print_value_on(&ls);
ls.print(" loading: "); probe->instance_klass()->print_value_on(&ls);
ls.cr();
}
@ -249,7 +248,7 @@ void Dictionary::clean_cached_protection_domains(BoolObjectClosure* is_alive, Di
}
void Dictionary::do_unloading(BoolObjectClosure* is_alive) {
void Dictionary::do_unloading() {
assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
// The NULL class loader doesn't initiate loading classes from other class loaders
@ -276,7 +275,7 @@ void Dictionary::do_unloading(BoolObjectClosure* is_alive) {
continue;
}
// Clean pd_set
clean_cached_protection_domains(is_alive, probe);
clean_cached_protection_domains(probe);
p = probe->next_addr();
}
}
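Note: clean_cached_protection_domains() now treats a NULL object_no_keepalive() (a weak reference the GC has already cleared) as dead instead of asking an is_alive closure, and splices that node out of the entry's pd_set. The splice itself is the usual prev/current walk over a singly linked list; a self-contained sketch of the pattern (toy node type, not the Dictionary code):

  #include <cstdio>

  struct Node {
    const char* payload;   // NULL models a weak referent the GC has cleared
    Node* next;
  };

  // Remove every node whose payload has been cleared; returns the new head.
  Node* prune_dead(Node* head) {
    Node* prev = nullptr;
    Node* curr = head;
    while (curr != nullptr) {
      Node* next = curr->next;
      if (curr->payload == nullptr) {          // dead: splice it out
        if (prev == nullptr) head = next;
        else                 prev->next = next;
        // the real code would free the entry here
      } else {
        prev = curr;                           // alive: keep it and advance
      }
      curr = next;
    }
    return head;
  }

  int main() {
    Node c{"pd-3", nullptr}, b{nullptr, &c}, a{"pd-1", &b};
    Node* head = prune_dead(&a);
    for (Node* n = head; n != nullptr; n = n->next) std::printf("%s\n", n->payload);
  }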

@ -52,7 +52,7 @@ class Dictionary : public Hashtable<InstanceKlass*, mtClass> {
DictionaryEntry* get_entry(int index, unsigned int hash, Symbol* name);
void clean_cached_protection_domains(BoolObjectClosure* is_alive, DictionaryEntry* probe);
void clean_cached_protection_domains(DictionaryEntry* probe);
protected:
static size_t entry_size();
@ -72,20 +72,16 @@ public:
InstanceKlass* find_shared_class(int index, unsigned int hash, Symbol* name);
// GC support
void oops_do(OopClosure* f);
void roots_oops_do(OopClosure* strong, OopClosure* weak);
void classes_do(void f(InstanceKlass*));
void classes_do(void f(InstanceKlass*, TRAPS), TRAPS);
void all_entries_do(void f(InstanceKlass*, ClassLoaderData*));
void classes_do(MetaspaceClosure* it);
void unlink(BoolObjectClosure* is_alive);
void unlink();
void remove_classes_in_error_state();
// Unload classes whose defining loaders are unloaded
void do_unloading(BoolObjectClosure* is_alive);
void do_unloading();
// Protection domains
InstanceKlass* find(unsigned int hash, Symbol* name, Handle protection_domain);

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -99,7 +99,7 @@ void LoaderConstraintTable::purge_loader_constraints() {
InstanceKlass* klass = probe->klass();
// Remove klass that is no longer alive
if (klass != NULL &&
klass->class_loader_data()->is_unloading()) {
!klass->is_loader_alive()) {
probe->set_klass(NULL);
if (lt.is_enabled()) {
ResourceMark rm;
@ -116,31 +116,31 @@ void LoaderConstraintTable::purge_loader_constraints() {
int n = 0;
while (n < probe->num_loaders()) {
if (probe->loader_data(n)->is_unloading()) {
if (lt.is_enabled()) {
ResourceMark rm;
lt.print("purging loader %s from constraint for name %s",
probe->loader_data(n)->loader_name(),
probe->name()->as_C_string()
);
}
if (lt.is_enabled()) {
ResourceMark rm;
lt.print("purging loader %s from constraint for name %s",
probe->loader_data(n)->loader_name(),
probe->name()->as_C_string()
);
}
// Compact array
int num = probe->num_loaders() - 1;
probe->set_num_loaders(num);
// Compact array
int num = probe->num_loaders() - 1;
probe->set_num_loaders(num);
probe->set_loader_data(n, probe->loader_data(num));
probe->set_loader_data(num, NULL);
if (lt.is_enabled()) {
ResourceMark rm;
lt.print("new loader list:");
for (int i = 0; i < probe->num_loaders(); i++) {
lt.print(" [%d]: %s", i,
probe->loader_data(i)->loader_name());
}
if (lt.is_enabled()) {
ResourceMark rm;
lt.print("new loader list:");
for (int i = 0; i < probe->num_loaders(); i++) {
lt.print(" [%d]: %s", i,
probe->loader_data(i)->loader_name());
}
}
continue; // current element replaced, so restart without
// incrementing n
continue; // current element replaced, so restart without
// incrementing n
}
n++;
}
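Note: the purge loop above drops a dead loader from a constraint by copying the last array element into slot n, shrinking the count, and then re-examining slot n ("restart without incrementing n") so the element just moved in is not skipped. A self-contained sketch of that swap-with-last compaction idiom:

  #include <cstdio>

  // Remove every negative value in place by swapping in the last element.
  int compact(int* a, int n) {
    int i = 0;
    while (i < n) {
      if (a[i] < 0) {            // "is_unloading" stand-in
        n--;                     // shrink the live range
        a[i] = a[n];             // move the last element into the hole
        // do not advance i: the swapped-in element must be checked too
      } else {
        i++;
      }
    }
    return n;                    // new number of live elements
  }

  int main() {
    int loaders[] = { 1, -2, 3, -4, -5, 6 };
    int n = compact(loaders, 6);
    for (int i = 0; i < n; i++) std::printf("%d ", loaders[i]);  // order is not preserved
    std::printf("\n");
  }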
@ -159,9 +159,7 @@ void LoaderConstraintTable::purge_loader_constraints() {
} else {
#ifdef ASSERT
if (probe->klass() != NULL) {
ClassLoaderData* loader_data =
probe->klass()->class_loader_data();
assert(!loader_data->is_unloading(), "klass should be live");
assert(probe->klass()->is_loader_alive(), "klass should be live");
}
#endif
// Go to next entry

@ -24,7 +24,7 @@
#include "precompiled.hpp"
#include "jni.h"
#include "classfile/classLoaderData.hpp"
#include "classfile/classLoaderData.inline.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/moduleEntry.hpp"
#include "logging/log.hpp"

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
#include "classfile/classLoaderData.inline.hpp"
#include "classfile/placeholders.hpp"
#include "classfile/systemDictionary.hpp"
#include "oops/oop.inline.hpp"

@ -30,6 +30,7 @@
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "oops/weakHandle.inline.hpp"
#include "utilities/hashtable.inline.hpp"
unsigned int ProtectionDomainCacheTable::compute_hash(Handle protection_domain) {
@ -42,26 +43,26 @@ int ProtectionDomainCacheTable::index_for(Handle protection_domain) {
}
ProtectionDomainCacheTable::ProtectionDomainCacheTable(int table_size)
: Hashtable<oop, mtClass>(table_size, sizeof(ProtectionDomainCacheEntry))
: Hashtable<ClassLoaderWeakHandle, mtClass>(table_size, sizeof(ProtectionDomainCacheEntry))
{
}
void ProtectionDomainCacheTable::unlink(BoolObjectClosure* is_alive) {
void ProtectionDomainCacheTable::unlink() {
assert(SafepointSynchronize::is_at_safepoint(), "must be");
for (int i = 0; i < table_size(); ++i) {
ProtectionDomainCacheEntry** p = bucket_addr(i);
ProtectionDomainCacheEntry* entry = bucket(i);
while (entry != NULL) {
if (is_alive->do_object_b(entry->object_no_keepalive())) {
oop pd = entry->object_no_keepalive();
if (pd != NULL) {
p = entry->next_addr();
} else {
LogTarget(Debug, protectiondomain) lt;
if (lt.is_enabled()) {
LogStream ls(lt);
ls.print("protection domain unlinked: ");
entry->object_no_keepalive()->print_value_on(&ls);
ls.cr();
ls.print_cr("protection domain unlinked at %d", i);
}
entry->literal().release();
*p = entry->next();
free_entry(entry);
}
@ -70,16 +71,6 @@ void ProtectionDomainCacheTable::unlink(BoolObjectClosure* is_alive) {
}
}
void ProtectionDomainCacheTable::oops_do(OopClosure* f) {
for (int index = 0; index < table_size(); index++) {
for (ProtectionDomainCacheEntry* probe = bucket(index);
probe != NULL;
probe = probe->next()) {
probe->oops_do(f);
}
}
}
void ProtectionDomainCacheTable::print_on(outputStream* st) const {
st->print_cr("Protection domain cache table (table_size=%d, classes=%d)",
table_size(), number_of_entries());
@ -97,7 +88,7 @@ void ProtectionDomainCacheTable::verify() {
}
oop ProtectionDomainCacheEntry::object() {
return RootAccess<ON_PHANTOM_OOP_REF>::oop_load(literal_addr());
return literal().resolve();
}
oop ProtectionDomainEntry::object() {
@ -108,7 +99,7 @@ oop ProtectionDomainEntry::object() {
// keeping it alive. This is okay to do in the VM thread state if it is not
// leaked out to become strongly reachable.
oop ProtectionDomainCacheEntry::object_no_keepalive() {
return RootAccess<ON_PHANTOM_OOP_REF | AS_NO_KEEPALIVE>::oop_load(literal_addr());
return literal().peek();
}
oop ProtectionDomainEntry::object_no_keepalive() {
@ -116,7 +107,7 @@ oop ProtectionDomainEntry::object_no_keepalive() {
}
void ProtectionDomainCacheEntry::verify() {
guarantee(oopDesc::is_oop(object_no_keepalive()), "must be an oop");
guarantee(object_no_keepalive() == NULL || oopDesc::is_oop(object_no_keepalive()), "must be an oop");
}
ProtectionDomainCacheEntry* ProtectionDomainCacheTable::get(Handle protection_domain) {
@ -127,6 +118,8 @@ ProtectionDomainCacheEntry* ProtectionDomainCacheTable::get(Handle protection_do
if (entry == NULL) {
entry = add_entry(index, hash, protection_domain);
}
// keep entry alive
(void)entry->object();
return entry;
}
@ -145,7 +138,8 @@ ProtectionDomainCacheEntry* ProtectionDomainCacheTable::add_entry(int index, uns
assert(index == index_for(protection_domain), "incorrect index?");
assert(find_entry(index, protection_domain) == NULL, "no double entry");
ProtectionDomainCacheEntry* p = new_entry(hash, protection_domain);
Hashtable<oop, mtClass>::add_entry(index, p);
ClassLoaderWeakHandle w = ClassLoaderWeakHandle::create(protection_domain);
ProtectionDomainCacheEntry* p = new_entry(hash, w);
Hashtable<ClassLoaderWeakHandle, mtClass>::add_entry(index, p);
return p;
}
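Note: the protection-domain cache now stores ClassLoaderWeakHandles: add_entry() creates the weak handle, object_no_keepalive() peeks without keeping the referent alive, object() resolves it into a strong reference (which is why get() calls it to "keep entry alive"), and unlink() frees entries whose peek() comes back NULL. A self-contained toy of the peek/resolve distinction built on std::weak_ptr; the real ClassLoaderWeakHandle is backed by the VM's weak OopStorage, not by shared_ptr:

  #include <memory>
  #include <iostream>

  struct ProtectionDomain { int id; };

  struct WeakEntry {
    std::weak_ptr<ProtectionDomain> handle;
    // peek-style check: observe liveness without extending the referent's lifetime
    bool peek_alive() const { return !handle.expired(); }
    // resolve: obtain a strong reference that does keep the referent alive
    std::shared_ptr<ProtectionDomain> resolve() const { return handle.lock(); }
  };

  int main() {
    auto pd = std::make_shared<ProtectionDomain>(ProtectionDomain{42});
    WeakEntry entry{pd};
    auto strong = entry.resolve();              // like object(): keeps the referent alive
    pd.reset();
    std::cout << entry.peek_alive() << "\n";    // 1: the strong reference still holds it
    strong.reset();
    std::cout << entry.peek_alive() << "\n";    // 0: like an entry unlink() would now free
  }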

@ -26,6 +26,7 @@
#define SHARE_VM_CLASSFILE_PROTECTIONDOMAINCACHE_HPP
#include "oops/oop.hpp"
#include "oops/weakHandle.hpp"
#include "memory/iterator.hpp"
#include "utilities/hashtable.hpp"
@ -34,22 +35,18 @@
// to dictionary.hpp pd_set for more information about how protection domain entries
// are used.
// This table is walked during GC, rather than the class loader data graph dictionaries.
class ProtectionDomainCacheEntry : public HashtableEntry<oop, mtClass> {
class ProtectionDomainCacheEntry : public HashtableEntry<ClassLoaderWeakHandle, mtClass> {
friend class VMStructs;
public:
oop object();
oop object_no_keepalive();
ProtectionDomainCacheEntry* next() {
return (ProtectionDomainCacheEntry*)HashtableEntry<oop, mtClass>::next();
return (ProtectionDomainCacheEntry*)HashtableEntry<ClassLoaderWeakHandle, mtClass>::next();
}
ProtectionDomainCacheEntry** next_addr() {
return (ProtectionDomainCacheEntry**)HashtableEntry<oop, mtClass>::next_addr();
}
void oops_do(OopClosure* f) {
f->do_oop(literal_addr());
return (ProtectionDomainCacheEntry**)HashtableEntry<ClassLoaderWeakHandle, mtClass>::next_addr();
}
void verify();
@ -64,20 +61,21 @@ class ProtectionDomainCacheEntry : public HashtableEntry<oop, mtClass> {
// we only need to iterate over this set.
// The amount of different protection domains used is typically magnitudes smaller
// than the number of system dictionary entries (loaded classes).
class ProtectionDomainCacheTable : public Hashtable<oop, mtClass> {
class ProtectionDomainCacheTable : public Hashtable<ClassLoaderWeakHandle, mtClass> {
friend class VMStructs;
private:
ProtectionDomainCacheEntry* bucket(int i) const {
return (ProtectionDomainCacheEntry*) Hashtable<oop, mtClass>::bucket(i);
return (ProtectionDomainCacheEntry*) Hashtable<ClassLoaderWeakHandle, mtClass>::bucket(i);
}
// The following method is not MT-safe and must be done under lock.
ProtectionDomainCacheEntry** bucket_addr(int i) {
return (ProtectionDomainCacheEntry**) Hashtable<oop, mtClass>::bucket_addr(i);
return (ProtectionDomainCacheEntry**) Hashtable<ClassLoaderWeakHandle, mtClass>::bucket_addr(i);
}
ProtectionDomainCacheEntry* new_entry(unsigned int hash, Handle protection_domain) {
ProtectionDomainCacheEntry* entry = (ProtectionDomainCacheEntry*) Hashtable<oop, mtClass>::new_entry(hash, protection_domain());
ProtectionDomainCacheEntry* new_entry(unsigned int hash, ClassLoaderWeakHandle protection_domain) {
ProtectionDomainCacheEntry* entry = (ProtectionDomainCacheEntry*)
Hashtable<ClassLoaderWeakHandle, mtClass>::new_entry(hash, protection_domain);
return entry;
}
@ -91,10 +89,7 @@ public:
ProtectionDomainCacheTable(int table_size);
ProtectionDomainCacheEntry* get(Handle protection_domain);
void unlink(BoolObjectClosure* cl);
// GC support
void oops_do(OopClosure* f);
void unlink();
void print_on(outputStream* st) const;
void verify();

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -125,9 +125,8 @@ void ResolutionErrorTable::purge_resolution_errors() {
assert(entry->pool() != (ConstantPool*)NULL, "resolution error table is corrupt");
ConstantPool* pool = entry->pool();
assert(pool->pool_holder() != NULL, "Constant pool without a class?");
ClassLoaderData* loader_data =
pool->pool_holder()->class_loader_data();
if (!loader_data->is_unloading()) {
if (pool->pool_holder()->is_loader_alive()) {
p = entry->next_addr();
} else {
*p = entry->next();

@ -1831,24 +1831,6 @@ void SystemDictionary::always_strong_oops_do(OopClosure* blk) {
}
#ifdef ASSERT
class VerifySDReachableAndLiveClosure : public OopClosure {
private:
BoolObjectClosure* _is_alive;
template <class T> void do_oop_work(T* p) {
oop obj = RawAccess<>::oop_load(p);
guarantee(_is_alive->do_object_b(obj), "Oop in protection domain cache table must be live");
}
public:
VerifySDReachableAndLiveClosure(BoolObjectClosure* is_alive) : OopClosure(), _is_alive(is_alive) { }
virtual void do_oop(oop* p) { do_oop_work(p); }
virtual void do_oop(narrowOop* p) { do_oop_work(p); }
};
#endif
// Assumes classes in the SystemDictionary are only unloaded at a safepoint
// Note: anonymous classes are not in the SD.
bool SystemDictionary::do_unloading(BoolObjectClosure* is_alive,
@ -1865,8 +1847,7 @@ bool SystemDictionary::do_unloading(BoolObjectClosure* is_alive,
GCTraceTime(Debug, gc, phases) t("ClassLoaderData", gc_timer);
// First, mark for unload all ClassLoaderData referencing a dead class loader.
unloading_occurred = ClassLoaderDataGraph::do_unloading(is_alive,
do_cleaning);
unloading_occurred = ClassLoaderDataGraph::do_unloading(do_cleaning);
}
if (unloading_occurred) {
@ -1880,17 +1861,12 @@ bool SystemDictionary::do_unloading(BoolObjectClosure* is_alive,
// Oops referenced by the protection domain cache table may get unreachable independently
// of the class loader (eg. cached protection domain oops). So we need to
// explicitly unlink them here.
_pd_cache_table->unlink(is_alive);
#ifdef ASSERT
VerifySDReachableAndLiveClosure cl(is_alive);
_pd_cache_table->oops_do(&cl);
#endif
_pd_cache_table->unlink();
}
if (do_cleaning) {
GCTraceTime(Debug, gc, phases) t("ResolvedMethodTable", gc_timer);
ResolvedMethodTable::unlink(is_alive);
ResolvedMethodTable::unlink();
}
return unloading_occurred;
@ -1906,21 +1882,15 @@ void SystemDictionary::roots_oops_do(OopClosure* strong, OopClosure* weak) {
if (strong == weak || !ClassUnloading) {
// Only the protection domain oops contain references into the heap. Iterate
// over all of them.
_pd_cache_table->oops_do(strong);
vm_weak_oop_storage()->oops_do(strong);
} else {
if (weak != NULL) {
_pd_cache_table->oops_do(weak);
vm_weak_oop_storage()->oops_do(weak);
}
}
// Visit extra methods
invoke_method_table()->oops_do(strong);
if (weak != NULL) {
ResolvedMethodTable::oops_do(weak);
}
}
void SystemDictionary::oops_do(OopClosure* f) {
@ -1929,15 +1899,9 @@ void SystemDictionary::oops_do(OopClosure* f) {
f->do_oop(&_system_loader_lock_obj);
CDS_ONLY(SystemDictionaryShared::oops_do(f);)
// Only the protection domain oops contain references into the heap. Iterate
// over all of them.
_pd_cache_table->oops_do(f);
// Visit extra methods
invoke_method_table()->oops_do(f);
ResolvedMethodTable::oops_do(f);
vm_weak_oop_storage()->oops_do(f);
}
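Note: roots_oops_do() and oops_do() above stop visiting the protection-domain table and ResolvedMethodTable individually and instead hand the closure to vm_weak_oop_storage()->oops_do(), so a single storage walk covers those weak roots. A self-contained toy of that closure-over-storage shape (toy classes standing in for OopClosure and OopStorage):

  #include <vector>
  #include <cstdio>

  typedef void* slot_t;                      // toy stand-in for an oop slot

  struct SlotClosure {                       // shape of an OopClosure
    virtual void do_oop(slot_t* p) = 0;
    virtual ~SlotClosure() {}
  };

  class WeakStorage {                        // shape of a weak OopStorage
    std::vector<slot_t> _slots;
  public:
    void add(slot_t v) { _slots.push_back(v); }
    void oops_do(SlotClosure* cl) { for (slot_t& s : _slots) cl->do_oop(&s); }
  };

  struct CountLive : SlotClosure {
    int live = 0;
    void do_oop(slot_t* p) override { if (*p != nullptr) live++; }
  };

  int main() {
    WeakStorage storage;
    int a = 1, b = 2;
    storage.add(&a);
    storage.add(nullptr);                    // a slot the GC has already cleared
    storage.add(&b);
    CountLive cl;
    storage.oops_do(&cl);                    // one walk visits every registered weak root
    std::printf("live weak roots: %d\n", cl.live);
  }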

@ -357,6 +357,14 @@ Handle SystemDictionaryShared::init_security_info(Handle class_loader, InstanceK
return pd;
}
bool SystemDictionaryShared::is_sharing_possible(ClassLoaderData* loader_data) {
oop class_loader = loader_data->class_loader();
return (class_loader == NULL ||
(UseAppCDS && (SystemDictionary::is_system_class_loader(class_loader) ||
SystemDictionary::is_platform_class_loader(class_loader)))
);
}
// Currently AppCDS only archives classes from the run-time image, the
// -Xbootclasspath/a path, the class path, and the module path.
//

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -300,13 +300,7 @@ public:
}
// Check if sharing is supported for the class loader.
static bool is_sharing_possible(ClassLoaderData* loader_data) {
oop class_loader = loader_data->class_loader();
return (class_loader == NULL ||
(UseAppCDS && (SystemDictionary::is_system_class_loader(class_loader) ||
SystemDictionary::is_platform_class_loader(class_loader)))
);
}
static bool is_sharing_possible(ClassLoaderData* loader_data);
static bool is_shared_class_visible_for_classloader(InstanceKlass* ik,
Handle class_loader,
const char* pkg_string,

@ -613,16 +613,22 @@ void CodeHeapState::aggregate(outputStream* out, CodeHeap* heap, const char* gra
//---< some sanity checks >---
// Do not assert here, just check, print error message and return.
// This is a diagnostic function. It is not supposed to tear down the VM.
if ((char*)h < low_bound ) {
if ((char*)h < low_bound) {
insane = true; ast->print_cr("Sanity check: HeapBlock @%p below low bound (%p)", (char*)h, low_bound);
}
if (ix_end >= granules ) {
if ((char*)h > (low_bound + res_size)) {
insane = true; ast->print_cr("Sanity check: HeapBlock @%p outside reserved range (%p)", (char*)h, low_bound + res_size);
}
if ((char*)h > (low_bound + size)) {
insane = true; ast->print_cr("Sanity check: HeapBlock @%p outside used range (%p)", (char*)h, low_bound + size);
}
if (ix_end >= granules) {
insane = true; ast->print_cr("Sanity check: end index (%d) out of bounds (" SIZE_FORMAT ")", ix_end, granules);
}
if (size != heap->capacity()) {
insane = true; ast->print_cr("Sanity check: code heap capacity has changed (" SIZE_FORMAT "K to " SIZE_FORMAT "K)", size/(size_t)K, heap->capacity()/(size_t)K);
}
if (ix_beg > ix_end ) {
if (ix_beg > ix_end) {
insane = true; ast->print_cr("Sanity check: end index (%d) lower than begin index (%d)", ix_end, ix_beg);
}
if (insane) {
@ -988,6 +994,11 @@ void CodeHeapState::aggregate(outputStream* out, CodeHeap* heap, const char* gra
ast->print_cr(" deadSpace = " SIZE_FORMAT_W(8) "k, nBlocks_dead = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", deadSpace/(size_t)K, nBlocks_dead, (100.0*deadSpace)/size, (100.0*deadSpace)/res_size);
ast->print_cr(" stubSpace = " SIZE_FORMAT_W(8) "k, nBlocks_stub = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", stubSpace/(size_t)K, nBlocks_stub, (100.0*stubSpace)/size, (100.0*stubSpace)/res_size);
ast->print_cr("ZombieBlocks = %8d. These are HeapBlocks which could not be identified as CodeBlobs.", nBlocks_zomb);
ast->cr();
ast->print_cr("Segment start = " INTPTR_FORMAT ", used space = " SIZE_FORMAT_W(8)"k", p2i(low_bound), size/K);
ast->print_cr("Segment end (used) = " INTPTR_FORMAT ", remaining space = " SIZE_FORMAT_W(8)"k", p2i(low_bound) + size, (res_size - size)/K);
ast->print_cr("Segment end (reserved) = " INTPTR_FORMAT ", reserved space = " SIZE_FORMAT_W(8)"k", p2i(low_bound) + res_size, res_size/K);
ast->cr();
ast->print_cr("latest allocated compilation id = %d", latest_compilation_id);
ast->print_cr("highest observed compilation id = %d", highest_compilation_id);
ast->print_cr("Building TopSizeList iterations = %ld", total_iterations);
@ -1218,14 +1229,14 @@ void CodeHeapState::print_usedSpace(outputStream* out, CodeHeap* heap) {
blob_name = this_blob->name();
nm = this_blob->as_nmethod_or_null();
//---< blob address >---
ast->print("%p", this_blob);
ast->print(INTPTR_FORMAT, p2i(this_blob));
ast->fill_to(19);
//---< blob offset from CodeHeap begin >---
ast->print("(+" PTR32_FORMAT ")", (unsigned int)((char*)this_blob-low_bound));
ast->fill_to(33);
} else {
//---< block address >---
ast->print("%p", TopSizeArray[i].start);
ast->print(INTPTR_FORMAT, p2i(TopSizeArray[i].start));
ast->fill_to(19);
//---< block offset from CodeHeap begin >---
ast->print("(+" PTR32_FORMAT ")", (unsigned int)((char*)TopSizeArray[i].start-low_bound));
@ -1404,7 +1415,7 @@ void CodeHeapState::print_freeSpace(outputStream* out, CodeHeap* heap) {
unsigned int ix = 0;
for (ix = 0; ix < alloc_freeBlocks-1; ix++) {
ast->print("%p: Len[%4d] = " HEX32_FORMAT ",", FreeArray[ix].start, ix, FreeArray[ix].len);
ast->print(INTPTR_FORMAT ": Len[%4d] = " HEX32_FORMAT ",", p2i(FreeArray[ix].start), ix, FreeArray[ix].len);
ast->fill_to(38);
ast->print("Gap[%4d..%4d]: " HEX32_FORMAT " bytes,", ix, ix+1, FreeArray[ix].gap);
ast->fill_to(71);
@ -1414,7 +1425,7 @@ void CodeHeapState::print_freeSpace(outputStream* out, CodeHeap* heap) {
}
STRINGSTREAM_FLUSH_LOCKED("\n")
}
ast->print_cr("%p: Len[%4d] = " HEX32_FORMAT, FreeArray[ix].start, ix, FreeArray[ix].len);
ast->print_cr(INTPTR_FORMAT ": Len[%4d] = " HEX32_FORMAT, p2i(FreeArray[ix].start), ix, FreeArray[ix].len);
STRINGSTREAM_FLUSH_LOCKED("\n\n")
}
@ -2039,10 +2050,16 @@ void CodeHeapState::print_age(outputStream* out, CodeHeap* heap) {
}
#define JDK8200450_REMEDY
#define JDK8200450_TRACE
void CodeHeapState::print_names(outputStream* out, CodeHeap* heap) {
if (!initialization_complete) {
return;
}
#ifdef JDK8200450_TRACE
out->print_cr("print_names() entered for heap @ " INTPTR_FORMAT, p2i(heap));
out->flush();
#endif
const char* heapName = get_heapName(heap);
get_HeapStatGlobals(out, heapName);
@ -2057,7 +2074,7 @@ void CodeHeapState::print_names(outputStream* out, CodeHeap* heap) {
CodeBlob* last_blob = NULL;
bool name_in_addr_range = true;
//---< print at least 128K per block >---
//---< print at least 128K per block (i.e. between headers) >---
if (granules_per_line*granule_size < 128*K) {
granules_per_line = (unsigned int)((128*K)/granule_size);
}
@ -2067,7 +2084,7 @@ void CodeHeapState::print_names(outputStream* out, CodeHeap* heap) {
" Due to the living nature of the code heap and because the CodeCache_lock\n"
" is not continuously held, the displayed name might be wrong or no name\n"
" might be found at all. The likelihood for that to happen increases\n"
" over time passed between analysis and print step.\n");
" over time passed between aggregtion and print steps.\n");
STRINGSTREAM_FLUSH_LOCKED("")
for (unsigned int ix = 0; ix < alloc_granules; ix++) {
@ -2078,23 +2095,69 @@ void CodeHeapState::print_names(outputStream* out, CodeHeap* heap) {
}
name_in_addr_range = false;
size_t end_ix = (ix+granules_per_line <= alloc_granules) ? ix+granules_per_line : alloc_granules;
ast->cr();
ast->print_cr("--------------------------------------------------------------------");
ast->print_cr("Address range [%p,%p), " SIZE_FORMAT "k", low_bound+ix*granule_size, low_bound+(ix+granules_per_line)*granule_size, granules_per_line*granule_size/(size_t)K);
ast->print_cr("Address range [" INTPTR_FORMAT "," INTPTR_FORMAT "), " SIZE_FORMAT "k", p2i(low_bound+ix*granule_size), p2i(low_bound + end_ix*granule_size), (end_ix - ix)*granule_size/(size_t)K);
ast->print_cr("--------------------------------------------------------------------");
STRINGSTREAM_FLUSH_LOCKED("")
}
// Only check granule if it contains at least one blob.
unsigned int nBlobs = StatArray[ix].t1_count + StatArray[ix].t2_count + StatArray[ix].tx_count +
StatArray[ix].stub_count + StatArray[ix].dead_count;
if (nBlobs > 0 ) {
#ifdef JDK8200450_REMEDY
if (nBlobs > 0 )
#endif
{
for (unsigned int is = 0; is < granule_size; is+=(unsigned int)seg_size) {
// heap->find_start() is safe. Only working with _segmap. Returns NULL or void*. Returned CodeBlob may be uninitialized.
CodeBlob* this_blob = (CodeBlob *)(heap->find_start(low_bound+ix*granule_size+is));
bool blob_initialized = (this_blob != NULL) &&
((char*)this_blob + this_blob->header_size() == (char*)(this_blob->relocation_begin())) &&
((char*)this_blob + CodeBlob::align_code_offset(this_blob->header_size() + this_blob->relocation_size()) == (char*)(this_blob->content_begin()));
#ifndef JDK8200450_REMEDY
bool blob_initialized = (this_blob != NULL);
#else
#ifndef JDK8200450_TRACE
bool blob_initialized = (this_blob != NULL) && (this_blob->header_size() >= 0) && (this_blob->relocation_size() >= 0) &&
((address)this_blob + this_blob->header_size() == (address)(this_blob->relocation_begin())) &&
((address)this_blob + CodeBlob::align_code_offset(this_blob->header_size() + this_blob->relocation_size()) == (address)(this_blob->content_begin())) &&
is_readable_pointer((address)(this_blob->relocation_begin())) &&
is_readable_pointer(this_blob->content_begin());
#else
int hdr_size = 0;
int reloc_size = 0;
address reloc_begin = NULL;
address cntnt_begin = NULL;
if (this_blob != NULL) {
hdr_size = this_blob->header_size();
reloc_size = this_blob->relocation_size();
reloc_begin = (address)(this_blob->relocation_begin());
cntnt_begin = this_blob->content_begin();
}
bool blob_initialized = (this_blob != NULL) && (hdr_size >= 0) && (reloc_size >= 0) &&
((address)this_blob + hdr_size == reloc_begin) &&
((address)this_blob + CodeBlob::align_code_offset(hdr_size + reloc_size) == cntnt_begin) &&
is_readable_pointer(reloc_begin) &&
is_readable_pointer(cntnt_begin);
#endif
#endif
if (blob_initialized && (this_blob != last_blob)) {
last_blob = this_blob;
//---< get type and name >---
blobType cbType = noType;
if (segment_granules) {
cbType = (blobType)StatArray[ix].type;
} else {
cbType = get_cbType(this_blob); // Is this here safe?
}
// this_blob->name() could return NULL if no name is given to CTOR. Inlined, maybe invisible on stack
const char* blob_name = this_blob->name();
#ifdef JDK8200450_REMEDY
if (blob_name == NULL) {
blob_name = "<unavailable>";
}
#endif
//---< print table header for new print range >---
if (!name_in_addr_range) {
name_in_addr_range = true;
ast->fill_to(51);
@ -2102,32 +2165,34 @@ void CodeHeapState::print_names(outputStream* out, CodeHeap* heap) {
ast->fill_to(61);
ast->print_cr("%6s", "method");
ast->print_cr("%18s %13s %17s %9s %5s %18s %s", "Addr(module) ", "offset", "size", " type lvl", " temp", "blobType ", "Name");
STRINGSTREAM_FLUSH_LOCKED("")
}
//---< Print blobTypeName as recorded during analysis >---
ast->print("%p", this_blob);
//---< print line prefix (address and offset from CodeHeap start) >---
ast->print(INTPTR_FORMAT, p2i(this_blob));
ast->fill_to(19);
ast->print("(+" PTR32_FORMAT ")", (unsigned int)((char*)this_blob-low_bound));
ast->fill_to(33);
//---< print size, name, and signature (for nMethods) >---
// this_blob->name() could return NULL if no name is given to CTOR. Inlined, maybe not visible on stack
const char* blob_name = this_blob->name();
if (blob_name == 0) {
blob_name = "<unavailable>";
}
// this_blob->as_nmethod_or_null() is safe. Inlined, maybe not visible on stack.
nmethod* nm = this_blob->as_nmethod_or_null();
blobType cbType = noType;
if (segment_granules) {
cbType = (blobType)StatArray[ix].type;
} else {
cbType = get_cbType(this_blob);
}
if ((nm != NULL) && (nm->method() != NULL)) {
#ifdef JDK8200450_TRACE
STRINGSTREAM_FLUSH_LOCKED("") // Remove before push!!!
#endif
// this_blob->as_nmethod_or_null() is safe. Inlined, maybe invisible on stack.
nmethod* nm = this_blob->as_nmethod_or_null();
Method* method = (nm == NULL) ? NULL : nm->method(); // may be uninitialized, i.e. != NULL, but invalid
#ifdef JDK8200450_REMEDY
if ((nm != NULL) && (method != NULL) && is_readable_pointer(method) && is_readable_pointer(method->constants())) {
#else
if ((nm != NULL) && (method != NULL)) {
#endif
ResourceMark rm;
//---< nMethod size in hex >---
//---< collect all data to locals as quickly as possible >---
unsigned int total_size = nm->total_size();
int hotness = nm->hotness_counter();
bool nm_zombie = nm->is_zombie();
bool get_name = nm->is_in_use() || nm->is_not_entrant();
//---< nMethod size in hex >---
ast->print(PTR32_FORMAT, total_size);
ast->print("(" SIZE_FORMAT_W(4) "K)", total_size/K);
//---< compiler information >---
@ -2135,21 +2200,36 @@ void CodeHeapState::print_names(outputStream* out, CodeHeap* heap) {
ast->print("%5s %3d", compTypeName[StatArray[ix].compiler], StatArray[ix].level);
//---< method temperature >---
ast->fill_to(62);
ast->print("%5d", nm->hotness_counter());
ast->print("%5d", hotness);
//---< name and signature >---
ast->fill_to(62+6);
ast->print("%s", blobTypeName[cbType]);
ast->fill_to(82+6);
if (nm->is_in_use()) {
blob_name = nm->method()->name_and_sig_as_C_string();
}
if (nm->is_not_entrant()) {
blob_name = nm->method()->name_and_sig_as_C_string();
}
if (nm->is_zombie()) {
if (nm_zombie) {
ast->print("%14s", " zombie method");
}
ast->print("%s", blob_name);
#ifdef JDK8200450_TRACE
STRINGSTREAM_FLUSH_LOCKED("") // Remove before push!!!
#endif
if (get_name) {
#ifdef JDK8200450_REMEDY
Symbol* methName = method->name();
const char* methNameS = (methName == NULL) ? NULL : methName->as_C_string();
methNameS = (methNameS == NULL) ? "<method name unavailable>" : methNameS;
Symbol* methSig = method->signature();
const char* methSigS = (methSig == NULL) ? NULL : methSig->as_C_string();
methSigS = (methSigS == NULL) ? "<method signature unavailable>" : methSigS;
ast->print("%s", methNameS);
ast->print("%s", methSigS);
#else
blob_name = method->name_and_sig_as_C_string();
ast->print("%s", blob_name);
#endif
} else {
ast->print("%s", blob_name);
}
} else {
ast->fill_to(62+6);
ast->print("%s", blobTypeName[cbType]);
@ -2157,12 +2237,48 @@ void CodeHeapState::print_names(outputStream* out, CodeHeap* heap) {
ast->print("%s", blob_name);
}
STRINGSTREAM_FLUSH_LOCKED("\n")
#ifdef JDK8200450_TRACE
if ((nm != NULL) && (method != NULL) && !(is_readable_pointer(method) && is_readable_pointer(method->constants()))) {
ast->print("Potential CodeHeap State Analytics issue found.\n");
if (is_readable_pointer(method)) {
ast->print(" Issue would have been detected by is_readable_pointer(" INTPTR_FORMAT "(method->constants())) check.\n", p2i(method->constants()));
} else {
ast->print(" Issue would have been detected by is_readable_pointer(" INTPTR_FORMAT "(method)) check.\n", p2i(method));
}
STRINGSTREAM_FLUSH_LOCKED("\n")
}
#endif
} else if (!blob_initialized && (this_blob != last_blob) && (this_blob != NULL)) {
last_blob = this_blob;
} else if (!blob_initialized && (this_blob != NULL)) {
last_blob = this_blob;
#ifdef JDK8200450_TRACE
ast->print("Potential CodeHeap State Analytics issue found.\n");
if (nBlobs == 0) {
ast->print(" Issue would have been detected by (nBlobs > 0) check.\n");
} else {
if (!((address)this_blob + hdr_size == reloc_begin)) {
ast->print(" Issue would have been detected by (this(" INTPTR_FORMAT ") + header(%d) == relocation_begin(" INTPTR_FORMAT ")) check.\n", p2i(this_blob), hdr_size, p2i(reloc_begin));
}
if (!((address)this_blob + CodeBlob::align_code_offset(hdr_size + reloc_size) == cntnt_begin)) {
ast->print(" Issue would have been detected by (this(" INTPTR_FORMAT ") + header(%d) + relocation(%d) == content_begin(" INTPTR_FORMAT ")) check.\n", p2i(this_blob), hdr_size, reloc_size, p2i(cntnt_begin));
}
if (hdr_size != this_blob->header_size()) {
ast->print(" header_size meanwhile changed from %d to %d\n", hdr_size, this_blob->header_size());
}
if (reloc_size != this_blob->relocation_size()) {
ast->print(" relocation_size meanwhile changed from %d to %d\n", reloc_size, this_blob->relocation_size());
}
if (reloc_begin != (address)(this_blob->relocation_begin())) {
ast->print(" relocation_begin meanwhile changed from " INTPTR_FORMAT " to " INTPTR_FORMAT "\n", p2i(reloc_begin), p2i(this_blob->relocation_begin()));
}
if (cntnt_begin != this_blob->content_begin()) {
ast->print(" relocation_begin meanwhile changed from " INTPTR_FORMAT " to " INTPTR_FORMAT "\n", p2i(cntnt_begin), p2i(this_blob->content_begin()));
}
}
STRINGSTREAM_FLUSH_LOCKED("\n")
#endif
}
}
}
} // nBlobs > 0
}
STRINGSTREAM_FLUSH_LOCKED("\n\n")
}
@ -2287,7 +2403,7 @@ void CodeHeapState::print_line_delim(outputStream* out, outputStream* ast, char*
ast->cr();
assert(out == ast, "must use the same stream!");
ast->print("%p", low_bound + ix*granule_size);
ast->print(INTPTR_FORMAT, p2i(low_bound + ix*granule_size));
ast->fill_to(19);
ast->print("(+" PTR32_FORMAT "): |", (unsigned int)(ix*granule_size));
}
@ -2307,7 +2423,7 @@ void CodeHeapState::print_line_delim(outputStream* out, bufferedStream* ast, cha
ast->reset();
}
ast->print("%p", low_bound + ix*granule_size);
ast->print(INTPTR_FORMAT, p2i(low_bound + ix*granule_size));
ast->fill_to(19);
ast->print("(+" PTR32_FORMAT "): |", (unsigned int)(ix*granule_size));
}
@ -2336,3 +2452,17 @@ CodeHeapState::blobType CodeHeapState::get_cbType(CodeBlob* cb) {
}
return noType;
}
// Check if pointer can be read from (4-byte read access).
// Helps to prove validity of a not-NULL pointer.
// Returns true in very early stages of VM life when stub is not yet generated.
#define SAFEFETCH_DEFAULT true
bool CodeHeapState::is_readable_pointer(const void* p) {
if (!CanUseSafeFetch32()) {
return SAFEFETCH_DEFAULT;
}
int* const aligned = (int*) align_down((intptr_t)p, 4);
int cafebabe = 0xcafebabe; // tester value 1
int deadbeef = 0xdeadbeef; // tester value 2
return (SafeFetch32(aligned, cafebabe) != cafebabe) || (SafeFetch32(aligned, deadbeef) != deadbeef);
}

@ -93,6 +93,7 @@ class CodeHeapState : public CHeapObj<mtCode> {
static void print_line_delim(outputStream* out, bufferedStream *sst, char* low_bound, unsigned int ix, unsigned int gpl);
static void print_line_delim(outputStream* out, outputStream *sst, char* low_bound, unsigned int ix, unsigned int gpl);
static blobType get_cbType(CodeBlob* cb);
static bool is_readable_pointer(const void* p);
public:
static void discard(outputStream* out, CodeHeap* heap);

@ -99,7 +99,7 @@ void CompiledMethod::add_exception_cache_entry(ExceptionCache* new_entry) {
release_set_exception_cache(new_entry);
}
void CompiledMethod::clean_exception_cache(BoolObjectClosure* is_alive) {
void CompiledMethod::clean_exception_cache() {
ExceptionCache* prev = NULL;
ExceptionCache* curr = exception_cache();
@ -107,7 +107,7 @@ void CompiledMethod::clean_exception_cache(BoolObjectClosure* is_alive) {
ExceptionCache* next = curr->next();
Klass* ex_klass = curr->exception_type();
if (ex_klass != NULL && !ex_klass->is_loader_alive(is_alive)) {
if (ex_klass != NULL && !ex_klass->is_loader_alive()) {
if (prev == NULL) {
set_exception_cache(next);
} else {
@ -369,56 +369,42 @@ void CompiledMethod::clear_ic_stubs() {
}
#ifdef ASSERT
class CheckClass : AllStatic {
static BoolObjectClosure* _is_alive;
// Check class_loader is alive for this bit of metadata.
static void check_class(Metadata* md) {
Klass* klass = NULL;
if (md->is_klass()) {
klass = ((Klass*)md);
} else if (md->is_method()) {
klass = ((Method*)md)->method_holder();
} else if (md->is_methodData()) {
klass = ((MethodData*)md)->method()->method_holder();
} else {
md->print();
ShouldNotReachHere();
}
assert(klass->is_loader_alive(_is_alive), "must be alive");
}
public:
static void do_check_class(BoolObjectClosure* is_alive, CompiledMethod* nm) {
assert(SafepointSynchronize::is_at_safepoint(), "this is only ok at safepoint");
_is_alive = is_alive;
nm->metadata_do(check_class);
}
};
// This is called during a safepoint so can use static data
BoolObjectClosure* CheckClass::_is_alive = NULL;
// Check class_loader is alive for this bit of metadata.
static void check_class(Metadata* md) {
Klass* klass = NULL;
if (md->is_klass()) {
klass = ((Klass*)md);
} else if (md->is_method()) {
klass = ((Method*)md)->method_holder();
} else if (md->is_methodData()) {
klass = ((MethodData*)md)->method()->method_holder();
} else {
md->print();
ShouldNotReachHere();
}
assert(klass->is_loader_alive(), "must be alive");
}
#endif // ASSERT
void CompiledMethod::clean_ic_if_metadata_is_dead(CompiledIC *ic, BoolObjectClosure *is_alive) {
void CompiledMethod::clean_ic_if_metadata_is_dead(CompiledIC *ic) {
if (ic->is_icholder_call()) {
// The only exception is compiledICHolder oops which may
// yet be marked below. (We check this further below).
CompiledICHolder* cichk_oop = ic->cached_icholder();
if (cichk_oop->is_loader_alive(is_alive)) {
if (cichk_oop->is_loader_alive()) {
return;
}
} else {
Metadata* ic_oop = ic->cached_metadata();
if (ic_oop != NULL) {
if (ic_oop->is_klass()) {
if (((Klass*)ic_oop)->is_loader_alive(is_alive)) {
if (((Klass*)ic_oop)->is_loader_alive()) {
return;
}
} else if (ic_oop->is_method()) {
if (((Method*)ic_oop)->method_holder()->is_loader_alive(is_alive)) {
if (((Method*)ic_oop)->method_holder()->is_loader_alive()) {
return;
}
} else {
@ -453,7 +439,7 @@ unsigned char CompiledMethod::unloading_clock() {
// all strong references alive. Any weak references should have been
// cleared as well. Visit all the metadata and ensure that it's
// really alive.
void CompiledMethod::verify_metadata_loaders(address low_boundary, BoolObjectClosure* is_alive) {
void CompiledMethod::verify_metadata_loaders(address low_boundary) {
#ifdef ASSERT
RelocIterator iter(this, low_boundary);
while (iter.next()) {
@ -483,7 +469,7 @@ void CompiledMethod::verify_metadata_loaders(address low_boundary, BoolObjectClo
}
}
// Check that the metadata embedded in the nmethod is alive
CheckClass::do_check_class(is_alive, this);
metadata_do(check_class);
#endif
}
@ -518,7 +504,7 @@ void CompiledMethod::do_unloading(BoolObjectClosure* is_alive, bool unloading_oc
}
// Exception cache
clean_exception_cache(is_alive);
clean_exception_cache();
// If class unloading occurred we first iterate over all inline caches and
// clear ICs where the cached oop is referring to an unloaded klass or method.
@ -529,7 +515,7 @@ void CompiledMethod::do_unloading(BoolObjectClosure* is_alive, bool unloading_oc
while(iter.next()) {
if (iter.type() == relocInfo::virtual_call_type) {
CompiledIC *ic = CompiledIC_at(&iter);
clean_ic_if_metadata_is_dead(ic, is_alive);
clean_ic_if_metadata_is_dead(ic);
}
}
}
@ -545,7 +531,7 @@ void CompiledMethod::do_unloading(BoolObjectClosure* is_alive, bool unloading_oc
#endif
// Ensure that all metadata is still alive
verify_metadata_loaders(low_boundary, is_alive);
verify_metadata_loaders(low_boundary);
}
template <class CompiledICorStaticCall>
@ -606,7 +592,7 @@ bool CompiledMethod::do_unloading_parallel(BoolObjectClosure* is_alive, bool unl
}
// Exception cache
clean_exception_cache(is_alive);
clean_exception_cache();
bool postponed = false;
@ -619,7 +605,7 @@ bool CompiledMethod::do_unloading_parallel(BoolObjectClosure* is_alive, bool unl
if (unloading_occurred) {
// If class unloading occurred we first iterate over all inline caches and
// clear ICs where the cached oop is referring to an unloaded klass or method.
clean_ic_if_metadata_is_dead(CompiledIC_at(&iter), is_alive);
clean_ic_if_metadata_is_dead(CompiledIC_at(&iter));
}
postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
@ -656,7 +642,7 @@ bool CompiledMethod::do_unloading_parallel(BoolObjectClosure* is_alive, bool unl
#endif
// Ensure that all metadata is still alive
verify_metadata_loaders(low_boundary, is_alive);
verify_metadata_loaders(low_boundary);
return postponed;
}

View File

@ -295,7 +295,7 @@ public:
void release_set_exception_cache(ExceptionCache *ec);
address handler_for_exception_and_pc(Handle exception, address pc);
void add_handler_for_exception_and_pc(Handle exception, address pc, address handler);
void clean_exception_cache(BoolObjectClosure* is_alive);
void clean_exception_cache();
void add_exception_cache_entry(ExceptionCache* new_entry);
ExceptionCache* exception_cache_entry_for_exception(Handle exception);
@ -364,10 +364,10 @@ public:
void set_unloading_next(CompiledMethod* next) { _unloading_next = next; }
CompiledMethod* unloading_next() { return _unloading_next; }
void static clean_ic_if_metadata_is_dead(CompiledIC *ic, BoolObjectClosure *is_alive);
void static clean_ic_if_metadata_is_dead(CompiledIC *ic);
// Check that all metadata is still alive
void verify_metadata_loaders(address low_boundary, BoolObjectClosure* is_alive);
void verify_metadata_loaders(address low_boundary);
virtual void do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred);
// The parallel versions are used by G1.

View File

@ -1365,7 +1365,7 @@ void nmethod::flush_dependencies(BoolObjectClosure* is_alive) {
}
// During GC the is_alive closure is non-NULL, and is used to
// determine liveness of dependees that need to be updated.
if (is_alive == NULL || klass->is_loader_alive(is_alive)) {
if (is_alive == NULL || klass->is_loader_alive()) {
// The GC defers deletion of this entry, since there might be multiple threads
// iterating over the _dependencies graph. Other call paths are single-threaded
// and may delete it immediately.

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2007, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2007, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -113,7 +113,7 @@ class PushAndMarkClosure: public MetadataAwareOopClosure {
public:
PushAndMarkClosure(CMSCollector* collector,
MemRegion span,
ReferenceProcessor* rp,
ReferenceDiscoverer* rd,
CMSBitMap* bit_map,
CMSBitMap* mod_union_table,
CMSMarkStack* mark_stack,
@ -141,7 +141,7 @@ class ParPushAndMarkClosure: public MetadataAwareOopClosure {
public:
ParPushAndMarkClosure(CMSCollector* collector,
MemRegion span,
ReferenceProcessor* rp,
ReferenceDiscoverer* rd,
CMSBitMap* bit_map,
OopTaskQueue* work_queue);
virtual void do_oop(oop* p);
@ -166,7 +166,7 @@ class MarkRefsIntoAndScanClosure: public MetadataAwareOopsInGenClosure {
DO_OOP_WORK_DEFN
public:
MarkRefsIntoAndScanClosure(MemRegion span,
ReferenceProcessor* rp,
ReferenceDiscoverer* rd,
CMSBitMap* bit_map,
CMSBitMap* mod_union_table,
CMSMarkStack* mark_stack,
@ -204,7 +204,7 @@ class ParMarkRefsIntoAndScanClosure: public MetadataAwareOopsInGenClosure {
public:
ParMarkRefsIntoAndScanClosure(CMSCollector* collector,
MemRegion span,
ReferenceProcessor* rp,
ReferenceDiscoverer* rd,
CMSBitMap* bit_map,
OopTaskQueue* work_queue);
virtual void do_oop(oop* p);

View File

@ -5244,7 +5244,7 @@ void CMSCollector::refProcessingWork() {
CodeCache::do_unloading(&_is_alive_closure, purged_class);
// Prune dead klasses from subklass/sibling/implementor lists.
Klass::clean_weak_klass_links(&_is_alive_closure);
Klass::clean_weak_klass_links();
}
{
@ -5825,7 +5825,7 @@ MarkRefsIntoClosure::MarkRefsIntoClosure(
_span(span),
_bitMap(bitMap)
{
assert(ref_processor() == NULL, "deliberately left NULL");
assert(ref_discoverer() == NULL, "deliberately left NULL");
assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
}
@ -5847,7 +5847,7 @@ ParMarkRefsIntoClosure::ParMarkRefsIntoClosure(
_span(span),
_bitMap(bitMap)
{
assert(ref_processor() == NULL, "deliberately left NULL");
assert(ref_discoverer() == NULL, "deliberately left NULL");
assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
}
@ -5871,7 +5871,7 @@ MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
_verification_bm(verification_bm),
_cms_bm(cms_bm)
{
assert(ref_processor() == NULL, "deliberately left NULL");
assert(ref_discoverer() == NULL, "deliberately left NULL");
assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch");
}
@ -5900,7 +5900,7 @@ void MarkRefsIntoVerifyClosure::do_oop(narrowOop* p) { MarkRefsIntoVerifyClosure
//////////////////////////////////////////////////
MarkRefsIntoAndScanClosure::MarkRefsIntoAndScanClosure(MemRegion span,
ReferenceProcessor* rp,
ReferenceDiscoverer* rd,
CMSBitMap* bit_map,
CMSBitMap* mod_union_table,
CMSMarkStack* mark_stack,
@ -5911,15 +5911,15 @@ MarkRefsIntoAndScanClosure::MarkRefsIntoAndScanClosure(MemRegion span,
_span(span),
_bit_map(bit_map),
_mark_stack(mark_stack),
_pushAndMarkClosure(collector, span, rp, bit_map, mod_union_table,
_pushAndMarkClosure(collector, span, rd, bit_map, mod_union_table,
mark_stack, concurrent_precleaning),
_yield(should_yield),
_concurrent_precleaning(concurrent_precleaning),
_freelistLock(NULL)
{
// FIXME: Should initialize in base class constructor.
assert(rp != NULL, "ref_processor shouldn't be NULL");
set_ref_processor_internal(rp);
assert(rd != NULL, "ref_discoverer shouldn't be NULL");
set_ref_discoverer_internal(rd);
}
// This closure is used to mark refs into the CMS generation at the
@ -6004,18 +6004,18 @@ void MarkRefsIntoAndScanClosure::do_yield_work() {
// MarkRefsIntoAndScanClosure
///////////////////////////////////////////////////////////
ParMarkRefsIntoAndScanClosure::ParMarkRefsIntoAndScanClosure(
CMSCollector* collector, MemRegion span, ReferenceProcessor* rp,
CMSCollector* collector, MemRegion span, ReferenceDiscoverer* rd,
CMSBitMap* bit_map, OopTaskQueue* work_queue):
_span(span),
_bit_map(bit_map),
_work_queue(work_queue),
_low_water_mark(MIN2((work_queue->max_elems()/4),
((uint)CMSWorkQueueDrainThreshold * ParallelGCThreads))),
_parPushAndMarkClosure(collector, span, rp, bit_map, work_queue)
_parPushAndMarkClosure(collector, span, rd, bit_map, work_queue)
{
// FIXME: Should initialize in base class constructor.
assert(rp != NULL, "ref_processor shouldn't be NULL");
set_ref_processor_internal(rp);
assert(rd != NULL, "ref_discoverer shouldn't be NULL");
set_ref_discoverer_internal(rd);
}
// This closure is used to mark refs into the CMS generation at the
@ -6842,12 +6842,12 @@ void ParPushOrMarkClosure::do_oop(narrowOop* p) { ParPushOrMarkClosure::do_oop_w
PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
MemRegion span,
ReferenceProcessor* rp,
ReferenceDiscoverer* rd,
CMSBitMap* bit_map,
CMSBitMap* mod_union_table,
CMSMarkStack* mark_stack,
bool concurrent_precleaning):
MetadataAwareOopClosure(rp),
MetadataAwareOopClosure(rd),
_collector(collector),
_span(span),
_bit_map(bit_map),
@ -6855,7 +6855,7 @@ PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
_mark_stack(mark_stack),
_concurrent_precleaning(concurrent_precleaning)
{
assert(ref_processor() != NULL, "ref_processor shouldn't be NULL");
assert(ref_discoverer() != NULL, "ref_discoverer shouldn't be NULL");
}
// Grey object rescan during pre-cleaning and second checkpoint phases --
@ -6916,16 +6916,16 @@ void PushAndMarkClosure::do_oop(oop obj) {
ParPushAndMarkClosure::ParPushAndMarkClosure(CMSCollector* collector,
MemRegion span,
ReferenceProcessor* rp,
ReferenceDiscoverer* rd,
CMSBitMap* bit_map,
OopTaskQueue* work_queue):
MetadataAwareOopClosure(rp),
MetadataAwareOopClosure(rd),
_collector(collector),
_span(span),
_bit_map(bit_map),
_work_queue(work_queue)
{
assert(ref_processor() != NULL, "ref_processor shouldn't be NULL");
assert(ref_discoverer() != NULL, "ref_discoverer shouldn't be NULL");
}
void PushAndMarkClosure::do_oop(oop* p) { PushAndMarkClosure::do_oop_work(p); }

View File

@ -3125,7 +3125,7 @@ public:
ReferenceProcessor* rp = _g1h->ref_processor_stw();
G1ParScanThreadState* pss = _pss->state_for_worker(worker_id);
pss->set_ref_processor(rp);
pss->set_ref_discoverer(rp);
double start_strong_roots_sec = os::elapsedTime();
@ -3457,13 +3457,11 @@ private:
Monitor* G1CodeCacheUnloadingTask::_lock = new Monitor(Mutex::leaf, "Code Cache Unload lock", false, Monitor::_safepoint_check_never);
class G1KlassCleaningTask : public StackObj {
BoolObjectClosure* _is_alive;
volatile int _clean_klass_tree_claimed;
ClassLoaderDataGraphKlassIteratorAtomic _klass_iterator;
public:
G1KlassCleaningTask(BoolObjectClosure* is_alive) :
_is_alive(is_alive),
G1KlassCleaningTask() :
_clean_klass_tree_claimed(0),
_klass_iterator() {
}
@ -3490,7 +3488,7 @@ class G1KlassCleaningTask : public StackObj {
public:
void clean_klass(InstanceKlass* ik) {
ik->clean_weak_instanceklass_links(_is_alive);
ik->clean_weak_instanceklass_links();
}
void work() {
@ -3498,7 +3496,7 @@ public:
// One worker will clean the subklass/sibling klass tree.
if (claim_clean_klass_tree_task()) {
Klass::clean_subklass_tree(_is_alive);
Klass::clean_subklass_tree();
}
// All workers will help cleaning the classes,
@ -3510,11 +3508,10 @@ public:
};
class G1ResolvedMethodCleaningTask : public StackObj {
BoolObjectClosure* _is_alive;
volatile int _resolved_method_task_claimed;
public:
G1ResolvedMethodCleaningTask(BoolObjectClosure* is_alive) :
_is_alive(is_alive), _resolved_method_task_claimed(0) {}
G1ResolvedMethodCleaningTask() :
_resolved_method_task_claimed(0) {}
bool claim_resolved_method_task() {
if (_resolved_method_task_claimed) {
@ -3526,7 +3523,7 @@ public:
// These aren't big, one thread can do it all.
void work() {
if (claim_resolved_method_task()) {
ResolvedMethodTable::unlink(_is_alive);
ResolvedMethodTable::unlink();
}
}
};
@ -3546,8 +3543,8 @@ public:
AbstractGangTask("Parallel Cleaning"),
_string_symbol_task(is_alive, true, true, G1StringDedup::is_enabled()),
_code_cache_task(num_workers, is_alive, unloading_occurred),
_klass_cleaning_task(is_alive),
_resolved_method_cleaning_task(is_alive) {
_klass_cleaning_task(),
_resolved_method_cleaning_task() {
}
// The parallel work done by all worker threads.
@ -3823,7 +3820,7 @@ public:
G1STWIsAliveClosure is_alive(_g1h);
G1ParScanThreadState* pss = _pss->state_for_worker(worker_id);
pss->set_ref_processor(NULL);
pss->set_ref_discoverer(NULL);
// Keep alive closure.
G1CopyingKeepAliveClosure keep_alive(_g1h, pss->closures()->raw_strong_oops(), pss);
@ -3915,7 +3912,7 @@ public:
HandleMark hm;
G1ParScanThreadState* pss = _pss->state_for_worker(worker_id);
pss->set_ref_processor(NULL);
pss->set_ref_discoverer(NULL);
assert(pss->queue_is_empty(), "both queue and overflow should be empty");
// Is alive closure
@ -4021,7 +4018,7 @@ void G1CollectedHeap::process_discovered_references(G1ParScanThreadStateSet* per
// Use only a single queue for this PSS.
G1ParScanThreadState* pss = per_thread_states->state_for_worker(0);
pss->set_ref_processor(NULL);
pss->set_ref_discoverer(NULL);
assert(pss->queue_is_empty(), "pre-condition");
// Keep alive closure.

View File

@ -1413,14 +1413,13 @@ bool G1CMIsAliveClosure::do_object_b(oop obj) {
class G1CMKeepAliveAndDrainClosure : public OopClosure {
G1ConcurrentMark* _cm;
G1CMTask* _task;
int _ref_counter_limit;
int _ref_counter;
uint _ref_counter_limit;
uint _ref_counter;
bool _is_serial;
public:
G1CMKeepAliveAndDrainClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
_cm(cm), _task(task), _is_serial(is_serial),
_ref_counter_limit(G1RefProcDrainInterval) {
assert(_ref_counter_limit > 0, "sanity");
assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
_ref_counter = _ref_counter_limit;
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -37,6 +37,7 @@
#include "gc/g1/g1OopClosures.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/preservedMarks.hpp"
#include "gc/shared/referenceProcessor.hpp"
@ -72,10 +73,40 @@ ReferenceProcessor* G1FullCollector::reference_processor() {
return _heap->ref_processor_stw();
}
uint G1FullCollector::calc_active_workers() {
G1CollectedHeap* heap = G1CollectedHeap::heap();
uint max_worker_count = heap->workers()->total_workers();
// Only calculate number of workers if UseDynamicNumberOfGCThreads
// is enabled, otherwise use max.
if (!UseDynamicNumberOfGCThreads) {
return max_worker_count;
}
// Consider G1HeapWastePercent to decide max number of workers. Each worker
// will on average cause half a region of waste.
uint max_wasted_regions_allowed = ((heap->num_regions() * G1HeapWastePercent) / 100);
uint waste_worker_count = MAX2((max_wasted_regions_allowed * 2) , 1u);
uint heap_waste_worker_limit = MIN2(waste_worker_count, max_worker_count);
// Also consider HeapSizePerGCThread by calling AdaptiveSizePolicy to calculate
// the number of workers.
uint current_active_workers = heap->workers()->active_workers();
uint adaptive_worker_limit = AdaptiveSizePolicy::calc_active_workers(max_worker_count, current_active_workers, 0);
// Update active workers to the lower of the limits.
uint worker_count = MIN2(heap_waste_worker_limit, adaptive_worker_limit);
log_debug(gc, task)("Requesting %u active workers for full compaction (waste limited workers: %u, adaptive workers: %u)",
worker_count, heap_waste_worker_limit, adaptive_worker_limit);
worker_count = heap->workers()->update_active_workers(worker_count);
log_info(gc, task)("Using %u workers of %u for full compaction", worker_count, max_worker_count);
return worker_count;
}
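As a worked illustration of the sizing above (numbers assumed, not taken from this change): with a 2048-region heap and G1HeapWastePercent at its default of 5, max_wasted_regions_allowed is 2048 * 5 / 100 = 102, so waste_worker_count is 204; heap_waste_worker_limit is then min(204, total workers), and the final request is the smaller of that and the AdaptiveSizePolicy suggestion.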
G1FullCollector::G1FullCollector(G1CollectedHeap* heap, GCMemoryManager* memory_manager, bool explicit_gc, bool clear_soft_refs) :
_heap(heap),
_scope(memory_manager, explicit_gc, clear_soft_refs),
_num_workers(heap->workers()->active_workers()),
_num_workers(calc_active_workers()),
_oop_queue_set(_num_workers),
_array_queue_set(_num_workers),
_preserved_marks_set(true),

View File

@ -56,6 +56,8 @@ class G1FullCollector : StackObj {
G1IsAliveClosure _is_alive;
ReferenceProcessorIsAliveMutator _is_alive_mutator;
static uint calc_active_workers();
public:
G1FullCollector(G1CollectedHeap* heap, GCMemoryManager* memory_manager, bool explicit_gc, bool clear_soft_refs);
~G1FullCollector();

View File

@ -60,7 +60,7 @@ class G1MarkAndPushClosure : public ExtendedOopClosure {
uint _worker_id;
public:
G1MarkAndPushClosure(uint worker, G1FullGCMarker* marker, ReferenceProcessor* ref) :
G1MarkAndPushClosure(uint worker, G1FullGCMarker* marker, ReferenceDiscoverer* ref) :
_marker(marker),
_worker_id(worker),
ExtendedOopClosure(ref) { }

View File

@ -27,6 +27,7 @@
#include "gc/g1/g1MonitoringSupport.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/shared/hSpaceCounters.hpp"
#include "memory/metaspaceCounters.hpp"
G1GenerationCounters::G1GenerationCounters(G1MonitoringSupport* g1mm,
const char* name,

View File

@ -96,8 +96,8 @@ public:
virtual void do_oop(oop* p) { do_oop_nv(p); }
virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
void set_ref_processor(ReferenceProcessor* rp) {
set_ref_processor_internal(rp);
void set_ref_discoverer(ReferenceDiscoverer* rd) {
set_ref_discoverer_internal(rd);
}
};

View File

@ -87,7 +87,7 @@ class G1ParScanThreadState : public CHeapObj<mtGC> {
G1ParScanThreadState(G1CollectedHeap* g1h, uint worker_id, size_t young_cset_length);
virtual ~G1ParScanThreadState();
void set_ref_processor(ReferenceProcessor* rp) { _scanner.set_ref_processor(rp); }
void set_ref_discoverer(ReferenceDiscoverer* rd) { _scanner.set_ref_discoverer(rd); }
#ifdef ASSERT
bool queue_is_empty() const { return _refs->is_empty(); }

View File

@ -74,7 +74,7 @@
"in milliseconds.") \
range(1.0, DBL_MAX) \
\
product(int, G1RefProcDrainInterval, 10, \
product(uint, G1RefProcDrainInterval, 1000, \
"The number of discovered reference objects to process before " \
"draining concurrent marking work queues.") \
range(1, INT_MAX) \

View File

@ -41,6 +41,7 @@
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcWhen.hpp"
#include "logging/log.hpp"
#include "memory/metaspaceCounters.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"

View File

@ -562,7 +562,7 @@ void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
CodeCache::do_unloading(is_alive_closure(), purged_class);
// Prune dead klasses from subklass/sibling/implementor lists.
Klass::clean_weak_klass_links(is_alive_closure());
Klass::clean_weak_klass_links();
}
{

View File

@ -2139,7 +2139,7 @@ void PSParallelCompact::marking_phase(ParCompactionManager* cm,
CodeCache::do_unloading(is_alive_closure(), purged_class);
// Prune dead klasses from subklass/sibling/implementor lists.
Klass::clean_weak_klass_links(is_alive_closure());
Klass::clean_weak_klass_links();
}
{

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -234,7 +234,7 @@ void GenMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
CodeCache::do_unloading(&is_alive, purged_class);
// Prune dead klasses from subklass/sibling/implementor lists.
Klass::clean_weak_klass_links(&is_alive);
Klass::clean_weak_klass_links();
}
{

View File

@ -211,7 +211,7 @@ void MarkSweep::preserve_mark(oop obj, markOop mark) {
void MarkSweep::set_ref_processor(ReferenceProcessor* rp) {
_ref_processor = rp;
mark_and_push_closure.set_ref_processor(_ref_processor);
mark_and_push_closure.set_ref_discoverer(_ref_processor);
}
AdjustPointerClosure MarkSweep::adjust_pointer_closure;

View File

@ -184,8 +184,8 @@ public:
virtual void do_cld(ClassLoaderData* cld);
void do_cld_nv(ClassLoaderData* cld);
void set_ref_processor(ReferenceProcessor* rp) {
set_ref_processor_internal(rp);
void set_ref_discoverer(ReferenceDiscoverer* rd) {
set_ref_discoverer_internal(rd);
}
};

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2004, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2004, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -355,6 +355,7 @@ class AdaptiveSizePolicy : public CHeapObj<mtGC> {
// For ParNew collections
// For PS scavenge and ParOld collections
// For G1 evacuation pauses (subject to update)
// For G1 Full GCs (subject to update)
// Other collection phases inherit the number of
// GC workers from the calls above. For example,
// a CMS parallel remark uses the same number of GC

View File

@ -49,6 +49,7 @@
#include "gc/shared/weakProcessor.hpp"
#include "gc/shared/workgroup.hpp"
#include "memory/filemap.hpp"
#include "memory/metaspaceCounters.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -151,7 +151,7 @@ class FilteringClosure: public ExtendedOopClosure {
template <class T> inline void do_oop_work(T* p);
public:
FilteringClosure(HeapWord* boundary, ExtendedOopClosure* cl) :
ExtendedOopClosure(cl->ref_processor()), _boundary(boundary),
ExtendedOopClosure(cl->ref_discoverer()), _boundary(boundary),
_cl(cl) {}
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);

View File

@ -0,0 +1,37 @@
/*
* Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_GC_SHARED_REFERENCEDISCOVERER_HPP
#define SHARE_GC_SHARED_REFERENCEDISCOVERER_HPP
#include "memory/allocation.hpp"
#include "memory/referenceType.hpp"
#include "oops/oopsHierarchy.hpp"
class ReferenceDiscoverer : public CHeapObj<mtGC> {
public:
virtual bool discover_reference(oop obj, ReferenceType type) = 0;
};
#endif // SHARE_GC_SHARED_REFERENCEDISCOVERER_HPP
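A hedged sketch of what an implementer of this new interface looks like; the class below is invented purely to show the shape of the contract (in this change the real implementer is ReferenceProcessor, as the later hunk shows).
// Illustrative only: count discovered references without taking them over.
class CountingDiscoverer : public ReferenceDiscoverer {
  size_t _count;
 public:
  CountingDiscoverer() : _count(0) { }
  virtual bool discover_reference(oop obj, ReferenceType type) {
    _count++;
    return false;  // false: let the caller continue normal processing
  }
  size_t count() const { return _count; }
};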

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,6 +25,7 @@
#ifndef SHARE_VM_GC_SHARED_REFERENCEPROCESSOR_HPP
#define SHARE_VM_GC_SHARED_REFERENCEPROCESSOR_HPP
#include "gc/shared/referenceDiscoverer.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
#include "gc/shared/referenceProcessorStats.hpp"
@ -166,7 +167,7 @@ public:
}
};
class ReferenceProcessor : public CHeapObj<mtGC> {
class ReferenceProcessor : public ReferenceDiscoverer {
private:
size_t total_count(DiscoveredList lists[]) const;
@ -405,7 +406,7 @@ class ReferenceProcessor : public CHeapObj<mtGC> {
void verify_list(DiscoveredList& ref_list);
// Discover a Reference object, using appropriate discovery criteria
bool discover_reference(oop obj, ReferenceType rt);
virtual bool discover_reference(oop obj, ReferenceType rt);
// Has discovered references that need handling
bool has_discovered_references();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -164,6 +164,13 @@ public:
static size_t refill_waste_limit_increment() { return TLABWasteIncrement; }
template <typename T> void addresses_do(T f) {
f(&_start);
f(&_top);
f(&_pf_top);
f(&_end);
}
// Code generation support
static ByteSize start_offset() { return byte_offset_of(ThreadLocalAllocBuffer, _start); }
static ByteSize end_offset() { return byte_offset_of(ThreadLocalAllocBuffer, _end ); }
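A hedged sketch of how the new addresses_do template could be driven; the callback below is invented, but its parameter type follows from the f(&_start) calls above, which hand out the addresses of HeapWord* fields.
// Illustrative only: visit each TLAB boundary slot.
static void visit_tlab_slot(HeapWord** slot) {
  // inspect or update *slot here
}
// ... later, for some ThreadLocalAllocBuffer& tlab:
// tlab.addresses_do(visit_tlab_slot);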

View File

@ -415,6 +415,34 @@ JRT_LEAF(void, JVMCIRuntime::monitorexit(JavaThread* thread, oopDesc* obj, Basic
}
JRT_END
// Object.notify() fast path, caller does slow path
JRT_LEAF(jboolean, JVMCIRuntime::object_notify(JavaThread *thread, oopDesc* obj))
// Very few notify/notifyAll operations find any threads on the waitset, so
// the dominant fast-path is to simply return.
// Relatedly, it's critical that notify/notifyAll be fast in order to
// reduce lock hold times.
if (!SafepointSynchronize::is_synchronizing()) {
if (ObjectSynchronizer::quick_notify(obj, thread, false)) {
return true;
}
}
return false; // caller must perform slow path
JRT_END
// Object.notifyAll() fast path, caller does slow path
JRT_LEAF(jboolean, JVMCIRuntime::object_notifyAll(JavaThread *thread, oopDesc* obj))
if (!SafepointSynchronize::is_synchronizing() ) {
if (ObjectSynchronizer::quick_notify(obj, thread, true)) {
return true;
}
}
return false; // caller must perform slow path
JRT_END
JRT_ENTRY(void, JVMCIRuntime::throw_and_post_jvmti_exception(JavaThread* thread, const char* exception, const char* message))
TempNewSymbol symbol = SymbolTable::new_symbol(exception, CHECK);
SharedRuntime::throw_and_post_jvmti_exception(thread, symbol, message);

View File

@ -139,6 +139,8 @@ class JVMCIRuntime: public AllStatic {
static address exception_handler_for_pc(JavaThread* thread);
static void monitorenter(JavaThread* thread, oopDesc* obj, BasicLock* lock);
static void monitorexit (JavaThread* thread, oopDesc* obj, BasicLock* lock);
static jboolean object_notify(JavaThread* thread, oopDesc* obj);
static jboolean object_notifyAll(JavaThread* thread, oopDesc* obj);
static void vm_error(JavaThread* thread, jlong where, jlong format, jlong value);
static oopDesc* load_and_clear_exception(JavaThread* thread);
static void log_printf(JavaThread* thread, oopDesc* format, jlong v1, jlong v2, jlong v3);

View File

@ -625,6 +625,8 @@
declare_function(JVMCIRuntime::exception_handler_for_pc) \
declare_function(JVMCIRuntime::monitorenter) \
declare_function(JVMCIRuntime::monitorexit) \
declare_function(JVMCIRuntime::object_notify) \
declare_function(JVMCIRuntime::object_notifyAll) \
declare_function(JVMCIRuntime::throw_and_post_jvmti_exception) \
declare_function(JVMCIRuntime::throw_klass_external_name_exception) \
declare_function(JVMCIRuntime::throw_class_cast_exception) \

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
#include "classfile/classLoaderData.hpp"
#include "classfile/classLoaderData.inline.hpp"
#include "classfile/moduleEntry.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/collectedHeap.hpp"

View File

@ -31,7 +31,7 @@
class CodeBlob;
class nmethod;
class ReferenceProcessor;
class ReferenceDiscoverer;
class DataLayout;
class KlassClosure;
class ClassLoaderData;
@ -60,17 +60,17 @@ extern DoNothingClosure do_nothing_cl;
// pollute the OopClosure interface.
class ExtendedOopClosure : public OopClosure {
private:
ReferenceProcessor* _ref_processor;
ReferenceDiscoverer* _ref_discoverer;
protected:
ExtendedOopClosure(ReferenceProcessor* rp) : _ref_processor(rp) { }
ExtendedOopClosure() : _ref_processor(NULL) { }
ExtendedOopClosure(ReferenceDiscoverer* rd) : _ref_discoverer(rd) { }
ExtendedOopClosure() : _ref_discoverer(NULL) { }
~ExtendedOopClosure() { }
void set_ref_processor_internal(ReferenceProcessor* rp) { _ref_processor = rp; }
void set_ref_discoverer_internal(ReferenceDiscoverer* rd) { _ref_discoverer = rd; }
public:
ReferenceProcessor* ref_processor() const { return _ref_processor; }
ReferenceDiscoverer* ref_discoverer() const { return _ref_discoverer; }
// Iteration of InstanceRefKlasses differ depending on the closure,
// the below enum describes the different alternatives.
@ -165,7 +165,7 @@ class MetadataAwareOopClosure: public ExtendedOopClosure {
public:
MetadataAwareOopClosure() : ExtendedOopClosure() { }
MetadataAwareOopClosure(ReferenceProcessor* rp) : ExtendedOopClosure(rp) { }
MetadataAwareOopClosure(ReferenceDiscoverer* rd) : ExtendedOopClosure(rd) { }
bool do_metadata_nv() { return true; }
virtual bool do_metadata() { return do_metadata_nv(); }

View File

@ -46,6 +46,7 @@
#include "memory/filemap.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/metaspaceCounters.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"

View File

@ -74,12 +74,12 @@ class CompiledICHolder : public CHeapObj<mtCompiler> {
CompiledICHolder* next() { return _next; }
void set_next(CompiledICHolder* n) { _next = n; }
inline bool is_loader_alive(BoolObjectClosure* is_alive) {
inline bool is_loader_alive() {
Klass* k = _is_metadata_method ? ((Method*)_holder_metadata)->method_holder() : (Klass*)_holder_metadata;
if (!k->is_loader_alive(is_alive)) {
if (!k->is_loader_alive()) {
return false;
}
if (!_holder_klass->is_loader_alive(is_alive)) {
if (!_holder_klass->is_loader_alive()) {
return false;
}
return true;

View File

@ -28,6 +28,7 @@
#include "classfile/classFileParser.hpp"
#include "classfile/classFileStream.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/classLoaderData.inline.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/moduleEntry.hpp"
#include "classfile/systemDictionary.hpp"
@ -1891,22 +1892,22 @@ bool InstanceKlass::is_dependent_nmethod(nmethod* nm) {
}
#endif //PRODUCT
void InstanceKlass::clean_weak_instanceklass_links(BoolObjectClosure* is_alive) {
clean_implementors_list(is_alive);
clean_method_data(is_alive);
void InstanceKlass::clean_weak_instanceklass_links() {
clean_implementors_list();
clean_method_data();
// Since GC iterates InstanceKlasses sequentially, it is safe to remove stale entries here.
DependencyContext dep_context(&_dep_context);
dep_context.expunge_stale_entries();
}
void InstanceKlass::clean_implementors_list(BoolObjectClosure* is_alive) {
assert(class_loader_data()->is_alive(), "this klass should be live");
void InstanceKlass::clean_implementors_list() {
assert(is_loader_alive(), "this klass should be live");
if (is_interface()) {
if (ClassUnloading) {
Klass* impl = implementor();
if (impl != NULL) {
if (!impl->is_loader_alive(is_alive)) {
if (!impl->is_loader_alive()) {
// remove this guy
Klass** klass = adr_implementor();
assert(klass != NULL, "null klass");
@ -1919,11 +1920,11 @@ void InstanceKlass::clean_implementors_list(BoolObjectClosure* is_alive) {
}
}
void InstanceKlass::clean_method_data(BoolObjectClosure* is_alive) {
void InstanceKlass::clean_method_data() {
for (int m = 0; m < methods()->length(); m++) {
MethodData* mdo = methods()->at(m)->method_data();
if (mdo != NULL) {
mdo->clean_method_data(is_alive);
mdo->clean_method_data(/*always_clean*/false);
}
}
}

View File

@ -1148,9 +1148,9 @@ public:
void adjust_default_methods(InstanceKlass* holder, bool* trace_name_printed);
#endif // INCLUDE_JVMTI
void clean_weak_instanceklass_links(BoolObjectClosure* is_alive);
void clean_implementors_list(BoolObjectClosure* is_alive);
void clean_method_data(BoolObjectClosure* is_alive);
void clean_weak_instanceklass_links();
void clean_implementors_list();
void clean_method_data();
// Explicit metaspace deallocation of fields
// For RedefineClasses and class file parsing errors, we need to deallocate

View File

@ -63,14 +63,14 @@ void InstanceRefKlass::do_discovered(oop obj, OopClosureType* closure, Contains&
template <typename T, class OopClosureType>
bool InstanceRefKlass::try_discover(oop obj, ReferenceType type, OopClosureType* closure) {
ReferenceProcessor* rp = closure->ref_processor();
if (rp != NULL) {
ReferenceDiscoverer* rd = closure->ref_discoverer();
if (rd != NULL) {
T referent_oop = RawAccess<>::oop_load((T*)java_lang_ref_Reference::referent_addr_raw(obj));
if (!CompressedOops::is_null(referent_oop)) {
oop referent = CompressedOops::decode_not_null(referent_oop);
if (!referent->is_gc_marked()) {
// Only try to discover if not yet marked.
return rp->discover_reference(obj, type);
return rd->discover_reference(obj, type);
}
}
}

View File

@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
#include "classfile/classLoaderData.inline.hpp"
#include "classfile/dictionary.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/systemDictionary.hpp"
@ -381,22 +382,7 @@ void Klass::append_to_sibling_list() {
debug_only(verify();)
}
bool Klass::is_loader_alive(BoolObjectClosure* is_alive) {
#ifdef ASSERT
// The class is alive iff the class loader is alive.
oop loader = class_loader();
bool loader_alive = (loader == NULL) || is_alive->do_object_b(loader);
#endif // ASSERT
// The class is alive if its mirror is alive (which should be marked if the
// loader is alive) unless it's an anonymous class.
bool mirror_alive = is_alive->do_object_b(java_mirror());
assert(!mirror_alive || loader_alive, "loader must be alive if the mirror is"
" but not the other way around with anonymous classes");
return mirror_alive;
}
void Klass::clean_weak_klass_links(BoolObjectClosure* is_alive, bool clean_alive_klasses) {
void Klass::clean_weak_klass_links(bool clean_alive_klasses) {
if (!ClassUnloading) {
return;
}
@ -408,11 +394,11 @@ void Klass::clean_weak_klass_links(BoolObjectClosure* is_alive, bool clean_alive
while (!stack.is_empty()) {
Klass* current = stack.pop();
assert(current->is_loader_alive(is_alive), "just checking, this should be live");
assert(current->is_loader_alive(), "just checking, this should be live");
// Find and set the first alive subklass
Klass* sub = current->subklass();
while (sub != NULL && !sub->is_loader_alive(is_alive)) {
while (sub != NULL && !sub->is_loader_alive()) {
#ifndef PRODUCT
if (log_is_enabled(Trace, class, unload)) {
ResourceMark rm;
@ -428,7 +414,7 @@ void Klass::clean_weak_klass_links(BoolObjectClosure* is_alive, bool clean_alive
// Find and set the first alive sibling
Klass* sibling = current->next_sibling();
while (sibling != NULL && !sibling->is_loader_alive(is_alive)) {
while (sibling != NULL && !sibling->is_loader_alive()) {
if (log_is_enabled(Trace, class, unload)) {
ResourceMark rm;
log_trace(class, unload)("[Unlinking class (sibling) %s]", sibling->external_name());
@ -443,12 +429,12 @@ void Klass::clean_weak_klass_links(BoolObjectClosure* is_alive, bool clean_alive
// Clean the implementors list and method data.
if (clean_alive_klasses && current->is_instance_klass()) {
InstanceKlass* ik = InstanceKlass::cast(current);
ik->clean_weak_instanceklass_links(is_alive);
ik->clean_weak_instanceklass_links();
// JVMTI RedefineClasses creates previous versions that are not in
// the class hierarchy, so process them here.
while ((ik = ik->previous_versions()) != NULL) {
ik->clean_weak_instanceklass_links(is_alive);
ik->clean_weak_instanceklass_links();
}
}
}

View File

@ -25,6 +25,7 @@
#ifndef SHARE_VM_OOPS_KLASS_HPP
#define SHARE_VM_OOPS_KLASS_HPP
#include "classfile/classLoaderData.hpp"
#include "gc/shared/specialized_oop_closures.hpp"
#include "memory/iterator.hpp"
#include "memory/memRegion.hpp"
@ -52,7 +53,6 @@
// Forward declarations.
template <class T> class Array;
template <class T> class GrowableArray;
class ClassLoaderData;
class fieldDescriptor;
class KlassSizeStats;
class klassVtable;
@ -634,13 +634,12 @@ protected:
virtual MetaspaceObj::Type type() const { return ClassType; }
// Iff the class loader (or mirror for anonymous classes) is alive the
// Klass is considered alive.
// The is_alive closure passed in depends on the Garbage Collector used.
bool is_loader_alive(BoolObjectClosure* is_alive);
// Klass is considered alive. Has already been marked as unloading.
bool is_loader_alive() const { return !class_loader_data()->is_unloading(); }
static void clean_weak_klass_links(BoolObjectClosure* is_alive, bool clean_alive_klasses = true);
static void clean_subklass_tree(BoolObjectClosure* is_alive) {
clean_weak_klass_links(is_alive, false /* clean_alive_klasses */);
static void clean_weak_klass_links(bool clean_alive_klasses = true);
static void clean_subklass_tree() {
clean_weak_klass_links(false /* clean_alive_klasses */);
}
// GC specific object visitors

View File

@ -71,9 +71,9 @@ void DataLayout::initialize(u1 tag, u2 bci, int cell_count) {
}
}
void DataLayout::clean_weak_klass_links(BoolObjectClosure* cl) {
void DataLayout::clean_weak_klass_links(bool always_clean) {
ResourceMark m;
data_in()->clean_weak_klass_links(cl);
data_in()->clean_weak_klass_links(always_clean);
}
@ -315,23 +315,20 @@ void VirtualCallTypeData::post_initialize(BytecodeStream* stream, MethodData* md
}
}
bool TypeEntries::is_loader_alive(BoolObjectClosure* is_alive_cl, intptr_t p) {
Klass* k = (Klass*)klass_part(p);
return k != NULL && k->is_loader_alive(is_alive_cl);
}
void TypeStackSlotEntries::clean_weak_klass_links(BoolObjectClosure* is_alive_cl) {
void TypeStackSlotEntries::clean_weak_klass_links(bool always_clean) {
for (int i = 0; i < _number_of_entries; i++) {
intptr_t p = type(i);
if (!is_loader_alive(is_alive_cl, p)) {
Klass* k = (Klass*)klass_part(p);
if (k != NULL && (always_clean || !k->is_loader_alive())) {
set_type(i, with_status((Klass*)NULL, p));
}
}
}
void ReturnTypeEntry::clean_weak_klass_links(BoolObjectClosure* is_alive_cl) {
void ReturnTypeEntry::clean_weak_klass_links(bool always_clean) {
intptr_t p = type();
if (!is_loader_alive(is_alive_cl, p)) {
Klass* k = (Klass*)klass_part(p);
if (k != NULL && (always_clean || !k->is_loader_alive())) {
set_type(with_status((Klass*)NULL, p));
}
}
@ -408,21 +405,21 @@ void VirtualCallTypeData::print_data_on(outputStream* st, const char* extra) con
// that the check is reached, and a series of (Klass*, count) pairs
// which are used to store a type profile for the receiver of the check.
void ReceiverTypeData::clean_weak_klass_links(BoolObjectClosure* is_alive_cl) {
void ReceiverTypeData::clean_weak_klass_links(bool always_clean) {
for (uint row = 0; row < row_limit(); row++) {
Klass* p = receiver(row);
if (p != NULL && !p->is_loader_alive(is_alive_cl)) {
if (p != NULL && (always_clean || !p->is_loader_alive())) {
clear_row(row);
}
}
}
#if INCLUDE_JVMCI
void VirtualCallData::clean_weak_klass_links(BoolObjectClosure* is_alive_cl) {
ReceiverTypeData::clean_weak_klass_links(is_alive_cl);
void VirtualCallData::clean_weak_klass_links(bool always_clean) {
ReceiverTypeData::clean_weak_klass_links(always_clean);
for (uint row = 0; row < method_row_limit(); row++) {
Method* p = method(row);
if (p != NULL && !p->method_holder()->is_loader_alive(is_alive_cl)) {
if (p != NULL && (always_clean || !p->method_holder()->is_loader_alive())) {
clear_method_row(row);
}
}
@ -1669,12 +1666,11 @@ public:
// Check for entries that reference an unloaded method
class CleanExtraDataKlassClosure : public CleanExtraDataClosure {
private:
BoolObjectClosure* _is_alive;
bool _always_clean;
public:
CleanExtraDataKlassClosure(BoolObjectClosure* is_alive) : _is_alive(is_alive) {}
CleanExtraDataKlassClosure(bool always_clean) : _always_clean(always_clean) {}
bool is_live(Method* m) {
return m->method_holder()->is_loader_alive(_is_alive);
return !(_always_clean) && m->method_holder()->is_loader_alive();
}
};
@ -1757,19 +1753,19 @@ void MethodData::verify_extra_data_clean(CleanExtraDataClosure* cl) {
#endif
}
void MethodData::clean_method_data(BoolObjectClosure* is_alive) {
void MethodData::clean_method_data(bool always_clean) {
ResourceMark rm;
for (ProfileData* data = first_data();
is_valid(data);
data = next_data(data)) {
data->clean_weak_klass_links(is_alive);
data->clean_weak_klass_links(always_clean);
}
ParametersTypeData* parameters = parameters_type_data();
if (parameters != NULL) {
parameters->clean_weak_klass_links(is_alive);
parameters->clean_weak_klass_links(always_clean);
}
CleanExtraDataKlassClosure cl(is_alive);
CleanExtraDataKlassClosure cl(always_clean);
clean_extra_data(&cl);
verify_extra_data_clean(&cl);
}

View File

@ -254,7 +254,7 @@ public:
ProfileData* data_in();
// GC support
void clean_weak_klass_links(BoolObjectClosure* cl);
void clean_weak_klass_links(bool always_clean);
// Redefinition support
void clean_weak_method_links();
@ -505,7 +505,7 @@ public:
virtual void post_initialize(BytecodeStream* stream, MethodData* mdo) {}
// GC support
virtual void clean_weak_klass_links(BoolObjectClosure* is_alive_closure) {}
virtual void clean_weak_klass_links(bool always_clean) {}
// Redefinition support
virtual void clean_weak_method_links() {}
@ -820,9 +820,6 @@ public:
static void print_klass(outputStream* st, intptr_t k);
// GC support
static bool is_loader_alive(BoolObjectClosure* is_alive_cl, intptr_t p);
protected:
// ProfileData object these entries are part of
ProfileData* _pd;
@ -930,7 +927,7 @@ public:
}
// GC support
void clean_weak_klass_links(BoolObjectClosure* is_alive_closure);
void clean_weak_klass_links(bool always_clean);
void print_data_on(outputStream* st) const;
};
@ -973,7 +970,7 @@ public:
}
// GC support
void clean_weak_klass_links(BoolObjectClosure* is_alive_closure);
void clean_weak_klass_links(bool always_clean);
void print_data_on(outputStream* st) const;
};
@ -1157,12 +1154,12 @@ public:
}
// GC support
virtual void clean_weak_klass_links(BoolObjectClosure* is_alive_closure) {
virtual void clean_weak_klass_links(bool always_clean) {
if (has_arguments()) {
_args.clean_weak_klass_links(is_alive_closure);
_args.clean_weak_klass_links(always_clean);
}
if (has_return()) {
_ret.clean_weak_klass_links(is_alive_closure);
_ret.clean_weak_klass_links(always_clean);
}
}
@ -1303,7 +1300,7 @@ public:
}
// GC support
virtual void clean_weak_klass_links(BoolObjectClosure* is_alive_closure);
virtual void clean_weak_klass_links(bool always_clean);
#ifdef CC_INTERP
static int receiver_type_data_size_in_bytes() {
@ -1433,7 +1430,7 @@ public:
}
// GC support
virtual void clean_weak_klass_links(BoolObjectClosure* is_alive_closure);
virtual void clean_weak_klass_links(bool always_clean);
// Redefinition support
virtual void clean_weak_method_links();
@ -1562,13 +1559,13 @@ public:
}
// GC support
virtual void clean_weak_klass_links(BoolObjectClosure* is_alive_closure) {
ReceiverTypeData::clean_weak_klass_links(is_alive_closure);
virtual void clean_weak_klass_links(bool always_clean) {
ReceiverTypeData::clean_weak_klass_links(always_clean);
if (has_arguments()) {
_args.clean_weak_klass_links(is_alive_closure);
_args.clean_weak_klass_links(always_clean);
}
if (has_return()) {
_ret.clean_weak_klass_links(is_alive_closure);
_ret.clean_weak_klass_links(always_clean);
}
}
@ -2021,8 +2018,8 @@ public:
_parameters.set_type(i, TypeEntries::with_status((intptr_t)k, current));
}
virtual void clean_weak_klass_links(BoolObjectClosure* is_alive_closure) {
_parameters.clean_weak_klass_links(is_alive_closure);
virtual void clean_weak_klass_links(bool always_clean) {
_parameters.clean_weak_klass_links(always_clean);
}
virtual void print_data_on(outputStream* st, const char* extra = NULL) const;
@ -2610,7 +2607,7 @@ public:
static bool profile_parameters();
static bool profile_return_jsr292_only();
void clean_method_data(BoolObjectClosure* is_alive);
void clean_method_data(bool always_clean);
void clean_weak_method_links();
DEBUG_ONLY(void verify_clean_weak_method_links();)
Mutex* extra_data_lock() { return &_extra_data_lock; }

View File

@ -46,7 +46,7 @@ public:
inline oop resolve() const;
// Used only for removing handle.
oop* ptr_raw() { return _obj; }
oop* ptr_raw() const { return _obj; }
};
#endif // SHARE_VM_OOPS_OOPHANDLE_HPP

View File

@ -51,8 +51,8 @@ template <WeakHandleType T>
void WeakHandle<T>::release() const {
// Only release if the pointer to the object has been created.
if (_obj != NULL) {
// Clear the WeakHandle. For class loader data race, the handle may not have
// been previously cleared by GC.
// Clear the WeakHandle. For race in creating ClassLoaderData, we can release this
// WeakHandle before it is cleared by GC.
RootAccess<ON_PHANTOM_OOP_REF>::oop_store(_obj, (oop)NULL);
get_storage()->release(_obj);
}

View File

@ -63,4 +63,6 @@ class WeakHandle {
void print_on(outputStream* st) const;
};
typedef WeakHandle<vm_class_loader_data> ClassLoaderWeakHandle;
#endif // SHARE_VM_OOPS_WEAKHANDLE_HPP

View File

@ -2768,15 +2768,14 @@ JNIEXPORT int jio_printf(const char *fmt, ...) {
return len;
}
// HotSpot specific jio method
void jio_print(const char* s) {
void jio_print(const char* s, size_t len) {
// Try to make this function as atomic as possible.
if (Arguments::vfprintf_hook() != NULL) {
jio_fprintf(defaultStream::output_stream(), "%s", s);
jio_fprintf(defaultStream::output_stream(), "%.*s", (int)len, s);
} else {
// Make an unused local variable to avoid warning from gcc 4.x compiler.
size_t count = ::write(defaultStream::output_fd(), s, (int)strlen(s));
size_t count = ::write(defaultStream::output_fd(), s, (int)len);
}
}
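A small hedged illustration of the new length-bounded contract; the call site below is invented. Because the body now uses "%.*s" and writes an explicit length, a caller can emit exactly len bytes of a buffer that is not necessarily NUL-terminated at that offset.
// Illustrative only:
char buf[128];
size_t n = format_message(buf, sizeof(buf));  // hypothetical helper returning bytes written
jio_print(buf, n);                            // emits exactly n bytes of buf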

View File

@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
#include "classfile/classLoaderData.inline.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "memory/universe.hpp"

View File

@ -30,6 +30,7 @@
#include "oops/oop.inline.hpp"
#include "oops/method.hpp"
#include "oops/symbol.hpp"
#include "oops/weakHandle.inline.hpp"
#include "prims/resolvedMethodTable.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/mutexLocker.hpp"
@ -39,7 +40,7 @@
oop ResolvedMethodEntry::object() {
return RootAccess<ON_PHANTOM_OOP_REF>::oop_load(literal_addr());
return literal().resolve();
}
oop ResolvedMethodEntry::object_no_keepalive() {
@ -48,11 +49,11 @@ oop ResolvedMethodEntry::object_no_keepalive() {
// not leak out past a thread transition where a safepoint can happen.
// A subsequent oop_load without AS_NO_KEEPALIVE (the object() accessor)
// keeps the oop alive before doing so.
return RootAccess<ON_PHANTOM_OOP_REF | AS_NO_KEEPALIVE>::oop_load(literal_addr());
return literal().peek();
}
ResolvedMethodTable::ResolvedMethodTable()
: Hashtable<oop, mtClass>(_table_size, sizeof(ResolvedMethodEntry)) { }
: Hashtable<ClassLoaderWeakHandle, mtClass>(_table_size, sizeof(ResolvedMethodEntry)) { }
oop ResolvedMethodTable::lookup(int index, unsigned int hash, Method* method) {
for (ResolvedMethodEntry* p = bucket(index); p != NULL; p = p->next()) {
@ -62,7 +63,7 @@ oop ResolvedMethodTable::lookup(int index, unsigned int hash, Method* method) {
oop target = p->object_no_keepalive();
// The method is in the table as a target already
if (java_lang_invoke_ResolvedMethodName::vmtarget(target) == method) {
if (target != NULL && java_lang_invoke_ResolvedMethodName::vmtarget(target) == method) {
ResourceMark rm;
log_debug(membername, table) ("ResolvedMethod entry found for %s index %d",
method->name_and_sig_as_C_string(), index);
@ -88,7 +89,7 @@ oop ResolvedMethodTable::lookup(Method* method) {
return lookup(index, hash, method);
}
oop ResolvedMethodTable::basic_add(Method* method, oop rmethod_name) {
oop ResolvedMethodTable::basic_add(Method* method, Handle rmethod_name) {
assert_locked_or_safepoint(ResolvedMethodTable_lock);
unsigned int hash = compute_hash(method);
@ -100,12 +101,13 @@ oop ResolvedMethodTable::basic_add(Method* method, oop rmethod_name) {
return entry;
}
ResolvedMethodEntry* p = (ResolvedMethodEntry*) Hashtable<oop, mtClass>::new_entry(hash, rmethod_name);
Hashtable<oop, mtClass>::add_entry(index, p);
ClassLoaderWeakHandle w = ClassLoaderWeakHandle::create(rmethod_name);
ResolvedMethodEntry* p = (ResolvedMethodEntry*) Hashtable<ClassLoaderWeakHandle, mtClass>::new_entry(hash, w);
Hashtable<ClassLoaderWeakHandle, mtClass>::add_entry(index, p);
ResourceMark rm;
log_debug(membername, table) ("ResolvedMethod entry added for %s index %d",
method->name_and_sig_as_C_string(), index);
return rmethod_name;
return rmethod_name();
}
ResolvedMethodTable* ResolvedMethodTable::_the_table = NULL;
@ -134,7 +136,7 @@ oop ResolvedMethodTable::add_method(Handle resolved_method_name) {
// have any membernames in the table.
method->method_holder()->set_has_resolved_methods();
return _the_table->basic_add(method, resolved_method_name());
return _the_table->basic_add(method, resolved_method_name);
}
// Removing entries
@ -143,7 +145,7 @@ int ResolvedMethodTable::_oops_counted = 0;
// Serially invoke removed unused oops from the table.
// This is done late during GC.
void ResolvedMethodTable::unlink(BoolObjectClosure* is_alive) {
void ResolvedMethodTable::unlink() {
_oops_removed = 0;
_oops_counted = 0;
for (int i = 0; i < _the_table->table_size(); ++i) {
@ -151,38 +153,27 @@ void ResolvedMethodTable::unlink(BoolObjectClosure* is_alive) {
ResolvedMethodEntry* entry = _the_table->bucket(i);
while (entry != NULL) {
_oops_counted++;
if (is_alive->do_object_b(entry->object_no_keepalive())) {
oop l = entry->object_no_keepalive();
if (l != NULL) {
p = entry->next_addr();
} else {
// Entry has been removed.
_oops_removed++;
if (log_is_enabled(Debug, membername, table)) {
Method* m = (Method*)java_lang_invoke_ResolvedMethodName::vmtarget(entry->object_no_keepalive());
ResourceMark rm;
log_debug(membername, table) ("ResolvedMethod entry removed for %s index %d",
m->name_and_sig_as_C_string(), i);
log_debug(membername, table) ("ResolvedMethod entry removed for index %d", i);
}
entry->literal().release();
*p = entry->next();
_the_table->free_entry(entry);
}
// get next entry
entry = (ResolvedMethodEntry*)HashtableEntry<oop, mtClass>::make_ptr(*p);
entry = (ResolvedMethodEntry*)HashtableEntry<ClassLoaderWeakHandle, mtClass>::make_ptr(*p);
}
}
log_debug(membername, table) ("ResolvedMethod entries counted %d removed %d",
_oops_counted, _oops_removed);
}
// Serially invoke "f->do_oop" on the locations of all oops in the table.
void ResolvedMethodTable::oops_do(OopClosure* f) {
for (int i = 0; i < _the_table->table_size(); ++i) {
ResolvedMethodEntry* entry = _the_table->bucket(i);
while (entry != NULL) {
f->do_oop(entry->literal_addr());
entry = entry->next();
}
}
}
#ifndef PRODUCT
void ResolvedMethodTable::print() {
for (int i = 0; i < table_size(); ++i) {
@ -190,9 +181,11 @@ void ResolvedMethodTable::print() {
while (entry != NULL) {
tty->print("%d : ", i);
oop rmethod_name = entry->object_no_keepalive();
rmethod_name->print();
Method* m = (Method*)java_lang_invoke_ResolvedMethodName::vmtarget(rmethod_name);
m->print();
if (rmethod_name != NULL) {
rmethod_name->print();
Method* m = (Method*)java_lang_invoke_ResolvedMethodName::vmtarget(rmethod_name);
m->print();
}
entry = entry->next();
}
}
@ -205,9 +198,15 @@ void ResolvedMethodTable::adjust_method_entries(bool * trace_name_printed) {
assert(SafepointSynchronize::is_at_safepoint(), "only called at safepoint");
// For each entry in RMT, change to new method
for (int i = 0; i < _the_table->table_size(); ++i) {
ResolvedMethodEntry* entry = _the_table->bucket(i);
while (entry != NULL) {
for (ResolvedMethodEntry* entry = _the_table->bucket(i);
entry != NULL;
entry = entry->next()) {
oop mem_name = entry->object_no_keepalive();
// except ones removed
if (mem_name == NULL) {
continue;
}
Method* old_method = (Method*)java_lang_invoke_ResolvedMethodName::vmtarget(mem_name);
if (old_method->is_old()) {
@ -235,7 +234,6 @@ void ResolvedMethodTable::adjust_method_entries(bool * trace_name_printed) {
("ResolvedMethod method update: %s(%s)",
new_method->name()->as_C_string(), new_method->signature()->as_C_string());
}
entry = entry->next();
}
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,22 +26,23 @@
#define SHARE_VM_PRIMS_RESOLVEDMETHOD_HPP
#include "oops/symbol.hpp"
#include "oops/weakHandle.hpp"
#include "utilities/hashtable.hpp"
// Hashtable to record Method* used in ResolvedMethods, via. ResolvedMethod oops.
// This is needed for redefinition to replace Method* with redefined versions.
// Entry in a ResolvedMethodTable, mapping a single oop of java_lang_invoke_ResolvedMethodName which
// holds JVM Method* in vmtarget.
// Entry in a ResolvedMethodTable, mapping a ClassLoaderWeakHandle for a single oop of
// java_lang_invoke_ResolvedMethodName which holds JVM Method* in vmtarget.
class ResolvedMethodEntry : public HashtableEntry<oop, mtClass> {
class ResolvedMethodEntry : public HashtableEntry<ClassLoaderWeakHandle, mtClass> {
public:
ResolvedMethodEntry* next() const {
return (ResolvedMethodEntry*)HashtableEntry<oop, mtClass>::next();
return (ResolvedMethodEntry*)HashtableEntry<ClassLoaderWeakHandle, mtClass>::next();
}
ResolvedMethodEntry** next_addr() {
return (ResolvedMethodEntry**)HashtableEntry<oop, mtClass>::next_addr();
return (ResolvedMethodEntry**)HashtableEntry<ClassLoaderWeakHandle, mtClass>::next_addr();
}
oop object();
@ -50,7 +51,7 @@ class ResolvedMethodEntry : public HashtableEntry<oop, mtClass> {
void print_on(outputStream* st) const;
};
class ResolvedMethodTable : public Hashtable<oop, mtClass> {
class ResolvedMethodTable : public Hashtable<ClassLoaderWeakHandle, mtClass> {
enum Constants {
_table_size = 1007
};
@ -61,11 +62,11 @@ class ResolvedMethodTable : public Hashtable<oop, mtClass> {
static ResolvedMethodTable* _the_table;
private:
ResolvedMethodEntry* bucket(int i) {
return (ResolvedMethodEntry*) Hashtable<oop, mtClass>::bucket(i);
return (ResolvedMethodEntry*) Hashtable<ClassLoaderWeakHandle, mtClass>::bucket(i);
}
ResolvedMethodEntry** bucket_addr(int i) {
return (ResolvedMethodEntry**) Hashtable<oop, mtClass>::bucket_addr(i);
return (ResolvedMethodEntry**) Hashtable<ClassLoaderWeakHandle, mtClass>::bucket_addr(i);
}
unsigned int compute_hash(Method* method);
@ -75,7 +76,7 @@ private:
oop lookup(Method* method);
// must be done under ResolvedMethodTable_lock
oop basic_add(Method* method, oop rmethod_name);
oop basic_add(Method* method, Handle rmethod_name);
public:
ResolvedMethodTable();
@ -95,8 +96,7 @@ public:
#endif // INCLUDE_JVMTI
// Cleanup cleared entries
static void unlink(BoolObjectClosure* is_alive);
static void oops_do(OopClosure* f);
static void unlink();
#ifndef PRODUCT
void print();

View File

@ -933,8 +933,6 @@ WB_ENTRY(jint, WB_MatchesMethod(JNIEnv* env, jobject o, jobject method, jstring
return result;
WB_END
static AlwaysFalseClosure always_false;
WB_ENTRY(void, WB_ClearMethodState(JNIEnv* env, jobject o, jobject method))
jmethodID jmid = reflected_method_to_jmid(thread, env, method);
CHECK_JNI_EXCEPTION(env);
@ -951,7 +949,7 @@ WB_ENTRY(void, WB_ClearMethodState(JNIEnv* env, jobject o, jobject method))
mdo->set_arg_modified(i, 0);
}
MutexLockerEx mu(mdo->extra_data_lock());
mdo->clean_method_data(&always_false);
mdo->clean_method_data(/*always_clean*/true);
}
mh->clear_not_c1_compilable();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -106,7 +106,8 @@ class Monitor : public CHeapObj<mtInternal> {
access = event + 1,
special = access + 2,
suspend_resume = special + 1,
leaf = suspend_resume + 2,
vmweak = suspend_resume + 2,
leaf = vmweak + 2,
safepoint = leaf + 10,
barrier = safepoint + 1,
nonleaf = barrier + 1,

View File

@ -182,6 +182,9 @@ void mutex_init() {
def(CGC_lock , PaddedMonitor, special, true, Monitor::_safepoint_check_never); // coordinate between fore- and background GC
def(STS_lock , PaddedMonitor, leaf, true, Monitor::_safepoint_check_never);
def(VMWeakAlloc_lock , PaddedMutex , vmweak, true, Monitor::_safepoint_check_never);
def(VMWeakActive_lock , PaddedMutex , vmweak-1, true, Monitor::_safepoint_check_never);
if (UseConcMarkSweepGC || UseG1GC) {
def(FullGCCount_lock , PaddedMonitor, leaf, true, Monitor::_safepoint_check_never); // in support of ExplicitGCInvokesConcurrent
}
@ -262,8 +265,6 @@ void mutex_init() {
def(JNIGlobalActive_lock , PaddedMutex , nonleaf-1, true, Monitor::_safepoint_check_never);
def(JNIWeakAlloc_lock , PaddedMutex , nonleaf, true, Monitor::_safepoint_check_never);
def(JNIWeakActive_lock , PaddedMutex , nonleaf-1, true, Monitor::_safepoint_check_never);
def(VMWeakAlloc_lock , PaddedMutex , nonleaf, true, Monitor::_safepoint_check_never);
def(VMWeakActive_lock , PaddedMutex , nonleaf-1, true, Monitor::_safepoint_check_never);
def(JNICritical_lock , PaddedMonitor, nonleaf, true, Monitor::_safepoint_check_always); // used for JNI critical regions
def(AdapterHandlerLibrary_lock , PaddedMutex , nonleaf, true, Monitor::_safepoint_check_always);

View File

@ -1495,7 +1495,6 @@ void WatcherThread::print_on(outputStream* st) const {
jlong* JavaThread::_jvmci_old_thread_counters;
bool jvmci_counters_include(JavaThread* thread) {
oop threadObj = thread->threadObj();
return !JVMCICountersExcludeCompiler || !thread->is_Compiler_thread();
}

View File

@ -543,7 +543,7 @@ typedef PaddedEnd<ObjectMonitor> PaddedObjectMonitor;
/*******************/ \
/* ClassLoaderData */ \
/*******************/ \
nonstatic_field(ClassLoaderData, _class_loader, oop) \
nonstatic_field(ClassLoaderData, _class_loader, OopHandle) \
nonstatic_field(ClassLoaderData, _next, ClassLoaderData*) \
volatile_nonstatic_field(ClassLoaderData, _klasses, Klass*) \
nonstatic_field(ClassLoaderData, _is_anonymous, bool) \

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -53,7 +53,7 @@
len = name->utf8_length(); \
} \
HOTSPOT_CLASS_##type( /* type = unloaded, loaded */ \
data, len, (void*)(clss)->class_loader(), (shared)); \
data, len, (void*)(clss)->class_loader_data(), (shared)); \
}
#else // ndef DTRACE_ENABLED

View File

@ -931,7 +931,7 @@ void CodeCacheDCmd::execute(DCmdSource source, TRAPS) {
//---< BEGIN >--- CodeHeap State Analytics.
CodeHeapAnalyticsDCmd::CodeHeapAnalyticsDCmd(outputStream* output, bool heap) :
DCmdWithParser(output, heap),
_function("function", "Function to be performed (aggregate, UsedSpace, FreeSpace, MethodCount, MethodSpace, MethodAge, discard", "STRING", false, "all"),
_function("function", "Function to be performed (aggregate, UsedSpace, FreeSpace, MethodCount, MethodSpace, MethodAge, MethodNames, discard", "STRING", false, "all"),
_granularity("granularity", "Detail level - smaller value -> more detail", "STRING", false, "4096") {
_dcmdparser.add_dcmd_argument(&_function);
_dcmdparser.add_dcmd_argument(&_granularity);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,7 +25,7 @@
#include "precompiled.hpp"
#include "trace/traceStream.hpp"
#if INCLUDE_TRACE
#include "classfile/classLoaderData.hpp"
#include "classfile/classLoaderData.inline.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/klass.hpp"

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -72,7 +72,7 @@ class ArenaBitMapAllocator : StackObj {
};
template <class Allocator>
BitMap::bm_word_t* BitMap::reallocate(const Allocator& allocator, bm_word_t* old_map, idx_t old_size_in_bits, idx_t new_size_in_bits) {
BitMap::bm_word_t* BitMap::reallocate(const Allocator& allocator, bm_word_t* old_map, idx_t old_size_in_bits, idx_t new_size_in_bits, bool clear) {
size_t old_size_in_words = calc_size_in_words(old_size_in_bits);
size_t new_size_in_words = calc_size_in_words(new_size_in_bits);
@ -86,7 +86,7 @@ BitMap::bm_word_t* BitMap::reallocate(const Allocator& allocator, bm_word_t* old
MIN2(old_size_in_words, new_size_in_words));
}
if (new_size_in_words > old_size_in_words) {
if (clear && new_size_in_words > old_size_in_words) {
clear_range_of_words(map, old_size_in_words, new_size_in_words);
}
}
@ -99,9 +99,9 @@ BitMap::bm_word_t* BitMap::reallocate(const Allocator& allocator, bm_word_t* old
}
template <class Allocator>
bm_word_t* BitMap::allocate(const Allocator& allocator, idx_t size_in_bits) {
bm_word_t* BitMap::allocate(const Allocator& allocator, idx_t size_in_bits, bool clear) {
// Reuse reallocate to ensure that the new memory is cleared.
return reallocate(allocator, NULL, 0, size_in_bits);
return reallocate(allocator, NULL, 0, size_in_bits, clear);
}
template <class Allocator>
@ -153,8 +153,8 @@ ArenaBitMap::ArenaBitMap(Arena* arena, idx_t size_in_bits)
: BitMap(allocate(ArenaBitMapAllocator(arena), size_in_bits), size_in_bits) {
}
CHeapBitMap::CHeapBitMap(idx_t size_in_bits, MEMFLAGS flags)
: BitMap(allocate(CHeapBitMapAllocator(flags), size_in_bits), size_in_bits), _flags(flags) {
CHeapBitMap::CHeapBitMap(idx_t size_in_bits, MEMFLAGS flags, bool clear)
: BitMap(allocate(CHeapBitMapAllocator(flags), size_in_bits, clear), size_in_bits), _flags(flags) {
}
CHeapBitMap::~CHeapBitMap() {

View File

@ -123,11 +123,11 @@ class BitMap {
// Allocates and clears the bitmap memory.
template <class Allocator>
static bm_word_t* allocate(const Allocator&, idx_t size_in_bits);
static bm_word_t* allocate(const Allocator&, idx_t size_in_bits, bool clear = true);
// Reallocates and clears the new bitmap memory.
template <class Allocator>
static bm_word_t* reallocate(const Allocator&, bm_word_t* map, idx_t old_size_in_bits, idx_t new_size_in_bits);
static bm_word_t* reallocate(const Allocator&, bm_word_t* map, idx_t old_size_in_bits, idx_t new_size_in_bits, bool clear = true);
// Free the bitmap memory.
template <class Allocator>
@ -359,7 +359,7 @@ class CHeapBitMap : public BitMap {
public:
CHeapBitMap(MEMFLAGS flags = mtInternal) : BitMap(NULL, 0), _flags(flags) {}
// Clears the bitmap memory.
CHeapBitMap(idx_t size_in_bits, MEMFLAGS flags = mtInternal);
CHeapBitMap(idx_t size_in_bits, MEMFLAGS flags = mtInternal, bool clear = true);
~CHeapBitMap();
// Resize the backing bitmap memory.

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -35,6 +35,7 @@
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "oops/weakHandle.inline.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/hashtable.hpp"
@ -148,7 +149,6 @@ template <class T, MEMFLAGS F> void RehashableHashtable<T, F>::move_to(Rehashabl
}
// give the new table the free list as well
new_table->copy_freelist(this);
assert(new_table->number_of_entries() == saved_entry_count, "lost entry on dictionary copy?");
// Destroy memory used by the buckets in the hashtable. The memory
// for the elements has been used in a new table and is not
@ -263,6 +263,10 @@ static int literal_size(oop obj) {
}
}
static int literal_size(ClassLoaderWeakHandle v) {
return literal_size(v.peek());
}
template <MEMFLAGS F> bool BasicHashtable<F>::resize(int new_size) {
assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
@ -382,6 +386,13 @@ template <MEMFLAGS F> void BasicHashtable<F>::copy_buckets(char* top, char* end)
}
#ifndef PRODUCT
template <class T> void print_literal(T l) {
l->print();
}
static void print_literal(ClassLoaderWeakHandle l) {
l.print();
}
template <class T, MEMFLAGS F> void Hashtable<T, F>::print() {
ResourceMark rm;
@ -390,7 +401,7 @@ template <class T, MEMFLAGS F> void Hashtable<T, F>::print() {
HashtableEntry<T, F>* entry = bucket(i);
while(entry != NULL) {
tty->print("%d : ", i);
entry->literal()->print();
print_literal(entry->literal());
tty->cr();
entry = entry->next();
}
@ -443,21 +454,19 @@ template class BasicHashtable<mtGC>;
#endif
template class Hashtable<ConstantPool*, mtClass>;
template class RehashableHashtable<Symbol*, mtSymbol>;
template class RehashableHashtable<oopDesc*, mtSymbol>;
template class RehashableHashtable<oop, mtSymbol>;
template class Hashtable<Symbol*, mtSymbol>;
template class Hashtable<Klass*, mtClass>;
template class Hashtable<InstanceKlass*, mtClass>;
template class Hashtable<oop, mtClass>;
template class Hashtable<ClassLoaderWeakHandle, mtClass>;
template class Hashtable<Symbol*, mtModule>;
#if defined(SOLARIS) || defined(CHECK_UNHANDLED_OOPS)
template class Hashtable<oop, mtSymbol>;
template class RehashableHashtable<oop, mtSymbol>;
#endif // SOLARIS || CHECK_UNHANDLED_OOPS
template class Hashtable<oopDesc*, mtSymbol>;
template class Hashtable<ClassLoaderWeakHandle, mtSymbol>;
template class Hashtable<Symbol*, mtClass>;
template class HashtableEntry<Symbol*, mtSymbol>;
template class HashtableEntry<Symbol*, mtClass>;
template class HashtableEntry<oop, mtSymbol>;
template class HashtableEntry<ClassLoaderWeakHandle, mtSymbol>;
template class HashtableBucket<mtClass>;
template class BasicHashtableEntry<mtSymbol>;
template class BasicHashtableEntry<mtCode>;

View File

@ -36,7 +36,9 @@
#include "utilities/vmError.hpp"
#include "utilities/xmlstream.hpp"
extern "C" void jio_print(const char* s); // Declarationtion of jvm method
// Declarations of jvm methods
extern "C" void jio_print(const char* s, size_t len);
extern "C" int jio_printf(const char *fmt, ...);
outputStream::outputStream(int width) {
_width = width;
@ -612,19 +614,15 @@ fileStream* defaultStream::open_file(const char* log_name) {
// Try again to open the file in the temp directory.
delete file;
char warnbuf[O_BUFLEN*2];
jio_snprintf(warnbuf, sizeof(warnbuf), "Warning: Cannot open log file: %s\n", log_name);
// Note: This feature is for maintainer use only. No need for L10N.
jio_print(warnbuf);
// Note: This feature is for maintainer use only. No need for L10N.
jio_printf("Warning: Cannot open log file: %s\n", log_name);
try_name = make_log_name(log_name, os::get_temp_directory());
if (try_name == NULL) {
warning("Cannot open file %s: file name is too long for directory %s.\n", log_name, os::get_temp_directory());
return NULL;
}
jio_snprintf(warnbuf, sizeof(warnbuf),
"Warning: Forcing option -XX:LogFile=%s\n", try_name);
jio_print(warnbuf);
jio_printf("Warning: Forcing option -XX:LogFile=%s\n", try_name);
file = new(ResourceObj::C_HEAP, mtInternal) fileStream(try_name);
FREE_C_HEAP_ARRAY(char, try_name);
@ -824,20 +822,6 @@ void defaultStream::release(intx holder) {
tty_lock->unlock();
}
// Yuck: jio_print does not accept char*/len.
static void call_jio_print(const char* s, size_t len) {
char buffer[O_BUFLEN+100];
if (len > sizeof(buffer)-1) {
warning("increase O_BUFLEN in ostream.cpp -- output truncated");
len = sizeof(buffer)-1;
}
strncpy(buffer, s, len);
buffer[len] = '\0';
jio_print(buffer);
}
void defaultStream::write(const char* s, size_t len) {
intx thread_id = os::current_thread_id();
intx holder = hold(thread_id);
@ -845,11 +829,7 @@ void defaultStream::write(const char* s, size_t len) {
if (DisplayVMOutput &&
(_outer_xmlStream == NULL || !_outer_xmlStream->inside_attrs())) {
// print to output stream. It can be redirected by a vfprintf hook
if (s[len] == '\0') {
jio_print(s);
} else {
call_jio_print(s, len);
}
jio_print(s, len);
}
// print to log file

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -159,6 +159,17 @@ public final class Optional<T> {
return value != null;
}
/**
* If a value is not present, returns {@code true}, otherwise
* {@code false}.
*
* @return {@code true} if a value is not present, otherwise {@code false}
* @since 11
*/
public boolean isEmpty() {
return value == null;
}
/**
* If a value is present, performs the given action with the value,
* otherwise does nothing.
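A quick usage sketch of the isEmpty() method added above (the same accessor is added to OptionalInt, OptionalLong and OptionalDouble below); the system property name here is invented purely for illustration:

import java.util.Optional;

public class IsEmptyDemo {
    public static void main(String[] args) {
        // Hypothetical property name, used only to obtain a possibly-absent value.
        Optional<String> override =
                Optional.ofNullable(System.getProperty("demo.user.override"));

        // Before JDK 11 the absent case had to be phrased as a negation.
        if (!override.isPresent()) {
            System.out.println("no override configured");
        }

        // With this change the intent reads directly.
        if (override.isEmpty()) {
            System.out.println("no override configured");
        }
    }
}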

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -139,6 +139,17 @@ public final class OptionalDouble {
return isPresent;
}
/**
* If a value is not present, returns {@code true}, otherwise
* {@code false}.
*
* @return {@code true} if a value is not present, otherwise {@code false}
* @since 11
*/
public boolean isEmpty() {
return !isPresent;
}
/**
* If a value is present, performs the given action with the value,
* otherwise does nothing.

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -139,6 +139,17 @@ public final class OptionalInt {
return isPresent;
}
/**
* If a value is not present, returns {@code true}, otherwise
* {@code false}.
*
* @return {@code true} if a value is not present, otherwise {@code false}
* @since 11
*/
public boolean isEmpty() {
return !isPresent;
}
/**
* If a value is present, performs the given action with the value,
* otherwise does nothing.

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -139,6 +139,17 @@ public final class OptionalLong {
return isPresent;
}
/**
* If a value is not present, returns {@code true}, otherwise
* {@code false}.
*
* @return {@code true} if a value is not present, otherwise {@code false}
* @since 11
*/
public boolean isEmpty() {
return !isPresent;
}
/**
* If a value is present, performs the given action with the value,
* otherwise does nothing.

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1996, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1996, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,7 +26,13 @@
package java.util.zip;
import java.lang.ref.Cleaner.Cleanable;
import java.lang.ref.Reference;
import java.nio.ByteBuffer;
import java.nio.ReadOnlyBufferException;
import java.util.Objects;
import jdk.internal.ref.CleanerFactory;
import sun.nio.ch.DirectBuffer;
/**
* This class provides support for general purpose compression using the
@ -35,8 +41,14 @@ import jdk.internal.ref.CleanerFactory;
* protected by patents. It is fully described in the specifications at
* the <a href="package-summary.html#package.description">java.util.zip
* package description</a>.
*
* <p>The following code fragment demonstrates a trivial compression
* <p>
* This class deflates sequences of bytes into ZLIB compressed data format.
* The input byte sequence is provided in either byte array or byte buffer,
* via one of the {@code setInput()} methods. The output byte sequence is
* written to the output byte array or byte buffer passed to the
* {@code deflate()} methods.
* <p>
* The following code fragment demonstrates a trivial compression
* and decompression of a string using {@code Deflater} and
* {@code Inflater}.
*
@ -92,8 +104,9 @@ import jdk.internal.ref.CleanerFactory;
public class Deflater {
private final DeflaterZStreamRef zsRef;
private byte[] buf = new byte[0];
private int off, len;
private ByteBuffer input = ZipUtils.defaultBuf;
private byte[] inputArray;
private int inputPos, inputLim;
private int level, strategy;
private boolean setParams;
private boolean finish, finished;
@ -170,9 +183,14 @@ public class Deflater {
*/
public static final int FULL_FLUSH = 3;
/**
* Flush mode to use at the end of output. Can only be provided by the
* user by way of {@link #finish()}.
*/
private static final int FINISH = 4;
static {
ZipUtils.loadLibrary();
initIDs();
}
/**
@ -208,35 +226,71 @@ public class Deflater {
}
/**
* Sets input data for compression. This should be called whenever
* needsInput() returns true indicating that more input data is required.
* @param b the input data bytes
* Sets input data for compression.
* <p>
* One of the {@code setInput()} methods should be called whenever
* {@code needsInput()} returns true indicating that more input data
* is required.
* <p>
* @param input the input data bytes
* @param off the start offset of the data
* @param len the length of the data
* @see Deflater#needsInput
*/
public void setInput(byte[] b, int off, int len) {
if (b== null) {
throw new NullPointerException();
}
if (off < 0 || len < 0 || off > b.length - len) {
public void setInput(byte[] input, int off, int len) {
if (off < 0 || len < 0 || off > input.length - len) {
throw new ArrayIndexOutOfBoundsException();
}
synchronized (zsRef) {
this.buf = b;
this.off = off;
this.len = len;
this.input = null;
this.inputArray = input;
this.inputPos = off;
this.inputLim = off + len;
}
}
/**
* Sets input data for compression. This should be called whenever
* needsInput() returns true indicating that more input data is required.
* @param b the input data bytes
* Sets input data for compression.
* <p>
* One of the {@code setInput()} methods should be called whenever
* {@code needsInput()} returns true indicating that more input data
* is required.
* <p>
* @param input the input data bytes
* @see Deflater#needsInput
*/
public void setInput(byte[] b) {
setInput(b, 0, b.length);
public void setInput(byte[] input) {
setInput(input, 0, input.length);
}
/**
* Sets input data for compression.
* <p>
* One of the {@code setInput()} methods should be called whenever
* {@code needsInput()} returns true indicating that more input data
* is required.
* <p>
* The given buffer's position will be advanced as deflate
* operations are performed, up to the buffer's limit.
* The input buffer may be modified (refilled) between deflate
* operations; doing so is equivalent to creating a new buffer
* and setting it with this method.
* <p>
* Modifying the input buffer's contents, position, or limit
* concurrently with a deflate operation will result in
* undefined behavior, which may include incorrect operation
* results or operation failure.
*
* @param input the input data bytes
* @see Deflater#needsInput
* @since 11
*/
public void setInput(ByteBuffer input) {
Objects.requireNonNull(input);
synchronized (zsRef) {
this.input = input;
this.inputArray = null;
}
}
/**
@ -245,22 +299,19 @@ public class Deflater {
* uncompressed with Inflater.inflate(), Inflater.getAdler() can be called
* in order to get the Adler-32 value of the dictionary required for
* decompression.
* @param b the dictionary data bytes
* @param dictionary the dictionary data bytes
* @param off the start offset of the data
* @param len the length of the data
* @see Inflater#inflate
* @see Inflater#getAdler
*/
public void setDictionary(byte[] b, int off, int len) {
if (b == null) {
throw new NullPointerException();
}
if (off < 0 || len < 0 || off > b.length - len) {
public void setDictionary(byte[] dictionary, int off, int len) {
if (off < 0 || len < 0 || off > dictionary.length - len) {
throw new ArrayIndexOutOfBoundsException();
}
synchronized (zsRef) {
ensureOpen();
setDictionary(zsRef.address(), b, off, len);
setDictionary(zsRef.address(), dictionary, off, len);
}
}
@ -270,12 +321,47 @@ public class Deflater {
* uncompressed with Inflater.inflate(), Inflater.getAdler() can be called
* in order to get the Adler-32 value of the dictionary required for
* decompression.
* @param b the dictionary data bytes
* @param dictionary the dictionary data bytes
* @see Inflater#inflate
* @see Inflater#getAdler
*/
public void setDictionary(byte[] b) {
setDictionary(b, 0, b.length);
public void setDictionary(byte[] dictionary) {
setDictionary(dictionary, 0, dictionary.length);
}
/**
* Sets preset dictionary for compression. A preset dictionary is used
* when the history buffer can be predetermined. When the data is later
* uncompressed with Inflater.inflate(), Inflater.getAdler() can be called
* in order to get the Adler-32 value of the dictionary required for
* decompression.
* <p>
* The bytes in given byte buffer will be fully consumed by this method. On
* return, its position will equal its limit.
*
* @param dictionary the dictionary data bytes
* @see Inflater#inflate
* @see Inflater#getAdler
*/
public void setDictionary(ByteBuffer dictionary) {
synchronized (zsRef) {
int position = dictionary.position();
int remaining = Math.max(dictionary.limit() - position, 0);
ensureOpen();
if (dictionary.isDirect()) {
long address = ((DirectBuffer) dictionary).address();
try {
setDictionaryBuffer(zsRef.address(), address + position, remaining);
} finally {
Reference.reachabilityFence(dictionary);
}
} else {
byte[] array = ZipUtils.getBufferArray(dictionary);
int offset = ZipUtils.getBufferOffset(dictionary);
setDictionary(zsRef.address(), array, offset + position, remaining);
}
dictionary.position(position + remaining);
}
}
/**
@ -331,14 +417,17 @@ public class Deflater {
}
/**
* Returns true if the input data buffer is empty and setInput()
* should be called in order to provide more input.
* Returns true if no data remains in the input buffer. This can
* be used to determine if one of the {@code setInput()} methods should be
* called in order to provide more input.
*
* @return true if the input data buffer is empty and setInput()
* should be called in order to provide more input
*/
public boolean needsInput() {
synchronized (zsRef) {
return len <= 0;
ByteBuffer input = this.input;
return input == null ? inputLim == inputPos : ! input.hasRemaining();
}
}
@ -375,14 +464,14 @@ public class Deflater {
* yields the same result as the invocation of
* {@code deflater.deflate(b, off, len, Deflater.NO_FLUSH)}.
*
* @param b the buffer for the compressed data
* @param output the buffer for the compressed data
* @param off the start offset of the data
* @param len the maximum number of bytes of compressed data
* @return the actual number of bytes of compressed data written to the
* output buffer
*/
public int deflate(byte[] b, int off, int len) {
return deflate(b, off, len, NO_FLUSH);
public int deflate(byte[] output, int off, int len) {
return deflate(output, off, len, NO_FLUSH);
}
/**
@ -396,12 +485,32 @@ public class Deflater {
* yields the same result as the invocation of
* {@code deflater.deflate(b, 0, b.length, Deflater.NO_FLUSH)}.
*
* @param b the buffer for the compressed data
* @param output the buffer for the compressed data
* @return the actual number of bytes of compressed data written to the
* output buffer
*/
public int deflate(byte[] b) {
return deflate(b, 0, b.length, NO_FLUSH);
public int deflate(byte[] output) {
return deflate(output, 0, output.length, NO_FLUSH);
}
/**
* Compresses the input data and fills specified buffer with compressed
* data. Returns actual number of bytes of compressed data. A return value
* of 0 indicates that {@link #needsInput() needsInput} should be called
* in order to determine if more input data is required.
*
* <p>This method uses {@link #NO_FLUSH} as its compression flush mode.
* An invocation of this method of the form {@code deflater.deflate(output)}
* yields the same result as the invocation of
* {@code deflater.deflate(output, Deflater.NO_FLUSH)}.
*
* @param output the buffer for the compressed data
* @return the actual number of bytes of compressed data written to the
* output buffer
* @since 11
*/
public int deflate(ByteBuffer output) {
return deflate(output, NO_FLUSH);
}
/**
@ -441,7 +550,11 @@ public class Deflater {
* repeatedly output to the output buffer every time this method is
* invoked.
*
* @param b the buffer for the compressed data
* <p>If the {@link #setInput(ByteBuffer)} method was called to provide a buffer
* for input, the input buffer's position will be advanced by the number of bytes
* consumed by this operation.
*
* @param output the buffer for the compressed data
* @param off the start offset of the data
* @param len the maximum number of bytes of compressed data
* @param flush the compression flush mode
@ -451,25 +564,248 @@ public class Deflater {
* @throws IllegalArgumentException if the flush mode is invalid
* @since 1.7
*/
public int deflate(byte[] b, int off, int len, int flush) {
if (b == null) {
throw new NullPointerException();
}
if (off < 0 || len < 0 || off > b.length - len) {
public int deflate(byte[] output, int off, int len, int flush) {
if (off < 0 || len < 0 || off > output.length - len) {
throw new ArrayIndexOutOfBoundsException();
}
if (flush != NO_FLUSH && flush != SYNC_FLUSH && flush != FULL_FLUSH) {
throw new IllegalArgumentException();
}
synchronized (zsRef) {
ensureOpen();
if (flush == NO_FLUSH || flush == SYNC_FLUSH ||
flush == FULL_FLUSH) {
int thisLen = this.len;
int n = deflateBytes(zsRef.address(), b, off, len, flush);
bytesWritten += n;
bytesRead += (thisLen - this.len);
return n;
ByteBuffer input = this.input;
if (finish) {
// disregard given flush mode in this case
flush = FINISH;
}
int params;
if (setParams) {
// bit 0: true to set params
// bit 1-2: strategy (0, 1, or 2)
// bit 3-31: level (0..9 or -1)
params = 1 | strategy << 1 | level << 3;
} else {
params = 0;
}
int inputPos;
long result;
if (input == null) {
inputPos = this.inputPos;
result = deflateBytesBytes(zsRef.address(),
inputArray, inputPos, inputLim - inputPos,
output, off, len,
flush, params);
} else {
inputPos = input.position();
int inputRem = Math.max(input.limit() - inputPos, 0);
if (input.isDirect()) {
try {
long inputAddress = ((DirectBuffer) input).address();
result = deflateBufferBytes(zsRef.address(),
inputAddress + inputPos, inputRem,
output, off, len,
flush, params);
} finally {
Reference.reachabilityFence(input);
}
} else {
byte[] inputArray = ZipUtils.getBufferArray(input);
int inputOffset = ZipUtils.getBufferOffset(input);
result = deflateBytesBytes(zsRef.address(),
inputArray, inputOffset + inputPos, inputRem,
output, off, len,
flush, params);
}
}
int read = (int) (result & 0x7fff_ffffL);
int written = (int) (result >>> 31 & 0x7fff_ffffL);
if ((result >>> 62 & 1) != 0) {
finished = true;
}
if (params != 0 && (result >>> 63 & 1) == 0) {
setParams = false;
}
if (input != null) {
input.position(inputPos + read);
} else {
this.inputPos = inputPos + read;
}
bytesWritten += written;
bytesRead += read;
return written;
}
}
/**
* Compresses the input data and fills the specified buffer with compressed
* data. Returns actual number of bytes of data compressed.
*
* <p>Compression flush mode is one of the following three modes:
*
* <ul>
* <li>{@link #NO_FLUSH}: allows the deflater to decide how much data
* to accumulate, before producing output, in order to achieve the best
* compression (should be used in normal use scenario). A return value
* of 0 in this flush mode indicates that {@link #needsInput()} should
* be called in order to determine if more input data is required.
*
* <li>{@link #SYNC_FLUSH}: all pending output in the deflater is flushed,
* to the specified output buffer, so that an inflater that works on
* compressed data can get all input data available so far (In particular
* the {@link #needsInput()} returns {@code true} after this invocation
* if enough output space is provided). Flushing with {@link #SYNC_FLUSH}
* may degrade compression for some compression algorithms and so it
* should be used only when necessary.
*
* <li>{@link #FULL_FLUSH}: all pending output is flushed out as with
* {@link #SYNC_FLUSH}. The compression state is reset so that the inflater
* that works on the compressed output data can restart from this point
* if previous compressed data has been damaged or if random access is
* desired. Using {@link #FULL_FLUSH} too often can seriously degrade
* compression.
* </ul>
*
* <p>In the case of {@link #FULL_FLUSH} or {@link #SYNC_FLUSH}, if
* the return value is equal to the {@linkplain ByteBuffer#remaining() remaining space}
* of the buffer, this method should be invoked again with the same
* {@code flush} parameter and more output space. Make sure that
* the buffer has at least 6 bytes of remaining space to avoid the
* flush marker (5 bytes) being repeatedly output to the output buffer
* every time this method is invoked.
*
* <p>On success, the position of the given {@code output} byte buffer will be
* advanced by as many bytes as were produced by the operation, which is equal
* to the number returned by this method.
*
* <p>If the {@link #setInput(ByteBuffer)} method was called to provide a buffer
* for input, the input buffer's position will be advanced by the number of bytes
* consumed by this operation.
*
* @param output the buffer for the compressed data
* @param flush the compression flush mode
* @return the actual number of bytes of compressed data written to
* the output buffer
*
* @throws IllegalArgumentException if the flush mode is invalid
* @since 11
*/
public int deflate(ByteBuffer output, int flush) {
if (output.isReadOnly()) {
throw new ReadOnlyBufferException();
}
if (flush != NO_FLUSH && flush != SYNC_FLUSH && flush != FULL_FLUSH) {
throw new IllegalArgumentException();
}
synchronized (zsRef) {
ensureOpen();
ByteBuffer input = this.input;
if (finish) {
// disregard given flush mode in this case
flush = FINISH;
}
int params;
if (setParams) {
// bit 0: true to set params
// bit 1-2: strategy (0, 1, or 2)
// bit 3-31: level (0..9 or -1)
params = 1 | strategy << 1 | level << 3;
} else {
params = 0;
}
int outputPos = output.position();
int outputRem = Math.max(output.limit() - outputPos, 0);
int inputPos;
long result;
if (input == null) {
inputPos = this.inputPos;
if (output.isDirect()) {
long outputAddress = ((DirectBuffer) output).address();
try {
result = deflateBytesBuffer(zsRef.address(),
inputArray, inputPos, inputLim - inputPos,
outputAddress + outputPos, outputRem,
flush, params);
} finally {
Reference.reachabilityFence(output);
}
} else {
byte[] outputArray = ZipUtils.getBufferArray(output);
int outputOffset = ZipUtils.getBufferOffset(output);
result = deflateBytesBytes(zsRef.address(),
inputArray, inputPos, inputLim - inputPos,
outputArray, outputOffset + outputPos, outputRem,
flush, params);
}
} else {
inputPos = input.position();
int inputRem = Math.max(input.limit() - inputPos, 0);
if (input.isDirect()) {
long inputAddress = ((DirectBuffer) input).address();
try {
if (output.isDirect()) {
long outputAddress = outputPos + ((DirectBuffer) output).address();
try {
result = deflateBufferBuffer(zsRef.address(),
inputAddress + inputPos, inputRem,
outputAddress, outputRem,
flush, params);
} finally {
Reference.reachabilityFence(output);
}
} else {
byte[] outputArray = ZipUtils.getBufferArray(output);
int outputOffset = ZipUtils.getBufferOffset(output);
result = deflateBufferBytes(zsRef.address(),
inputAddress + inputPos, inputRem,
outputArray, outputOffset + outputPos, outputRem,
flush, params);
}
} finally {
Reference.reachabilityFence(input);
}
} else {
byte[] inputArray = ZipUtils.getBufferArray(input);
int inputOffset = ZipUtils.getBufferOffset(input);
if (output.isDirect()) {
long outputAddress = ((DirectBuffer) output).address();
try {
result = deflateBytesBuffer(zsRef.address(),
inputArray, inputOffset + inputPos, inputRem,
outputAddress + outputPos, outputRem,
flush, params);
} finally {
Reference.reachabilityFence(output);
}
} else {
byte[] outputArray = ZipUtils.getBufferArray(output);
int outputOffset = ZipUtils.getBufferOffset(output);
result = deflateBytesBytes(zsRef.address(),
inputArray, inputOffset + inputPos, inputRem,
outputArray, outputOffset + outputPos, outputRem,
flush, params);
}
}
}
int read = (int) (result & 0x7fff_ffffL);
int written = (int) (result >>> 31 & 0x7fff_ffffL);
if ((result >>> 62 & 1) != 0) {
finished = true;
}
if (params != 0 && (result >>> 63 & 1) == 0) {
setParams = false;
}
if (input != null) {
input.position(inputPos + read);
} else {
this.inputPos = inputPos + read;
}
output.position(outputPos + written);
bytesWritten += written;
bytesRead += read;
return written;
}
}
/**
@ -545,7 +881,8 @@ public class Deflater {
reset(zsRef.address());
finish = false;
finished = false;
off = len = 0;
input = ZipUtils.defaultBuf;
inputArray = null;
bytesRead = bytesWritten = 0;
}
}
@ -560,7 +897,7 @@ public class Deflater {
public void end() {
synchronized (zsRef) {
zsRef.clean();
buf = null;
input = ZipUtils.defaultBuf;
}
}
@ -585,11 +922,26 @@ public class Deflater {
throw new NullPointerException("Deflater has been closed");
}
private static native void initIDs();
private static native long init(int level, int strategy, boolean nowrap);
private static native void setDictionary(long addr, byte[] b, int off, int len);
private native int deflateBytes(long addr, byte[] b, int off, int len,
int flush);
private static native void setDictionary(long addr, byte[] b, int off,
int len);
private static native void setDictionaryBuffer(long addr, long bufAddress, int len);
private native long deflateBytesBytes(long addr,
byte[] inputArray, int inputOff, int inputLen,
byte[] outputArray, int outputOff, int outputLen,
int flush, int params);
private native long deflateBytesBuffer(long addr,
byte[] inputArray, int inputOff, int inputLen,
long outputAddress, int outputLen,
int flush, int params);
private native long deflateBufferBytes(long addr,
long inputAddress, int inputLen,
byte[] outputArray, int outputOff, int outputLen,
int flush, int params);
private native long deflateBufferBuffer(long addr,
long inputAddress, int inputLen,
long outputAddress, int outputLen,
int flush, int params);
private static native int getAdler(long addr);
private static native void reset(long addr);
private static native void end(long addr);
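For orientation, a minimal sketch (not taken from this patch) of how the new setInput(ByteBuffer) overload combines with the usual deflate loop; the sample text and the deliberately small chunk size are arbitrary choices:

import java.io.ByteArrayOutputStream;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.zip.Deflater;

public class DeflaterBufferDemo {
    public static void main(String[] args) {
        ByteBuffer input = ByteBuffer.wrap(
                "some text to compress, repeated a few times, repeated a few times"
                        .getBytes(StandardCharsets.UTF_8));

        Deflater deflater = new Deflater();
        deflater.setInput(input);   // ByteBuffer overload; its position advances as bytes are consumed
        deflater.finish();          // no further input will be supplied

        ByteArrayOutputStream compressed = new ByteArrayOutputStream();
        byte[] chunk = new byte[32];          // small on purpose, to force several iterations
        while (!deflater.finished()) {
            int n = deflater.deflate(chunk);  // NO_FLUSH by default; after finish() it drains until finished()
            compressed.write(chunk, 0, n);
        }
        deflater.end();

        System.out.println("consumed " + input.position() + " bytes, produced " + compressed.size());
    }
}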

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1996, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1996, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,7 +26,13 @@
package java.util.zip;
import java.lang.ref.Cleaner.Cleanable;
import java.lang.ref.Reference;
import java.nio.ByteBuffer;
import java.nio.ReadOnlyBufferException;
import java.util.Objects;
import jdk.internal.ref.CleanerFactory;
import sun.nio.ch.DirectBuffer;
/**
* This class provides support for general purpose decompression using the
@ -35,8 +41,13 @@ import jdk.internal.ref.CleanerFactory;
* protected by patents. It is fully described in the specifications at
* the <a href="package-summary.html#package.description">java.util.zip
* package description</a>.
*
* <p>The following code fragment demonstrates a trivial compression
* <p>
* This class inflates sequences of ZLIB compressed bytes. The input byte
* sequence is provided in either byte array or byte buffer, via one of the
* {@code setInput()} methods. The output byte sequence is written to the
* output byte array or byte buffer passed to the {@code inflate()} methods.
* <p>
* The following code fragment demonstrates a trivial compression
* and decompression of a string using {@code Deflater} and
* {@code Inflater}.
*
@ -92,14 +103,20 @@ import jdk.internal.ref.CleanerFactory;
public class Inflater {
private final InflaterZStreamRef zsRef;
private byte[] buf = defaultBuf;
private int off, len;
private ByteBuffer input = ZipUtils.defaultBuf;
private byte[] inputArray;
private int inputPos, inputLim;
private boolean finished;
private boolean needDict;
private long bytesRead;
private long bytesWritten;
private static final byte[] defaultBuf = new byte[0];
/*
* These fields are used as an "out" parameter from JNI when a
* DataFormatException is thrown during the inflate operation.
*/
private int inputConsumed;
private int outputConsumed;
static {
ZipUtils.loadLibrary();
@ -129,37 +146,71 @@ public class Inflater {
}
/**
* Sets input data for decompression. Should be called whenever
* needsInput() returns true indicating that more input data is
* required.
* @param b the input data bytes
* Sets input data for decompression.
* <p>
* One of the {@code setInput()} methods should be called whenever
* {@code needsInput()} returns true indicating that more input data
* is required.
*
* @param input the input data bytes
* @param off the start offset of the input data
* @param len the length of the input data
* @see Inflater#needsInput
*/
public void setInput(byte[] b, int off, int len) {
if (b == null) {
throw new NullPointerException();
}
if (off < 0 || len < 0 || off > b.length - len) {
public void setInput(byte[] input, int off, int len) {
if (off < 0 || len < 0 || off > input.length - len) {
throw new ArrayIndexOutOfBoundsException();
}
synchronized (zsRef) {
this.buf = b;
this.off = off;
this.len = len;
this.input = null;
this.inputArray = input;
this.inputPos = off;
this.inputLim = off + len;
}
}
/**
* Sets input data for decompression. Should be called whenever
* needsInput() returns true indicating that more input data is
* required.
* @param b the input data bytes
* Sets input data for decompression.
* <p>
* One of the {@code setInput()} methods should be called whenever
* {@code needsInput()} returns true indicating that more input data
* is required.
*
* @param input the input data bytes
* @see Inflater#needsInput
*/
public void setInput(byte[] b) {
setInput(b, 0, b.length);
public void setInput(byte[] input) {
setInput(input, 0, input.length);
}
/**
* Sets input data for decompression.
* <p>
* One of the {@code setInput()} methods should be called whenever
* {@code needsInput()} returns true indicating that more input data
* is required.
* <p>
* The given buffer's position will be advanced as inflate
* operations are performed, up to the buffer's limit.
* The input buffer may be modified (refilled) between inflate
* operations; doing so is equivalent to creating a new buffer
* and setting it with this method.
* <p>
* Modifying the input buffer's contents, position, or limit
* concurrently with an inflate operation will result in
* undefined behavior, which may include incorrect operation
* results or operation failure.
*
* @param input the input data bytes
* @see Inflater#needsInput
* @since 11
*/
public void setInput(ByteBuffer input) {
Objects.requireNonNull(input);
synchronized (zsRef) {
this.input = input;
this.inputArray = null;
}
}
/**
@ -167,22 +218,19 @@ public class Inflater {
* called when inflate() returns 0 and needsDictionary() returns true
* indicating that a preset dictionary is required. The method getAdler()
* can be used to get the Adler-32 value of the dictionary needed.
* @param b the dictionary data bytes
* @param dictionary the dictionary data bytes
* @param off the start offset of the data
* @param len the length of the data
* @see Inflater#needsDictionary
* @see Inflater#getAdler
*/
public void setDictionary(byte[] b, int off, int len) {
if (b == null) {
throw new NullPointerException();
}
if (off < 0 || len < 0 || off > b.length - len) {
public void setDictionary(byte[] dictionary, int off, int len) {
if (off < 0 || len < 0 || off > dictionary.length - len) {
throw new ArrayIndexOutOfBoundsException();
}
synchronized (zsRef) {
ensureOpen();
setDictionary(zsRef.address(), b, off, len);
setDictionary(zsRef.address(), dictionary, off, len);
needDict = false;
}
}
@ -192,12 +240,48 @@ public class Inflater {
* called when inflate() returns 0 and needsDictionary() returns true
* indicating that a preset dictionary is required. The method getAdler()
* can be used to get the Adler-32 value of the dictionary needed.
* @param b the dictionary data bytes
* @param dictionary the dictionary data bytes
* @see Inflater#needsDictionary
* @see Inflater#getAdler
*/
public void setDictionary(byte[] b) {
setDictionary(b, 0, b.length);
public void setDictionary(byte[] dictionary) {
setDictionary(dictionary, 0, dictionary.length);
}
/**
* Sets the preset dictionary to the bytes in the given buffer. Should be
* called when inflate() returns 0 and needsDictionary() returns true
* indicating that a preset dictionary is required. The method getAdler()
* can be used to get the Adler-32 value of the dictionary needed.
* <p>
* The bytes in given byte buffer will be fully consumed by this method. On
* return, its position will equal its limit.
*
* @param dictionary the dictionary data bytes
* @see Inflater#needsDictionary
* @see Inflater#getAdler
* @since 11
*/
public void setDictionary(ByteBuffer dictionary) {
synchronized (zsRef) {
int position = dictionary.position();
int remaining = Math.max(dictionary.limit() - position, 0);
ensureOpen();
if (dictionary.isDirect()) {
long address = ((DirectBuffer) dictionary).address();
try {
setDictionaryBuffer(zsRef.address(), address + position, remaining);
} finally {
Reference.reachabilityFence(dictionary);
}
} else {
byte[] array = ZipUtils.getBufferArray(dictionary);
int offset = ZipUtils.getBufferOffset(dictionary);
setDictionary(zsRef.address(), array, offset + position, remaining);
}
dictionary.position(position + remaining);
needDict = false;
}
}
/**
@ -208,19 +292,22 @@ public class Inflater {
*/
public int getRemaining() {
synchronized (zsRef) {
return len;
ByteBuffer input = this.input;
return input == null ? inputLim - inputPos : input.remaining();
}
}
/**
* Returns true if no data remains in the input buffer. This can
* be used to determine if #setInput should be called in order
* to provide more input.
* be used to determine if one of the {@code setInput()} methods should be
* called in order to provide more input.
*
* @return true if no data remains in the input buffer
*/
public boolean needsInput() {
synchronized (zsRef) {
return len <= 0;
ByteBuffer input = this.input;
return input == null ? inputLim == inputPos : ! input.hasRemaining();
}
}
@ -254,30 +341,103 @@ public class Inflater {
* determine if more input data or a preset dictionary is required.
* In the latter case, getAdler() can be used to get the Adler-32
* value of the dictionary required.
* @param b the buffer for the uncompressed data
* <p>
* If the {@link #setInput(ByteBuffer)} method was called to provide a buffer
* for input, the input buffer's position will be advanced by the number of bytes
* consumed by this operation, even in the event that a {@link DataFormatException}
* is thrown.
* <p>
* The {@linkplain #getRemaining() remaining byte count} will be reduced by
* the number of consumed input bytes. If the {@link #setInput(ByteBuffer)}
* method was called to provide a buffer for input, the input buffer's position
* will be advanced the number of consumed bytes.
* <p>
* These byte totals, as well as
* the {@linkplain #getBytesRead() total bytes read}
* and the {@linkplain #getBytesWritten() total bytes written}
* values, will be updated even in the event that a {@link DataFormatException}
* is thrown to reflect the amount of data consumed and produced before the
* exception occurred.
*
* @param output the buffer for the uncompressed data
* @param off the start offset of the data
* @param len the maximum number of uncompressed bytes
* @return the actual number of uncompressed bytes
* @exception DataFormatException if the compressed data format is invalid
* @throws DataFormatException if the compressed data format is invalid
* @see Inflater#needsInput
* @see Inflater#needsDictionary
*/
public int inflate(byte[] b, int off, int len)
public int inflate(byte[] output, int off, int len)
throws DataFormatException
{
if (b == null) {
throw new NullPointerException();
}
if (off < 0 || len < 0 || off > b.length - len) {
if (off < 0 || len < 0 || off > output.length - len) {
throw new ArrayIndexOutOfBoundsException();
}
synchronized (zsRef) {
ensureOpen();
int thisLen = this.len;
int n = inflateBytes(zsRef.address(), b, off, len);
bytesWritten += n;
bytesRead += (thisLen - this.len);
return n;
ByteBuffer input = this.input;
long result;
int inputPos;
try {
if (input == null) {
inputPos = this.inputPos;
try {
result = inflateBytesBytes(zsRef.address(),
inputArray, inputPos, inputLim - inputPos,
output, off, len);
} catch (DataFormatException e) {
this.inputPos = inputPos + inputConsumed;
throw e;
}
} else {
inputPos = input.position();
try {
int inputRem = Math.max(input.limit() - inputPos, 0);
if (input.isDirect()) {
try {
long inputAddress = ((DirectBuffer) input).address();
result = inflateBufferBytes(zsRef.address(),
inputAddress + inputPos, inputRem,
output, off, len);
} finally {
Reference.reachabilityFence(input);
}
} else {
byte[] inputArray = ZipUtils.getBufferArray(input);
int inputOffset = ZipUtils.getBufferOffset(input);
result = inflateBytesBytes(zsRef.address(),
inputArray, inputOffset + inputPos, inputRem,
output, off, len);
}
} catch (DataFormatException e) {
input.position(inputPos + inputConsumed);
throw e;
}
}
} catch (DataFormatException e) {
bytesRead += inputConsumed;
inputConsumed = 0;
int written = outputConsumed;
bytesWritten += written;
outputConsumed = 0;
throw e;
}
int read = (int) (result & 0x7fff_ffffL);
int written = (int) (result >>> 31 & 0x7fff_ffffL);
if ((result >>> 62 & 1) != 0) {
finished = true;
}
if ((result >>> 63 & 1) != 0) {
needDict = true;
}
if (input != null) {
input.position(inputPos + read);
} else {
this.inputPos = inputPos + read;
}
bytesWritten += written;
bytesRead += read;
return written;
}
}
@ -288,14 +448,177 @@ public class Inflater {
* determine if more input data or a preset dictionary is required.
* In the latter case, getAdler() can be used to get the Adler-32
* value of the dictionary required.
* @param b the buffer for the uncompressed data
* <p>
* The {@linkplain #getRemaining() remaining byte count} will be reduced by
* the number of consumed input bytes. If the {@link #setInput(ByteBuffer)}
* method was called to provide a buffer for input, the input buffer's position
* will be advanced the number of consumed bytes.
* <p>
* These byte totals, as well as
* the {@linkplain #getBytesRead() total bytes read}
* and the {@linkplain #getBytesWritten() total bytes written}
* values, will be updated even in the event that a {@link DataFormatException}
* is thrown to reflect the amount of data consumed and produced before the
* exception occurred.
*
* @param output the buffer for the uncompressed data
* @return the actual number of uncompressed bytes
* @exception DataFormatException if the compressed data format is invalid
* @throws DataFormatException if the compressed data format is invalid
* @see Inflater#needsInput
* @see Inflater#needsDictionary
*/
public int inflate(byte[] b) throws DataFormatException {
return inflate(b, 0, b.length);
public int inflate(byte[] output) throws DataFormatException {
return inflate(output, 0, output.length);
}
/**
* Uncompresses bytes into specified buffer. Returns actual number
* of bytes uncompressed. A return value of 0 indicates that
* needsInput() or needsDictionary() should be called in order to
* determine if more input data or a preset dictionary is required.
* In the latter case, getAdler() can be used to get the Adler-32
* value of the dictionary required.
* <p>
* On success, the position of the given {@code output} byte buffer will be
* advanced by as many bytes as were produced by the operation, which is equal
* to the number returned by this method. Note that the position of the
* {@code output} buffer will be advanced even in the event that a
* {@link DataFormatException} is thrown.
* <p>
* The {@linkplain #getRemaining() remaining byte count} will be reduced by
* the number of consumed input bytes. If the {@link #setInput(ByteBuffer)}
* method was called to provide a buffer for input, the input buffer's position
* will be advanced the number of consumed bytes.
* <p>
* These byte totals, as well as
* the {@linkplain #getBytesRead() total bytes read}
* and the {@linkplain #getBytesWritten() total bytes written}
* values, will be updated even in the event that a {@link DataFormatException}
* is thrown to reflect the amount of data consumed and produced before the
* exception occurred.
*
* @param output the buffer for the uncompressed data
* @return the actual number of uncompressed bytes
* @throws DataFormatException if the compressed data format is invalid
* @throws ReadOnlyBufferException if the given output buffer is read-only
* @see Inflater#needsInput
* @see Inflater#needsDictionary
* @since 11
*/
public int inflate(ByteBuffer output) throws DataFormatException {
if (output.isReadOnly()) {
throw new ReadOnlyBufferException();
}
synchronized (zsRef) {
ensureOpen();
ByteBuffer input = this.input;
long result;
int inputPos;
int outputPos = output.position();
int outputRem = Math.max(output.limit() - outputPos, 0);
try {
if (input == null) {
inputPos = this.inputPos;
try {
if (output.isDirect()) {
long outputAddress = ((DirectBuffer) output).address();
try {
result = inflateBytesBuffer(zsRef.address(),
inputArray, inputPos, inputLim - inputPos,
outputAddress + outputPos, outputRem);
} finally {
Reference.reachabilityFence(output);
}
} else {
byte[] outputArray = ZipUtils.getBufferArray(output);
int outputOffset = ZipUtils.getBufferOffset(output);
result = inflateBytesBytes(zsRef.address(),
inputArray, inputPos, inputLim - inputPos,
outputArray, outputOffset + outputPos, outputRem);
}
} catch (DataFormatException e) {
this.inputPos = inputPos + inputConsumed;
throw e;
}
} else {
inputPos = input.position();
int inputRem = Math.max(input.limit() - inputPos, 0);
try {
if (input.isDirect()) {
long inputAddress = ((DirectBuffer) input).address();
try {
if (output.isDirect()) {
long outputAddress = ((DirectBuffer) output).address();
try {
result = inflateBufferBuffer(zsRef.address(),
inputAddress + inputPos, inputRem,
outputAddress + outputPos, outputRem);
} finally {
Reference.reachabilityFence(output);
}
} else {
byte[] outputArray = ZipUtils.getBufferArray(output);
int outputOffset = ZipUtils.getBufferOffset(output);
result = inflateBufferBytes(zsRef.address(),
inputAddress + inputPos, inputRem,
outputArray, outputOffset + outputPos, outputRem);
}
} finally {
Reference.reachabilityFence(input);
}
} else {
byte[] inputArray = ZipUtils.getBufferArray(input);
int inputOffset = ZipUtils.getBufferOffset(input);
if (output.isDirect()) {
long outputAddress = ((DirectBuffer) output).address();
try {
result = inflateBytesBuffer(zsRef.address(),
inputArray, inputOffset + inputPos, inputRem,
outputAddress + outputPos, outputRem);
} finally {
Reference.reachabilityFence(output);
}
} else {
byte[] outputArray = ZipUtils.getBufferArray(output);
int outputOffset = ZipUtils.getBufferOffset(output);
result = inflateBytesBytes(zsRef.address(),
inputArray, inputOffset + inputPos, inputRem,
outputArray, outputOffset + outputPos, outputRem);
}
}
} catch (DataFormatException e) {
input.position(inputPos + inputConsumed);
throw e;
}
}
} catch (DataFormatException e) {
bytesRead += inputConsumed;
inputConsumed = 0;
int written = outputConsumed;
output.position(outputPos + written);
bytesWritten += written;
outputConsumed = 0;
throw e;
}
int read = (int) (result & 0x7fff_ffffL);
int written = (int) (result >>> 31 & 0x7fff_ffffL);
if ((result >>> 62 & 1) != 0) {
finished = true;
}
if ((result >>> 63 & 1) != 0) {
needDict = true;
}
if (input != null) {
input.position(inputPos + read);
} else {
this.inputPos = inputPos + read;
}
// Note: this method call also serves to keep the byteBuffer ref alive
output.position(outputPos + written);
bytesWritten += written;
bytesRead += read;
return written;
}
}
/**
@ -368,10 +691,10 @@ public class Inflater {
synchronized (zsRef) {
ensureOpen();
reset(zsRef.address());
buf = defaultBuf;
input = ZipUtils.defaultBuf;
inputArray = null;
finished = false;
needDict = false;
off = len = 0;
bytesRead = bytesWritten = 0;
}
}
@ -386,7 +709,8 @@ public class Inflater {
public void end() {
synchronized (zsRef) {
zsRef.clean();
buf = null;
input = ZipUtils.defaultBuf;
inputArray = null;
}
}
@ -416,18 +740,23 @@ public class Inflater {
throw new NullPointerException("Inflater has been closed");
}
boolean ended() {
synchronized (zsRef) {
return zsRef.address() == 0;
}
}
private static native void initIDs();
private static native long init(boolean nowrap);
private static native void setDictionary(long addr, byte[] b, int off,
int len);
private native int inflateBytes(long addr, byte[] b, int off, int len)
throws DataFormatException;
private static native void setDictionaryBuffer(long addr, long bufAddress, int len);
private native long inflateBytesBytes(long addr,
byte[] inputArray, int inputOff, int inputLen,
byte[] outputArray, int outputOff, int outputLen) throws DataFormatException;
private native long inflateBytesBuffer(long addr,
byte[] inputArray, int inputOff, int inputLen,
long outputAddress, int outputLen) throws DataFormatException;
private native long inflateBufferBytes(long addr,
long inputAddress, int inputLen,
byte[] outputArray, int outputOff, int outputLen) throws DataFormatException;
private native long inflateBufferBuffer(long addr,
long inputAddress, int inputLen,
long outputAddress, int outputLen) throws DataFormatException;
private static native int getAdler(long addr);
private static native void reset(long addr);
private static native void end(long addr);
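A compress-and-restore round trip (again illustrative, not part of the patch) exercising the ByteBuffer overloads on both Deflater and Inflater; the 128-byte capacities are simply large enough for this tiny input:

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.zip.DataFormatException;
import java.util.zip.Deflater;
import java.util.zip.Inflater;

public class ZipBufferRoundTrip {
    public static void main(String[] args) throws DataFormatException {
        byte[] original = "hello buffer, hello buffer, hello buffer".getBytes(StandardCharsets.UTF_8);

        Deflater deflater = new Deflater();
        deflater.setInput(ByteBuffer.wrap(original));
        deflater.finish();
        ByteBuffer compressed = ByteBuffer.allocate(128);
        deflater.deflate(compressed);   // advances the output buffer's position by the bytes written
        deflater.end();

        compressed.flip();              // switch the compressed buffer from writing to reading
        Inflater inflater = new Inflater();
        inflater.setInput(compressed);  // reads from position to limit, advancing position as it goes
        ByteBuffer restored = ByteBuffer.allocate(128);
        inflater.inflate(restored);
        inflater.end();

        restored.flip();
        byte[] out = new byte[restored.remaining()];
        restored.get(out);
        System.out.println(new String(out, StandardCharsets.UTF_8));
    }
}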

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,6 +25,8 @@
package java.util.zip;
import java.nio.Buffer;
import java.nio.ByteBuffer;
import java.nio.file.attribute.FileTime;
import java.security.AccessController;
import java.security.PrivilegedAction;
@ -37,6 +39,9 @@ import java.util.concurrent.TimeUnit;
import static java.util.zip.ZipConstants.ENDHDR;
import jdk.internal.misc.Unsafe;
import sun.nio.ch.DirectBuffer;
class ZipUtils {
// used to adjust values between Windows and java epoch
@ -45,6 +50,9 @@ class ZipUtils {
// used to indicate the corresponding windows time is not available
public static final long WINDOWS_TIME_NOT_AVAILABLE = Long.MIN_VALUE;
// static final ByteBuffer defaultBuf = ByteBuffer.allocateDirect(0);
static final ByteBuffer defaultBuf = ByteBuffer.allocate(0);
/**
* Converts Windows time (in microseconds, UTC/GMT) time to FileTime.
*/
@ -281,4 +289,17 @@ class ZipUtils {
AccessController.doPrivileged(pa);
}
}
private static final Unsafe unsafe = Unsafe.getUnsafe();
private static final long byteBufferArrayOffset = unsafe.objectFieldOffset(ByteBuffer.class, "hb");
private static final long byteBufferOffsetOffset = unsafe.objectFieldOffset(ByteBuffer.class, "offset");
static byte[] getBufferArray(ByteBuffer byteBuffer) {
return (byte[]) unsafe.getObject(byteBuffer, byteBufferArrayOffset);
}
static int getBufferOffset(ByteBuffer byteBuffer) {
return unsafe.getInt(byteBuffer, byteBufferOffsetOffset);
}
}

View File

@ -93,7 +93,7 @@ WinMain(HINSTANCE inst, HINSTANCE previnst, LPSTR cmdline, int cmdshow)
__initenv = _environ;
#else /* JAVAW */
JNIEXPORT int JNICALL
JNIEXPORT int
main(int argc, char **argv)
{
int margc;

Some files were not shown because too many files have changed in this diff.