J. Duke 2017-07-05 22:13:53 +02:00
commit 42f2d62264
618 changed files with 11079 additions and 4000 deletions


@ -378,3 +378,4 @@ a24702d4d5ab0015a5c553ed57f66fce7d85155e jdk-9+132
be1218f792a450dfb5d4b1f82616b9d95a6a732e jdk-9+133
065724348690eda41fc69112278d8da6dcde548c jdk-9+134
82b94cb5f342319d2cda77f9fa59703ad7fde576 jdk-9+135
3ec350f5f32af249b59620d7e37b54bdcd77b233 jdk-9+136


@ -378,3 +378,4 @@ f7e1d5337c2e550fe553df7a3886bbed80292ecd jdk-9+131
2021bfedf1c478a4808a7711a6090682a12f4c0e jdk-9+133
1a497f5ca0cfd88115cc7daa8af8a62b8741caf2 jdk-9+134
094d0db606db976045f594dba47d4593b715cc81 jdk-9+135
aa053a3faf266c12b4fd5272da431a3e08e4a3e3 jdk-9+136


@ -538,3 +538,4 @@ e96b34b76d863ed1fa04e0eeb3f297ac17b490fd jdk-9+129
a25e0fb6033245ab075136e744d362ce765464cd jdk-9+133
b8b694c6b4d2ab0939aed7adaf0eec1ac321a085 jdk-9+134
3b1c4562953db47e36b237a500f368d5c9746d47 jdk-9+135
a20da289f646ee44440695b81abc0548330e4ca7 jdk-9+136


@ -67,6 +67,7 @@ JVM_FindSignal
JVM_FreeMemory
JVM_GC
JVM_GetAllThreads
JVM_GetAndClearReferencePendingList
JVM_GetArrayElement
JVM_GetArrayLength
JVM_GetCallerClass
@ -130,6 +131,7 @@ JVM_GetSystemPackages
JVM_GetTemporaryDirectory
JVM_GetVmArguments
JVM_Halt
JVM_HasReferencePendingList
JVM_HoldsLock
JVM_IHashCode
JVM_InitProperties
@ -179,6 +181,7 @@ JVM_SuspendThread
JVM_ToStackTraceElement
JVM_TotalMemory
JVM_UnloadLibrary
JVM_WaitForReferencePendingList
JVM_Yield
# Module related APIs
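The three new JVM_*ReferencePendingList exports above support moving java.lang.ref pending-list handling out of a VM-internal lock and into Java code. A hedged sketch of how a reference-handler loop might drive them; the native names mirror the exported symbols, but the surrounding class and loop are illustrative, not the JDK source:

final class PendingListSketch {
    // Native names match the exported symbols above; registration is up to the VM.
    private static native Object getAndClearReferencePendingList();
    private static native boolean hasReferencePendingList();
    private static native void waitForReferencePendingList();

    static void referenceHandlerLoop() {
        for (;;) {
            waitForReferencePendingList();                      // block until GC queues references
            Object pending = getAndClearReferencePendingList(); // head of the discovered chain
            // ... walk the chain and enqueue each Reference ...
        }
    }
}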


@ -65,10 +65,10 @@ define_pd_global(intx, InlineSmallCode, 1000);
#ifdef AMD64
// Very large C++ stack frames using solaris-amd64 optimized builds
// due to lack of optimization caused by C++ compiler bugs
#define DEFAULT_STACK_SHADOW_PAGES (NOT_WIN64(20) WIN64_ONLY(6) DEBUG_ONLY(+2))
#define DEFAULT_STACK_SHADOW_PAGES (NOT_WIN64(20) WIN64_ONLY(7) DEBUG_ONLY(+2))
// For those clients that do not use write socket, we allow
// the min range value to be below that of the default
#define MIN_STACK_SHADOW_PAGES (NOT_WIN64(10) WIN64_ONLY(6) DEBUG_ONLY(+2))
#define MIN_STACK_SHADOW_PAGES (NOT_WIN64(10) WIN64_ONLY(7) DEBUG_ONLY(+2))
#else
#define DEFAULT_STACK_SHADOW_PAGES (4 DEBUG_ONLY(+5))
#define MIN_STACK_SHADOW_PAGES DEFAULT_STACK_SHADOW_PAGES
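For scale, assuming the common 4 KiB page size: the Win64 shadow zone grows from 6 pages (24 KiB) to 7 pages (28 KiB), and in debug builds, where DEBUG_ONLY adds two pages, from 8 pages (32 KiB) to 9 pages (36 KiB).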


@ -24,6 +24,7 @@
package sun.jvm.hotspot.gc.g1;
import java.io.PrintStream;
import java.util.Iterator;
import java.util.Observable;
import java.util.Observer;
@ -125,6 +126,15 @@ public class G1CollectedHeap extends CollectedHeap {
return CollectedHeapName.G1_COLLECTED_HEAP;
}
@Override
public void printOn(PrintStream tty) {
MemRegion mr = reservedRegion();
tty.print("garbage-first heap");
tty.print(" [" + mr.start() + ", " + mr.end() + "]");
tty.println(" region size " + (HeapRegion.grainBytes() / 1024) + "K");
}
public G1CollectedHeap(Address addr) {
super(addr);
}
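With this addition the SA heap summary prints one line of the following shape (addresses illustrative; the region size comes from HeapRegion.grainBytes()):

garbage-first heap [0x00000000c0000000, 0x0000000100000000] region size 1024K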


@ -36,6 +36,7 @@ import sun.jvm.hotspot.utilities.*;
public class MethodData extends Metadata implements MethodDataInterface<Klass,Method> {
static int TypeProfileWidth = 2;
static int BciProfileWidth = 2;
static int MethodProfileWidth = 0;
static int CompileThreshold;
static int Reason_many; // indicates presence of several reasons
@ -142,6 +143,8 @@ public class MethodData extends Metadata implements MethodDataInterface<Klass,Me
TypeProfileWidth = (int)flag.getIntx();
} else if (flag.getName().equals("BciProfileWidth")) {
BciProfileWidth = (int)flag.getIntx();
} else if (flag.getName().equals("MethodProfileWidth")) {
MethodProfileWidth = (int)flag.getIntx();
} else if (flag.getName().equals("CompileThreshold")) {
CompileThreshold = (int)flag.getIntx();
}
@ -154,7 +157,7 @@ public class MethodData extends Metadata implements MethodDataInterface<Klass,Me
parametersTypeDataDi = new CIntField(type.getCIntegerField("_parameters_type_data_di"), 0);
sizeofMethodDataOopDesc = (int)type.getSize();;
sizeofMethodDataOopDesc = (int)type.getSize();
Reason_many = db.lookupIntConstant("Deoptimization::Reason_many").intValue();
Reason_none = db.lookupIntConstant("Deoptimization::Reason_none").intValue();
@ -257,7 +260,7 @@ public class MethodData extends Metadata implements MethodDataInterface<Klass,Me
ParametersTypeData<Klass,Method> parametersTypeData() {
int di = (int)parametersTypeDataDi.getValue(getAddress());
if (di == -1) {
if (di == -1 || di == -2) {
return null;
}
DataLayout dataLayout = new DataLayout(this, di + (int)data.getOffset());


@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -38,9 +38,21 @@ import sun.jvm.hotspot.utilities.*;
// that the check is reached, and a series of (Klass, count) pairs
// which are used to store a type profile for the receiver of the check.
public class ReceiverTypeData<K,M> extends CounterData {
static final int receiver0Offset = counterCellCount;
static final int count0Offset = receiver0Offset + 1;
static final int receiverTypeRowCellCount = (count0Offset + 1) - receiver0Offset;
static final int INCLUDE_JVMCI;
static final int nonProfiledCountOffset = counterCellCount;
static final int receiver0Offset;
static final int count0Offset;
static final int receiverTypeRowCellCount;
static {
INCLUDE_JVMCI = VM.getVM().getTypeDataBase().lookupIntConstant("INCLUDE_JVMCI");
if (INCLUDE_JVMCI == 1) {
receiver0Offset = nonProfiledCountOffset + 1;
} else {
receiver0Offset = counterCellCount;
}
count0Offset = receiver0Offset + 1;
receiverTypeRowCellCount = (count0Offset + 1) - receiver0Offset;
}
final MethodDataInterface<K,M> methodData;
public ReceiverTypeData(MethodDataInterface<K,M> methodData, DataLayout layout) {
@ -53,7 +65,11 @@ public class ReceiverTypeData<K,M> extends CounterData {
boolean isReceivertypedata() { return true; }
static int staticCellCount() {
return counterCellCount + MethodData.TypeProfileWidth * receiverTypeRowCellCount;
int cellCount = counterCellCount + MethodData.TypeProfileWidth * receiverTypeRowCellCount;
if (INCLUDE_JVMCI == 1) {
cellCount += 1;
}
return cellCount;
}
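For concreteness: a receiver row is a (receiver, count) pair, so receiverTypeRowCellCount is 2, and with the default TypeProfileWidth of 2 the base record spans counterCellCount + 4 cells. JVMCI builds prepend one non-profiled-count cell, which is why receiver0Offset shifts up by one there.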
public int cellCount() {


@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -44,7 +44,11 @@ public class VirtualCallData<K,M> extends ReceiverTypeData<K,M> {
static int staticCellCount() {
// At this point we could add more profile state, e.g., for arguments.
// But for now it's the same size as the base record type.
return ReceiverTypeData.staticCellCount();
int cellCount = ReceiverTypeData.staticCellCount();
if (INCLUDE_JVMCI == 1) {
cellCount += MethodData.MethodProfileWidth * receiverTypeRowCellCount;
}
return cellCount;
}
public int cellCount() {


@ -129,8 +129,6 @@ public class Threads {
virtualConstructor.addMapping("CompilerThread", CompilerThread.class);
virtualConstructor.addMapping("CodeCacheSweeperThread", CodeCacheSweeperThread.class);
}
// for now, use JavaThread itself. fix it later with appropriate class if needed
virtualConstructor.addMapping("ReferencePendingListLockerThread", JavaThread.class);
virtualConstructor.addMapping("JvmtiAgentThread", JvmtiAgentThread.class);
virtualConstructor.addMapping("ServiceThread", ServiceThread.class);
}
@ -172,7 +170,7 @@ public class Threads {
return thread;
} catch (Exception e) {
throw new RuntimeException("Unable to deduce type of thread from address " + threadAddr +
" (expected type JavaThread, CompilerThread, ServiceThread, JvmtiAgentThread, ReferencePendingListLockerThread, or CodeCacheSweeperThread)", e);
" (expected type JavaThread, CompilerThread, ServiceThread, JvmtiAgentThread or CodeCacheSweeperThread)", e);
}
}


@ -837,7 +837,6 @@ vmType2Class["InterpreterCodelet"] = sapkg.interpreter.InterpreterCodelet;
vmType2Class["JavaThread"] = sapkg.runtime.JavaThread;
vmType2Class["CompilerThread"] = sapkg.runtime.CompilerThread;
vmType2Class["CodeCacheSweeperThread"] = sapkg.runtime.CodeCacheSweeperThread;
vmType2Class["ReferencePendingListLockerThread"] = sapkg.runtime.JavaThread;
vmType2Class["DebuggerThread"] = sapkg.runtime.DebuggerThread;
// gc


@ -244,16 +244,20 @@ final class CompilerToVM {
native void resolveInvokeDynamicInPool(HotSpotConstantPool constantPool, int cpi);
/**
* Ensures that the type referenced by the entry for a
* If {@code cpi} denotes an entry representing a
* <a href="https://docs.oracle.com/javase/specs/jvms/se8/html/jvms-2.html#jvms-2.9">signature
* polymorphic</a> method at index {@code cpi} in {@code constantPool} is loaded and
* initialized.
*
* The behavior of this method is undefined if {@code cpi} does not denote an entry representing
* a signature polymorphic method.
* polymorphic</a> method, this method ensures that the type referenced by the entry is loaded
* and initialized. If {@code cpi} does not denote a signature polymorphic method, this method
* does nothing.
*/
native void resolveInvokeHandleInPool(HotSpotConstantPool constantPool, int cpi);
/**
* Gets the list of type names (in the format of {@link JavaType#getName()}) denoting the
* classes that define signature polymorphic methods.
*/
native String[] getSignaturePolymorphicHolders();
/**
* Gets the resolved type denoted by the entry at index {@code cpi} in {@code constantPool}.
*
@ -348,6 +352,7 @@ final class CompilerToVM {
* [String name, Long value, ...] vmConstants,
* [String name, Long value, ...] vmAddresses,
* VMFlag[] vmFlags
* VMIntrinsicMethod[] vmIntrinsics
* ]
* </pre>
*
@ -610,4 +615,5 @@ final class CompilerToVM {
* @return the number of bytes required for deoptimization of this frame state
*/
native int interpreterFrameSize(BytecodeFrame frame);
}


@ -282,7 +282,7 @@ final class HotSpotConstantPool implements ConstantPool, MetaspaceWrapperObject
* @return constant pool tag
*/
private JVM_CONSTANT getTagAt(int index) {
assertBounds(index);
assert checkBounds(index);
HotSpotVMConfig config = config();
final long metaspaceConstantPoolTags = UNSAFE.getAddress(getMetaspaceConstantPool() + config.constantPoolTagsOffset);
final int tag = UNSAFE.getByteVolatile(null, metaspaceConstantPoolTags + config.arrayU1DataOffset + index);
@ -299,7 +299,7 @@ final class HotSpotConstantPool implements ConstantPool, MetaspaceWrapperObject
* @return constant pool entry
*/
private long getEntryAt(int index) {
assertBounds(index);
assert checkBounds(index);
int offset = index * runtime().getHostJVMCIBackend().getTarget().wordSize;
return UNSAFE.getAddress(getMetaspaceConstantPool() + config().constantPoolSize + offset);
}
@ -311,7 +311,7 @@ final class HotSpotConstantPool implements ConstantPool, MetaspaceWrapperObject
* @return integer constant pool entry at index
*/
private int getIntAt(int index) {
assertTag(index, JVM_CONSTANT.Integer);
assert checkTag(index, JVM_CONSTANT.Integer);
int offset = index * runtime().getHostJVMCIBackend().getTarget().wordSize;
return UNSAFE.getInt(getMetaspaceConstantPool() + config().constantPoolSize + offset);
}
@ -323,7 +323,7 @@ final class HotSpotConstantPool implements ConstantPool, MetaspaceWrapperObject
* @return long constant pool entry
*/
private long getLongAt(int index) {
assertTag(index, JVM_CONSTANT.Long);
assert checkTag(index, JVM_CONSTANT.Long);
int offset = index * runtime().getHostJVMCIBackend().getTarget().wordSize;
return UNSAFE.getLong(getMetaspaceConstantPool() + config().constantPoolSize + offset);
}
@ -335,7 +335,7 @@ final class HotSpotConstantPool implements ConstantPool, MetaspaceWrapperObject
* @return float constant pool entry
*/
private float getFloatAt(int index) {
assertTag(index, JVM_CONSTANT.Float);
assert checkTag(index, JVM_CONSTANT.Float);
int offset = index * runtime().getHostJVMCIBackend().getTarget().wordSize;
return UNSAFE.getFloat(getMetaspaceConstantPool() + config().constantPoolSize + offset);
}
@ -347,7 +347,7 @@ final class HotSpotConstantPool implements ConstantPool, MetaspaceWrapperObject
* @return float constant pool entry
*/
private double getDoubleAt(int index) {
assertTag(index, JVM_CONSTANT.Double);
assert checkTag(index, JVM_CONSTANT.Double);
int offset = index * runtime().getHostJVMCIBackend().getTarget().wordSize;
return UNSAFE.getDouble(getMetaspaceConstantPool() + config().constantPoolSize + offset);
}
@ -359,7 +359,7 @@ final class HotSpotConstantPool implements ConstantPool, MetaspaceWrapperObject
* @return {@code JVM_CONSTANT_NameAndType} constant pool entry
*/
private int getNameAndTypeAt(int index) {
assertTag(index, JVM_CONSTANT.NameAndType);
assert checkTag(index, JVM_CONSTANT.NameAndType);
int offset = index * runtime().getHostJVMCIBackend().getTarget().wordSize;
return UNSAFE.getInt(getMetaspaceConstantPool() + config().constantPoolSize + offset);
}
@ -441,7 +441,7 @@ final class HotSpotConstantPool implements ConstantPool, MetaspaceWrapperObject
* @return klass reference index
*/
private int getUncachedKlassRefIndexAt(int index) {
assertTagIsFieldOrMethod(index);
assert checkTagIsFieldOrMethod(index);
int offset = index * runtime().getHostJVMCIBackend().getTarget().wordSize;
final int refIndex = UNSAFE.getInt(getMetaspaceConstantPool() + config().constantPoolSize + offset);
// klass ref index is in the low 16-bits.
@ -449,23 +449,27 @@ final class HotSpotConstantPool implements ConstantPool, MetaspaceWrapperObject
}
/**
* Asserts that the constant pool index {@code index} is in the bounds of the constant pool.
* Checks that the constant pool index {@code index} is in the bounds of the constant pool.
*
* @param index constant pool index
* @throws AssertionError if the check fails
*/
private void assertBounds(int index) {
private boolean checkBounds(int index) {
assert 0 <= index && index < length() : "index " + index + " not between 0 and " + length();
return true;
}
/**
* Asserts that the constant pool tag at index {@code index} is equal to {@code tag}.
* Checks that the constant pool tag at index {@code index} is equal to {@code tag}.
*
* @param index constant pool index
* @param tag expected tag
* @throws AssertionError if the check fails
*/
private void assertTag(int index, JVM_CONSTANT tag) {
private boolean checkTag(int index, JVM_CONSTANT tag) {
final JVM_CONSTANT tagAt = getTagAt(index);
assert tagAt == tag : "constant pool tag at index " + index + " is " + tagAt + " but expected " + tag;
return true;
}
/**
@ -473,10 +477,12 @@ final class HotSpotConstantPool implements ConstantPool, MetaspaceWrapperObject
* or a {@link JVM_CONSTANT#MethodRef}, or a {@link JVM_CONSTANT#InterfaceMethodref}.
*
* @param index constant pool index
* @throws AssertionError if the check fails
*/
private void assertTagIsFieldOrMethod(int index) {
private boolean checkTagIsFieldOrMethod(int index) {
final JVM_CONSTANT tagAt = getTagAt(index);
assert tagAt == JVM_CONSTANT.Fieldref || tagAt == JVM_CONSTANT.MethodRef || tagAt == JVM_CONSTANT.InterfaceMethodref : tagAt;
return true;
}
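The assertXxx-to-checkXxx renames above apply a standard Java idiom: a void helper containing only asserts is still invoked (and its arguments still evaluated) when assertions are disabled, whereas a boolean helper called as assert checkXxx(...) vanishes entirely under -da. A minimal standalone sketch of the pattern:

final class BoundsIdiom {
    private static boolean checkBounds(int index, int length) {
        assert 0 <= index && index < length : "index " + index + " not between 0 and " + length;
        return true; // returning true enables the assert-call pattern below
    }

    static int get(int[] data, int index) {
        assert checkBounds(index, data.length); // entire call elided when assertions are off
        return data[index];
    }
}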
@Override
@ -523,7 +529,7 @@ final class HotSpotConstantPool implements ConstantPool, MetaspaceWrapperObject
@Override
public String lookupUtf8(int cpi) {
assertTag(cpi, JVM_CONSTANT.Utf8);
assert checkTag(cpi, JVM_CONSTANT.Utf8);
return compilerToVM().getSymbol(getEntryAt(cpi));
}
@ -690,11 +696,10 @@ final class HotSpotConstantPool implements ConstantPool, MetaspaceWrapperObject
UNSAFE.ensureClassInitialized(klass);
}
if (tag == JVM_CONSTANT.MethodRef) {
if (Bytecodes.isInvokeHandleAlias(opcode)) {
if (Bytecodes.isInvokeHandleAlias(opcode) && isSignaturePolymorphicHolder(type)) {
final int methodRefCacheIndex = rawIndexToConstantPoolIndex(cpi, opcode);
if (isInvokeHandle(methodRefCacheIndex, type)) {
compilerToVM().resolveInvokeHandleInPool(this, methodRefCacheIndex);
}
assert checkTag(compilerToVM().constantPoolRemapInstructionOperandFromCache(this, methodRefCacheIndex), JVM_CONSTANT.MethodRef);
compilerToVM().resolveInvokeHandleInPool(this, methodRefCacheIndex);
}
}
@ -708,11 +713,26 @@ final class HotSpotConstantPool implements ConstantPool, MetaspaceWrapperObject
// nothing
break;
}
}
private boolean isInvokeHandle(int methodRefCacheIndex, HotSpotResolvedObjectTypeImpl klass) {
assertTag(compilerToVM().constantPoolRemapInstructionOperandFromCache(this, methodRefCacheIndex), JVM_CONSTANT.MethodRef);
return ResolvedJavaMethod.isSignaturePolymorphic(klass, getNameOf(methodRefCacheIndex), runtime().getHostJVMCIBackend().getMetaAccess());
// Lazily initialized.
private static String[] signaturePolymorphicHolders;
/**
* Determines if {@code type} contains signature polymorphic methods.
*/
private static boolean isSignaturePolymorphicHolder(final HotSpotResolvedObjectTypeImpl type) {
String name = type.getName();
if (signaturePolymorphicHolders == null) {
signaturePolymorphicHolders = compilerToVM().getSignaturePolymorphicHolders();
}
for (String holder : signaturePolymorphicHolders) {
if (name.equals(holder)) {
return true;
}
}
return false;
}
@Override


@ -497,6 +497,9 @@ public final class HotSpotJVMCIRuntime implements HotSpotJVMCIRuntimeProvider {
for (Map.Entry<String, Long> e : typeSizes.entrySet()) {
printConfigLine(vm, "[vmconfig:type size] %s = %d%n", e.getKey(), e.getValue());
}
for (VMIntrinsicMethod e : store.getIntrinsics()) {
printConfigLine(vm, "[vmconfig:intrinsic] %d = %s.%s %s%n", e.id, e.declaringClass, e.name, e.descriptor);
}
}
public OutputStream getLogStream() {


@ -111,6 +111,15 @@ public interface HotSpotResolvedJavaMethod extends ResolvedJavaMethod {
int intrinsicId();
/**
* Determines if this method denotes itself as a candidate for intrinsification. As of JDK 9,
* this is denoted by the {@code HotSpotIntrinsicCandidate} annotation. In earlier JDK versions,
* this method returns true.
*
* @see <a href="https://bugs.openjdk.java.net/browse/JDK-8076112">JDK-8076112</a>
*/
boolean isIntrinsicCandidate();
/**
* Allocates a compile id for this method by asking the VM for one.
*


@ -693,6 +693,10 @@ final class HotSpotResolvedJavaMethodImpl extends HotSpotMethod implements HotSp
return UNSAFE.getChar(metaspaceMethod + config.methodIntrinsicIdOffset);
}
public boolean isIntrinsicCandidate() {
return (getFlags() & config().methodFlagsIntrinsicCandidate) != 0;
}
@Override
public JavaConstant invoke(JavaConstant receiver, JavaConstant[] arguments) {
assert !isConstructor();


@ -144,6 +144,7 @@ class HotSpotVMConfig extends HotSpotVMConfigAccess {
final int methodFlagsCallerSensitive = getConstant("Method::_caller_sensitive", Integer.class);
final int methodFlagsForceInline = getConstant("Method::_force_inline", Integer.class);
final int methodFlagsIntrinsicCandidate = getConstant("Method::_intrinsic_candidate", Integer.class);
final int methodFlagsDontInline = getConstant("Method::_dont_inline", Integer.class);
final int methodFlagsReservedStackAccess = getConstant("Method::_reserved_stack_access", Integer.class);
final int nonvirtualVtableIndex = getConstant("Method::nonvirtual_vtable_index", Integer.class);


@ -29,6 +29,13 @@ import jdk.vm.ci.common.JVMCIError;
*/
public class HotSpotVMConfigAccess {
/**
* Gets the available configuration data.
*/
public HotSpotVMConfigStore getStore() {
return store;
}
/**
* Gets the address of a C++ symbol.
*


@ -24,8 +24,10 @@ package jdk.vm.ci.hotspot;
import static jdk.vm.ci.common.InitTimer.timer;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import jdk.vm.ci.common.InitTimer;
@ -80,11 +82,19 @@ public final class HotSpotVMConfigStore {
return Collections.unmodifiableMap(vmFields);
}
/**
* Gets the VM intrinsic descriptions exposed by this object.
*/
public List<VMIntrinsicMethod> getIntrinsics() {
return Collections.unmodifiableList(vmIntrinsics);
}
final HashMap<String, VMField> vmFields;
final HashMap<String, Long> vmTypeSizes;
final HashMap<String, Long> vmConstants;
final HashMap<String, Long> vmAddresses;
final HashMap<String, VMFlag> vmFlags;
final List<VMIntrinsicMethod> vmIntrinsics;
/**
* Reads the database of VM info. The return value encodes the info in a nested object array
@ -97,6 +107,7 @@ public final class HotSpotVMConfigStore {
* [String name, Long value, ...] vmConstants,
* [String name, Long value, ...] vmAddresses,
* VMFlag[] vmFlags
* VMIntrinsicMethod[] vmIntrinsics
* ]
* </pre>
*/
@ -106,7 +117,7 @@ public final class HotSpotVMConfigStore {
try (InitTimer t = timer("CompilerToVm readConfiguration")) {
data = compilerToVm.readConfiguration();
}
assert data.length == 5 : data.length;
assert data.length == 6 : data.length;
// @formatter:off
VMField[] vmFieldsInfo = (VMField[]) data[0];
@ -115,11 +126,12 @@ public final class HotSpotVMConfigStore {
Object[] vmAddressesInfo = (Object[]) data[3];
VMFlag[] vmFlagsInfo = (VMFlag[]) data[4];
vmFields = new HashMap<>(vmFieldsInfo.length);
vmTypeSizes = new HashMap<>(vmTypesSizesInfo.length);
vmConstants = new HashMap<>(vmConstantsInfo.length);
vmAddresses = new HashMap<>(vmAddressesInfo.length);
vmFlags = new HashMap<>(vmFlagsInfo.length);
vmFields = new HashMap<>(vmFieldsInfo.length);
vmTypeSizes = new HashMap<>(vmTypesSizesInfo.length);
vmConstants = new HashMap<>(vmConstantsInfo.length);
vmAddresses = new HashMap<>(vmAddressesInfo.length);
vmFlags = new HashMap<>(vmFlagsInfo.length);
vmIntrinsics = Arrays.asList((VMIntrinsicMethod[]) data[5]);
// @formatter:on
try (InitTimer t = timer("HotSpotVMConfigStore<init> fill maps")) {


@ -0,0 +1,87 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package jdk.vm.ci.hotspot;
import jdk.vm.ci.meta.Signature;
/**
* Describes a method for which the VM has an intrinsic implementation.
*/
public final class VMIntrinsicMethod {
/**
* The name of the class declaring the intrinsified method. The name is in
* <a href="https://docs.oracle.com/javase/specs/jvms/se8/html/jvms-4.html#jvms-4.2.1">class
* file format</a> (e.g., {@code "java/lang/Thread"} instead of {@code "java.lang.Thread"}).
*/
public final String declaringClass;
/**
* The name of the intrinsified method. This is not guaranteed to be a legal method name (e.g.,
* there is a HotSpot intrinsic with the name {@code "<compiledLambdaForm>"}).
*/
public final String name;
/**
* The {@link Signature#toMethodDescriptor() descriptor} of the intrinsified method. This is not
* guaranteed to be a legal method descriptor (e.g., intrinsics for signature polymorphic
* methods have a descriptor of {@code "*"}).
*/
public final String descriptor;
/**
* The unique VM identifier for the intrinsic.
*/
public final int id;
VMIntrinsicMethod(String declaringClass, String name, String descriptor, int id) {
this.declaringClass = declaringClass;
this.name = name;
this.descriptor = descriptor;
this.id = id;
}
@Override
public boolean equals(Object obj) {
if (obj instanceof VMIntrinsicMethod) {
VMIntrinsicMethod that = (VMIntrinsicMethod) obj;
if (that.id == this.id) {
assert that.name.equals(this.name) &&
that.declaringClass.equals(this.declaringClass) &&
that.descriptor.equals(this.descriptor);
return true;
}
}
return false;
}
@Override
public int hashCode() {
return id;
}
@Override
public String toString() {
return String.format("IntrinsicMethod[declaringClass=%s, name=%s, descriptor=%s, id=%d]", declaringClass, name, descriptor, id);
}
}
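A hedged usage sketch for the new intrinsic table; the store value is assumed to come from a configured JVMCI runtime (compare the printConfigLine loop in HotSpotJVMCIRuntime above):

// Assumes 'store' is the HotSpotVMConfigStore of a live JVMCI runtime.
for (VMIntrinsicMethod m : store.getIntrinsics()) {
    // e.g. IntrinsicMethod[declaringClass=java/lang/Thread, name=currentThread, descriptor=()Ljava/lang/Thread;, id=...]
    System.out.println(m);
}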


@ -23,7 +23,6 @@
package jdk.vm.ci.meta;
import java.lang.annotation.Annotation;
import java.lang.invoke.MethodHandle;
import java.lang.reflect.AnnotatedElement;
import java.lang.reflect.Array;
import java.lang.reflect.Method;
@ -330,22 +329,4 @@ public interface ResolvedJavaMethod extends JavaMethod, InvokeTarget, ModifiersP
}
SpeculationLog getSpeculationLog();
/**
* Determines if the method identified by its holder and name is a
* <a href="https://docs.oracle.com/javase/specs/jvms/se8/html/jvms-2.html#jvms-2.9">signature
* polymorphic</a> method.
*/
static boolean isSignaturePolymorphic(JavaType holder, String name, MetaAccessProvider metaAccess) {
if (!holder.getName().equals("Ljava/lang/invoke/MethodHandle;")) {
return false;
}
ResolvedJavaType methodHandleType = metaAccess.lookupJavaType(MethodHandle.class);
Signature signature = metaAccess.parseMethodDescriptor("([Ljava/lang/Object;)Ljava/lang/Object;");
ResolvedJavaMethod method = methodHandleType.findMethod(name, signature);
if (method == null) {
return false;
}
return method.isNative() && method.isVarArgs();
}
}


@ -2875,7 +2875,7 @@ void os::Linux::rebuild_cpu_to_node_map() {
// in the library.
const size_t BitsPerCLong = sizeof(long) * CHAR_BIT;
size_t cpu_num = os::active_processor_count();
size_t cpu_num = processor_count();
size_t cpu_map_size = NCPUS / BitsPerCLong;
size_t cpu_map_valid_size =
MIN2((cpu_num + BitsPerCLong - 1) / BitsPerCLong, cpu_map_size);


@ -2504,13 +2504,15 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
// It write enables the page immediately after protecting it
// so just return.
if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
JavaThread* thread = (JavaThread*) t;
PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
address addr = (address) exceptionRecord->ExceptionInformation[1];
if (os::is_memory_serialize_page(thread, addr)) {
// Block current thread until the memory serialize page permission is restored.
os::block_on_serialize_page_trap();
return EXCEPTION_CONTINUE_EXECUTION;
if (t != NULL && t->is_Java_thread()) {
JavaThread* thread = (JavaThread*) t;
PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
address addr = (address) exceptionRecord->ExceptionInformation[1];
if (os::is_memory_serialize_page(thread, addr)) {
// Block current thread until the memory serialize page permission is restored.
os::block_on_serialize_page_trap();
return EXCEPTION_CONTINUE_EXECUTION;
}
}
}
@ -2564,7 +2566,7 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
}
#endif
if (thread->stack_guards_enabled()) {
if (_thread_in_Java) {
if (in_java) {
frame fr;
PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
address addr = (address) exceptionRecord->ExceptionInformation[1];
@ -2576,6 +2578,7 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
// Yellow zone violation. The o/s has unprotected the first yellow
// zone page for us. Note: must call disable_stack_yellow_zone to
// update the enabled status, even if the zone contains only one page.
assert(thread->thread_state() != _thread_in_vm, "Undersized StackShadowPages");
thread->disable_stack_yellow_reserved_zone();
// If not in java code, return and hope for the best.
return in_java
@ -3793,6 +3796,11 @@ void os::win32::initialize_system_info() {
GlobalMemoryStatusEx(&ms);
_physical_memory = ms.ullTotalPhys;
if (FLAG_IS_DEFAULT(MaxRAM)) {
// Adjust MaxRAM according to the maximum virtual address space available.
FLAG_SET_DEFAULT(MaxRAM, MIN2(MaxRAM, (uint64_t) ms.ullTotalVirtual));
}
OSVERSIONINFOEX oi;
oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
GetVersionEx((OSVERSIONINFO*)&oi);


@ -444,7 +444,7 @@ JVM_handle_solaris_signal(int sig, siginfo_t* info, void* ucVoid,
if (thread->thread_state() == _thread_in_vm) {
if (sig == SIGBUS && info->si_code == BUS_OBJERR && thread->doing_unsafe_access()) {
if (sig == SIGBUS && thread->doing_unsafe_access()) {
stub = SharedRuntime::handle_unsafe_access(thread, npc);
}
}


@ -221,6 +221,9 @@ bool Compiler::is_intrinsic_supported(const methodHandle& method) {
case vmIntrinsics::_putCharStringU:
#ifdef TRACE_HAVE_INTRINSICS
case vmIntrinsics::_counterTime:
#if defined(_LP64) || !defined(TRACE_ID_CLASS_SHIFT)
case vmIntrinsics::_getClassId:
#endif
#endif
break;
default:


@ -683,6 +683,7 @@ GraphBuilder::ScopeData::ScopeData(ScopeData* parent)
, _cleanup_block(NULL)
, _cleanup_return_prev(NULL)
, _cleanup_state(NULL)
, _ignore_return(false)
{
if (parent != NULL) {
_max_inline_size = (intx) ((float) NestedInliningSizeRatio * (float) parent->max_inline_size() / 100.0f);
@ -1445,7 +1446,7 @@ void GraphBuilder::call_register_finalizer() {
}
void GraphBuilder::method_return(Value x) {
void GraphBuilder::method_return(Value x, bool ignore_return) {
if (RegisterFinalizersAtInit &&
method()->intrinsic_id() == vmIntrinsics::_Object_init) {
call_register_finalizer();
@ -1518,7 +1519,9 @@ void GraphBuilder::method_return(Value x) {
int invoke_bci = state()->caller_state()->bci();
set_state(state()->caller_state()->copy_for_parsing());
if (x != NULL) {
state()->push(x->type(), x);
if (!ignore_return) {
state()->push(x->type(), x);
}
if (profile_return() && x->type()->is_object_kind()) {
ciMethod* caller = state()->scope()->method();
ciMethodData* md = caller->method_data_or_null();
@ -1563,6 +1566,7 @@ void GraphBuilder::method_return(Value x) {
append(new MemBar(lir_membar_storestore));
}
assert(!ignore_return, "Ignoring return value works only for inlining");
append(new Return(x));
}
@ -1981,7 +1985,7 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
code == Bytecodes::_invokedynamic) {
ciMethod* inline_target = (cha_monomorphic_target != NULL) ? cha_monomorphic_target : target;
// static binding => check if callee is ok
bool success = try_inline(inline_target, (cha_monomorphic_target != NULL) || (exact_target != NULL), code, better_receiver);
bool success = try_inline(inline_target, (cha_monomorphic_target != NULL) || (exact_target != NULL), false, code, better_receiver);
CHECK_BAILOUT();
clear_inline_bailout();
@ -2611,6 +2615,8 @@ BlockEnd* GraphBuilder::iterate_bytecodes_for_block(int bci) {
push_exception = true;
}
bool ignore_return = scope_data()->ignore_return();
while (!bailed_out() && last()->as_BlockEnd() == NULL &&
(code = stream()->next()) != ciBytecodeStream::EOBC() &&
(block_at(s.cur_bci()) == NULL || block_at(s.cur_bci()) == block())) {
@ -2806,12 +2812,12 @@ BlockEnd* GraphBuilder::iterate_bytecodes_for_block(int bci) {
case Bytecodes::_ret : ret(s.get_index()); break;
case Bytecodes::_tableswitch : table_switch(); break;
case Bytecodes::_lookupswitch : lookup_switch(); break;
case Bytecodes::_ireturn : method_return(ipop()); break;
case Bytecodes::_lreturn : method_return(lpop()); break;
case Bytecodes::_freturn : method_return(fpop()); break;
case Bytecodes::_dreturn : method_return(dpop()); break;
case Bytecodes::_areturn : method_return(apop()); break;
case Bytecodes::_return : method_return(NULL ); break;
case Bytecodes::_ireturn : method_return(ipop(), ignore_return); break;
case Bytecodes::_lreturn : method_return(lpop(), ignore_return); break;
case Bytecodes::_freturn : method_return(fpop(), ignore_return); break;
case Bytecodes::_dreturn : method_return(dpop(), ignore_return); break;
case Bytecodes::_areturn : method_return(apop(), ignore_return); break;
case Bytecodes::_return : method_return(NULL , ignore_return); break;
case Bytecodes::_getstatic : // fall through
case Bytecodes::_putstatic : // fall through
case Bytecodes::_getfield : // fall through
@ -3336,7 +3342,7 @@ int GraphBuilder::recursive_inline_level(ciMethod* cur_callee) const {
}
bool GraphBuilder::try_inline(ciMethod* callee, bool holder_known, Bytecodes::Code bc, Value receiver) {
bool GraphBuilder::try_inline(ciMethod* callee, bool holder_known, bool ignore_return, Bytecodes::Code bc, Value receiver) {
const char* msg = NULL;
// clear out any existing inline bailout condition
@ -3351,7 +3357,7 @@ bool GraphBuilder::try_inline(ciMethod* callee, bool holder_known, Bytecodes::Co
// method handle invokes
if (callee->is_method_handle_intrinsic()) {
if (try_method_handle_inline(callee)) {
if (try_method_handle_inline(callee, ignore_return)) {
if (callee->has_reserved_stack_access()) {
compilation()->set_has_reserved_stack_access(true);
}
@ -3363,7 +3369,7 @@ bool GraphBuilder::try_inline(ciMethod* callee, bool holder_known, Bytecodes::Co
// handle intrinsics
if (callee->intrinsic_id() != vmIntrinsics::_none &&
(CheckIntrinsics ? callee->intrinsic_candidate() : true)) {
if (try_inline_intrinsics(callee)) {
if (try_inline_intrinsics(callee, ignore_return)) {
print_inlining(callee, "intrinsic");
if (callee->has_reserved_stack_access()) {
compilation()->set_has_reserved_stack_access(true);
@ -3384,7 +3390,7 @@ bool GraphBuilder::try_inline(ciMethod* callee, bool holder_known, Bytecodes::Co
if (bc == Bytecodes::_illegal) {
bc = code();
}
if (try_inline_full(callee, holder_known, bc, receiver)) {
if (try_inline_full(callee, holder_known, ignore_return, bc, receiver)) {
if (callee->has_reserved_stack_access()) {
compilation()->set_has_reserved_stack_access(true);
}
@ -3415,7 +3421,7 @@ const char* GraphBuilder::should_not_inline(ciMethod* callee) const {
return NULL;
}
void GraphBuilder::build_graph_for_intrinsic(ciMethod* callee) {
void GraphBuilder::build_graph_for_intrinsic(ciMethod* callee, bool ignore_return) {
vmIntrinsics::ID id = callee->intrinsic_id();
assert(id != vmIntrinsics::_none, "must be a VM intrinsic");
@ -3509,14 +3515,16 @@ void GraphBuilder::build_graph_for_intrinsic(ciMethod* callee) {
vmIntrinsics::can_trap(id));
// append instruction & push result
Value value = append_split(result);
if (result_type != voidType) push(result_type, value);
if (result_type != voidType && !ignore_return) {
push(result_type, value);
}
if (callee != method() && profile_return() && result_type->is_object_kind()) {
profile_return_type(result, callee);
}
}
bool GraphBuilder::try_inline_intrinsics(ciMethod* callee) {
bool GraphBuilder::try_inline_intrinsics(ciMethod* callee, bool ignore_return) {
// For calling is_intrinsic_available we need to transition to
// the '_thread_in_vm' state because is_intrinsic_available()
// accesses critical VM-internal data.
@ -3536,7 +3544,7 @@ bool GraphBuilder::try_inline_intrinsics(ciMethod* callee) {
return false;
}
}
build_graph_for_intrinsic(callee);
build_graph_for_intrinsic(callee, ignore_return);
return true;
}
@ -3691,7 +3699,7 @@ void GraphBuilder::fill_sync_handler(Value lock, BlockBegin* sync_handler, bool
}
bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known, Bytecodes::Code bc, Value receiver) {
bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known, bool ignore_return, Bytecodes::Code bc, Value receiver) {
assert(!callee->is_native(), "callee must not be native");
if (CompilationPolicy::policy()->should_not_inline(compilation()->env(), callee)) {
INLINE_BAILOUT("inlining prohibited by policy");
@ -3889,6 +3897,7 @@ bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known, Bytecode
// Clear out bytecode stream
scope_data()->set_stream(NULL);
scope_data()->set_ignore_return(ignore_return);
CompileLog* log = compilation()->log();
if (log != NULL) log->head("parse method='%d'", log->identify(callee));
@ -3958,7 +3967,7 @@ bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known, Bytecode
}
bool GraphBuilder::try_method_handle_inline(ciMethod* callee) {
bool GraphBuilder::try_method_handle_inline(ciMethod* callee, bool ignore_return) {
ValueStack* state_before = copy_state_before();
vmIntrinsics::ID iid = callee->intrinsic_id();
switch (iid) {
@ -3972,7 +3981,8 @@ bool GraphBuilder::try_method_handle_inline(ciMethod* callee) {
// We don't do CHA here so only inline static and statically bindable methods.
if (target->is_static() || target->can_be_statically_bound()) {
Bytecodes::Code bc = target->is_static() ? Bytecodes::_invokestatic : Bytecodes::_invokevirtual;
if (try_inline(target, /*holder_known*/ true, bc)) {
ignore_return = ignore_return || (callee->return_type()->is_void() && !target->return_type()->is_void());
if (try_inline(target, /*holder_known*/ true, ignore_return, bc)) {
return true;
}
} else {
@ -3994,10 +4004,11 @@ bool GraphBuilder::try_method_handle_inline(ciMethod* callee) {
ValueType* type = apop()->type();
if (type->is_constant()) {
ciMethod* target = type->as_ObjectType()->constant_value()->as_member_name()->get_vmtarget();
ignore_return = ignore_return || (callee->return_type()->is_void() && !target->return_type()->is_void());
// If the target is another method handle invoke, try to recursively get
// a better target.
if (target->is_method_handle_intrinsic()) {
if (try_method_handle_inline(target)) {
if (try_method_handle_inline(target, ignore_return)) {
return true;
}
} else {
@ -4032,7 +4043,7 @@ bool GraphBuilder::try_method_handle_inline(ciMethod* callee) {
// We don't do CHA here so only inline static and statically bindable methods.
if (target->is_static() || target->can_be_statically_bound()) {
Bytecodes::Code bc = target->is_static() ? Bytecodes::_invokestatic : Bytecodes::_invokevirtual;
if (try_inline(target, /*holder_known*/ true, bc)) {
if (try_inline(target, /*holder_known*/ true, ignore_return, bc)) {
return true;
}
} else {


@ -100,6 +100,9 @@ class GraphBuilder VALUE_OBJ_CLASS_SPEC {
Instruction* _cleanup_return_prev; // Instruction before return instruction
ValueStack* _cleanup_state; // State of that block (not yet pinned)
// When inlining do not push the result on the stack
bool _ignore_return;
public:
ScopeData(ScopeData* parent);
@ -163,6 +166,9 @@ class GraphBuilder VALUE_OBJ_CLASS_SPEC {
BlockBegin* inline_cleanup_block() const { return _cleanup_block; }
Instruction* inline_cleanup_return_prev() const{ return _cleanup_return_prev; }
ValueStack* inline_cleanup_state() const { return _cleanup_state; }
bool ignore_return() const { return _ignore_return; }
void set_ignore_return(bool ignore_return) { _ignore_return = ignore_return; }
};
// for all GraphBuilders
@ -246,7 +252,7 @@ class GraphBuilder VALUE_OBJ_CLASS_SPEC {
void ret(int local_index);
void table_switch();
void lookup_switch();
void method_return(Value x);
void method_return(Value x, bool ignore_return = false);
void call_register_finalizer();
void access_field(Bytecodes::Code code);
void invoke(Bytecodes::Code code);
@ -340,19 +346,19 @@ class GraphBuilder VALUE_OBJ_CLASS_SPEC {
void inline_sync_entry(Value lock, BlockBegin* sync_handler);
void fill_sync_handler(Value lock, BlockBegin* sync_handler, bool default_handler = false);
void build_graph_for_intrinsic(ciMethod* callee);
void build_graph_for_intrinsic(ciMethod* callee, bool ignore_return);
// inliners
bool try_inline( ciMethod* callee, bool holder_known, Bytecodes::Code bc = Bytecodes::_illegal, Value receiver = NULL);
bool try_inline_intrinsics(ciMethod* callee);
bool try_inline_full( ciMethod* callee, bool holder_known, Bytecodes::Code bc = Bytecodes::_illegal, Value receiver = NULL);
bool try_inline( ciMethod* callee, bool holder_known, bool ignore_return, Bytecodes::Code bc = Bytecodes::_illegal, Value receiver = NULL);
bool try_inline_intrinsics(ciMethod* callee, bool ignore_return = false);
bool try_inline_full( ciMethod* callee, bool holder_known, bool ignore_return, Bytecodes::Code bc = Bytecodes::_illegal, Value receiver = NULL);
bool try_inline_jsr(int jsr_dest_bci);
const char* check_can_parse(ciMethod* callee) const;
const char* should_not_inline(ciMethod* callee) const;
// JSR 292 support
bool try_method_handle_inline(ciMethod* callee);
bool try_method_handle_inline(ciMethod* callee, bool ignore_return);
// helpers
void inline_bailout(const char* msg);
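The ignore_return flag threaded through the declarations above handles method-handle call sites whose adapted type is void while the eventual target returns a value (see the callee->return_type()->is_void() checks in try_method_handle_inline): C1 still inlines the target but drops its result instead of pushing it. A hedged example of Java code with that shape (illustrative, not from the commit):

import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.MethodType;

public class IgnoreReturnShape {
    public static void main(String[] args) throws Throwable {
        MethodHandle length = MethodHandles.lookup()
            .findVirtual(String.class, "length", MethodType.methodType(int.class));
        // Adapt the int-returning target to a void call site; the inlined
        // callee's return value must then be discarded, not pushed.
        MethodHandle dropped = length.asType(MethodType.methodType(void.class, String.class));
        dropped.invokeExact("hello");
    }
}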


@ -3083,6 +3083,37 @@ void LIRGenerator::do_IfOp(IfOp* x) {
__ cmove(lir_cond(x->cond()), t_val.result(), f_val.result(), reg, as_BasicType(x->x()->type()));
}
#ifdef TRACE_HAVE_INTRINSICS
void LIRGenerator::do_ClassIDIntrinsic(Intrinsic* x) {
CodeEmitInfo* info = state_for(x);
CodeEmitInfo* info2 = new CodeEmitInfo(info); // Clone for the second null check
assert(info != NULL, "must have info");
LIRItem arg(x->argument_at(0), this);
arg.load_item();
LIR_Opr klass = new_register(T_METADATA);
__ move(new LIR_Address(arg.result(), java_lang_Class::klass_offset_in_bytes(), T_ADDRESS), klass, info);
LIR_Opr id = new_register(T_LONG);
ByteSize offset = TRACE_KLASS_TRACE_ID_OFFSET;
LIR_Address* trace_id_addr = new LIR_Address(klass, in_bytes(offset), T_LONG);
__ move(trace_id_addr, id);
__ logical_or(id, LIR_OprFact::longConst(0x01l), id);
__ store(id, trace_id_addr);
#ifdef TRACE_ID_META_BITS
__ logical_and(id, LIR_OprFact::longConst(~TRACE_ID_META_BITS), id);
#endif
#ifdef TRACE_ID_CLASS_SHIFT
__ unsigned_shift_right(id, TRACE_ID_CLASS_SHIFT, id);
#endif
__ move(id, rlock_result(x));
}
#endif
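In effect the intrinsic emits LIR equivalent to the following pseudo-Java (a hedged paraphrase; the field and constant names stand in for the TRACE_* macros and are not real APIs):

// long id = clazz.klass.traceId;       // both loads carry implicit null-check info
// clazz.klass.traceId = id | 0x01;     // tag the klass as used by tracing
// id &= ~TRACE_ID_META_BITS;           // strip metadata bits, when defined
// id >>>= TRACE_ID_CLASS_SHIFT;        // recover the class id, when defined
// result = id;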
void LIRGenerator::do_RuntimeCall(address routine, Intrinsic* x) {
assert(x->number_of_arguments() == 0, "wrong type");
// Enforce computation of _reserved_argument_area_size which is required on some platforms.
@ -3108,6 +3139,9 @@ void LIRGenerator::do_Intrinsic(Intrinsic* x) {
}
#ifdef TRACE_HAVE_INTRINSICS
case vmIntrinsics::_getClassId:
do_ClassIDIntrinsic(x);
break;
case vmIntrinsics::_counterTime:
do_RuntimeCall(CAST_FROM_FN_PTR(address, TRACE_TIME_METHOD), x);
break;


@ -438,6 +438,10 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure {
SwitchRangeArray* create_lookup_ranges(LookupSwitch* x);
void do_SwitchRanges(SwitchRangeArray* x, LIR_Opr value, BlockBegin* default_sux);
#ifdef TRACE_HAVE_INTRINSICS
void do_ClassIDIntrinsic(Intrinsic* x);
#endif
void do_RuntimeCall(address routine, Intrinsic* x);
ciKlass* profile_type(ciMethodData* md, int md_first_offset, int md_offset, intptr_t profiled_k,


@ -29,7 +29,6 @@
#include "ci/ciKlass.hpp"
#include "ci/ciUtilities.hpp"
#include "compiler/compileBroker.hpp"
#include "gc/shared/referencePendingListLocker.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
@ -577,9 +576,7 @@ class CompileReplay : public StackObj {
Method* method = parse_method(CHECK);
if (had_error()) return;
/* just copied from Method, to build interpreter data */
if (ReferencePendingListLocker::is_locked_by_self()) {
return;
}
// To be properly initialized, some profiling in the MDO needs the
// method to be rewritten (number of arguments at a call for
// instance)


@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -205,103 +205,3 @@ juint AltHashing::murmur3_32(juint seed, const int* data, int len) {
juint AltHashing::murmur3_32(const int* data, int len) {
return murmur3_32(0, data, len);
}
#ifndef PRODUCT
// Overloaded versions for internal test.
juint AltHashing::murmur3_32(const jbyte* data, int len) {
return murmur3_32(0, data, len);
}
juint AltHashing::murmur3_32(const jchar* data, int len) {
return murmur3_32(0, data, len);
}
// Internal test for alternate hashing. Translated from JDK version
// test/sun/misc/Hashing.java
static const jbyte ONE_BYTE[] = { (jbyte) 0x80};
static const jbyte TWO_BYTE[] = { (jbyte) 0x80, (jbyte) 0x81};
static const jchar ONE_CHAR[] = { (jchar) 0x8180};
static const jbyte THREE_BYTE[] = { (jbyte) 0x80, (jbyte) 0x81, (jbyte) 0x82};
static const jbyte FOUR_BYTE[] = { (jbyte) 0x80, (jbyte) 0x81, (jbyte) 0x82, (jbyte) 0x83};
static const jchar TWO_CHAR[] = { (jchar) 0x8180, (jchar) 0x8382};
static const jint ONE_INT[] = { (jint)0x83828180};
static const jbyte SIX_BYTE[] = { (jbyte) 0x80, (jbyte) 0x81, (jbyte) 0x82, (jbyte) 0x83, (jbyte) 0x84, (jbyte) 0x85};
static const jchar THREE_CHAR[] = { (jchar) 0x8180, (jchar) 0x8382, (jchar) 0x8584};
static const jbyte EIGHT_BYTE[] = {
(jbyte) 0x80, (jbyte) 0x81, (jbyte) 0x82,
(jbyte) 0x83, (jbyte) 0x84, (jbyte) 0x85,
(jbyte) 0x86, (jbyte) 0x87};
static const jchar FOUR_CHAR[] = {
(jchar) 0x8180, (jchar) 0x8382,
(jchar) 0x8584, (jchar) 0x8786};
static const jint TWO_INT[] = { (jint)0x83828180, (jint)0x87868584};
static const juint MURMUR3_32_X86_CHECK_VALUE = 0xB0F57EE3;
void AltHashing::testMurmur3_32_ByteArray() {
// printf("testMurmur3_32_ByteArray\n");
jbyte vector[256];
jbyte hashes[4 * 256];
for (int i = 0; i < 256; i++) {
vector[i] = (jbyte) i;
}
// Hash subranges {}, {0}, {0,1}, {0,1,2}, ..., {0,...,255}
for (int i = 0; i < 256; i++) {
juint hash = murmur3_32(256 - i, vector, i);
hashes[i * 4] = (jbyte) hash;
hashes[i * 4 + 1] = (jbyte)(hash >> 8);
hashes[i * 4 + 2] = (jbyte)(hash >> 16);
hashes[i * 4 + 3] = (jbyte)(hash >> 24);
}
// hash to get const result.
juint final_hash = murmur3_32(hashes, 4*256);
assert (MURMUR3_32_X86_CHECK_VALUE == final_hash,
"Calculated hash result not as expected. Expected %08X got %08X\n",
MURMUR3_32_X86_CHECK_VALUE,
final_hash);
}
void AltHashing::testEquivalentHashes() {
juint jbytes, jchars, ints;
// printf("testEquivalentHashes\n");
jbytes = murmur3_32(TWO_BYTE, 2);
jchars = murmur3_32(ONE_CHAR, 1);
assert (jbytes == jchars,
"Hashes did not match. b:%08x != c:%08x\n", jbytes, jchars);
jbytes = murmur3_32(FOUR_BYTE, 4);
jchars = murmur3_32(TWO_CHAR, 2);
ints = murmur3_32(ONE_INT, 1);
assert ((jbytes == jchars) && (jbytes == ints),
"Hashes did not match. b:%08x != c:%08x != i:%08x\n", jbytes, jchars, ints);
jbytes = murmur3_32(SIX_BYTE, 6);
jchars = murmur3_32(THREE_CHAR, 3);
assert (jbytes == jchars,
"Hashes did not match. b:%08x != c:%08x\n", jbytes, jchars);
jbytes = murmur3_32(EIGHT_BYTE, 8);
jchars = murmur3_32(FOUR_CHAR, 4);
ints = murmur3_32(TWO_INT, 2);
assert ((jbytes == jchars) && (jbytes == ints),
"Hashes did not match. b:%08x != c:%08x != i:%08x\n", jbytes, jchars, ints);
}
// Returns true if the alternate hashcode is correct
void AltHashing::test_alt_hash() {
testMurmur3_32_ByteArray();
testEquivalentHashes();
}
void AltHashing_test() {
AltHashing::test_alt_hash();
}
#endif // PRODUCT
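The deleted self-test is the SMHasher verification procedure: hash the prefixes {}, {0}, {0,1}, ..., {0..255} of a byte vector with seed 256-i, concatenate the 256 results little-endian, and hash that buffer with seed 0. A standalone Java sketch of the same check, assuming AltHashing's byte variant matches standard MurmurHash3 x86 32-bit (which the check value asserts):

public final class Murmur3Check {
    private static int murmur3_32(int seed, byte[] data, int len) {
        final int c1 = 0xcc9e2d51, c2 = 0x1b873593;
        int h = seed, i = 0;
        for (; i + 4 <= len; i += 4) {          // 4-byte blocks, little-endian
            int k = (data[i] & 0xff) | ((data[i + 1] & 0xff) << 8)
                  | ((data[i + 2] & 0xff) << 16) | ((data[i + 3] & 0xff) << 24);
            k *= c1; k = Integer.rotateLeft(k, 15); k *= c2;
            h ^= k; h = Integer.rotateLeft(h, 13); h = h * 5 + 0xe6546b64;
        }
        int k = 0;                               // 0-3 byte tail, fall-through intended
        switch (len & 3) {
        case 3: k ^= (data[i + 2] & 0xff) << 16;
        case 2: k ^= (data[i + 1] & 0xff) << 8;
        case 1: k ^= (data[i] & 0xff);
                k *= c1; k = Integer.rotateLeft(k, 15); k *= c2; h ^= k;
        }
        h ^= len;                                // finalization mix
        h ^= h >>> 16; h *= 0x85ebca6b; h ^= h >>> 13; h *= 0xc2b2ae35; h ^= h >>> 16;
        return h;
    }

    public static void main(String[] args) {
        byte[] vector = new byte[256];
        byte[] hashes = new byte[4 * 256];
        for (int i = 0; i < 256; i++) vector[i] = (byte) i;
        for (int i = 0; i < 256; i++) {
            int hash = murmur3_32(256 - i, vector, i);
            hashes[i * 4]     = (byte) hash;
            hashes[i * 4 + 1] = (byte) (hash >>> 8);
            hashes[i * 4 + 2] = (byte) (hash >>> 16);
            hashes[i * 4 + 3] = (byte) (hash >>> 24);
        }
        // 0xB0F57EE3 is MURMUR3_32_X86_CHECK_VALUE from the deleted test.
        int finalHash = murmur3_32(0, hashes, hashes.length);
        System.out.println(finalHash == 0xB0F57EE3 ? "OK" : "MISMATCH");
    }
}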


@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -37,26 +37,18 @@
*/
class AltHashing : AllStatic {
friend class AltHashingTest;
// utility function copied from java/lang/Integer
static juint Integer_rotateLeft(juint i, int distance) {
return (i << distance) | (i >> (32-distance));
return (i << distance) | (i >> (32 - distance));
}
static juint murmur3_32(const int* data, int len);
static juint murmur3_32(juint seed, const int* data, int len);
#ifndef PRODUCT
// Hashing functions used for internal testing
static juint murmur3_32(const jbyte* data, int len);
static juint murmur3_32(const jchar* data, int len);
static void testMurmur3_32_ByteArray();
static void testEquivalentHashes();
#endif // PRODUCT
public:
static juint compute_seed();
static juint murmur3_32(juint seed, const jbyte* data, int len);
static juint murmur3_32(juint seed, const jchar* data, int len);
NOT_PRODUCT(static void test_alt_hash();)
};
#endif // SHARE_VM_CLASSFILE_ALTHASHING_HPP


@ -95,7 +95,6 @@
#define JAVA_6_VERSION 50
// Used for backward compatibility reasons:
// - to check NameAndType_info signatures more aggressively
// - to disallow argument and require ACC_STATIC for <clinit> methods
#define JAVA_7_VERSION 51
@ -564,7 +563,7 @@ void ClassFileParser::parse_constant_pool(const ClassFileStream* const stream,
break;
}
case JVM_CONSTANT_NameAndType: {
if (_need_verify && _major_version >= JAVA_7_VERSION) {
if (_need_verify) {
const int sig_index = cp->signature_ref_index_at(index);
const int name_index = cp->name_ref_index_at(index);
const Symbol* const name = cp->symbol_at(name_index);
@ -572,9 +571,17 @@ void ClassFileParser::parse_constant_pool(const ClassFileStream* const stream,
guarantee_property(sig->utf8_length() != 0,
"Illegal zero length constant pool entry at %d in class %s",
sig_index, CHECK);
guarantee_property(name->utf8_length() != 0,
"Illegal zero length constant pool entry at %d in class %s",
name_index, CHECK);
if (sig->byte_at(0) == JVM_SIGNATURE_FUNC) {
// Format check method name and signature
verify_legal_method_name(name, CHECK);
verify_legal_method_signature(name, sig, CHECK);
} else {
// Format check field name and signature
verify_legal_field_name(name, CHECK);
verify_legal_field_signature(name, sig, CHECK);
}
}
@ -595,42 +602,32 @@ void ClassFileParser::parse_constant_pool(const ClassFileStream* const stream,
const Symbol* const name = cp->symbol_at(name_ref_index);
const Symbol* const signature = cp->symbol_at(signature_ref_index);
if (tag == JVM_CONSTANT_Fieldref) {
verify_legal_field_name(name, CHECK);
if (_need_verify && _major_version >= JAVA_7_VERSION) {
// Signature is verified above, when iterating NameAndType_info.
// Need only to be sure it's non-zero length and the right type.
if (_need_verify) {
// Field name and signature are verified above, when iterating NameAndType_info.
// Need only to be sure signature is non-zero length and the right type.
if (signature->utf8_length() == 0 ||
signature->byte_at(0) == JVM_SIGNATURE_FUNC) {
throwIllegalSignature(
"Field", name, signature, CHECK);
throwIllegalSignature("Field", name, signature, CHECK);
}
} else {
verify_legal_field_signature(name, signature, CHECK);
}
} else {
verify_legal_method_name(name, CHECK);
if (_need_verify && _major_version >= JAVA_7_VERSION) {
// Signature is verified above, when iterating NameAndType_info.
// Need only to be sure it's non-zero length and the right type.
if (_need_verify) {
// Method name and signature are verified above, when iterating NameAndType_info.
// Need only to be sure signature is non-zero length and the right type.
if (signature->utf8_length() == 0 ||
signature->byte_at(0) != JVM_SIGNATURE_FUNC) {
throwIllegalSignature(
"Method", name, signature, CHECK);
throwIllegalSignature("Method", name, signature, CHECK);
}
} else {
verify_legal_method_signature(name, signature, CHECK);
}
if (tag == JVM_CONSTANT_Methodref) {
// 4509014: If a class method name begins with '<', it must be "<init>".
assert(name != NULL, "method name in constant pool is null");
const unsigned int name_len = name->utf8_length();
if (name_len != 0 && name->byte_at(0) == '<') {
if (name != vmSymbols::object_initializer_name()) {
classfile_parse_error(
"Bad method name at constant pool index %u in class file %s",
name_ref_index, CHECK);
}
}
// 4509014: If a class method name begins with '<', it must be "<init>"
const unsigned int name_len = name->utf8_length();
if (tag == JVM_CONSTANT_Methodref &&
name_len != 0 &&
name->byte_at(0) == '<' &&
name != vmSymbols::object_initializer_name()) {
classfile_parse_error(
"Bad method name at constant pool index %u in class file %s",
name_ref_index, CHECK);
}
}
break;
@ -4843,19 +4840,28 @@ const char* ClassFileParser::skip_over_field_signature(const char* signature,
}
}
else {
// 4900761: For class version > 48, any unicode is allowed in class name.
// Skip leading 'L' and ignore first appearance of ';'
length--;
signature++;
while (length > 0 && signature[0] != ';') {
if (signature[0] == '.') {
classfile_parse_error("Class name contains illegal character '.' in descriptor in class file %s", CHECK_0);
}
length--;
signature++;
}
if (signature[0] == ';') { return signature + 1; }
}
char* c = strchr((char*) signature, ';');
// Format check signature
if (c != NULL) {
ResourceMark rm(THREAD);
int newlen = c - (char*) signature;
char* sig = NEW_RESOURCE_ARRAY(char, newlen + 1);
strncpy(sig, signature, newlen);
sig[newlen] = '\0';
bool legal = verify_unqualified_name(sig, newlen, LegalClass);
if (!legal) {
classfile_parse_error("Class name contains illegal character "
"in descriptor in class file %s",
CHECK_0);
return NULL;
}
return signature + newlen + 1;
}
}
return NULL;
}
case JVM_SIGNATURE_ARRAY:
@ -4869,7 +4875,6 @@ const char* ClassFileParser::skip_over_field_signature(const char* signature,
length--;
void_ok = false;
break;
default:
return NULL;
}
@ -5402,11 +5407,75 @@ void ClassFileParser::fill_instance_klass(InstanceKlass* ik, bool changed_by_loa
debug_only(ik->verify();)
}
// For an anonymous class that is in the unnamed package, move it to its host class's
// package by prepending its host class's package name to its class name and setting
// its _class_name field.
void ClassFileParser::prepend_host_package_name(const InstanceKlass* host_klass, TRAPS) {
ResourceMark rm(THREAD);
assert(strrchr(_class_name->as_C_string(), '/') == NULL,
"Anonymous class should not be in a package");
const char* host_pkg_name =
ClassLoader::package_from_name(host_klass->name()->as_C_string(), NULL);
if (host_pkg_name != NULL) {
size_t host_pkg_len = strlen(host_pkg_name);
int class_name_len = _class_name->utf8_length();
char* new_anon_name =
NEW_RESOURCE_ARRAY(char, host_pkg_len + 1 + class_name_len);
// Copy host package name and trailing /.
strncpy(new_anon_name, host_pkg_name, host_pkg_len);
new_anon_name[host_pkg_len] = '/';
// Append anonymous class name. The anonymous class name can contain odd
// characters. So, do a strncpy instead of using sprintf("%s...").
strncpy(new_anon_name + host_pkg_len + 1, (char *)_class_name->base(), class_name_len);
// Create a symbol and update the anonymous class name.
_class_name = SymbolTable::new_symbol(new_anon_name,
(int)host_pkg_len + 1 + class_name_len,
CHECK);
}
}
// If the host class and the anonymous class are in the same package then do
// nothing. If the anonymous class is in the unnamed package then move it to its
// host's package. If the classes are in different packages then throw an IAE
// exception.
void ClassFileParser::fix_anonymous_class_name(TRAPS) {
assert(_host_klass != NULL, "Expected an anonymous class");
const jbyte* anon_last_slash = UTF8::strrchr(_class_name->base(),
_class_name->utf8_length(), '/');
if (anon_last_slash == NULL) { // Unnamed package
prepend_host_package_name(_host_klass, CHECK);
} else {
if (!InstanceKlass::is_same_class_package(_host_klass->class_loader(),
_host_klass->name(),
_host_klass->class_loader(),
_class_name)) {
ResourceMark rm(THREAD);
THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
err_msg("Host class %s and anonymous class %s are in different packages",
_host_klass->name()->as_C_string(), _class_name->as_C_string()));
}
}
}
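A standalone Java model of the renaming rule above (names in JVM internal form, e.g. "java/lang/Foo"; a sketch, not the parser's code, and it omits the class-loader comparison that is_same_class_package also performs):

final class AnonNameFixer {
    static String fix(String hostName, String anonName) {
        int hostSlash = hostName.lastIndexOf('/');
        String hostPkg = hostSlash < 0 ? "" : hostName.substring(0, hostSlash);
        int anonSlash = anonName.lastIndexOf('/');
        if (anonSlash < 0) {                     // unnamed package: adopt the host's
            return hostPkg.isEmpty() ? anonName : hostPkg + '/' + anonName;
        }
        if (!anonName.substring(0, anonSlash).equals(hostPkg)) {
            throw new IllegalArgumentException("Host class " + hostName
                + " and anonymous class " + anonName + " are in different packages");
        }
        return anonName;                         // same package: name unchanged
    }
}

So fix("java/lang/Foo", "Bar") yields "java/lang/Bar", while fix("java/lang/Foo", "com/acme/Bar") throws.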
static bool relax_format_check_for(ClassLoaderData* loader_data) {
bool trusted = (loader_data->is_the_null_class_loader_data() ||
SystemDictionary::is_platform_class_loader(loader_data->class_loader()));
bool need_verify =
// verifyAll
(BytecodeVerificationLocal && BytecodeVerificationRemote) ||
// verifyRemote
(!BytecodeVerificationLocal && BytecodeVerificationRemote && !trusted);
return !need_verify;
}
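As a worked example of the predicate: with default flags (BytecodeVerificationLocal false, BytecodeVerificationRemote true), need_verify reduces to !trusted, so boot- and platform-loader classes get the relaxed format checks while application classes keep strict ones; -Xverify:all sets both flags, keeping strict checks everywhere.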
ClassFileParser::ClassFileParser(ClassFileStream* stream,
Symbol* name,
ClassLoaderData* loader_data,
Handle protection_domain,
const Klass* host_klass,
const InstanceKlass* host_klass,
GrowableArray<Handle>* cp_patches,
Publicity pub_level,
TRAPS) :
@ -5490,7 +5559,7 @@ ClassFileParser::ClassFileParser(ClassFileStream* stream,
// Check if verification needs to be relaxed for this class file
// Do not restrict it to jdk1.0 or jdk1.1 to maintain backward compatibility (4982376)
_relax_verify = Verifier::relax_verify_for(_loader_data->class_loader());
_relax_verify = relax_format_check_for(_loader_data);
parse_stream(stream, CHECK);
@ -5681,6 +5750,13 @@ void ClassFileParser::parse_stream(const ClassFileStream* const stream,
return;
}
// if this is an anonymous class fix up its name if it's in the unnamed
// package. Otherwise, throw IAE if it is in a different package than
// its host class.
if (_host_klass != NULL) {
fix_anonymous_class_name(CHECK);
}
// Verification prevents us from creating names with dots in them; this
// asserts that that's the case.
assert(is_internal_format(_class_name), "external class name format used internally");

View File

@ -79,7 +79,7 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
const Symbol* _requested_name;
Symbol* _class_name;
mutable ClassLoaderData* _loader_data;
const Klass* _host_klass;
const InstanceKlass* _host_klass;
GrowableArray<Handle>* _cp_patches; // overrides for CP entries
// Metadata created before the instance klass is created. Must be deallocated
@ -155,6 +155,9 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
ConstantPool* cp,
TRAPS);
void prepend_host_package_name(const InstanceKlass* host_klass, TRAPS);
void fix_anonymous_class_name(TRAPS);
void fill_instance_klass(InstanceKlass* ik, bool cf_changed_in_CFLH, TRAPS);
void set_klass(InstanceKlass* instance);
@ -474,7 +477,7 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
Symbol* name,
ClassLoaderData* loader_data,
Handle protection_domain,
const Klass* host_klass,
const InstanceKlass* host_klass,
GrowableArray<Handle>* cp_patches,
Publicity pub_level,
TRAPS);
@ -500,7 +503,7 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
bool is_anonymous() const { return _host_klass != NULL; }
bool is_interface() const { return _access_flags.is_interface(); }
const Klass* host_klass() const { return _host_klass; }
const InstanceKlass* host_klass() const { return _host_klass; }
const GrowableArray<Handle>* cp_patches() const { return _cp_patches; }
ClassLoaderData* loader_data() const { return _loader_data; }
const Symbol* class_name() const { return _class_name; }


@ -50,12 +50,14 @@ class ClassFileStream;
class ClassPathEntry : public CHeapObj<mtClass> {
private:
ClassPathEntry* _next;
ClassPathEntry* volatile _next;
public:
// Next entry in class path
ClassPathEntry* next() const { return _next; }
ClassPathEntry* next() const {
return (ClassPathEntry*) OrderAccess::load_ptr_acquire(&_next);
}
void set_next(ClassPathEntry* next) {
// may have unlocked readers, so write atomically.
// may have unlocked readers, so ensure visibility.
OrderAccess::release_store_ptr(&_next, next);
}
virtual bool is_jrt() = 0;
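
The change above is the classic safe-publication idiom for a list with lock-free readers: the writer fully initializes the entry, then release-stores the pointer; readers acquire-load it so they cannot observe a partially constructed entry. A standalone C++11 sketch of the same idiom, with std::atomic standing in for OrderAccess:

  #include <atomic>

  struct Entry {
    int payload = 0;
    std::atomic<Entry*> next{nullptr};
  };

  // Writer: publish with release semantics so a reader that sees the new
  // pointer also sees the initialized payload.
  void publish(std::atomic<Entry*>& slot, Entry* e) {
    slot.store(e, std::memory_order_release);
  }

  // Reader: acquire-load pairs with the release-store above.
  Entry* read(const std::atomic<Entry*>& slot) {
    return slot.load(std::memory_order_acquire);
  }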


@ -966,7 +966,7 @@ bool ClassLoaderDataGraph::do_unloading(BoolObjectClosure* is_alive_closure,
// Klasses to delete.
bool walk_all_metadata = clean_previous_versions &&
JvmtiExport::has_redefined_a_class() &&
InstanceKlass::has_previous_versions();
InstanceKlass::has_previous_versions_and_reset();
MetadataOnStackMark md_on_stack(walk_all_metadata);
// Save previous _unloading pointer for CMS which may add to unloading list before


@ -3015,41 +3015,6 @@ void java_lang_boxing_object::print(BasicType type, jvalue* value, outputStream*
}
}
// Support for java_lang_ref_Reference
HeapWord *java_lang_ref_Reference::pending_list_lock_addr() {
InstanceKlass* ik = SystemDictionary::Reference_klass();
address addr = ik->static_field_addr(static_lock_offset);
return (HeapWord*) addr;
}
oop java_lang_ref_Reference::pending_list_lock() {
InstanceKlass* ik = SystemDictionary::Reference_klass();
address addr = ik->static_field_addr(static_lock_offset);
if (UseCompressedOops) {
return oopDesc::load_decode_heap_oop((narrowOop *)addr);
} else {
return oopDesc::load_decode_heap_oop((oop*)addr);
}
}
HeapWord *java_lang_ref_Reference::pending_list_addr() {
InstanceKlass* ik = SystemDictionary::Reference_klass();
address addr = ik->static_field_addr(static_pending_offset);
// XXX This might not be HeapWord aligned; arguably it should be char*.
return (HeapWord*)addr;
}
oop java_lang_ref_Reference::pending_list() {
char *addr = (char *)pending_list_addr();
if (UseCompressedOops) {
return oopDesc::load_decode_heap_oop((narrowOop *)addr);
} else {
return oopDesc::load_decode_heap_oop((oop*)addr);
}
}
// Support for java_lang_ref_SoftReference
jlong java_lang_ref_SoftReference::timestamp(oop ref) {
@ -3616,8 +3581,6 @@ int java_lang_ref_Reference::referent_offset;
int java_lang_ref_Reference::queue_offset;
int java_lang_ref_Reference::next_offset;
int java_lang_ref_Reference::discovered_offset;
int java_lang_ref_Reference::static_lock_offset;
int java_lang_ref_Reference::static_pending_offset;
int java_lang_ref_Reference::number_of_fake_oop_fields;
int java_lang_ref_SoftReference::timestamp_offset;
int java_lang_ref_SoftReference::static_clock_offset;
@ -3772,8 +3735,6 @@ void JavaClasses::compute_hard_coded_offsets() {
java_lang_ref_Reference::queue_offset = java_lang_ref_Reference::hc_queue_offset * x + header;
java_lang_ref_Reference::next_offset = java_lang_ref_Reference::hc_next_offset * x + header;
java_lang_ref_Reference::discovered_offset = java_lang_ref_Reference::hc_discovered_offset * x + header;
java_lang_ref_Reference::static_lock_offset = java_lang_ref_Reference::hc_static_lock_offset * x;
java_lang_ref_Reference::static_pending_offset = java_lang_ref_Reference::hc_static_pending_offset * x;
// Artificial fields for java_lang_ref_Reference
// The first field is for the discovered field added in 1.4
java_lang_ref_Reference::number_of_fake_oop_fields = 1;
@ -4006,8 +3967,6 @@ void JavaClasses::check_offsets() {
CHECK_OFFSET("java/lang/ref/Reference", java_lang_ref_Reference, next, "Ljava/lang/ref/Reference;");
// Fake field
//CHECK_OFFSET("java/lang/ref/Reference", java_lang_ref_Reference, discovered, "Ljava/lang/ref/Reference;");
CHECK_STATIC_OFFSET("java/lang/ref/Reference", java_lang_ref_Reference, lock, "Ljava/lang/ref/Reference$Lock;");
CHECK_STATIC_OFFSET("java/lang/ref/Reference", java_lang_ref_Reference, pending, "Ljava/lang/ref/Reference;");
// java.lang.ref.SoftReference


@ -886,17 +886,11 @@ class java_lang_ref_Reference: AllStatic {
hc_next_offset = 2,
hc_discovered_offset = 3 // Is not last, see SoftRefs.
};
enum {
hc_static_lock_offset = 0,
hc_static_pending_offset = 1
};
static int referent_offset;
static int queue_offset;
static int next_offset;
static int discovered_offset;
static int static_lock_offset;
static int static_pending_offset;
static int number_of_fake_oop_fields;
// Accessors
@ -912,13 +906,6 @@ class java_lang_ref_Reference: AllStatic {
static inline void set_discovered(oop ref, oop value);
static inline void set_discovered_raw(oop ref, oop value);
static inline HeapWord* discovered_addr(oop ref);
// Accessors for statics
static oop pending_list_lock();
static oop pending_list();
static HeapWord* pending_list_lock_addr();
static HeapWord* pending_list_addr();
};


@ -94,7 +94,7 @@ instanceKlassHandle KlassFactory::create_from_stream(ClassFileStream* stream,
Symbol* name,
ClassLoaderData* loader_data,
Handle protection_domain,
const Klass* host_klass,
const InstanceKlass* host_klass,
GrowableArray<Handle>* cp_patches,
TRAPS) {


@ -72,7 +72,7 @@ class KlassFactory : AllStatic {
Symbol* name,
ClassLoaderData* loader_data,
Handle protection_domain,
const Klass* host_klass,
const InstanceKlass* host_klass,
GrowableArray<Handle>* cp_patches,
TRAPS);
};


@ -1027,7 +1027,7 @@ Klass* SystemDictionary::parse_stream(Symbol* class_name,
Handle class_loader,
Handle protection_domain,
ClassFileStream* st,
const Klass* host_klass,
const InstanceKlass* host_klass,
GrowableArray<Handle>* cp_patches,
TRAPS) {


@ -299,7 +299,7 @@ public:
Handle class_loader,
Handle protection_domain,
ClassFileStream* st,
const Klass* host_klass,
const InstanceKlass* host_klass,
GrowableArray<Handle>* cp_patches,
TRAPS);


@ -67,12 +67,12 @@ static void* volatile _verify_byte_codes_fn = NULL;
static volatile jint _is_new_verify_byte_codes_fn = (jint) true;
static void* verify_byte_codes_fn() {
if (_verify_byte_codes_fn == NULL) {
if (OrderAccess::load_ptr_acquire(&_verify_byte_codes_fn) == NULL) {
void *lib_handle = os::native_java_library();
void *func = os::dll_lookup(lib_handle, "VerifyClassCodesForMajorVersion");
OrderAccess::release_store_ptr(&_verify_byte_codes_fn, func);
if (func == NULL) {
OrderAccess::release_store(&_is_new_verify_byte_codes_fn, false);
_is_new_verify_byte_codes_fn = false;
func = os::dll_lookup(lib_handle, "VerifyClassCodes");
OrderAccess::release_store_ptr(&_verify_byte_codes_fn, func);
}
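
The hunk above tightens a lazy initialization of a function pointer: the racy fast-path read becomes an acquire load so that it pairs with the release store publishing the looked-up address. A hedged, self-contained C++11 analogue, where lookup_symbol is a stub standing in for os::dll_lookup:

  #include <atomic>

  static void* lookup_symbol(const char* /*name*/) {
    return nullptr;  // stub for illustration; real code resolves a dll symbol
  }

  static std::atomic<void*> g_verify_fn{nullptr};

  void* verify_fn() {
    void* fn = g_verify_fn.load(std::memory_order_acquire);  // fast path
    if (fn == nullptr) {
      fn = lookup_symbol("VerifyClassCodesForMajorVersion");
      if (fn == nullptr) {
        fn = lookup_symbol("VerifyClassCodes");  // older fallback entry point
      }
      // Benign race: two threads may both look up and store the same value.
      g_verify_fn.store(fn, std::memory_order_release);
    }
    return fn;
  }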
@ -88,7 +88,7 @@ bool Verifier::should_verify_for(oop class_loader, bool should_verify_class) {
BytecodeVerificationLocal : BytecodeVerificationRemote;
}
bool Verifier::relax_verify_for(oop loader) {
bool Verifier::relax_access_for(oop loader) {
bool trusted = java_lang_ClassLoader::is_trusted_loader(loader);
bool need_verify =
// verifyAll
@ -2786,7 +2786,7 @@ void ClassVerifier::verify_invoke_instructions(
// direct interface relative to the host class
have_imr_indirect = (have_imr_indirect &&
!is_same_or_direct_interface(
InstanceKlass::cast(current_class()->host_klass()),
current_class()->host_klass(),
host_klass_type, ref_class_type));
}
if (!subtype) {


@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -58,8 +58,8 @@ class Verifier : AllStatic {
// -Xverify:all/none override this value
static bool should_verify_for(oop class_loader, bool should_verify_class);
// Relax certain verifier checks to enable some broken 1.1 apps to run on 1.2.
static bool relax_verify_for(oop class_loader);
// Relax certain access checks to enable some broken 1.1 apps to run on 1.2.
static bool relax_access_for(oop class_loader);
// Print output for class+resolve
static void trace_class_resolution(Klass* resolve_class, InstanceKlass* verify_class);


@ -366,6 +366,7 @@ bool vmIntrinsics::can_trap(vmIntrinsics::ID id) {
switch(id) {
#ifdef TRACE_HAVE_INTRINSICS
case vmIntrinsics::_counterTime:
case vmIntrinsics::_getClassId:
#endif
case vmIntrinsics::_currentTimeMillis:
case vmIntrinsics::_nanoTime:


@ -437,7 +437,7 @@ CodeBlob* CodeCache::next_blob(CodeBlob* cb) {
* run the constructor for the CodeBlob subclass it is busy
* instantiating.
*/
CodeBlob* CodeCache::allocate(int size, int code_blob_type, bool strict) {
CodeBlob* CodeCache::allocate(int size, int code_blob_type, int orig_code_blob_type) {
// Possibly wakes up the sweeper thread.
NMethodSweeper::notify(code_blob_type);
assert_locked_or_safepoint(CodeCache_lock);
@ -455,32 +455,41 @@ CodeBlob* CodeCache::allocate(int size, int code_blob_type, bool strict) {
cb = (CodeBlob*)heap->allocate(size);
if (cb != NULL) break;
if (!heap->expand_by(CodeCacheExpansionSize)) {
// Save original type for error reporting
if (orig_code_blob_type == CodeBlobType::All) {
orig_code_blob_type = code_blob_type;
}
// Expansion failed
if (SegmentedCodeCache && !strict) {
if (SegmentedCodeCache) {
// Fallback solution: Try to store code in another code heap.
// NonNMethod -> MethodNonProfiled -> MethodProfiled (-> MethodNonProfiled)
// Note that in the sweeper, we check the reverse_free_ratio of the code heap
// and force stack scanning if less than 10% of the code heap is free.
int type = code_blob_type;
switch (type) {
case CodeBlobType::NonNMethod:
type = CodeBlobType::MethodNonProfiled;
strict = false; // Allow recursive search for other heaps
break;
case CodeBlobType::MethodProfiled:
type = CodeBlobType::MethodNonProfiled;
strict = true;
break;
case CodeBlobType::MethodNonProfiled:
type = CodeBlobType::MethodProfiled;
strict = true;
break;
case CodeBlobType::MethodProfiled:
// Avoid loop if we already tried that code heap
if (type == orig_code_blob_type) {
type = CodeBlobType::MethodNonProfiled;
}
break;
}
if (heap_available(type)) {
return allocate(size, type, strict);
if (type != code_blob_type && type != orig_code_blob_type && heap_available(type)) {
if (PrintCodeCacheExtension) {
tty->print_cr("Extension of %s failed. Trying to allocate in %s.",
heap->name(), get_code_heap(type)->name());
}
return allocate(size, type, orig_code_blob_type);
}
}
MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
CompileBroker::handle_full_code_cache(code_blob_type);
CompileBroker::handle_full_code_cache(orig_code_blob_type);
return NULL;
}
if (PrintCodeCacheExtension) {
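
In summary, the fallback order the rewritten switch implements is NonNMethod -> MethodNonProfiled -> MethodProfiled -> MethodNonProfiled, with orig_code_blob_type recording the first heap tried so the recursion cannot revisit it. A condensed, hypothetical restatement of just the selection step:

  // 'type' is the heap whose expansion just failed; 'orig' is the heap the
  // search started in. The caller recurses only if the result differs from
  // both, which bounds the walk to at most one visit per heap.
  enum { NonNMethod, MethodNonProfiled, MethodProfiled };

  static int next_heap_to_try(int type, int orig) {
    switch (type) {
      case NonNMethod:        return MethodNonProfiled;
      case MethodNonProfiled: return MethodProfiled;
      case MethodProfiled:
        // Circle back only if MethodProfiled is where the search started;
        // otherwise MethodNonProfiled has already been tried.
        return (orig == MethodProfiled) ? MethodNonProfiled : type;
    }
    return type;
  }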


@ -126,7 +126,7 @@ class CodeCache : AllStatic {
static void initialize();
// Allocation/administration
static CodeBlob* allocate(int size, int code_blob_type, bool strict = false); // allocates a new CodeBlob
static CodeBlob* allocate(int size, int code_blob_type, int orig_code_blob_type = CodeBlobType::All); // allocates a new CodeBlob
static void commit(CodeBlob* cb); // called when the allocated CodeBlob has been filled
static int alignment_unit(); // guaranteed alignment of all CodeBlobs
static int alignment_offset(); // guaranteed offset of first CodeBlob byte within alignment unit (i.e., allocation header)


@ -32,7 +32,6 @@
#include "compiler/compileLog.hpp"
#include "compiler/compilerOracle.hpp"
#include "compiler/directivesParser.hpp"
#include "gc/shared/referencePendingListLocker.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
@ -893,15 +892,6 @@ void CompileBroker::compile_method_base(const methodHandle& method,
return;
}
// If the requesting thread is holding the pending list lock
// then we just return. We can't risk blocking while holding
// the pending list lock or a 3-way deadlock may occur
// between the reference handler thread, a GC (instigated
// by a compiler thread), and compiled method registration.
if (ReferencePendingListLocker::is_locked_by_self()) {
return;
}
if (TieredCompilation) {
// Tiered policy requires MethodCounters to exist before adding a method to
// the queue. Create if we don't have them yet.


@ -3511,6 +3511,7 @@ bool CMSCollector::do_marking_mt() {
conc_workers()->active_workers(),
Threads::number_of_non_daemon_threads());
num_workers = conc_workers()->update_active_workers(num_workers);
log_info(gc,task)("Using %u workers of %u for marking", num_workers, conc_workers()->total_workers());
CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();


@ -28,7 +28,6 @@
#include "gc/cms/concurrentMarkSweepThread.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/referencePendingListLocker.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/interfaceSupport.hpp"
@ -77,23 +76,6 @@ void ConcurrentMarkSweepThread::run_service() {
log_warning(gc)("Couldn't bind CMS thread to processor " UINTX_FORMAT, CPUForCMSThread);
}
{
MutexLockerEx x(CGC_lock, true);
set_CMS_flag(CMS_cms_wants_token);
assert(is_init_completed() && Universe::is_fully_initialized(), "ConcurrentGCThread::run() should have waited for this.");
// Wait until the surrogate locker thread that will do
// pending list locking on our behalf has been created.
// We cannot start the SLT thread ourselves since we need
// to be a JavaThread to do so.
CMSLoopCountWarn loopY("CMS::run", "waiting for SLT installation", 2);
while (!ReferencePendingListLocker::is_initialized() && !should_terminate()) {
CGC_lock->wait(true, 200);
loopY.tick();
}
clear_CMS_flag(CMS_cms_wants_token);
}
while (!should_terminate()) {
sleepBeforeNextCycle();
if (should_terminate()) break;


@ -899,6 +899,8 @@ void ParNewGeneration::collect(bool full,
workers->active_workers(),
Threads::number_of_non_daemon_threads());
active_workers = workers->update_active_workers(active_workers);
log_info(gc,task)("Using %u workers of %u for evacuation", active_workers, workers->total_workers());
_old_gen = gch->old_gen();
// If the next generation is too full to accommodate worst-case promotion


@ -37,14 +37,6 @@
//////////////////////////////////////////////////////////
// Methods in abstract class VM_CMS_Operation
//////////////////////////////////////////////////////////
void VM_CMS_Operation::acquire_pending_list_lock() {
_pending_list_locker.lock();
}
void VM_CMS_Operation::release_and_notify_pending_list_lock() {
_pending_list_locker.unlock();
}
void VM_CMS_Operation::verify_before_gc() {
if (VerifyBeforeGC &&
GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
@ -85,17 +77,10 @@ bool VM_CMS_Operation::doit_prologue() {
assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
"Possible deadlock");
if (needs_pending_list_lock()) {
acquire_pending_list_lock();
}
// Get the Heap_lock after the pending_list_lock.
Heap_lock->lock();
if (lost_race()) {
assert(_prologue_succeeded == false, "Initialized in c'tor");
Heap_lock->unlock();
if (needs_pending_list_lock()) {
release_and_notify_pending_list_lock();
}
} else {
_prologue_succeeded = true;
}
@ -108,11 +93,10 @@ void VM_CMS_Operation::doit_epilogue() {
assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
"Possible deadlock");
// Release the Heap_lock first.
Heap_lock->unlock();
if (needs_pending_list_lock()) {
release_and_notify_pending_list_lock();
if (Universe::has_reference_pending_list()) {
Heap_lock->notify_all();
}
Heap_lock->unlock();
}
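
With the surrogate locker gone, the epilogue notifies waiters directly: while still holding Heap_lock it wakes any thread blocked waiting for pending references, then releases the lock. The same wait/notify shape, sketched with std::condition_variable rather than the VM's Monitor (has_pending stands in for Universe::has_reference_pending_list()):

  #include <condition_variable>
  #include <mutex>

  std::mutex heap_lock;
  std::condition_variable heap_cv;
  bool has_pending = false;  // set by the GC when references are appended

  // GC side: notify while the lock is held, then release it, matching the
  // notify_all()-before-unlock() order in the epilogue above.
  void gc_epilogue() {
    std::lock_guard<std::mutex> g(heap_lock);
    if (has_pending) {
      heap_cv.notify_all();
    }
  }

  // Reference-handler side: block until the GC reports pending references.
  void wait_for_pending() {
    std::unique_lock<std::mutex> l(heap_lock);
    heap_cv.wait(l, [] { return has_pending; });
  }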
//////////////////////////////////////////////////////////
@ -230,9 +214,11 @@ void VM_GenCollectFullConcurrent::doit_epilogue() {
Thread* thr = Thread::current();
assert(thr->is_Java_thread(), "just checking");
JavaThread* jt = (JavaThread*)thr;
// Release the Heap_lock first.
if (Universe::has_reference_pending_list()) {
Heap_lock->notify_all();
}
Heap_lock->unlock();
release_and_notify_pending_list_lock();
// It is fine to test whether the number of completed collections has
// exceeded our request count without locking because


@ -28,7 +28,6 @@
#include "gc/cms/concurrentMarkSweepGeneration.hpp"
#include "gc/shared/gcCause.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/referencePendingListLocker.hpp"
#include "gc/shared/vmGCOperations.hpp"
#include "runtime/vm_operations.hpp"
@ -52,9 +51,6 @@
class CMSCollector;
class VM_CMS_Operation: public VM_Operation {
private:
ReferencePendingListLocker _pending_list_locker;
protected:
CMSCollector* _collector; // associated collector
bool _prologue_succeeded; // whether doit_prologue succeeded
@ -62,10 +58,6 @@ class VM_CMS_Operation: public VM_Operation {
bool lost_race() const;
// java.lang.ref.Reference support
void acquire_pending_list_lock();
void release_and_notify_pending_list_lock();
public:
VM_CMS_Operation(CMSCollector* collector):
_collector(collector),


@ -175,7 +175,7 @@ void ConcurrentMarkThread::run_service() {
TimeHelper::counter_to_millis(mark_end - mark_start));
CMCheckpointRootsFinalClosure final_cl(_cm);
VM_CGC_Operation op(&final_cl, "Pause Remark", true /* needs_pll */);
VM_CGC_Operation op(&final_cl, "Pause Remark");
VMThread::execute(&op);
}
if (cm()->restart_for_overflow()) {
@ -199,7 +199,7 @@ void ConcurrentMarkThread::run_service() {
delay_to_keep_mmu(g1_policy, false /* cleanup */);
CMCleanUp cl_cl(_cm);
VM_CGC_Operation op(&cl_cl, "Pause Cleanup", false /* needs_pll */);
VM_CGC_Operation op(&cl_cl, "Pause Cleanup");
VMThread::execute(&op);
} else {
// We don't want to update the marking status if a GC pause


@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "code/nmethod.hpp"
#include "gc/g1/g1CodeRootSetTable.hpp"
#include "gc/g1/g1CodeCacheRemSet.hpp"
#include "gc/g1/heapRegion.hpp"
#include "memory/heap.hpp"
@ -33,58 +34,13 @@
#include "utilities/hashtable.inline.hpp"
#include "utilities/stack.inline.hpp"
class CodeRootSetTable : public Hashtable<nmethod*, mtGC> {
friend class G1CodeRootSetTest;
typedef HashtableEntry<nmethod*, mtGC> Entry;
static CodeRootSetTable* volatile _purge_list;
CodeRootSetTable* _purge_next;
unsigned int compute_hash(nmethod* nm) {
uintptr_t hash = (uintptr_t)nm;
return hash ^ (hash >> 7); // code heap blocks are 128-byte aligned
}
void remove_entry(Entry* e, Entry* previous);
Entry* new_entry(nmethod* nm);
public:
CodeRootSetTable(int size) : Hashtable<nmethod*, mtGC>(size, sizeof(Entry)), _purge_next(NULL) {}
~CodeRootSetTable();
// Needs to be protected locks
bool add(nmethod* nm);
bool remove(nmethod* nm);
// Can be called without locking
bool contains(nmethod* nm);
int entry_size() const { return BasicHashtable<mtGC>::entry_size(); }
void copy_to(CodeRootSetTable* new_table);
void nmethods_do(CodeBlobClosure* blk);
template<typename CB>
int remove_if(CB& should_remove);
static void purge_list_append(CodeRootSetTable* tbl);
static void purge();
static size_t static_mem_size() {
return sizeof(_purge_list);
}
size_t mem_size();
};
CodeRootSetTable* volatile CodeRootSetTable::_purge_list = NULL;
G1CodeRootSetTable* volatile G1CodeRootSetTable::_purge_list = NULL;
size_t CodeRootSetTable::mem_size() {
return sizeof(CodeRootSetTable) + (entry_size() * number_of_entries()) + (sizeof(HashtableBucket<mtGC>) * table_size());
size_t G1CodeRootSetTable::mem_size() {
return sizeof(G1CodeRootSetTable) + (entry_size() * number_of_entries()) + (sizeof(HashtableBucket<mtGC>) * table_size());
}
CodeRootSetTable::Entry* CodeRootSetTable::new_entry(nmethod* nm) {
G1CodeRootSetTable::Entry* G1CodeRootSetTable::new_entry(nmethod* nm) {
unsigned int hash = compute_hash(nm);
Entry* entry = (Entry*) new_entry_free_list();
if (entry == NULL) {
@ -96,7 +52,7 @@ CodeRootSetTable::Entry* CodeRootSetTable::new_entry(nmethod* nm) {
return entry;
}
void CodeRootSetTable::remove_entry(Entry* e, Entry* previous) {
void G1CodeRootSetTable::remove_entry(Entry* e, Entry* previous) {
int index = hash_to_index(e->hash());
assert((e == bucket(index)) == (previous == NULL), "if e is the first entry then previous should be null");
@ -108,7 +64,7 @@ void CodeRootSetTable::remove_entry(Entry* e, Entry* previous) {
free_entry(e);
}
CodeRootSetTable::~CodeRootSetTable() {
G1CodeRootSetTable::~G1CodeRootSetTable() {
for (int index = 0; index < table_size(); ++index) {
for (Entry* e = bucket(index); e != NULL; ) {
Entry* to_remove = e;
@ -125,7 +81,7 @@ CodeRootSetTable::~CodeRootSetTable() {
}
}
bool CodeRootSetTable::add(nmethod* nm) {
bool G1CodeRootSetTable::add(nmethod* nm) {
if (!contains(nm)) {
Entry* e = new_entry(nm);
int index = hash_to_index(e->hash());
@ -135,7 +91,7 @@ bool CodeRootSetTable::add(nmethod* nm) {
return false;
}
bool CodeRootSetTable::contains(nmethod* nm) {
bool G1CodeRootSetTable::contains(nmethod* nm) {
int index = hash_to_index(compute_hash(nm));
for (Entry* e = bucket(index); e != NULL; e = e->next()) {
if (e->literal() == nm) {
@ -145,7 +101,7 @@ bool CodeRootSetTable::contains(nmethod* nm) {
return false;
}
bool CodeRootSetTable::remove(nmethod* nm) {
bool G1CodeRootSetTable::remove(nmethod* nm) {
int index = hash_to_index(compute_hash(nm));
Entry* previous = NULL;
for (Entry* e = bucket(index); e != NULL; previous = e, e = e->next()) {
@ -157,7 +113,7 @@ bool CodeRootSetTable::remove(nmethod* nm) {
return false;
}
void CodeRootSetTable::copy_to(CodeRootSetTable* new_table) {
void G1CodeRootSetTable::copy_to(G1CodeRootSetTable* new_table) {
for (int index = 0; index < table_size(); ++index) {
for (Entry* e = bucket(index); e != NULL; e = e->next()) {
new_table->add(e->literal());
@ -166,7 +122,7 @@ void CodeRootSetTable::copy_to(CodeRootSetTable* new_table) {
new_table->copy_freelist(this);
}
void CodeRootSetTable::nmethods_do(CodeBlobClosure* blk) {
void G1CodeRootSetTable::nmethods_do(CodeBlobClosure* blk) {
for (int index = 0; index < table_size(); ++index) {
for (Entry* e = bucket(index); e != NULL; e = e->next()) {
blk->do_code_blob(e->literal());
@ -175,7 +131,7 @@ void CodeRootSetTable::nmethods_do(CodeBlobClosure* blk) {
}
template<typename CB>
int CodeRootSetTable::remove_if(CB& should_remove) {
int G1CodeRootSetTable::remove_if(CB& should_remove) {
int num_removed = 0;
for (int index = 0; index < table_size(); ++index) {
Entry* previous = NULL;
@ -198,52 +154,52 @@ G1CodeRootSet::~G1CodeRootSet() {
delete _table;
}
CodeRootSetTable* G1CodeRootSet::load_acquire_table() {
return (CodeRootSetTable*) OrderAccess::load_ptr_acquire(&_table);
G1CodeRootSetTable* G1CodeRootSet::load_acquire_table() {
return (G1CodeRootSetTable*) OrderAccess::load_ptr_acquire(&_table);
}
void G1CodeRootSet::allocate_small_table() {
CodeRootSetTable* temp = new CodeRootSetTable(SmallSize);
G1CodeRootSetTable* temp = new G1CodeRootSetTable(SmallSize);
OrderAccess::release_store_ptr(&_table, temp);
}
void CodeRootSetTable::purge_list_append(CodeRootSetTable* table) {
void G1CodeRootSetTable::purge_list_append(G1CodeRootSetTable* table) {
for (;;) {
table->_purge_next = _purge_list;
CodeRootSetTable* old = (CodeRootSetTable*) Atomic::cmpxchg_ptr(table, &_purge_list, table->_purge_next);
G1CodeRootSetTable* old = (G1CodeRootSetTable*) Atomic::cmpxchg_ptr(table, &_purge_list, table->_purge_next);
if (old == table->_purge_next) {
break;
}
}
}
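
purge_list_append() is a standard lock-free stack push: link the node to the current head, then compare-and-swap the head, retrying until no other thread interferes. In portable C++11 terms (std::atomic in place of Atomic::cmpxchg_ptr), a sketch looks like:

  #include <atomic>

  struct Table {
    Table* purge_next = nullptr;
  };

  static std::atomic<Table*> purge_list{nullptr};

  void purge_list_append(Table* t) {
    Table* head = purge_list.load(std::memory_order_relaxed);
    do {
      t->purge_next = head;  // link before publishing
    } while (!purge_list.compare_exchange_weak(head, t,
                                               std::memory_order_release,
                                               std::memory_order_relaxed));
  }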
void CodeRootSetTable::purge() {
CodeRootSetTable* table = _purge_list;
void G1CodeRootSetTable::purge() {
G1CodeRootSetTable* table = _purge_list;
_purge_list = NULL;
while (table != NULL) {
CodeRootSetTable* to_purge = table;
G1CodeRootSetTable* to_purge = table;
table = table->_purge_next;
delete to_purge;
}
}
void G1CodeRootSet::move_to_large() {
CodeRootSetTable* temp = new CodeRootSetTable(LargeSize);
G1CodeRootSetTable* temp = new G1CodeRootSetTable(LargeSize);
_table->copy_to(temp);
CodeRootSetTable::purge_list_append(_table);
G1CodeRootSetTable::purge_list_append(_table);
OrderAccess::release_store_ptr(&_table, temp);
}
void G1CodeRootSet::purge() {
CodeRootSetTable::purge();
G1CodeRootSetTable::purge();
}
size_t G1CodeRootSet::static_mem_size() {
return CodeRootSetTable::static_mem_size();
return G1CodeRootSetTable::static_mem_size();
}
void G1CodeRootSet::add(nmethod* method) {
@ -278,7 +234,7 @@ bool G1CodeRootSet::remove(nmethod* method) {
}
bool G1CodeRootSet::contains(nmethod* method) {
CodeRootSetTable* table = load_acquire_table(); // contains() may be called outside of lock, so ensure mem sync.
G1CodeRootSetTable* table = load_acquire_table(); // contains() may be called outside of lock, so ensure mem sync.
if (table != NULL) {
return table->contains(method);
}
@ -348,67 +304,3 @@ void G1CodeRootSet::clean(HeapRegion* owner) {
clear();
}
}
#ifndef PRODUCT
class G1CodeRootSetTest {
public:
static void test() {
{
G1CodeRootSet set1;
assert(set1.is_empty(), "Code root set must be initially empty but is not.");
assert(G1CodeRootSet::static_mem_size() == sizeof(void*),
"The code root set's static memory usage is incorrect, " SIZE_FORMAT " bytes", G1CodeRootSet::static_mem_size());
set1.add((nmethod*)1);
assert(set1.length() == 1, "Added exactly one element, but set contains "
SIZE_FORMAT " elements", set1.length());
const size_t num_to_add = (size_t)G1CodeRootSet::Threshold + 1;
for (size_t i = 1; i <= num_to_add; i++) {
set1.add((nmethod*)1);
}
assert(set1.length() == 1,
"Duplicate detection should not have increased the set size but "
"is " SIZE_FORMAT, set1.length());
for (size_t i = 2; i <= num_to_add; i++) {
set1.add((nmethod*)(uintptr_t)(i));
}
assert(set1.length() == num_to_add,
"After adding in total " SIZE_FORMAT " distinct code roots, they "
"need to be in the set, but there are only " SIZE_FORMAT,
num_to_add, set1.length());
assert(CodeRootSetTable::_purge_list != NULL, "should have grown to large hashtable");
size_t num_popped = 0;
for (size_t i = 1; i <= num_to_add; i++) {
bool removed = set1.remove((nmethod*)i);
if (removed) {
num_popped += 1;
} else {
break;
}
}
assert(num_popped == num_to_add,
"Managed to pop " SIZE_FORMAT " code roots, but only " SIZE_FORMAT " "
"were added", num_popped, num_to_add);
assert(CodeRootSetTable::_purge_list != NULL, "should have grown to large hashtable");
G1CodeRootSet::purge();
assert(CodeRootSetTable::_purge_list == NULL, "should have purged old small tables");
}
}
};
void TestCodeCacheRemSet_test() {
G1CodeRootSetTest::test();
}
#endif


@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,7 +28,7 @@
#include "memory/allocation.hpp"
class CodeBlobClosure;
class CodeRootSetTable;
class G1CodeRootSetTable;
class HeapRegion;
class nmethod;
@ -42,8 +42,8 @@ class G1CodeRootSet VALUE_OBJ_CLASS_SPEC {
const static size_t Threshold = 24;
const static size_t LargeSize = 512;
CodeRootSetTable* _table;
CodeRootSetTable* load_acquire_table();
G1CodeRootSetTable* _table;
G1CodeRootSetTable* load_acquire_table();
size_t _length;


@ -0,0 +1,76 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_VM_GC_G1_G1CODEROOTSETTABLE_HPP
#define SHARE_VM_GC_G1_G1CODEROOTSETTABLE_HPP
#include "utilities/hashtable.hpp"
class nmethod;
class G1CodeRootSetTable : public Hashtable<nmethod*, mtGC> {
friend class G1CodeRootSetTest;
typedef HashtableEntry<nmethod*, mtGC> Entry;
static G1CodeRootSetTable* volatile _purge_list;
G1CodeRootSetTable* _purge_next;
unsigned int compute_hash(nmethod* nm) {
uintptr_t hash = (uintptr_t)nm;
return hash ^ (hash >> 7); // code heap blocks are 128-byte aligned
}
void remove_entry(Entry* e, Entry* previous);
Entry* new_entry(nmethod* nm);
public:
G1CodeRootSetTable(int size) : Hashtable<nmethod*, mtGC>(size, sizeof(Entry)), _purge_next(NULL) {}
~G1CodeRootSetTable();
// Needs to be protected by locks
bool add(nmethod* nm);
bool remove(nmethod* nm);
// Can be called without locking
bool contains(nmethod* nm);
int entry_size() const { return BasicHashtable<mtGC>::entry_size(); }
void copy_to(G1CodeRootSetTable* new_table);
void nmethods_do(CodeBlobClosure* blk);
template<typename CB>
int remove_if(CB& should_remove);
static void purge_list_append(G1CodeRootSetTable* tbl);
static void purge();
static size_t static_mem_size() {
return sizeof(_purge_list);
}
size_t mem_size();
};
#endif /* SHARE_VM_GC_G1_G1CODEROOTSETTABLE_HPP */


@ -1332,6 +1332,7 @@ bool G1CollectedHeap::do_full_collection(bool explicit_gc,
workers()->active_workers(),
Threads::number_of_non_daemon_threads());
workers()->update_active_workers(n_workers);
log_info(gc,task)("Using %u workers of %u to rebuild remembered set", n_workers, workers()->total_workers());
ParRebuildRSTask rebuild_rs_task(this);
workers()->run_task(&rebuild_rs_task);
@ -3068,6 +3069,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
workers()->active_workers(),
Threads::number_of_non_daemon_threads());
workers()->update_active_workers(active_workers);
log_info(gc,task)("Using %u workers of %u for evacuation", active_workers, workers()->total_workers());
TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
@ -4513,6 +4515,7 @@ void G1CollectedHeap::post_evacuate_collection_set(EvacuationInfo& evacuation_in
#if defined(COMPILER2) || INCLUDE_JVMCI
DerivedPointerTable::update_pointers();
#endif
g1_policy()->print_age_table();
}
void G1CollectedHeap::record_obj_copy_mem_stats() {


@ -1273,12 +1273,6 @@ public:
return true;
}
// The reference pending list lock is acquired from the
// ConcurrentMarkThread.
virtual bool needs_reference_pending_list_locker_thread() const {
return true;
}
inline bool is_in_young(const oop obj);
virtual bool is_scavengable(const void* addr);


@ -1035,6 +1035,8 @@ void G1ConcurrentMark::mark_from_roots() {
// worker threads may currently exist and more may not be
// available.
active_workers = _parallel_workers->update_active_workers(active_workers);
log_info(gc, task)("Using %u workers of %u for marking", active_workers, _parallel_workers->total_workers());
// Parallel task terminator is set in "set_concurrency_and_phase()"
set_concurrency_and_phase(active_workers, true /* concurrent */);


@ -885,6 +885,15 @@ bool G1DefaultPolicy::adaptive_young_list_length() const {
return _young_gen_sizer.adaptive_young_list_length();
}
size_t G1DefaultPolicy::desired_survivor_size() const {
size_t const survivor_capacity = HeapRegion::GrainWords * _max_survivor_regions;
return (size_t)((((double)survivor_capacity) * TargetSurvivorRatio) / 100);
}
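
A worked instance of the formula, with illustrative numbers rather than values from the source: if HeapRegion::GrainWords is 2^20 (an 8 MB region with 8-byte words), _max_survivor_regions is 16, and TargetSurvivorRatio has its default of 50, then survivor_capacity = 16 * 2^20 words and desired_survivor_size = 16 * 2^20 * 50 / 100 = 8 * 2^20 words, i.e. half the committed survivor space.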
void G1DefaultPolicy::print_age_table() {
_survivors_age_table.print_age_table(_tenuring_threshold);
}
void G1DefaultPolicy::update_max_gc_locker_expansion() {
uint expansion_region_num = 0;
if (GCLockerEdenExpansionPercent > 0) {
@ -908,8 +917,11 @@ void G1DefaultPolicy::update_survivors_policy() {
// smaller than 1.0) we'll get 1.
_max_survivor_regions = (uint) ceil(max_survivor_regions_d);
_tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
HeapRegion::GrainWords * _max_survivor_regions, _policy_counters);
_tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(desired_survivor_size());
if (UsePerfData) {
_policy_counters->tenuring_threshold()->set_value(_tenuring_threshold);
_policy_counters->desired_survivor_size()->set_value(desired_survivor_size() * oopSize);
}
}
bool G1DefaultPolicy::force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause) {


@ -360,6 +360,8 @@ private:
AgeTable _survivors_age_table;
protected:
size_t desired_survivor_size() const;
public:
uint tenuring_threshold() const { return _tenuring_threshold; }
@ -379,6 +381,8 @@ public:
_survivors_age_table.merge(age_table);
}
void print_age_table();
void update_max_gc_locker_expansion();
void update_survivors_policy();


@ -181,6 +181,9 @@ public:
virtual void note_stop_adding_survivor_regions() = 0;
virtual void record_age_table(AgeTable* age_table) = 0;
virtual void print_age_table() = 0;
protected:
virtual size_t desired_survivor_size() const = 0;
};
#endif // SHARE_VM_GC_G1_G1POLICY_HPP


@ -1,98 +0,0 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc/g1/g1Predictions.hpp"
#ifndef PRODUCT
void G1Predictions::test() {
double const epsilon = 1e-6;
{
// Some basic formula tests with confidence = 0.0
G1Predictions predictor(0.0);
TruncatedSeq s;
double p0 = predictor.get_new_prediction(&s);
assert(p0 < epsilon, "Initial prediction of empty sequence must be 0.0 but is %f", p0);
s.add(5.0);
double p1 = predictor.get_new_prediction(&s);
assert(fabs(p1 - 5.0) < epsilon, "Prediction should be 5.0 but is %f", p1);
for (int i = 0; i < 40; i++) {
s.add(5.0);
}
double p2 = predictor.get_new_prediction(&s);
assert(fabs(p2 - 5.0) < epsilon, "Prediction should be 5.0 but is %f", p2);
}
{
// The following test checks that the initial predictions are based on the
// average of the sequence and not on the stddev (which is 0).
G1Predictions predictor(0.5);
TruncatedSeq s;
s.add(1.0);
double p1 = predictor.get_new_prediction(&s);
assert(p1 > 1.0, "First prediction must be larger than average, but avg is %f and prediction %f", s.davg(), p1);
s.add(1.0);
double p2 = predictor.get_new_prediction(&s);
assert(p2 < p1, "First prediction must be larger than second, but they are %f %f", p1, p2);
s.add(1.0);
double p3 = predictor.get_new_prediction(&s);
assert(p3 < p2, "Second prediction must be larger than third, but they are %f %f", p2, p3);
s.add(1.0);
s.add(1.0); // Five elements are now in the sequence.
double p5 = predictor.get_new_prediction(&s);
assert(p5 < p3, "Fifth prediction must be smaller than third, but they are %f %f", p3, p5);
assert(fabs(p5 - 1.0) < epsilon, "Prediction must be 1.0+epsilon, but is %f", p5);
}
{
// The following test checks that initially the prediction based on the average
// is used, which gets overridden by the stddev prediction at the end.
G1Predictions predictor(0.5);
TruncatedSeq s;
s.add(0.5);
double p1 = predictor.get_new_prediction(&s);
assert(p1 > 0.5, "First prediction must be larger than average, but avg is %f and prediction %f", s.davg(), p1);
s.add(0.2);
double p2 = predictor.get_new_prediction(&s);
assert(p2 < p1, "First prediction must be larger than second, but they are %f %f", p1, p2);
s.add(0.5);
double p3 = predictor.get_new_prediction(&s);
assert(p3 < p2, "Second prediction must be larger than third, but they are %f %f", p2, p3);
s.add(0.2);
s.add(2.0);
double p5 = predictor.get_new_prediction(&s);
assert(p5 > p3, "Fifth prediction must be bigger than third, but they are %f %f", p3, p5);
}
}
void TestPredictions_test() {
G1Predictions::test();
}
#endif


@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -57,10 +57,6 @@ class G1Predictions VALUE_OBJ_CLASS_SPEC {
double get_new_prediction(TruncatedSeq const* seq) const {
return seq->davg() + _sigma * stddev_estimate(seq);
}
#ifndef PRODUCT
static void test();
#endif
};
#endif // SHARE_VM_GC_G1_G1PREDICTIONS_HPP


@ -204,14 +204,6 @@ void VM_G1IncCollectionPause::doit_epilogue() {
}
}
void VM_CGC_Operation::acquire_pending_list_lock() {
_pending_list_locker.lock();
}
void VM_CGC_Operation::release_and_notify_pending_list_lock() {
_pending_list_locker.unlock();
}
void VM_CGC_Operation::doit() {
GCIdMark gc_id_mark(_gc_id);
GCTraceCPUTime tcpu;
@ -222,20 +214,13 @@ void VM_CGC_Operation::doit() {
}
bool VM_CGC_Operation::doit_prologue() {
// Note the relative order of the locks must match that in
// VM_GC_Operation::doit_prologue() or deadlocks can occur
if (_needs_pending_list_lock) {
acquire_pending_list_lock();
}
Heap_lock->lock();
return true;
}
void VM_CGC_Operation::doit_epilogue() {
// Note the relative order of the unlocks must match that in
// VM_GC_Operation::doit_epilogue()
Heap_lock->unlock();
if (_needs_pending_list_lock) {
release_and_notify_pending_list_lock();
if (Universe::has_reference_pending_list()) {
Heap_lock->notify_all();
}
Heap_lock->unlock();
}


@ -27,7 +27,6 @@
#include "gc/g1/g1AllocationContext.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/referencePendingListLocker.hpp"
#include "gc/shared/vmGCOperations.hpp"
// VM_operations for the G1 collector.
@ -103,20 +102,13 @@ public:
// Concurrent GC stop-the-world operations such as remark and cleanup;
// consider sharing these with CMS's counterparts.
class VM_CGC_Operation: public VM_Operation {
VoidClosure* _cl;
const char* _printGCMessage;
bool _needs_pending_list_lock;
ReferencePendingListLocker _pending_list_locker;
uint _gc_id;
protected:
// java.lang.ref.Reference support
void acquire_pending_list_lock();
void release_and_notify_pending_list_lock();
VoidClosure* _cl;
const char* _printGCMessage;
uint _gc_id;
public:
VM_CGC_Operation(VoidClosure* cl, const char *printGCMsg, bool needs_pending_list_lock)
: _cl(cl), _printGCMessage(printGCMsg), _needs_pending_list_lock(needs_pending_list_lock), _gc_id(GCId::current()) {}
VM_CGC_Operation(VoidClosure* cl, const char *printGCMsg)
: _cl(cl), _printGCMessage(printGCMsg), _gc_id(GCId::current()) {}
virtual VMOp_Type type() const { return VMOp_CGC_Operation; }
virtual void doit();
virtual bool doit_prologue();


@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1133,18 +1133,3 @@ bool PSAdaptiveSizePolicy::print() const {
return false;
}
#ifndef PRODUCT
void TestOldFreeSpaceCalculation_test() {
assert(PSAdaptiveSizePolicy::calculate_free_based_on_live(100, 20) == 25, "Calculation of free memory failed");
assert(PSAdaptiveSizePolicy::calculate_free_based_on_live(100, 50) == 100, "Calculation of free memory failed");
assert(PSAdaptiveSizePolicy::calculate_free_based_on_live(100, 60) == 150, "Calculation of free memory failed");
assert(PSAdaptiveSizePolicy::calculate_free_based_on_live(100, 75) == 300, "Calculation of free memory failed");
assert(PSAdaptiveSizePolicy::calculate_free_based_on_live(400, 20) == 100, "Calculation of free memory failed");
assert(PSAdaptiveSizePolicy::calculate_free_based_on_live(400, 50) == 400, "Calculation of free memory failed");
assert(PSAdaptiveSizePolicy::calculate_free_based_on_live(400, 60) == 600, "Calculation of free memory failed");
assert(PSAdaptiveSizePolicy::calculate_free_based_on_live(400, 75) == 1200, "Calculation of free memory failed");
}
#endif /* !PRODUCT */


@ -564,9 +564,18 @@ HeapWord* DefNewGeneration::expand_and_allocate(size_t size,
void DefNewGeneration::adjust_desired_tenuring_threshold() {
// Set the desired survivor size to half the real survivor space
GCPolicyCounters* gc_counters = GenCollectedHeap::heap()->gen_policy()->counters();
_tenuring_threshold =
age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize, gc_counters);
size_t const survivor_capacity = to()->capacity() / HeapWordSize;
size_t const desired_survivor_size = (size_t)((((double)survivor_capacity) * TargetSurvivorRatio) / 100);
_tenuring_threshold = age_table()->compute_tenuring_threshold(desired_survivor_size);
if (UsePerfData) {
GCPolicyCounters* gc_counters = GenCollectedHeap::heap()->gen_policy()->counters();
gc_counters->tenuring_threshold()->set_value(_tenuring_threshold);
gc_counters->desired_survivor_size()->set_value(desired_survivor_size * oopSize);
}
age_table()->print_age_table(_tenuring_threshold);
}
void DefNewGeneration::collect(bool full,


@ -27,7 +27,6 @@
#include "gc/shared/ageTableTracer.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/collectorPolicy.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "memory/resourceArea.hpp"
#include "logging/log.hpp"
#include "oops/oop.inline.hpp"
@ -75,8 +74,7 @@ void AgeTable::merge(AgeTable* subTable) {
}
}
uint AgeTable::compute_tenuring_threshold(size_t survivor_capacity, GCPolicyCounters* gc_counters) {
size_t desired_survivor_size = (size_t)((((double) survivor_capacity)*TargetSurvivorRatio)/100);
uint AgeTable::compute_tenuring_threshold(size_t desired_survivor_size) {
uint result;
if (AlwaysTenure || NeverTenure) {
@ -99,9 +97,16 @@ uint AgeTable::compute_tenuring_threshold(size_t survivor_capacity, GCPolicyCoun
log_debug(gc, age)("Desired survivor size " SIZE_FORMAT " bytes, new threshold " UINTX_FORMAT " (max threshold " UINTX_FORMAT ")",
desired_survivor_size*oopSize, (uintx) result, MaxTenuringThreshold);
desired_survivor_size * oopSize, (uintx) result, MaxTenuringThreshold);
return result;
}
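
The elided body accumulates the age table from the youngest age upward; assuming it matches mainline HotSpot, the threshold is the lowest age at which the running total of surviving words exceeds the desired survivor size, capped at MaxTenuringThreshold. A self-contained sketch of that accumulation:

  #include <cstddef>

  // sizes[age] is the volume (in words) of survivors of that age; returns
  // the new tenuring threshold under the rule described above.
  unsigned compute_threshold(const size_t sizes[], unsigned table_size,
                             size_t desired_survivor_size,
                             unsigned max_threshold) {
    size_t total = 0;
    unsigned age = 1;
    while (age < table_size) {
      total += sizes[age];
      if (total > desired_survivor_size) break;
      age++;
    }
    return age < max_threshold ? age : max_threshold;
  }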
void AgeTable::print_age_table(uint tenuring_threshold) {
if (log_is_enabled(Trace, gc, age) || UsePerfData || AgeTableTracer::is_tenuring_distribution_event_enabled()) {
log_trace(gc, age)("Age table with threshold %u (max threshold " UINTX_FORMAT ")",
tenuring_threshold, MaxTenuringThreshold);
size_t total = 0;
uint age = 1;
while (age < table_size) {
@ -109,20 +114,14 @@ uint AgeTable::compute_tenuring_threshold(size_t survivor_capacity, GCPolicyCoun
total += wordSize;
if (wordSize > 0) {
log_trace(gc, age)("- age %3u: " SIZE_FORMAT_W(10) " bytes, " SIZE_FORMAT_W(10) " total",
age, wordSize*oopSize, total*oopSize);
age, wordSize * oopSize, total * oopSize);
}
AgeTableTracer::send_tenuring_distribution_event(age, wordSize*oopSize);
AgeTableTracer::send_tenuring_distribution_event(age, wordSize * oopSize);
if (UsePerfData) {
_perf_sizes[age]->set_value(wordSize*oopSize);
_perf_sizes[age]->set_value(wordSize * oopSize);
}
age++;
}
if (UsePerfData) {
gc_counters->tenuring_threshold()->set_value(result);
gc_counters->desired_survivor_size()->set_value(
desired_survivor_size*oopSize);
}
}
return result;
}


@ -29,8 +29,6 @@
#include "oops/oop.hpp"
#include "runtime/perfData.hpp"
class GCPolicyCounters;
/* Copyright (c) 1992, 2016, Oracle and/or its affiliates, and Stanford University.
See the LICENSE file for license information. */
@ -67,10 +65,12 @@ class AgeTable VALUE_OBJ_CLASS_SPEC {
// for parallel young generation gc.
void merge(AgeTable* subTable);
// calculate new tenuring threshold based on age information
uint compute_tenuring_threshold(size_t survivor_capacity, GCPolicyCounters* gc_counters);
// Calculate new tenuring threshold based on age information.
uint compute_tenuring_threshold(size_t desired_survivor_size);
void print_age_table(uint tenuring_threshold);
private:
PerfVariable* _perf_sizes[table_size];
};


@ -441,12 +441,6 @@ class CollectedHeap : public CHeapObj<mtInternal> {
// remembered set.
virtual void flush_deferred_store_barrier(JavaThread* thread);
// Should return true if the reference pending list lock is
// acquired from non-Java threads, such as a concurrent GC thread.
virtual bool needs_reference_pending_list_locker_thread() const {
return false;
}
// Perform a collection of the heap; intended for use in implementing
// "System.gc". This probably implies as full a collection as the
// "CollectedHeap" supports.


@ -281,10 +281,6 @@ public:
return UseConcMarkSweepGC;
}
virtual bool needs_reference_pending_list_locker_thread() const {
return UseConcMarkSweepGC;
}
// We don't need barriers for stores to objects in the
// young gen and, a fortiori, for initializing stores to
// objects therein. This applies to DefNew+Tenured and ParNew+CMS


@ -1,222 +0,0 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/referencePendingListLocker.hpp"
#include "memory/universe.hpp"
#include "runtime/javaCalls.hpp"
#include "utilities/preserveException.hpp"
ReferencePendingListLockerThread::ReferencePendingListLockerThread() :
JavaThread(&start),
_monitor(Monitor::nonleaf, "ReferencePendingListLocker", false, Monitor::_safepoint_check_sometimes),
_message(NONE) {}
ReferencePendingListLockerThread* ReferencePendingListLockerThread::create(TRAPS) {
// Create Java thread objects
instanceKlassHandle thread_klass = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_Thread(), true, CHECK_NULL);
instanceHandle thread_object = thread_klass->allocate_instance_handle(CHECK_NULL);
Handle thread_name = java_lang_String::create_from_str("Reference Pending List Locker", CHECK_NULL);
Handle thread_group = Universe::system_thread_group();
JavaValue result(T_VOID);
JavaCalls::call_special(&result,
thread_object,
thread_klass,
vmSymbols::object_initializer_name(),
vmSymbols::threadgroup_string_void_signature(),
thread_group,
thread_name,
CHECK_NULL);
{
MutexLocker ml(Threads_lock);
// Allocate thread
ReferencePendingListLockerThread* thread = new ReferencePendingListLockerThread();
if (thread == NULL || thread->osthread() == NULL) {
vm_exit_during_initialization("java.lang.OutOfMemoryError",
os::native_thread_creation_failed_msg());
}
// Initialize thread
java_lang_Thread::set_thread(thread_object(), thread);
java_lang_Thread::set_priority(thread_object(), NearMaxPriority);
java_lang_Thread::set_daemon(thread_object());
thread->set_threadObj(thread_object());
// Start thread
Threads::add(thread);
Thread::start(thread);
return thread;
}
}
void ReferencePendingListLockerThread::start(JavaThread* thread, TRAPS) {
ReferencePendingListLockerThread* locker_thread = static_cast<ReferencePendingListLockerThread*>(thread);
locker_thread->receive_and_handle_messages();
}
bool ReferencePendingListLockerThread::is_hidden_from_external_view() const {
return true;
}
void ReferencePendingListLockerThread::send_message(Message message) {
assert(message != NONE, "Should not be none");
MonitorLockerEx ml(&_monitor, Monitor::_no_safepoint_check_flag);
// Wait for completion of current message
while (_message != NONE) {
ml.wait(Monitor::_no_safepoint_check_flag);
}
// Send new message
_message = message;
ml.notify_all();
// Wait for completion of new message
while (_message != NONE) {
ml.wait(Monitor::_no_safepoint_check_flag);
}
}
void ReferencePendingListLockerThread::receive_and_handle_messages() {
ReferencePendingListLocker pending_list_locker;
MonitorLockerEx ml(&_monitor);
// Main loop, never terminates
for (;;) {
// Wait for message
while (_message == NONE) {
ml.wait();
}
// Handle message
if (_message == LOCK) {
pending_list_locker.lock();
} else if (_message == UNLOCK) {
pending_list_locker.unlock();
} else {
ShouldNotReachHere();
}
// Clear message
_message = NONE;
ml.notify_all();
}
}
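
The send/receive pair above forms a synchronous rendezvous on a single monitor: the sender waits for the message slot to be free, deposits its request, wakes the handler, and then waits again until the handler clears the slot. The same handshake, sketched with std::condition_variable instead of the removed Monitor code:

  #include <condition_variable>
  #include <mutex>

  enum Message { NONE, LOCK, UNLOCK };

  std::mutex mon;
  std::condition_variable cv;
  Message message = NONE;

  void send_message(Message m) {
    std::unique_lock<std::mutex> l(mon);
    cv.wait(l, [] { return message == NONE; });  // wait for a free slot
    message = m;
    cv.notify_all();                             // hand the request over
    cv.wait(l, [] { return message == NONE; });  // wait for completion
  }

  void receive_and_handle_messages() {
    std::unique_lock<std::mutex> l(mon);
    for (;;) {
      cv.wait(l, [] { return message != NONE; });
      // ... perform the requested lock/unlock work here ...
      message = NONE;
      cv.notify_all();                           // release the sender
    }
  }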
void ReferencePendingListLockerThread::lock() {
send_message(LOCK);
}
void ReferencePendingListLockerThread::unlock() {
send_message(UNLOCK);
}
bool ReferencePendingListLocker::_is_initialized = false;
ReferencePendingListLockerThread* ReferencePendingListLocker::_locker_thread = NULL;
void ReferencePendingListLocker::initialize(bool needs_locker_thread, TRAPS) {
if (needs_locker_thread) {
_locker_thread = ReferencePendingListLockerThread::create(CHECK);
}
_is_initialized = true;
}
bool ReferencePendingListLocker::is_initialized() {
return _is_initialized;
}
bool ReferencePendingListLocker::is_locked_by_self() {
oop pending_list_lock = java_lang_ref_Reference::pending_list_lock();
if (pending_list_lock == NULL) {
return false;
}
JavaThread* thread = JavaThread::current();
Handle handle(thread, pending_list_lock);
return ObjectSynchronizer::current_thread_holds_lock(thread, handle);
}
void ReferencePendingListLocker::lock() {
assert(!Heap_lock->owned_by_self(), "Heap_lock must not be owned by requesting thread");
if (Thread::current()->is_Java_thread()) {
assert(java_lang_ref_Reference::pending_list_lock() != NULL, "Not initialized");
// We may enter this with a pending exception
PRESERVE_EXCEPTION_MARK;
HandleMark hm;
Handle handle(THREAD, java_lang_ref_Reference::pending_list_lock());
// Lock
ObjectSynchronizer::fast_enter(handle, &_basic_lock, false, THREAD);
assert(is_locked_by_self(), "Locking failed");
if (HAS_PENDING_EXCEPTION) {
CLEAR_PENDING_EXCEPTION;
}
} else {
// Delegate operation to locker thread
assert(_locker_thread != NULL, "Locker thread not created");
_locker_thread->lock();
}
}
void ReferencePendingListLocker::unlock() {
if (Thread::current()->is_Java_thread()) {
assert(java_lang_ref_Reference::pending_list_lock() != NULL, "Not initialized");
// We may enter this with a pending exception
PRESERVE_EXCEPTION_MARK;
HandleMark hm;
Handle handle(THREAD, java_lang_ref_Reference::pending_list_lock());
assert(is_locked_by_self(), "Should be locked by self");
// Notify waiters if the pending list is non-empty
if (java_lang_ref_Reference::pending_list() != NULL) {
ObjectSynchronizer::notifyall(handle, THREAD);
}
// Unlock
ObjectSynchronizer::fast_exit(handle(), &_basic_lock, THREAD);
if (HAS_PENDING_EXCEPTION) {
CLEAR_PENDING_EXCEPTION;
}
} else {
// Delegate operation to locker thread
assert(_locker_thread != NULL, "Locker thread not created");
_locker_thread->unlock();
}
}


@ -1,95 +0,0 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_SHARED_REFERENCEPENDINGLISTLOCKER_HPP
#define SHARE_VM_GC_SHARED_REFERENCEPENDINGLISTLOCKER_HPP
#include "memory/allocation.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/mutex.hpp"
#include "runtime/thread.hpp"
#include "utilities/exceptions.hpp"
//
// The ReferencePendingListLockerThread locks and unlocks the reference
// pending list lock on behalf of a non-Java thread, typically a concurrent
// GC thread. This interface should not be directly accessed. All uses
// should instead go through the ReferencePendingListLocker, which calls
// this thread if needed.
//
class ReferencePendingListLockerThread : public JavaThread {
private:
enum Message {
NONE,
LOCK,
UNLOCK
};
Monitor _monitor;
Message _message;
ReferencePendingListLockerThread();
static void start(JavaThread* thread, TRAPS);
void send_message(Message message);
void receive_and_handle_messages();
public:
static ReferencePendingListLockerThread* create(TRAPS);
virtual bool is_hidden_from_external_view() const;
void lock();
void unlock();
};
//
// The ReferencePendingListLocker is the main interface for locking and
// unlocking the reference pending list lock, which needs to be held by
// the GC when adding references to the pending list. Since this is a
// Java-level monitor it can only be locked/unlocked by a Java thread.
// For this reason there is an option to spawn a helper thread, the
// ReferencePendingListLockerThread, during initialization. If a helper
// thread is spawned, all lock operations from non-Java threads will be
// delegated to the helper thread. The helper thread is typically needed
// by concurrent GCs.
//
class ReferencePendingListLocker VALUE_OBJ_CLASS_SPEC {
private:
static bool _is_initialized;
static ReferencePendingListLockerThread* _locker_thread;
BasicLock _basic_lock;
public:
static void initialize(bool needs_locker_thread, TRAPS);
static bool is_initialized();
static bool is_locked_by_self();
void lock();
void unlock();
};
#endif // SHARE_VM_GC_SHARED_REFERENCEPENDINGLISTLOCKER_HPP


@ -289,39 +289,16 @@ void ReferenceProcessor::process_phaseJNI(BoolObjectClosure* is_alive,
complete_gc->do_void();
}
template <class T>
bool enqueue_discovered_ref_helper(ReferenceProcessor* ref,
AbstractRefProcTaskExecutor* task_executor) {
// Remember old value of pending references list
T* pending_list_addr = (T*)java_lang_ref_Reference::pending_list_addr();
T old_pending_list_value = *pending_list_addr;
void ReferenceProcessor::enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor) {
// Enqueue references that are not made active again, and
// clear the decks for the next collection (cycle).
ref->enqueue_discovered_reflists((HeapWord*)pending_list_addr, task_executor);
// Do the post-barrier on pending_list_addr missed in
// enqueue_discovered_reflist.
oopDesc::bs()->write_ref_field(pending_list_addr, oopDesc::load_decode_heap_oop(pending_list_addr));
enqueue_discovered_reflists(task_executor);
// Stop treating discovered references specially.
ref->disable_discovery();
// Return true if new pending references were added
return old_pending_list_value != *pending_list_addr;
disable_discovery();
}
bool ReferenceProcessor::enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor) {
if (UseCompressedOops) {
return enqueue_discovered_ref_helper<narrowOop>(this, task_executor);
} else {
return enqueue_discovered_ref_helper<oop>(this, task_executor);
}
}
void ReferenceProcessor::enqueue_discovered_reflist(DiscoveredList& refs_list,
HeapWord* pending_list_addr) {
void ReferenceProcessor::enqueue_discovered_reflist(DiscoveredList& refs_list) {
// Given a list of refs linked through the "discovered" field
// (java.lang.ref.Reference.discovered), self-loop their "next" field
// thus distinguishing them from active References, then
@ -354,10 +331,9 @@ void ReferenceProcessor::enqueue_discovered_reflist(DiscoveredList& refs_list,
oopDesc::bs()->write_ref_field(java_lang_ref_Reference::discovered_addr(obj), next_d);
} else {
// This is the last object.
// Swap refs_list into pending_list_addr and
// set obj's discovered to what we read from pending_list_addr.
oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr);
// Need post-barrier on pending_list_addr. See enqueue_discovered_ref_helper() above.
// Swap refs_list into pending list and set obj's
// discovered to what we read from the pending list.
oop old = Universe::swap_reference_pending_list(refs_list.head());
java_lang_ref_Reference::set_discovered_raw(obj, old); // old may be NULL
oopDesc::bs()->write_ref_field(java_lang_ref_Reference::discovered_addr(obj), old);
}
@ -369,10 +345,8 @@ class RefProcEnqueueTask: public AbstractRefProcTaskExecutor::EnqueueTask {
public:
RefProcEnqueueTask(ReferenceProcessor& ref_processor,
DiscoveredList discovered_refs[],
HeapWord* pending_list_addr,
int n_queues)
: EnqueueTask(ref_processor, discovered_refs,
pending_list_addr, n_queues)
: EnqueueTask(ref_processor, discovered_refs, n_queues)
{ }
virtual void work(unsigned int work_id) {
@ -387,8 +361,7 @@ public:
for (int j = 0;
j < ReferenceProcessor::number_of_subclasses_of_ref();
j++, index += _n_queues) {
_ref_processor.enqueue_discovered_reflist(
_refs_lists[index], _pending_list_addr);
_ref_processor.enqueue_discovered_reflist(_refs_lists[index]);
_refs_lists[index].set_head(NULL);
_refs_lists[index].set_length(0);
}
@ -396,17 +369,15 @@ public:
};
// Enqueue references that are not made active again
void ReferenceProcessor::enqueue_discovered_reflists(HeapWord* pending_list_addr,
AbstractRefProcTaskExecutor* task_executor) {
void ReferenceProcessor::enqueue_discovered_reflists(AbstractRefProcTaskExecutor* task_executor) {
if (_processing_is_mt && task_executor != NULL) {
// Parallel code
RefProcEnqueueTask tsk(*this, _discovered_refs,
pending_list_addr, _max_num_q);
RefProcEnqueueTask tsk(*this, _discovered_refs, _max_num_q);
task_executor->execute(tsk);
} else {
// Serial code: call the parent class's implementation
for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
enqueue_discovered_reflist(_discovered_refs[i], pending_list_addr);
enqueue_discovered_reflist(_discovered_refs[i]);
_discovered_refs[i].set_head(NULL);
_discovered_refs[i].set_length(0);
}
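Because enqueue_discovered_references() now returns void, a caller that previously consumed the boolean result ("were new references pended?") has to ask Universe instead; a hedged sketch, where rp and executor are assumed caller-side names:
// Before: bool added = rp->enqueue_discovered_references(executor);
// After (sketch): enqueue, then query Universe under Heap_lock, since the
// predicate asserts lock ownership (see the universe.cpp hunk below).
rp->enqueue_discovered_references(executor);
bool added;
{
  MutexLocker ml(Heap_lock);
  added = Universe::has_reference_pending_list();
}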

View File

@ -290,7 +290,7 @@ class ReferenceProcessor : public CHeapObj<mtGC> {
VoidClosure* complete_gc);
// Enqueue references with a certain reachability level
void enqueue_discovered_reflist(DiscoveredList& refs_list, HeapWord* pending_list_addr);
void enqueue_discovered_reflist(DiscoveredList& refs_list);
// "Preclean" all the discovered reference lists
// by removing references with strongly reachable referents.
@ -311,7 +311,7 @@ class ReferenceProcessor : public CHeapObj<mtGC> {
// occupying the i / _num_q slot.
const char* list_name(uint i);
void enqueue_discovered_reflists(HeapWord* pending_list_addr, AbstractRefProcTaskExecutor* task_executor);
void enqueue_discovered_reflists(AbstractRefProcTaskExecutor* task_executor);
protected:
// "Preclean" the given discovered reference list
@ -424,7 +424,7 @@ class ReferenceProcessor : public CHeapObj<mtGC> {
GCTimer *gc_timer);
// Enqueue references at end of GC (called by the garbage collector)
bool enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor = NULL);
void enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor = NULL);
// If a discovery is in process that is being superseded, abandon it: all
// the discovered lists will be empty, and all the objects on them will
@ -613,11 +613,9 @@ class AbstractRefProcTaskExecutor::EnqueueTask {
protected:
EnqueueTask(ReferenceProcessor& ref_processor,
DiscoveredList refs_lists[],
HeapWord* pending_list_addr,
int n_queues)
: _ref_processor(ref_processor),
_refs_lists(refs_lists),
_pending_list_addr(pending_list_addr),
_n_queues(n_queues)
{ }
@ -627,7 +625,6 @@ public:
protected:
ReferenceProcessor& _ref_processor;
DiscoveredList* _refs_lists;
HeapWord* _pending_list_addr;
int _n_queues;
};

View File

@ -293,10 +293,11 @@ inline void CompactibleSpace::scan_and_compact(SpaceType* space) {
verify_up_to_first_dead(space);
HeapWord* const bottom = space->bottom();
HeapWord* const end_of_live = space->_end_of_live;
assert(space->_first_dead <= end_of_live, "Invariant. _first_dead: " PTR_FORMAT " <= end_of_live: " PTR_FORMAT, p2i(space->_first_dead), p2i(end_of_live));
if (space->_first_dead == end_of_live && !oop(space->bottom())->is_gc_marked()) {
if (space->_first_dead == end_of_live && (bottom == end_of_live || !oop(bottom)->is_gc_marked())) {
// Nothing to compact. The space is either empty or all live objects should be left in place.
clear_empty_region(space);
return;
@ -305,8 +306,8 @@ inline void CompactibleSpace::scan_and_compact(SpaceType* space) {
const intx scan_interval = PrefetchScanIntervalInBytes;
const intx copy_interval = PrefetchCopyIntervalInBytes;
assert(space->bottom() < end_of_live, "bottom: " PTR_FORMAT " should be < end_of_live: " PTR_FORMAT, p2i(space->bottom()), p2i(end_of_live));
HeapWord* cur_obj = space->bottom();
assert(bottom < end_of_live, "bottom: " PTR_FORMAT " should be < end_of_live: " PTR_FORMAT, p2i(bottom), p2i(end_of_live));
HeapWord* cur_obj = bottom;
if (space->_first_dead > cur_obj && !oop(cur_obj)->is_gc_marked()) {
// All objects before _first_dead can be skipped. They should not be moved.
// A pointer to the first live object is stored at the memory location for _first_dead.

View File

@ -62,14 +62,6 @@ void VM_GC_Operation::notify_gc_end() {
HS_DTRACE_WORKAROUND_TAIL_CALL_BUG();
}
void VM_GC_Operation::acquire_pending_list_lock() {
_pending_list_locker.lock();
}
void VM_GC_Operation::release_and_notify_pending_list_lock() {
_pending_list_locker.unlock();
}
// Allocations may fail in several threads at about the same time,
// resulting in multiple gc requests. We only want to do one of them.
// In case a GC locker is active and the need for a GC is already signaled,
@ -102,16 +94,13 @@ bool VM_GC_Operation::doit_prologue() {
proper_unit_for_byte_size(NewSize)));
}
acquire_pending_list_lock();
// If the GC count has changed, someone beat us to the collection
// Get the Heap_lock after the pending_list_lock.
Heap_lock->lock();
// Check invocations
if (skip_operation()) {
// skip collection
Heap_lock->unlock();
release_and_notify_pending_list_lock();
_prologue_succeeded = false;
} else {
_prologue_succeeded = true;
@ -122,9 +111,10 @@ bool VM_GC_Operation::doit_prologue() {
void VM_GC_Operation::doit_epilogue() {
assert(Thread::current()->is_Java_thread(), "just checking");
// Release the Heap_lock first.
if (Universe::has_reference_pending_list()) {
Heap_lock->notify_all();
}
Heap_lock->unlock();
release_and_notify_pending_list_lock();
}
bool VM_GC_HeapInspection::skip_operation() const {
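The notify_all on Heap_lock in doit_epilogue above pairs with a waiter that blocks until the GC publishes pending references; roughly, as a sketch (not the actual reference-handler code):
// Hedged waiter-side counterpart to the notify_all above.
{
  MutexLocker ml(Heap_lock);
  while (!Universe::has_reference_pending_list()) {
    Heap_lock->wait();   // lock is released while waiting, reacquired on wakeup
  }
  oop pending = Universe::reference_pending_list();
  Universe::set_reference_pending_list(NULL);
  // hand 'pending' over to java.lang.ref.Reference processing ...
}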

View File

@ -27,7 +27,6 @@
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/referencePendingListLocker.hpp"
#include "memory/heapInspection.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/handles.hpp"
@ -70,9 +69,6 @@
//
class VM_GC_Operation: public VM_Operation {
private:
ReferencePendingListLocker _pending_list_locker;
protected:
uint _gc_count_before; // gc count before acquiring PLL
uint _full_gc_count_before; // full gc count before acquiring PLL
@ -83,10 +79,6 @@ class VM_GC_Operation: public VM_Operation {
virtual bool skip_operation() const;
// java.lang.ref.Reference support
void acquire_pending_list_lock();
void release_and_notify_pending_list_lock();
public:
VM_GC_Operation(uint gc_count_before,
GCCause::Cause _cause,

View File

@ -162,7 +162,7 @@ class AbstractWorkGang : public CHeapObj<mtInternal> {
_active_workers = MIN2(v, _total_workers);
add_workers(false /* exit_on_failure */);
assert(v != 0, "Trying to set active workers to 0");
log_info(gc, task)("GC Workers: using %d out of %d", _active_workers, _total_workers);
log_trace(gc, task)("%s: using %d out of %d workers", name(), _active_workers, _total_workers);
return _active_workers;
}

View File

@ -203,6 +203,40 @@ void CompilerToVM::Data::initialize() {
#undef SET_TRIGFUNC
}
objArrayHandle CompilerToVM::initialize_intrinsics(TRAPS) {
objArrayHandle vmIntrinsics = oopFactory::new_objArray(VMIntrinsicMethod::klass(), (vmIntrinsics::ID_LIMIT - 1), CHECK_(objArrayHandle()));
int index = 0;
// The intrinsics for a class are usually adjacent to each other.
// When they are, the string for the class name can be reused.
vmSymbols::SID kls_sid = vmSymbols::NO_SID;
Handle kls_str;
#define SID_ENUM(n) vmSymbols::VM_SYMBOL_ENUM_NAME(n)
#define VM_SYMBOL_TO_STRING(s) \
java_lang_String::create_from_symbol(vmSymbols::symbol_at(SID_ENUM(s)), CHECK_(objArrayHandle()))
#define VM_INTRINSIC_INFO(id, kls, name, sig, ignore_fcode) { \
instanceHandle vmIntrinsicMethod = InstanceKlass::cast(VMIntrinsicMethod::klass())->allocate_instance_handle(CHECK_(objArrayHandle())); \
if (kls_sid != SID_ENUM(kls)) { \
kls_str = VM_SYMBOL_TO_STRING(kls); \
kls_sid = SID_ENUM(kls); \
} \
Handle name_str = VM_SYMBOL_TO_STRING(name); \
Handle sig_str = VM_SYMBOL_TO_STRING(sig); \
VMIntrinsicMethod::set_declaringClass(vmIntrinsicMethod, kls_str()); \
VMIntrinsicMethod::set_name(vmIntrinsicMethod, name_str()); \
VMIntrinsicMethod::set_descriptor(vmIntrinsicMethod, sig_str()); \
VMIntrinsicMethod::set_id(vmIntrinsicMethod, vmIntrinsics::id); \
vmIntrinsics->obj_at_put(index++, vmIntrinsicMethod()); \
}
VM_INTRINSICS_DO(VM_INTRINSIC_INFO, VM_SYMBOL_IGNORE, VM_SYMBOL_IGNORE, VM_SYMBOL_IGNORE, VM_ALIAS_IGNORE)
#undef SID_ENUM
#undef VM_SYMBOL_TO_STRING
#undef VM_INTRINSIC_INFO
assert(index == vmIntrinsics::ID_LIMIT - 1, "must be");
return vmIntrinsics;
}
C2V_VMENTRY(jobjectArray, readConfiguration, (JNIEnv *env))
#define BOXED_LONG(name, value) oop name; do { jvalue p; p.j = (jlong) (value); name = java_lang_boxing_object::create(T_LONG, &p, CHECK_NULL);} while(0)
#define BOXED_DOUBLE(name, value) oop name; do { jvalue p; p.d = (jdouble) (value); name = java_lang_boxing_object::create(T_DOUBLE, &p, CHECK_NULL);} while(0)
@ -211,8 +245,9 @@ C2V_VMENTRY(jobjectArray, readConfiguration, (JNIEnv *env))
CompilerToVM::Data::initialize();
VMField::klass()->initialize(thread);
VMFlag::klass()->initialize(thread);
VMField::klass()->initialize(CHECK_NULL);
VMFlag::klass()->initialize(CHECK_NULL);
VMIntrinsicMethod::klass()->initialize(CHECK_NULL);
int len = JVMCIVMStructs::localHotSpotVMStructs_count();
objArrayHandle vmFields = oopFactory::new_objArray(VMField::klass(), len, CHECK_NULL);
@ -220,7 +255,7 @@ C2V_VMENTRY(jobjectArray, readConfiguration, (JNIEnv *env))
VMStructEntry vmField = JVMCIVMStructs::localHotSpotVMStructs[i];
instanceHandle vmFieldObj = InstanceKlass::cast(VMField::klass())->allocate_instance_handle(CHECK_NULL);
size_t name_buf_len = strlen(vmField.typeName) + strlen(vmField.fieldName) + 2 /* "::" */;
char* name_buf = NEW_RESOURCE_ARRAY(char, name_buf_len + 1);
char* name_buf = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, char, name_buf_len + 1);
sprintf(name_buf, "%s::%s", vmField.typeName, vmField.fieldName);
Handle name = java_lang_String::create_from_str(name_buf, CHECK_NULL);
Handle type = java_lang_String::create_from_str(vmField.typeString, CHECK_NULL);
@ -338,12 +373,15 @@ C2V_VMENTRY(jobjectArray, readConfiguration, (JNIEnv *env))
vmFlags->obj_at_put(i, vmFlagObj());
}
objArrayOop data = oopFactory::new_objArray(SystemDictionary::Object_klass(), 5, CHECK_NULL);
objArrayHandle vmIntrinsics = CompilerToVM::initialize_intrinsics(CHECK_NULL);
objArrayOop data = oopFactory::new_objArray(SystemDictionary::Object_klass(), 6, CHECK_NULL);
data->obj_at_put(0, vmFields());
data->obj_at_put(1, vmTypes());
data->obj_at_put(2, vmConstants());
data->obj_at_put(3, vmAddresses());
data->obj_at_put(4, vmFlags());
data->obj_at_put(5, vmIntrinsics());
return (jobjectArray) JNIHandles::make_local(THREAD, data);
#undef BOXED_LONG
@ -1266,10 +1304,23 @@ C2V_END
C2V_VMENTRY(void, resolveInvokeHandleInPool, (JNIEnv*, jobject, jobject jvmci_constant_pool, jint index))
constantPoolHandle cp = CompilerToVM::asConstantPool(jvmci_constant_pool);
CallInfo callInfo;
LinkResolver::resolve_invoke(callInfo, Handle(), cp, index, Bytecodes::_invokehandle, CHECK);
ConstantPoolCacheEntry* cp_cache_entry = cp_cache_entry = cp->cache()->entry_at(cp->decode_cpcache_index(index));
cp_cache_entry->set_method_handle(cp, callInfo);
KlassHandle holder = cp->klass_ref_at(index, CHECK);
Symbol* name = cp->name_ref_at(index);
if (MethodHandles::is_signature_polymorphic_name(holder(), name)) {
CallInfo callInfo;
LinkResolver::resolve_invoke(callInfo, Handle(), cp, index, Bytecodes::_invokehandle, CHECK);
ConstantPoolCacheEntry* cp_cache_entry = cp->cache()->entry_at(cp->decode_cpcache_index(index));
cp_cache_entry->set_method_handle(cp, callInfo);
}
C2V_END
C2V_VMENTRY(jobject, getSignaturePolymorphicHolders, (JNIEnv*, jobject))
objArrayHandle holders = oopFactory::new_objArray(SystemDictionary::String_klass(), 2, CHECK_NULL);
Handle mh = java_lang_String::create_from_str("Ljava/lang/invoke/MethodHandle;", CHECK_NULL);
Handle vh = java_lang_String::create_from_str("Ljava/lang/invoke/VarHandle;", CHECK_NULL);
holders->obj_at_put(0, mh());
holders->obj_at_put(1, vh());
return JNIHandles::make_local(THREAD, holders());
C2V_END
C2V_VMENTRY(jboolean, shouldDebugNonSafepoints, (JNIEnv*, jobject))
@ -1511,6 +1562,7 @@ JNINativeMethod CompilerToVM::methods[] = {
{CC "resolveInvokeDynamicInPool", CC "(" HS_CONSTANT_POOL "I)V", FN_PTR(resolveInvokeDynamicInPool)},
{CC "resolveInvokeHandleInPool", CC "(" HS_CONSTANT_POOL "I)V", FN_PTR(resolveInvokeHandleInPool)},
{CC "resolveMethod", CC "(" HS_RESOLVED_KLASS HS_RESOLVED_METHOD HS_RESOLVED_KLASS ")" HS_RESOLVED_METHOD, FN_PTR(resolveMethod)},
{CC "getSignaturePolymorphicHolders", CC "()[" STRING, FN_PTR(getSignaturePolymorphicHolders)},
{CC "getVtableIndexForInterfaceMethod", CC "(" HS_RESOLVED_KLASS HS_RESOLVED_METHOD ")I", FN_PTR(getVtableIndexForInterfaceMethod)},
{CC "getClassInitializer", CC "(" HS_RESOLVED_KLASS ")" HS_RESOLVED_METHOD, FN_PTR(getClassInitializer)},
{CC "hasFinalizableSubclass", CC "(" HS_RESOLVED_KLASS ")Z", FN_PTR(hasFinalizableSubclass)},

View File

@ -83,8 +83,10 @@ class CompilerToVM {
}
};
public:
static JNINativeMethod methods[];
static objArrayHandle initialize_intrinsics(TRAPS);
public:
static int methods_count();
static inline Method* asMethod(jobject jvmci_method) {

View File

@ -124,6 +124,12 @@ class JVMCIJavaClasses : AllStatic {
oop_field(VMFlag, type, "Ljava/lang/String;") \
oop_field(VMFlag, value, "Ljava/lang/Object;") \
end_class \
start_class(VMIntrinsicMethod) \
oop_field(VMIntrinsicMethod, declaringClass, "Ljava/lang/String;") \
oop_field(VMIntrinsicMethod, name, "Ljava/lang/String;") \
oop_field(VMIntrinsicMethod, descriptor, "Ljava/lang/String;") \
int_field(VMIntrinsicMethod, id) \
end_class \
start_class(Assumptions_NoFinalizableSubclass) \
oop_field(Assumptions_NoFinalizableSubclass, receiverType, "Ljdk/vm/ci/meta/ResolvedJavaType;") \
end_class \

View File

@ -84,6 +84,7 @@ bool JVMCIGlobals::check_jvmci_flags_are_consistent() {
CHECK_NOT_SET(JVMCICountersExcludeCompiler, EnableJVMCI)
CHECK_NOT_SET(JVMCIUseFastLocking, EnableJVMCI)
CHECK_NOT_SET(JVMCINMethodSizeLimit, EnableJVMCI)
CHECK_NOT_SET(MethodProfileWidth, EnableJVMCI)
CHECK_NOT_SET(TraceUncollectedSpeculations, EnableJVMCI)
#ifndef PRODUCT

View File

@ -88,6 +88,9 @@
experimental(intx, JVMCINMethodSizeLimit, (80*K)*wordSize, \
"Maximum size of a compiled method.") \
\
experimental(intx, MethodProfileWidth, 0, \
"Number of methods to record in call profile") \
\
develop(bool, TraceUncollectedSpeculations, false, \
"Print message when a failed speculation was not collected")

View File

@ -51,6 +51,7 @@
do_klass(HotSpotCompilationRequestResult_klass, jdk_vm_ci_hotspot_HotSpotCompilationRequestResult, Jvmci) \
do_klass(VMField_klass, jdk_vm_ci_hotspot_VMField, Jvmci) \
do_klass(VMFlag_klass, jdk_vm_ci_hotspot_VMFlag, Jvmci) \
do_klass(VMIntrinsicMethod_klass, jdk_vm_ci_hotspot_VMIntrinsicMethod, Jvmci) \
do_klass(Assumptions_ConcreteMethod_klass, jdk_vm_ci_meta_Assumptions_ConcreteMethod, Jvmci) \
do_klass(Assumptions_NoFinalizableSubclass_klass, jdk_vm_ci_meta_Assumptions_NoFinalizableSubclass, Jvmci) \
do_klass(Assumptions_ConcreteSubtype_klass, jdk_vm_ci_meta_Assumptions_ConcreteSubtype, Jvmci) \

View File

@ -493,6 +493,7 @@
declare_constant(Method::_force_inline) \
declare_constant(Method::_dont_inline) \
declare_constant(Method::_hidden) \
declare_constant(Method::_intrinsic_candidate) \
declare_constant(Method::_reserved_stack_access) \
\
declare_constant(Method::nonvirtual_vtable_index) \

View File

@ -52,6 +52,7 @@
template(jdk_vm_ci_hotspot_HotSpotCompilationRequestResult, "jdk/vm/ci/hotspot/HotSpotCompilationRequestResult") \
template(jdk_vm_ci_hotspot_VMField, "jdk/vm/ci/hotspot/VMField") \
template(jdk_vm_ci_hotspot_VMFlag, "jdk/vm/ci/hotspot/VMFlag") \
template(jdk_vm_ci_hotspot_VMIntrinsicMethod, "jdk/vm/ci/hotspot/VMIntrinsicMethod") \
template(jdk_vm_ci_meta_JavaConstant, "jdk/vm/ci/meta/JavaConstant") \
template(jdk_vm_ci_meta_PrimitiveConstant, "jdk/vm/ci/meta/PrimitiveConstant") \
template(jdk_vm_ci_meta_RawConstant, "jdk/vm/ci/meta/RawConstant") \

View File

@ -1161,7 +1161,7 @@ void Test_invalid_log_file() {
// Attempt to log to a directory (existing log not a regular file)
create_directory(target_name);
LogFileOutput bad_file("tmplogdir");
LogFileOutput bad_file("file=tmplogdir");
assert(bad_file.initialize("", &ss) == false, "file was initialized "
"when there was an existing directory with the same name");
assert(strstr(ss.as_string(), "tmplogdir is not a regular file") != NULL,

View File

@ -44,6 +44,9 @@ size_t LogConfiguration::_n_outputs = 0;
LogConfiguration::UpdateListenerFunction* LogConfiguration::_listener_callbacks = NULL;
size_t LogConfiguration::_n_listener_callbacks = 0;
// LogFileOutput is the default type of output, its type prefix should be used if no type was specified
static const char* implicit_output_prefix = LogFileOutput::Prefix;
// Stack object to take the lock for configuring the logging.
// Should only be held during the critical parts of the configuration
// (when calling configure_output or reading/modifying the outputs array).
@ -107,6 +110,55 @@ void LogConfiguration::finalize() {
FREE_C_HEAP_ARRAY(LogOutput*, _outputs);
}
// Normalizes the given LogOutput name to type=name form.
// For example, foo, "foo", and file="foo" will all be normalized to file=foo (no quotes, type prefix added).
static bool normalize_output_name(const char* full_name, char* buffer, size_t len, outputStream* errstream) {
const char* start_quote = strchr(full_name, '"');
const char* equals = strchr(full_name, '=');
const bool quoted = start_quote != NULL;
const bool is_stdout_or_stderr = (strcmp(full_name, "stdout") == 0 || strcmp(full_name, "stderr") == 0);
// ignore equals sign within quotes
if (quoted && equals > start_quote) {
equals = NULL;
}
const char* prefix = "";
size_t prefix_len = 0;
const char* name = full_name;
if (equals != NULL) {
// split on equals sign
name = equals + 1;
prefix = full_name;
prefix_len = equals - full_name + 1;
} else if (!is_stdout_or_stderr) {
prefix = implicit_output_prefix;
prefix_len = strlen(prefix);
}
size_t name_len = strlen(name);
if (quoted) {
const char* end_quote = strchr(start_quote + 1, '"');
if (end_quote == NULL) {
errstream->print_cr("Output name has opening quote but is missing a terminating quote.");
return false;
}
if (start_quote != name || end_quote[1] != '\0') {
errstream->print_cr("Output name can not be partially quoted."
" Either surround the whole name with quotation marks,"
" or do not use quotation marks at all.");
return false;
}
// strip start and end quote
name++;
name_len -= 2;
}
int ret = jio_snprintf(buffer, len, "%.*s%.*s", prefix_len, prefix, name_len, name);
assert(ret > 0, "buffer issue");
return true;
}
size_t LogConfiguration::find_output(const char* name) {
for (size_t i = 0; i < _n_outputs; i++) {
if (strcmp(_outputs[i]->name(), name) == 0) {
@ -116,39 +168,14 @@ size_t LogConfiguration::find_output(const char* name) {
return SIZE_MAX;
}
LogOutput* LogConfiguration::new_output(char* name, const char* options, outputStream* errstream) {
const char* type;
char* equals_pos = strchr(name, '=');
if (equals_pos == NULL) {
type = "file";
} else {
*equals_pos = '\0';
type = name;
name = equals_pos + 1;
}
// Check if name is quoted, and if so, strip the quotes
char* quote = strchr(name, '"');
if (quote != NULL) {
char* end_quote = strchr(name + 1, '"');
if (end_quote == NULL) {
errstream->print_cr("Output name has opening quote but is missing a terminating quote.");
return NULL;
} else if (quote != name || end_quote[1] != '\0') {
errstream->print_cr("Output name can not be partially quoted."
" Either surround the whole name with quotation marks,"
" or do not use quotation marks at all.");
return NULL;
}
name++;
*end_quote = '\0';
}
LogOutput* LogConfiguration::new_output(const char* name,
const char* options,
outputStream* errstream) {
LogOutput* output;
if (strcmp(type, "file") == 0) {
if (strncmp(name, LogFileOutput::Prefix, strlen(LogFileOutput::Prefix)) == 0) {
output = new LogFileOutput(name);
} else {
errstream->print_cr("Unsupported log output type.");
errstream->print_cr("Unsupported log output type: %s", name);
return NULL;
}
@ -243,6 +270,7 @@ void LogConfiguration::configure_output(size_t idx, const LogTagLevelExpression&
}
void LogConfiguration::disable_output(size_t idx) {
assert(idx < _n_outputs, "invalid index: " SIZE_FORMAT " (_n_outputs: " SIZE_FORMAT ")", idx, _n_outputs);
LogOutput* out = _outputs[idx];
// Remove the output from all tagsets.
@ -253,7 +281,7 @@ void LogConfiguration::disable_output(size_t idx) {
// Delete the output unless stdout/stderr
if (out != LogOutput::Stderr && out != LogOutput::Stdout) {
delete_output(find_output(out->name()));
delete_output(idx);
} else {
out->set_config_string("all=off");
}
@ -261,8 +289,8 @@ void LogConfiguration::disable_output(size_t idx) {
void LogConfiguration::disable_logging() {
ConfigurationLock cl;
for (size_t i = 0; i < _n_outputs; i++) {
disable_output(i);
for (size_t i = _n_outputs; i > 0; i--) {
disable_output(i - 1);
}
notify_update_listeners();
}
@ -289,6 +317,8 @@ void LogConfiguration::configure_stdout(LogLevelType level, bool exact_match, ..
}
expr.set_level(level);
expr.new_combination();
assert(expr.verify_tagsets(),
"configure_stdout() called with invalid/non-existing tag set");
// Apply configuration to stdout (output #0), with the same decorators as before.
ConfigurationLock cl;
@ -334,9 +364,16 @@ bool LogConfiguration::parse_command_line_arguments(const char* opts) {
char errbuf[512];
stringStream ss(errbuf, sizeof(errbuf));
bool success = parse_log_arguments(output, what, decorators, output_options, &ss);
if (!success) {
errbuf[strlen(errbuf) - 1] = '\0'; // Strip trailing newline.
log_error(logging)("%s", errbuf);
if (ss.size() > 0) {
errbuf[strlen(errbuf) - 1] = '\0'; // Strip trailing newline
// If it failed, log the error. If it didn't fail, but something was written
// to the stream, log it as a warning.
if (!success) {
log_error(logging)("%s", ss.base());
} else {
log_warning(logging)("%s", ss.base());
}
}
os::free(copy);
@ -348,6 +385,7 @@ bool LogConfiguration::parse_log_arguments(const char* outputstr,
const char* decoratorstr,
const char* output_options,
outputStream* errstream) {
assert(errstream != NULL, "errstream can not be NULL");
if (outputstr == NULL || strlen(outputstr) == 0) {
outputstr = "stdout";
}
@ -364,28 +402,39 @@ bool LogConfiguration::parse_log_arguments(const char* outputstr,
ConfigurationLock cl;
size_t idx;
if (outputstr[0] == '#') {
int ret = sscanf(outputstr+1, SIZE_FORMAT, &idx);
if (outputstr[0] == '#') { // Output specified using index
int ret = sscanf(outputstr + 1, SIZE_FORMAT, &idx);
if (ret != 1 || idx >= _n_outputs) {
errstream->print_cr("Invalid output index '%s'", outputstr);
return false;
}
} else {
idx = find_output(outputstr);
} else { // Output specified using name
// Normalize the name, stripping quotes and ensuring it includes the type prefix
size_t len = strlen(outputstr) + strlen(implicit_output_prefix) + 1;
char* normalized = NEW_C_HEAP_ARRAY(char, len, mtLogging);
if (!normalize_output_name(outputstr, normalized, len, errstream)) {
return false;
}
idx = find_output(normalized);
if (idx == SIZE_MAX) {
char* tmp = os::strdup_check_oom(outputstr, mtLogging);
LogOutput* output = new_output(tmp, output_options, errstream);
os::free(tmp);
if (output == NULL) {
return false;
// Attempt to create and add the output
LogOutput* output = new_output(normalized, output_options, errstream);
if (output != NULL) {
idx = add_output(output);
}
idx = add_output(output);
} else if (output_options != NULL && strlen(output_options) > 0) {
errstream->print_cr("Output options for existing outputs are ignored.");
}
FREE_C_HEAP_ARRAY(char, normalized);
if (idx == SIZE_MAX) {
return false;
}
}
configure_output(idx, expr, decorators);
notify_update_listeners();
expr.verify_tagsets(errstream);
return true;
}
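To make the normalization rules concrete, a few mappings implied by the comment on normalize_output_name() above (an illustrative sketch; the function is file-static, so this is for exposition only):
char buf[128];
stringStream errs;
normalize_output_name("gc.txt", buf, sizeof(buf), &errs);            // buf: "file=gc.txt"
normalize_output_name("\"gc.txt\"", buf, sizeof(buf), &errs);        // buf: "file=gc.txt" (quotes stripped)
normalize_output_name("file=\"gc.txt\"", buf, sizeof(buf), &errs);   // buf: "file=gc.txt"
normalize_output_name("stdout", buf, sizeof(buf), &errs);            // buf: "stdout" (no prefix added)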

View File

@ -59,7 +59,7 @@ class LogConfiguration : public AllStatic {
static size_t _n_listener_callbacks;
// Create a new output. Returns NULL if failed.
static LogOutput* new_output(char* name, const char* options, outputStream* errstream);
static LogOutput* new_output(const char* name, const char* options, outputStream* errstream);
// Add an output to the list of configured outputs. Returns the assigned index.
static size_t add_output(LogOutput* out);

View File

@ -31,6 +31,7 @@
#include "utilities/globalDefinitions.hpp"
#include "utilities/defaultStream.hpp"
const char* LogFileOutput::Prefix = "file=";
const char* LogFileOutput::FileOpenMode = "a";
const char* LogFileOutput::PidFilenamePlaceholder = "%p";
const char* LogFileOutput::TimestampFilenamePlaceholder = "%t";
@ -45,7 +46,8 @@ LogFileOutput::LogFileOutput(const char* name)
_file_name(NULL), _archive_name(NULL), _archive_name_len(0),
_rotate_size(DefaultFileSize), _file_count(DefaultFileCount),
_current_size(0), _current_file(0), _rotation_semaphore(1) {
_file_name = make_file_name(name, _pid_str, _vm_start_time_str);
assert(strstr(name, Prefix) == name, "invalid output name '%s': missing prefix: %s", name, Prefix);
_file_name = make_file_name(name + strlen(Prefix), _pid_str, _vm_start_time_str);
}
void LogFileOutput::set_file_name_parameters(jlong vm_start_time) {

View File

@ -91,6 +91,7 @@ class LogFileOutput : public LogFileStreamOutput {
return _name;
}
static const char* Prefix;
static void set_file_name_parameters(jlong start_time);
};

View File

@ -29,6 +29,65 @@
const char* LogTagLevelExpression::DefaultExpressionString = "all";
static bool matches_tagset(const LogTagType tags[],
bool allow_other_tags,
const LogTagSet& ts) {
bool contains_all = true;
size_t tag_idx;
for (tag_idx = 0; tag_idx < LogTag::MaxTags && tags[tag_idx] != LogTag::__NO_TAG; tag_idx++) {
if (!ts.contains(tags[tag_idx])) {
contains_all = false;
break;
}
}
// All tags in the expression must be part of the tagset,
// and either the expression allows other tags (has a wildcard),
// or the number of tags in the expression and tagset must match.
return contains_all && (allow_other_tags || tag_idx == ts.ntags());
}
bool LogTagLevelExpression::verify_tagsets(outputStream* out) const {
bool valid = true;
for (size_t i = 0; i < _ncombinations; i++) {
bool matched = false;
for (LogTagSet* ts = LogTagSet::first(); ts != NULL; ts = ts->next()) {
if (matches_tagset(_tags[i], _allow_other_tags[i], *ts)) {
matched = true;
break;
}
}
if (!matched) {
// If this was the first invalid combination, write the message header
if (valid && out != NULL) {
out->print("No tag set matches selection(s): ");
}
valid = false;
// Break as soon as possible unless listing all invalid combinations
if (out == NULL) {
break;
}
// List the combination on the outputStream
for (size_t t = 0; t < LogTag::MaxTags && _tags[i][t] != LogTag::__NO_TAG; t++) {
out->print("%s%s", (t == 0 ? "" : "+"), LogTag::name(_tags[i][t]));
}
if (_allow_other_tags[i]) {
out->print("*");
}
out->print(" ");
}
}
if (!valid && out != NULL) {
out->cr();
}
return valid;
}
bool LogTagLevelExpression::parse(const char* str, outputStream* errstream) {
bool success = true;
if (str == NULL || strcmp(str, "") == 0) {
@ -105,7 +164,14 @@ bool LogTagLevelExpression::parse(const char* str, outputStream* errstream) {
success = false;
break;
}
add_tag(tag);
if (!add_tag(tag)) {
if (errstream != NULL) {
errstream->print_cr("Tag combination have duplicate tag '%s' in what-expression.",
cur_tag);
}
success = false;
break;
}
cur_tag = plus_pos + 1;
} while (plus_pos != NULL);
@ -120,20 +186,10 @@ LogLevelType LogTagLevelExpression::level_for(const LogTagSet& ts) const {
// Return NotMentioned if the given tagset isn't covered by this expression.
LogLevelType level = LogLevel::NotMentioned;
for (size_t combination = 0; combination < _ncombinations; combination++) {
bool contains_all = true;
size_t tag_idx;
for (tag_idx = 0; tag_idx < LogTag::MaxTags && _tags[combination][tag_idx] != LogTag::__NO_TAG; tag_idx++) {
if (!ts.contains(_tags[combination][tag_idx])) {
contains_all = false;
break;
}
}
// All tags in the expression must be part of the tagset,
// and either the expression allows other tags (has a wildcard),
// or the number of tags in the expression and tagset must match.
if (contains_all && (_allow_other_tags[combination] || tag_idx == ts.ntags())) {
if (matches_tagset(_tags[combination], _allow_other_tags[combination], ts)) {
level = _level[combination];
}
}
return level;
}
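The rule factored into matches_tagset() above can be restated as a small standalone function, here using standard containers rather than HotSpot types (a sketch, for exposition):
#include <set>
#include <string>

// Every tag in the selection must appear in the tagset; unless the selection
// carried a wildcard, the tag counts must also match exactly.
static bool matches(const std::set<std::string>& selection,
                    bool allow_other_tags,
                    const std::set<std::string>& tagset) {
  for (const std::string& tag : selection) {
    if (tagset.count(tag) == 0) {
      return false;                 // selection mentions a tag the set lacks
    }
  }
  return allow_other_tags || selection.size() == tagset.size();
}
// e.g. {gc} with a wildcard matches {gc, heap}; {gc, heap} without a
// wildcard does not match {gc, heap, start} (tag counts differ).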

View File

@ -59,9 +59,15 @@ class LogTagLevelExpression : public StackObj {
_ntags = 0;
}
void add_tag(LogTagType tag) {
bool add_tag(LogTagType tag) {
assert(_ntags < LogTag::MaxTags, "Can't have more tags than MaxTags!");
for (size_t i = 0; i < _ntags; i++) {
if (_tags[_ncombinations][i] == tag) {
return false;
}
}
_tags[_ncombinations][_ntags++] = tag;
return true;
}
void set_level(LogLevelType level) {
@ -83,6 +89,11 @@ class LogTagLevelExpression : public StackObj {
bool parse(const char* str, outputStream* errstream = NULL);
LogLevelType level_for(const LogTagSet& ts) const;
// Verify the tagsets/selections mentioned in this expression.
// Returns false if some invalid tagset was found. If given an outputstream,
// this function will list all the invalid selections on the stream.
bool verify_tagsets(outputStream* out = NULL) const;
};
#endif // SHARE_VM_LOGGING_LOGTAGLEVELEXPRESSION_HPP

View File

@ -86,7 +86,7 @@ class LogTagSet VALUE_OBJ_CLASS_SPEC {
}
bool contains(LogTagType tag) const {
for (size_t i = 0; _tag[i] != LogTag::__NO_TAG; i++) {
for (size_t i = 0; i < LogTag::MaxTags && _tag[i] != LogTag::__NO_TAG; i++) {
if (tag == _tag[i]) {
return true;
}

View File

@ -135,6 +135,7 @@ oop Universe::_arithmetic_exception_instance = NULL;
oop Universe::_virtual_machine_error_instance = NULL;
oop Universe::_vm_exception = NULL;
oop Universe::_allocation_context_notification_obj = NULL;
oop Universe::_reference_pending_list = NULL;
Array<int>* Universe::_the_empty_int_array = NULL;
Array<u2>* Universe::_the_empty_short_array = NULL;
@ -212,6 +213,7 @@ void Universe::oops_do(OopClosure* f, bool do_all) {
f->do_oop((oop*)&_system_thread_group);
f->do_oop((oop*)&_vm_exception);
f->do_oop((oop*)&_allocation_context_notification_obj);
f->do_oop((oop*)&_reference_pending_list);
debug_only(f->do_oop((oop*)&_fullgc_alot_dummy_array);)
}
@ -488,6 +490,35 @@ void Universe::fixup_mirrors(TRAPS) {
java_lang_Class::set_fixup_mirror_list(NULL);
}
#define assert_pll_locked(test) \
assert(Heap_lock->test(), "Reference pending list access requires lock")
#define assert_pll_ownership() assert_pll_locked(owned_by_self)
oop Universe::reference_pending_list() {
assert_pll_ownership();
return _reference_pending_list;
}
void Universe::set_reference_pending_list(oop list) {
assert_pll_ownership();
_reference_pending_list = list;
}
bool Universe::has_reference_pending_list() {
assert_pll_ownership();
return _reference_pending_list != NULL;
}
oop Universe::swap_reference_pending_list(oop list) {
assert_pll_locked(is_locked);
return (oop)Atomic::xchg_ptr(list, &_reference_pending_list);
}
#undef assert_pll_locked
#undef assert_pll_ownership
static bool has_run_finalizers_on_exit = false;
void Universe::run_finalizers_on_exit() {
@ -565,12 +596,14 @@ bool Universe::should_fill_in_stack_trace(Handle throwable) {
oop Universe::gen_out_of_memory_error(oop default_err) {
// generate an out of memory error:
// - if there is a preallocated error with backtrace available then return it with
// a filled in stack trace.
// - if there are no preallocated errors with backtrace available then return
// an error without backtrace.
// - if there is a preallocated error and stack traces are available
// (j.l.Throwable is initialized), then return the preallocated
// error with a filled in stack trace, and with the message
// provided by the default error.
// - otherwise, return the default error, without a stack trace.
int next;
if (_preallocated_out_of_memory_error_avail_count > 0) {
if ((_preallocated_out_of_memory_error_avail_count > 0) &&
SystemDictionary::Throwable_klass()->is_initialized()) {
next = (int)Atomic::add(-1, &_preallocated_out_of_memory_error_avail_count);
assert(next < (int)PreallocatedOutOfMemoryErrorCount, "avail count is corrupt");
} else {

View File

@ -185,6 +185,9 @@ class Universe: AllStatic {
static oop _allocation_context_notification_obj;
// References waiting to be transferred to the ReferenceHandler
static oop _reference_pending_list;
// The particular choice of collected heap.
static CollectedHeap* _collectedHeap;
@ -334,6 +337,17 @@ class Universe: AllStatic {
static inline oop allocation_context_notification_obj();
static inline void set_allocation_context_notification_obj(oop obj);
// Reference pending list manipulation. Access is protected by
// Heap_lock. The getter, setter and predicate require that the caller
// owns the lock. Swap is used by parallel non-concurrent reference
// processing threads, where some higher-level controller owns
// Heap_lock, so swap requires only that the lock is held, not
// necessarily by the current thread.
static oop reference_pending_list();
static void set_reference_pending_list(oop list);
static bool has_reference_pending_list();
static oop swap_reference_pending_list(oop list);
static Array<int>* the_empty_int_array() { return _the_empty_int_array; }
static Array<u2>* the_empty_short_array() { return _the_empty_short_array; }
static Array<Method*>* the_empty_method_array() { return _the_empty_method_array; }

View File

@ -56,7 +56,9 @@ class ArrayKlass: public Klass {
void set_dimension(int dimension) { _dimension = dimension; }
Klass* higher_dimension() const { return _higher_dimension; }
inline Klass* higher_dimension_acquire() const; // load with acquire semantics
void set_higher_dimension(Klass* k) { _higher_dimension = k; }
inline void release_set_higher_dimension(Klass* k); // store with release semantics
Klass** adr_higher_dimension() { return (Klass**)&this->_higher_dimension;}
Klass* lower_dimension() const { return _lower_dimension; }
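The acquire/release pair introduced above supports lock-free readers of the lazily created higher-dimension klass; a hedged sketch of the usual publication pattern it enables (create_next_dimension is a hypothetical helper):
// Reader: a load-acquire guarantees that if a Klass is observed, all of its
// initializing stores are visible too.
Klass* hd = ak->higher_dimension_acquire();
if (hd == NULL) {
  // Writer path (under a lock in the real code): initialize fully first,
  // then publish with a store-release.
  Klass* k = create_next_dimension(ak);      // hypothetical helper
  ak->release_set_higher_dimension(k);
}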

Some files were not shown because too many files have changed in this diff.