Lana Steuck 2011-04-29 20:15:22 -07:00
commit 0d5b47ed3c
820 changed files with 37014 additions and 22200 deletions
.hgtags
.hgtags-top-repo
corba
hotspot
.hgtags
agent/src/share/classes/sun/jvm/hotspot
make
hotspot_version
linux/makefiles
solaris/makefiles
windows/makefiles
src
cpu
os
linux/vm
windows/vm
share

@ -112,3 +112,5 @@ def8e16dd237a47fc067d66d4c616d7baaec6001 jdk7-b134
f75a1efb141210901aabe00a834e0fc32bb8b337 jdk7-b135
46acf76a533954cfd594bb88fdea79938abfbe20 jdk7-b136
d1cf7d4ee16c341f5b8c7e7f1d68a8c412b6c693 jdk7-b137
62b8e328f8c8c66c14b0713222116f2add473f3f jdk7-b138
955488f34ca418f6cdab843d61c20d2c615637d9 jdk7-b139

@ -112,3 +112,5 @@ ddc2fcb3682ffd27f44354db666128827be7e3c3 jdk7-b134
783bd02b4ab4596059c74b10a1793d7bd2f1c157 jdk7-b135
2fe76e73adaa5133ac559f0b3c2c0707eca04580 jdk7-b136
7654afc6a29e43cb0a1343ce7f1287bf690d5e5f jdk7-b137
fc47c97bbbd91b1f774d855c48a7e285eb1a351a jdk7-b138
7ed6d0b9aaa12320832a7ddadb88d6d8d0dda4c1 jdk7-b139

@ -112,3 +112,5 @@ d7532bcd3742f1576dd07ff9fbb535c9c9a276e9 jdk7-b126
e0b72ae5dc5e824b342801c8d1d336a55eb54e2c jdk7-b135
48ef0c712e7cbf272f47f9224db92a3c6a9e2612 jdk7-b136
a66c01d8bf895261715955df0b95545c000ed6a8 jdk7-b137
78d8cf04697e9df54f7f11e195b7da29b8e345a2 jdk7-b138
60b074ec6fcf5cdf9efce22fdfb02326ed8fa2d3 jdk7-b139

@ -162,3 +162,7 @@ bd586e392d93b7ed7a1636dcc8da2b6a4203a102 jdk7-b136
bd586e392d93b7ed7a1636dcc8da2b6a4203a102 hs21-b06
2dbcb4a4d8dace5fe78ceb563b134f1fb296cd8f jdk7-b137
2dbcb4a4d8dace5fe78ceb563b134f1fb296cd8f hs21-b07
0930dc920c185afbf40fed9a655290b8e5b16783 jdk7-b138
0930dc920c185afbf40fed9a655290b8e5b16783 hs21-b08
611e19a16519d6fb5deea9ab565336e6e6ee475d jdk7-b139
611e19a16519d6fb5deea9ab565336e6e6ee475d hs21-b09

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2001, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -55,7 +55,7 @@ public class HelloWorld {
synchronized(lock) {
if (useMethodInvoke) {
try {
Method method = HelloWorld.class.getMethod("e", null);
Method method = HelloWorld.class.getMethod("e");
Integer result = (Integer) method.invoke(null, new Object[0]);
return result.intValue();
}

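For context, a standalone sketch of the varargs reflection idiom the hunk above adopts (the class and method here are illustrative, not from the commit): since Java 5, getMethod and invoke are varargs, so the explicit null and new Object[0] arguments can simply be omitted.

    import java.lang.reflect.Method;

    public class VarargsReflect {
        public static int e() { return 42; }

        public static void main(String[] args) throws Exception {
            Method m = VarargsReflect.class.getMethod("e"); // no Class[] argument needed
            Integer result = (Integer) m.invoke(null);      // static method, no arguments
            System.out.println(result);                     // prints 42
        }
    }
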
@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2003, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -52,12 +52,10 @@ public class ByteValueImpl extends PrimitiveValueImpl
return intValue();
}
public int compareTo(Object obj) {
byte other = ((ByteValue)obj).value();
return value() - other;
public int compareTo(ByteValue byteVal) {
return value() - byteVal.value();
}
public Type type() {
return vm.theByteType();
}

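The hunk above, and several that follow (CharValueImpl, DoubleValueImpl, FieldImpl, and so on), apply one pattern: compareTo(Object) plus a cast becomes a strongly typed compareTo matching Comparable<T>. A minimal sketch with a hypothetical value class:

    // Illustrative only; ByteBox is not a JDK class.
    final class ByteBox implements Comparable<ByteBox> {
        private final byte value;
        ByteBox(byte value) { this.value = value; }
        byte value() { return value; }

        @Override
        public int compareTo(ByteBox other) {
            return value() - other.value(); // byte operands: the difference cannot overflow an int
        }
    }
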
@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2003, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -52,9 +52,8 @@ public class CharValueImpl extends PrimitiveValueImpl
return intValue();
}
public int compareTo(Object obj) {
char other = ((CharValue)obj).value();
return value() - other;
public int compareTo(CharValue charVal) {
return value() - charVal.value();
}
public Type type() {

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2004, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -186,7 +186,7 @@ abstract class ConnectorImpl implements Connector {
// assert isVMVersionMismatch(throwable), "not a VMVersionMismatch"
Class expClass = throwable.getClass();
Method targetVersionMethod = expClass.getMethod("getTargetVersion", new Class[0]);
return (String) targetVersionMethod.invoke(throwable, null);
return (String) targetVersionMethod.invoke(throwable);
}
/** If the causal chain has a sun.jvm.hotspot.runtime.VMVersionMismatchException,

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2003, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -45,8 +45,8 @@ public class DoubleValueImpl extends PrimitiveValueImpl
}
}
public int compareTo(Object obj) {
double other = ((DoubleValue)obj).value();
public int compareTo(DoubleValue doubleVal) {
double other = doubleVal.value();
if (value() < other) {
return -1;
} else if (value() == other) {

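Note that the floating-point hunks keep explicit branches rather than the subtraction idiom, which is only safe for narrow integral types and gives NaN no defined outcome. A sketch of the branch structure used above:

    // Illustrative only; mirrors DoubleValueImpl.compareTo above.
    final class DoubleCompare {
        static int compare(double a, double b) {
            if (a < b) {
                return -1;
            } else if (a == b) {
                return 0;
            } else {
                return 1; // also taken when either operand is NaN
            }
        }
    }
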
@ -145,8 +145,7 @@ public class FieldImpl extends TypeComponentImpl implements Field {
}
// From interface Comparable
public int compareTo(Object object) {
Field field = (Field)object;
public int compareTo(Field field) {
ReferenceTypeImpl declaringType = (ReferenceTypeImpl)declaringType();
int rc = declaringType.compareTo(field.declaringType());
if (rc == 0) {

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2003, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -52,8 +52,8 @@ public class FloatValueImpl extends PrimitiveValueImpl
return intValue();
}
public int compareTo(Object obj) {
float other = ((FloatValue)obj).value();
public int compareTo(FloatValue floatVal) {
float other = floatVal.value();
if (value() < other) {
return -1;
} else if (value() == other) {

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2003, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -52,9 +52,8 @@ public class IntegerValueImpl extends PrimitiveValueImpl
return intValue();
}
public int compareTo(Object obj) {
int other = ((IntegerValue)obj).value();
return value() - other;
public int compareTo(IntegerValue integerVal) {
return value() - integerVal.value();
}
public Type type() {

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2004, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -67,8 +67,8 @@ public class LocalVariableImpl extends MirrorImpl
return (int)method.hashCode() + slot();
}
public int compareTo(Object object) {
LocalVariableImpl other = (LocalVariableImpl)object;
public int compareTo(LocalVariable localVar) {
LocalVariableImpl other = (LocalVariableImpl) localVar;
int rc = method.compareTo(other.method);
if (rc == 0) {
rc = slot() - other.slot();

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2003, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -78,8 +78,7 @@ public class LocationImpl extends MirrorImpl implements Location {
return method().hashCode() + (int)codeIndex();
}
public int compareTo(Object object) {
LocationImpl other = (LocationImpl)object;
public int compareTo(Location other) {
int rc = method().compareTo(other.method());
if (rc == 0) {
long diff = codeIndex() - other.codeIndex();

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2003, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -52,8 +52,8 @@ public class LongValueImpl extends PrimitiveValueImpl
return intValue();
}
public int compareTo(Object obj) {
long other = ((LongValue)obj).value();
public int compareTo(LongValue longVal) {
long other = longVal.value();
if (value() < other) {
return -1;
} else if (value() == other) {

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2005, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -200,8 +200,7 @@ public abstract class MethodImpl extends TypeComponentImpl implements Method {
}
// From interface Comparable
public int compareTo(Object object) {
Method method = (Method)object;
public int compareTo(Method method) {
ReferenceTypeImpl declaringType = (ReferenceTypeImpl)declaringType();
int rc = declaringType.compareTo(method.declaringType());
if (rc == 0) {

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2005, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -99,7 +99,7 @@ implements ReferenceType {
return saKlass.hashCode();
}
public int compareTo(Object object) {
public int compareTo(ReferenceType refType) {
/*
* Note that it is critical that compareTo() == 0
* implies that equals() == true. Otherwise, TreeSet
@ -108,7 +108,7 @@ implements ReferenceType {
* (Classes of the same name loaded by different class loaders
* or in different VMs must not return 0).
*/
ReferenceTypeImpl other = (ReferenceTypeImpl)object;
ReferenceTypeImpl other = (ReferenceTypeImpl)refType;
int comp = name().compareTo(other.name());
if (comp == 0) {
Oop rf1 = ref();

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2003, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -52,9 +52,8 @@ public class ShortValueImpl extends PrimitiveValueImpl
return intValue();
}
public int compareTo(Object obj) {
short other = ((ShortValue)obj).value();
return value() - other;
public int compareTo(ShortValue shortVal) {
return value() - shortVal.value();
}
public Type type() {

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2006, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -798,12 +798,11 @@ public class VirtualMachineImpl extends MirrorImpl implements PathSearchingVirtu
}
public String description() {
String[] versionParts = {"" + vmmgr.majorInterfaceVersion(),
"" + vmmgr.minorInterfaceVersion(),
name()};
return java.text.MessageFormat.format(java.util.ResourceBundle.
getBundle("com.sun.tools.jdi.resources.jdi").getString("version_format"),
versionParts);
"" + vmmgr.majorInterfaceVersion(),
"" + vmmgr.minorInterfaceVersion(),
name());
}
public String version() {

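A sketch of the MessageFormat change above: format(String, Object...) has been varargs since Java 5, so the temporary String[] can be dropped (the pattern string below is illustrative, not the real resource-bundle entry):

    import java.text.MessageFormat;

    final class FormatDemo {
        static String describe(int major, int minor, String name) {
            return MessageFormat.format("version {0}.{1} ({2})",
                                        "" + major, "" + minor, name);
        }
    }
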
@ -331,8 +331,6 @@ public class ConstantPool extends Oop implements ClassConstants {
if (Assert.ASSERTS_ENABLED) {
Assert.that(getTagAt(i).isInvokeDynamic(), "Corrupted constant pool");
}
if (getTagAt(i).value() == JVM_CONSTANT_InvokeDynamicTrans)
return null;
int bsmSpec = extractLowShortFromInt(this.getIntAt(i));
TypeArray operands = getOperands();
if (operands == null) return null; // safety first
@ -368,7 +366,6 @@ public class ConstantPool extends Oop implements ClassConstants {
case JVM_CONSTANT_MethodHandle: return "JVM_CONSTANT_MethodHandle";
case JVM_CONSTANT_MethodType: return "JVM_CONSTANT_MethodType";
case JVM_CONSTANT_InvokeDynamic: return "JVM_CONSTANT_InvokeDynamic";
case JVM_CONSTANT_InvokeDynamicTrans: return "JVM_CONSTANT_InvokeDynamic/transitional";
case JVM_CONSTANT_Invalid: return "JVM_CONSTANT_Invalid";
case JVM_CONSTANT_UnresolvedClass: return "JVM_CONSTANT_UnresolvedClass";
case JVM_CONSTANT_UnresolvedClassInError: return "JVM_CONSTANT_UnresolvedClassInError";
@ -428,7 +425,6 @@ public class ConstantPool extends Oop implements ClassConstants {
case JVM_CONSTANT_MethodHandle:
case JVM_CONSTANT_MethodType:
case JVM_CONSTANT_InvokeDynamic:
case JVM_CONSTANT_InvokeDynamicTrans:
visitor.doInt(new IntField(new NamedFieldIdentifier(nameForTag(ctag)), indexOffset(index), true), true);
break;
}
@ -592,7 +588,6 @@ public class ConstantPool extends Oop implements ClassConstants {
break;
}
case JVM_CONSTANT_InvokeDynamicTrans:
case JVM_CONSTANT_InvokeDynamic: {
dos.writeByte(cpConstType);
int value = getIntAt(ci);

@ -42,7 +42,7 @@ public interface ClassConstants
public static final int JVM_CONSTANT_NameAndType = 12;
public static final int JVM_CONSTANT_MethodHandle = 15;
public static final int JVM_CONSTANT_MethodType = 16;
public static final int JVM_CONSTANT_InvokeDynamicTrans = 17; // only occurs in old class files
// static final int JVM_CONSTANT_(unused) = 17;
public static final int JVM_CONSTANT_InvokeDynamic = 18;
// JVM_CONSTANT_MethodHandle subtypes

@ -321,7 +321,6 @@ public class ClassWriter implements /* imports */ ClassConstants
break;
}
case JVM_CONSTANT_InvokeDynamicTrans:
case JVM_CONSTANT_InvokeDynamic: {
dos.writeByte(cpConstType);
int value = cpool.getIntAt(ci);

@ -598,7 +598,6 @@ public class HTMLGenerator implements /* imports */ ClassConstants {
buf.cell(Integer.toString(cpool.getIntAt(index)));
break;
case JVM_CONSTANT_InvokeDynamicTrans:
case JVM_CONSTANT_InvokeDynamic:
buf.cell("JVM_CONSTANT_InvokeDynamic");
buf.cell(genLowHighShort(cpool.getIntAt(index)) +

@ -40,7 +40,7 @@ public class ConstantTag {
private static int JVM_CONSTANT_NameAndType = 12;
private static int JVM_CONSTANT_MethodHandle = 15; // JSR 292
private static int JVM_CONSTANT_MethodType = 16; // JSR 292
private static int JVM_CONSTANT_InvokeDynamicTrans = 17; // JSR 292, only occurs in old class files
// static int JVM_CONSTANT_(unused) = 17; // JSR 292 early drafts only
private static int JVM_CONSTANT_InvokeDynamic = 18; // JSR 292
private static int JVM_CONSTANT_Invalid = 0; // For bad value initialization
private static int JVM_CONSTANT_UnresolvedClass = 100; // Temporary tag until actual use
@ -83,7 +83,6 @@ public class ConstantTag {
public boolean isMethodHandle() { return tag == JVM_CONSTANT_MethodHandle; }
public boolean isMethodType() { return tag == JVM_CONSTANT_MethodType; }
public boolean isInvokeDynamic() { return tag == JVM_CONSTANT_InvokeDynamic; }
public boolean isInvokeDynamicTrans() { return tag == JVM_CONSTANT_InvokeDynamicTrans; }
public boolean isInvalid() { return tag == JVM_CONSTANT_Invalid; }

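Context for the InvokeDynamicTrans hunks above: constant pool tag 17 appeared only in early JSR 292 drafts, and this change retires it so that only tag 18 (JVM_CONSTANT_InvokeDynamic) is recognized. A minimal reader-side sketch of the resulting dispatch:

    // Illustrative only; mirrors the shape of the SA nameForTag switch after the change.
    final class CpTagNames {
        static String nameForTag(int tag) {
            switch (tag) {
                case 15: return "JVM_CONSTANT_MethodHandle";
                case 16: return "JVM_CONSTANT_MethodType";
                case 18: return "JVM_CONSTANT_InvokeDynamic";
                default: throw new IllegalArgumentException("unexpected tag " + tag);
            }
        }
    }
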
@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2011
HS_MAJOR_VER=21
HS_MINOR_VER=0
HS_BUILD_NUMBER=08
HS_BUILD_NUMBER=10
JDK_MAJOR_VER=1
JDK_MINOR_VER=7

@ -1,5 +1,5 @@
#
# Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -97,8 +97,8 @@ $(GENERATED)/sa-jdi.jar: $(AGENT_FILES1) $(AGENT_FILES2)
$(foreach file,$(AGENT_FILES1),$(shell echo $(file) >> $(AGENT_FILES1_LIST)))
$(foreach file,$(AGENT_FILES2),$(shell echo $(file) >> $(AGENT_FILES2_LIST)))
$(QUIETLY) $(REMOTE) $(COMPILE.JAVAC) -source 1.4 -target 1.4 -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -d $(SA_CLASSDIR) @$(AGENT_FILES1_LIST)
$(QUIETLY) $(REMOTE) $(COMPILE.JAVAC) -source 1.4 -target 1.4 -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -d $(SA_CLASSDIR) @$(AGENT_FILES2_LIST)
$(QUIETLY) $(REMOTE) $(COMPILE.JAVAC) -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -d $(SA_CLASSDIR) @$(AGENT_FILES1_LIST)
$(QUIETLY) $(REMOTE) $(COMPILE.JAVAC) -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -d $(SA_CLASSDIR) @$(AGENT_FILES2_LIST)
$(QUIETLY) $(REMOTE) $(COMPILE.RMIC) -classpath $(SA_CLASSDIR) -d $(SA_CLASSDIR) sun.jvm.hotspot.debugger.remote.RemoteDebuggerServer
$(QUIETLY) echo "$(SA_BUILD_VERSION_PROP)" > $(SA_PROPERTIES)

@ -142,13 +142,15 @@ COMPILER2_PATHS += $(HS_COMMON_SRC)/share/vm/opto
COMPILER2_PATHS += $(HS_COMMON_SRC)/share/vm/libadt
COMPILER2_PATHS += $(GENERATED)/adfiles
SHARK_PATHS := $(GAMMADIR)/src/share/vm/shark
# Include dirs per type.
Src_Dirs/CORE := $(CORE_PATHS)
Src_Dirs/COMPILER1 := $(CORE_PATHS) $(COMPILER1_PATHS)
Src_Dirs/COMPILER2 := $(CORE_PATHS) $(COMPILER2_PATHS)
Src_Dirs/TIERED := $(CORE_PATHS) $(COMPILER1_PATHS) $(COMPILER2_PATHS)
Src_Dirs/ZERO := $(CORE_PATHS)
Src_Dirs/SHARK := $(CORE_PATHS)
Src_Dirs/SHARK := $(CORE_PATHS) $(SHARK_PATHS)
Src_Dirs := $(Src_Dirs/$(TYPE))
COMPILER2_SPECIFIC_FILES := opto libadt bcEscapeAnalyzer.cpp chaitin\* c2_\* runtime_\*

@ -1,5 +1,5 @@
#
# Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -88,8 +88,8 @@ $(GENERATED)/sa-jdi.jar: $(AGENT_FILES1) $(AGENT_FILES2)
$(foreach file,$(AGENT_FILES1),$(shell echo $(file) >> $(AGENT_FILES1_LIST)))
$(foreach file,$(AGENT_FILES2),$(shell echo $(file) >> $(AGENT_FILES2_LIST)))
$(QUIETLY) $(COMPILE.JAVAC) -source 1.4 -target 1.4 -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -d $(SA_CLASSDIR) @$(AGENT_FILES1_LIST)
$(QUIETLY) $(COMPILE.JAVAC) -source 1.4 -target 1.4 -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -d $(SA_CLASSDIR) @$(AGENT_FILES2_LIST)
$(QUIETLY) $(COMPILE.JAVAC) -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -d $(SA_CLASSDIR) @$(AGENT_FILES1_LIST)
$(QUIETLY) $(COMPILE.JAVAC) -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -d $(SA_CLASSDIR) @$(AGENT_FILES2_LIST)
$(QUIETLY) $(COMPILE.RMIC) -classpath $(SA_CLASSDIR) -d $(SA_CLASSDIR) sun.jvm.hotspot.debugger.remote.RemoteDebuggerServer
$(QUIETLY) echo "$(SA_BUILD_VERSION_PROP)" > $(SA_PROPERTIES)

@ -1,5 +1,5 @@
#
# Copyright (c) 2003, 2009, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -55,9 +55,9 @@ default:: $(GENERATED)\sa-jdi.jar
$(GENERATED)\sa-jdi.jar: $(AGENT_FILES1:/=\) $(AGENT_FILES2:/=\)
@if not exist $(SA_CLASSDIR) mkdir $(SA_CLASSDIR)
@echo ...Building sa-jdi.jar
@echo ...$(COMPILE_JAVAC) -source 1.4 -target 1.4 -classpath $(SA_CLASSPATH) -d $(SA_CLASSDIR) ....
@$(COMPILE_JAVAC) -source 1.4 -target 1.4 -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -d $(SA_CLASSDIR) $(AGENT_FILES1:/=\)
@$(COMPILE_JAVAC) -source 1.4 -target 1.4 -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -d $(SA_CLASSDIR) $(AGENT_FILES2:/=\)
@echo ...$(COMPILE_JAVAC) -classpath $(SA_CLASSPATH) -d $(SA_CLASSDIR) ....
@$(COMPILE_JAVAC) -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -d $(SA_CLASSDIR) $(AGENT_FILES1:/=\)
@$(COMPILE_JAVAC) -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -d $(SA_CLASSDIR) $(AGENT_FILES2:/=\)
$(COMPILE_RMIC) -classpath $(SA_CLASSDIR) -d $(SA_CLASSDIR) sun.jvm.hotspot.debugger.remote.RemoteDebuggerServer
$(QUIETLY) echo $(SA_BUILD_VERSION_PROP)> $(SA_PROPERTIES)
$(QUIETLY) rm -f $(SA_CLASSDIR)/sun/jvm/hotspot/utilities/soql/sa.js

@ -4257,34 +4257,14 @@ void MacroAssembler::bang_stack_size(Register Rsize, Register Rtsp,
///////////////////////////////////////////////////////////////////////////////////
#ifndef SERIALGC
static uint num_stores = 0;
static uint num_null_pre_stores = 0;
static address satb_log_enqueue_with_frame = NULL;
static u_char* satb_log_enqueue_with_frame_end = NULL;
static void count_null_pre_vals(void* pre_val) {
num_stores++;
if (pre_val == NULL) num_null_pre_stores++;
if ((num_stores % 1000000) == 0) {
tty->print_cr(UINT32_FORMAT " stores, " UINT32_FORMAT " (%5.2f%%) with null pre-vals.",
num_stores, num_null_pre_stores,
100.0*(float)num_null_pre_stores/(float)num_stores);
}
}
static address satb_log_enqueue_with_frame = 0;
static u_char* satb_log_enqueue_with_frame_end = 0;
static address satb_log_enqueue_frameless = 0;
static u_char* satb_log_enqueue_frameless_end = 0;
static address satb_log_enqueue_frameless = NULL;
static u_char* satb_log_enqueue_frameless_end = NULL;
static int EnqueueCodeSize = 128 DEBUG_ONLY( + 256); // Instructions?
// The calls to this don't work. We'd need to do a fair amount of work to
// make it work.
static void check_index(int ind) {
assert(0 <= ind && ind <= 64*K && ((ind % oopSize) == 0),
"Invariants.");
}
static void generate_satb_log_enqueue(bool with_frame) {
BufferBlob* bb = BufferBlob::create("enqueue_with_frame", EnqueueCodeSize);
CodeBuffer buf(bb);
@ -4388,13 +4368,27 @@ static inline void generate_satb_log_enqueue_if_necessary(bool with_frame) {
}
}
void MacroAssembler::g1_write_barrier_pre(Register obj, Register index, int offset, Register tmp, bool preserve_o_regs) {
assert(offset == 0 || index == noreg, "choose one");
if (G1DisablePreBarrier) return;
// satb_log_barrier(tmp, obj, offset, preserve_o_regs);
void MacroAssembler::g1_write_barrier_pre(Register obj,
Register index,
int offset,
Register pre_val,
Register tmp,
bool preserve_o_regs) {
Label filtered;
// satb_log_barrier_work0(tmp, filtered);
if (obj == noreg) {
// We are not loading the previous value so make
// sure that we don't trash the value in pre_val
// with the code below.
assert_different_registers(pre_val, tmp);
} else {
// We will be loading the previous value
// in this code so...
assert(offset == 0 || index == noreg, "choose one");
assert(pre_val == noreg, "check this code");
}
// Is marking active?
if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
ld(G2,
in_bytes(JavaThread::satb_mark_queue_offset() +
@ -4413,61 +4407,46 @@ void MacroAssembler::g1_write_barrier_pre(Register obj, Register index, int offs
br_on_reg_cond(rc_z, /*annul*/false, Assembler::pt, tmp, filtered);
delayed() -> nop();
// satb_log_barrier_work1(tmp, offset);
if (index == noreg) {
if (Assembler::is_simm13(offset)) {
load_heap_oop(obj, offset, tmp);
// Do we need to load the previous value?
if (obj != noreg) {
// Load the previous value...
if (index == noreg) {
if (Assembler::is_simm13(offset)) {
load_heap_oop(obj, offset, tmp);
} else {
set(offset, tmp);
load_heap_oop(obj, tmp, tmp);
}
} else {
set(offset, tmp);
load_heap_oop(obj, tmp, tmp);
load_heap_oop(obj, index, tmp);
}
} else {
load_heap_oop(obj, index, tmp);
// Previous value has been loaded into tmp
pre_val = tmp;
}
// satb_log_barrier_work2(obj, tmp, offset);
// satb_log_barrier_work3(tmp, filtered, preserve_o_regs);
const Register pre_val = tmp;
if (G1SATBBarrierPrintNullPreVals) {
save_frame(0);
mov(pre_val, O0);
// Save G-regs that target may use.
mov(G1, L1);
mov(G2, L2);
mov(G3, L3);
mov(G4, L4);
mov(G5, L5);
call(CAST_FROM_FN_PTR(address, &count_null_pre_vals));
delayed()->nop();
// Restore G-regs that target may have used.
mov(L1, G1);
mov(L2, G2);
mov(L3, G3);
mov(L4, G4);
mov(L5, G5);
restore(G0, G0, G0);
}
assert(pre_val != noreg, "must have a real register");
// Is the previous value null?
// Check on whether to annul.
br_on_reg_cond(rc_z, /*annul*/false, Assembler::pt, pre_val, filtered);
delayed() -> nop();
// OK, it's not filtered, so we'll need to call enqueue. In the normal
// case, pre_val will be a scratch G-reg, but there's some cases in which
// it's an O-reg. In the first case, do a normal call. In the latter,
// do a save here and call the frameless version.
// case, pre_val will be a scratch G-reg, but there are some cases in
// which it's an O-reg. In the first case, do a normal call. In the
// latter, do a save here and call the frameless version.
guarantee(pre_val->is_global() || pre_val->is_out(),
"Or we need to think harder.");
if (pre_val->is_global() && !preserve_o_regs) {
generate_satb_log_enqueue_if_necessary(true); // with frame.
generate_satb_log_enqueue_if_necessary(true); // with frame
call(satb_log_enqueue_with_frame);
delayed()->mov(pre_val, O0);
} else {
generate_satb_log_enqueue_if_necessary(false); // with frameless.
generate_satb_log_enqueue_if_necessary(false); // frameless
save_frame(0);
call(satb_log_enqueue_frameless);
delayed()->mov(pre_val->after_save(), O0);
@ -4614,7 +4593,6 @@ void MacroAssembler::g1_write_barrier_post(Register store_addr, Register new_val
MacroAssembler* post_filter_masm = this;
if (new_val == G0) return;
if (G1DisablePostBarrier) return;
G1SATBCardTableModRefBS* bs = (G1SATBCardTableModRefBS*) Universe::heap()->barrier_set();
assert(bs->kind() == BarrierSet::G1SATBCT ||
@ -4626,6 +4604,7 @@ void MacroAssembler::g1_write_barrier_post(Register store_addr, Register new_val
#else
srl(tmp, HeapRegion::LogOfHRGrainBytes, tmp);
#endif
if (G1PrintCTFilterStats) {
guarantee(tmp->is_global(), "Or stats won't work...");
// This is a sleazy hack: I'm temporarily hijacking G2, which I

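A plain-Java model (hypothetical types, not HotSpot code) of the pre-barrier the reworked g1_write_barrier_pre emits: the caller either supplies pre_val directly (obj == noreg) or asks the barrier to load the previous value, and both inactive marking and null previous values are filtered before enqueueing:

    import java.util.ArrayDeque;
    import java.util.Deque;

    final class SatbModel {
        final Deque<Object> buffer = new ArrayDeque<>(); // models the thread's SATB queue
        volatile boolean markingActive;                  // models PtrQueue's active flag

        // obj == null here models the obj == noreg case (pre_val supplied by the caller).
        void preBarrier(Object[] obj, int index, Object preVal) {
            if (!markingActive) return;           // filter: marking not in progress
            if (obj != null) preVal = obj[index]; // do-load case: read the previous value
            if (preVal == null) return;           // filter: null previous value
            buffer.push(preVal);                  // enqueue for the concurrent marker
        }
    }
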
@ -2210,15 +2210,11 @@ public:
void card_write_barrier_post(Register store_addr, Register new_val, Register tmp);
#ifndef SERIALGC
// Array store and offset
void g1_write_barrier_pre(Register obj, Register index, int offset, Register tmp, bool preserve_o_regs);
// General G1 pre-barrier generator.
void g1_write_barrier_pre(Register obj, Register index, int offset, Register pre_val, Register tmp, bool preserve_o_regs);
// General G1 post-barrier generator
void g1_write_barrier_post(Register store_addr, Register new_val, Register tmp);
// May do filtering, depending on the boolean arguments.
void g1_card_table_write(jbyte* byte_map_base,
Register tmp, Register obj, Register new_val,
bool region_filter, bool null_filter);
#endif // SERIALGC
// pushes double TOS element of FPU stack on CPU stack; pops from FPU stack

@ -408,13 +408,20 @@ void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
#ifndef SERIALGC
void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
// At this point we know that marking is in progress.
// If do_load() is true then we have to emit the
// load of the previous value; otherwise it has already
// been loaded into _pre_val.
__ bind(_entry);
assert(pre_val()->is_register(), "Precondition.");
Register pre_val_reg = pre_val()->as_register();
ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);
if (do_load()) {
ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);
}
if (__ is_in_wdisp16_range(_continuation)) {
__ br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pt,
pre_val_reg, _continuation);
@ -431,6 +438,96 @@ void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
}
void G1UnsafeGetObjSATBBarrierStub::emit_code(LIR_Assembler* ce) {
// At this point we know that offset == referent_offset.
//
// So we might have to emit:
// if (src == null) goto continuation.
//
// and we definitely have to emit:
// if (klass(src).reference_type == REF_NONE) goto continuation
// if (!marking_active) goto continuation
// if (pre_val == null) goto continuation
// call pre_barrier(pre_val)
// goto continuation
//
__ bind(_entry);
assert(src()->is_register(), "sanity");
Register src_reg = src()->as_register();
if (gen_src_check()) {
// The original src operand was not a constant.
// Generate src == null?
if (__ is_in_wdisp16_range(_continuation)) {
__ br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pt,
src_reg, _continuation);
} else {
__ cmp(src_reg, G0);
__ brx(Assembler::equal, false, Assembler::pt, _continuation);
}
__ delayed()->nop();
}
// Generate src->_klass->_reference_type() == REF_NONE)?
assert(tmp()->is_register(), "sanity");
Register tmp_reg = tmp()->as_register();
__ load_klass(src_reg, tmp_reg);
Address ref_type_adr(tmp_reg, instanceKlass::reference_type_offset_in_bytes() + sizeof(oopDesc));
__ ld(ref_type_adr, tmp_reg);
if (__ is_in_wdisp16_range(_continuation)) {
__ br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pt,
tmp_reg, _continuation);
} else {
__ cmp(tmp_reg, G0);
__ brx(Assembler::equal, false, Assembler::pt, _continuation);
}
__ delayed()->nop();
// Is marking active?
assert(thread()->is_register(), "precondition");
Register thread_reg = thread()->as_pointer_register();
Address in_progress(thread_reg, in_bytes(JavaThread::satb_mark_queue_offset() +
PtrQueue::byte_offset_of_active()));
if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
__ ld(in_progress, tmp_reg);
} else {
assert(in_bytes(PtrQueue::byte_width_of_active()) == 1, "Assumption");
__ ldsb(in_progress, tmp_reg);
}
if (__ is_in_wdisp16_range(_continuation)) {
__ br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pt,
tmp_reg, _continuation);
} else {
__ cmp(tmp_reg, G0);
__ brx(Assembler::equal, false, Assembler::pt, _continuation);
}
__ delayed()->nop();
// val == null?
assert(val()->is_register(), "Precondition.");
Register val_reg = val()->as_register();
if (__ is_in_wdisp16_range(_continuation)) {
__ br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pt,
val_reg, _continuation);
} else {
__ cmp(val_reg, G0);
__ brx(Assembler::equal, false, Assembler::pt, _continuation);
}
__ delayed()->nop();
__ call(Runtime1::entry_for(Runtime1::g1_pre_barrier_slow_id));
__ delayed()->mov(val_reg, G4);
__ br(Assembler::always, false, Assembler::pt, _continuation);
__ delayed()->nop();
}
jbyte* G1PostBarrierStub::_byte_map_base = NULL;
jbyte* G1PostBarrierStub::byte_map_base_slow() {

@ -2058,6 +2058,13 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
if (basic_type == T_ARRAY) basic_type = T_OBJECT;
#ifdef _LP64
// higher 32 bits must be zero
__ sra(dst_pos, 0, dst_pos);
__ sra(src_pos, 0, src_pos);
__ sra(length, 0, length);
#endif
// set up the arraycopy stub information
ArrayCopyStub* stub = op->stub();
@ -2065,20 +2072,36 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
// the known type isn't loaded since the code sanity checks
// in debug mode and the type isn't required when we know the exact type
// also check that the type is an array type.
// We also, for now, always call the stub if the barrier set requires a
// write_ref_pre barrier (which the stub does, but none of the optimized
// cases currently does).
if (op->expected_type() == NULL ||
Universe::heap()->barrier_set()->has_write_ref_pre_barrier()) {
if (op->expected_type() == NULL) {
__ mov(src, O0);
__ mov(src_pos, O1);
__ mov(dst, O2);
__ mov(dst_pos, O3);
__ mov(length, O4);
__ call_VM_leaf(tmp, CAST_FROM_FN_PTR(address, Runtime1::arraycopy));
address copyfunc_addr = StubRoutines::generic_arraycopy();
__ br_zero(Assembler::less, false, Assembler::pn, O0, *stub->entry());
__ delayed()->nop();
if (copyfunc_addr == NULL) { // Use C version if stub was not generated
__ call_VM_leaf(tmp, CAST_FROM_FN_PTR(address, Runtime1::arraycopy));
} else {
#ifndef PRODUCT
if (PrintC1Statistics) {
address counter = (address)&Runtime1::_generic_arraycopystub_cnt;
__ inc_counter(counter, G1, G3);
}
#endif
__ call_VM_leaf(tmp, copyfunc_addr);
}
if (copyfunc_addr != NULL) {
__ xor3(O0, -1, tmp);
__ sub(length, tmp, length);
__ add(src_pos, tmp, src_pos);
__ br_zero(Assembler::less, false, Assembler::pn, O0, *stub->entry());
__ delayed()->add(dst_pos, tmp, dst_pos);
} else {
__ br_zero(Assembler::less, false, Assembler::pn, O0, *stub->entry());
__ delayed()->nop();
}
__ bind(*stub->continuation());
return;
}
@ -2135,20 +2158,137 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
__ delayed()->nop();
}
int shift = shift_amount(basic_type);
if (flags & LIR_OpArrayCopy::type_check) {
if (UseCompressedOops) {
// We don't need decode because we just need to compare
__ lduw(src, oopDesc::klass_offset_in_bytes(), tmp);
__ lduw(dst, oopDesc::klass_offset_in_bytes(), tmp2);
__ cmp(tmp, tmp2);
__ br(Assembler::notEqual, false, Assembler::pt, *stub->entry());
// We don't know whether the array types are compatible
if (basic_type != T_OBJECT) {
// Simple test for basic type arrays
if (UseCompressedOops) {
// We don't need decode because we just need to compare
__ lduw(src, oopDesc::klass_offset_in_bytes(), tmp);
__ lduw(dst, oopDesc::klass_offset_in_bytes(), tmp2);
__ cmp(tmp, tmp2);
__ br(Assembler::notEqual, false, Assembler::pt, *stub->entry());
} else {
__ ld_ptr(src, oopDesc::klass_offset_in_bytes(), tmp);
__ ld_ptr(dst, oopDesc::klass_offset_in_bytes(), tmp2);
__ cmp(tmp, tmp2);
__ brx(Assembler::notEqual, false, Assembler::pt, *stub->entry());
}
__ delayed()->nop();
} else {
__ ld_ptr(src, oopDesc::klass_offset_in_bytes(), tmp);
__ ld_ptr(dst, oopDesc::klass_offset_in_bytes(), tmp2);
__ cmp(tmp, tmp2);
__ brx(Assembler::notEqual, false, Assembler::pt, *stub->entry());
// For object arrays, if src is a sub class of dst then we can
// safely do the copy.
address copyfunc_addr = StubRoutines::checkcast_arraycopy();
Label cont, slow;
assert_different_registers(tmp, tmp2, G3, G1);
__ load_klass(src, G3);
__ load_klass(dst, G1);
__ check_klass_subtype_fast_path(G3, G1, tmp, tmp2, &cont, copyfunc_addr == NULL ? stub->entry() : &slow, NULL);
__ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
__ delayed()->nop();
__ cmp(G3, 0);
if (copyfunc_addr != NULL) { // use stub if available
// src is not a sub class of dst so we have to do a
// per-element check.
__ br(Assembler::notEqual, false, Assembler::pt, cont);
__ delayed()->nop();
__ bind(slow);
int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray;
if ((flags & mask) != mask) {
// Not both are statically known to be object arrays; check the unknown one at runtime.
assert(flags & mask, "one of the two should be known to be an object array");
if (!(flags & LIR_OpArrayCopy::src_objarray)) {
__ load_klass(src, tmp);
} else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
__ load_klass(dst, tmp);
}
int lh_offset = klassOopDesc::header_size() * HeapWordSize +
Klass::layout_helper_offset_in_bytes();
__ lduw(tmp, lh_offset, tmp2);
jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
__ set(objArray_lh, tmp);
__ cmp(tmp, tmp2);
__ br(Assembler::notEqual, false, Assembler::pt, *stub->entry());
__ delayed()->nop();
}
Register src_ptr = O0;
Register dst_ptr = O1;
Register len = O2;
Register chk_off = O3;
Register super_k = O4;
__ add(src, arrayOopDesc::base_offset_in_bytes(basic_type), src_ptr);
if (shift == 0) {
__ add(src_ptr, src_pos, src_ptr);
} else {
__ sll(src_pos, shift, tmp);
__ add(src_ptr, tmp, src_ptr);
}
__ add(dst, arrayOopDesc::base_offset_in_bytes(basic_type), dst_ptr);
if (shift == 0) {
__ add(dst_ptr, dst_pos, dst_ptr);
} else {
__ sll(dst_pos, shift, tmp);
__ add(dst_ptr, tmp, dst_ptr);
}
__ mov(length, len);
__ load_klass(dst, tmp);
int ek_offset = (klassOopDesc::header_size() * HeapWordSize +
objArrayKlass::element_klass_offset_in_bytes());
__ ld_ptr(tmp, ek_offset, super_k);
int sco_offset = (klassOopDesc::header_size() * HeapWordSize +
Klass::super_check_offset_offset_in_bytes());
__ lduw(super_k, sco_offset, chk_off);
__ call_VM_leaf(tmp, copyfunc_addr);
#ifndef PRODUCT
if (PrintC1Statistics) {
Label failed;
__ br_notnull(O0, false, Assembler::pn, failed);
__ delayed()->nop();
__ inc_counter((address)&Runtime1::_arraycopy_checkcast_cnt, G1, G3);
__ bind(failed);
}
#endif
__ br_null(O0, false, Assembler::pt, *stub->continuation());
__ delayed()->xor3(O0, -1, tmp);
#ifndef PRODUCT
if (PrintC1Statistics) {
__ inc_counter((address)&Runtime1::_arraycopy_checkcast_attempt_cnt, G1, G3);
}
#endif
__ sub(length, tmp, length);
__ add(src_pos, tmp, src_pos);
__ br(Assembler::always, false, Assembler::pt, *stub->entry());
__ delayed()->add(dst_pos, tmp, dst_pos);
__ bind(cont);
} else {
__ br(Assembler::equal, false, Assembler::pn, *stub->entry());
__ delayed()->nop();
__ bind(cont);
}
}
__ delayed()->nop();
}
#ifdef ASSERT
@ -2207,14 +2347,18 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
}
#endif
int shift = shift_amount(basic_type);
#ifndef PRODUCT
if (PrintC1Statistics) {
address counter = Runtime1::arraycopy_count_address(basic_type);
__ inc_counter(counter, G1, G3);
}
#endif
Register src_ptr = O0;
Register dst_ptr = O1;
Register len = O2;
__ add(src, arrayOopDesc::base_offset_in_bytes(basic_type), src_ptr);
LP64_ONLY(__ sra(src_pos, 0, src_pos);) //higher 32bits must be null
if (shift == 0) {
__ add(src_ptr, src_pos, src_ptr);
} else {
@ -2223,7 +2367,6 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
}
__ add(dst, arrayOopDesc::base_offset_in_bytes(basic_type), dst_ptr);
LP64_ONLY(__ sra(dst_pos, 0, dst_pos);) //higher 32bits must be null
if (shift == 0) {
__ add(dst_ptr, dst_pos, dst_ptr);
} else {
@ -2231,18 +2374,14 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
__ add(dst_ptr, tmp, dst_ptr);
}
if (basic_type != T_OBJECT) {
if (shift == 0) {
__ mov(length, len);
} else {
__ sll(length, shift, len);
}
__ call_VM_leaf(tmp, CAST_FROM_FN_PTR(address, Runtime1::primitive_arraycopy));
} else {
// oop_arraycopy takes a length in number of elements, so don't scale it.
__ mov(length, len);
__ call_VM_leaf(tmp, CAST_FROM_FN_PTR(address, Runtime1::oop_arraycopy));
}
bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;
bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0;
const char *name;
address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);
// arraycopy stubs take a length in number of elements, so don't scale it.
__ mov(length, len);
__ call_VM_leaf(tmp, entry);
__ bind(*stub->continuation());
}

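A sketch of the stub return convention the adjustment code above relies on (inferred from xor3(O0, -1, tmp) and the position updates, so treat it as an assumption): 0 means the whole copy succeeded; otherwise ~result is the count of leading elements already copied, and the operands are advanced before falling back to the slow path:

    final class ArraycopyAdjust {
        // Returns true when the stub copied everything; otherwise updates the
        // positions and length so the slow-path retry resumes where it stopped.
        static boolean adjust(int result, int[] srcPos, int[] dstPos, int[] length) {
            if (result == 0) return true; // full copy
            int copied = ~result;         // the value xor3(O0, -1, tmp) computes
            srcPos[0] += copied;
            dstPos[0] += copied;
            length[0] -= copied;
            return false;                 // caller branches to the stub slow path
        }
    }
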
@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -387,7 +387,8 @@ void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
if (obj_store) {
// Needs GC write barriers.
pre_barrier(LIR_OprFact::address(array_addr), false, NULL);
pre_barrier(LIR_OprFact::address(array_addr), LIR_OprFact::illegalOpr /* pre_val */,
true /* do_load */, false /* patch */, NULL);
}
__ move(value.result(), array_addr, null_check_info);
if (obj_store) {
@ -687,7 +688,8 @@ void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
__ add(obj.result(), offset.result(), addr);
if (type == objectType) { // Write-barrier needed for Object fields.
pre_barrier(addr, false, NULL);
pre_barrier(addr, LIR_OprFact::illegalOpr /* pre_val */,
true /* do_load */, false /* patch */, NULL);
}
if (type == objectType)
@ -1187,7 +1189,8 @@ void LIRGenerator::put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data,
}
if (is_obj) {
pre_barrier(LIR_OprFact::address(addr), false, NULL);
pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
true /* do_load */, false /* patch */, NULL);
// _bs->c1_write_barrier_pre(this, LIR_OprFact::address(addr));
}
__ move(data, addr);

@ -387,7 +387,7 @@ void C1_MacroAssembler::verify_stack_oop(int stack_offset) {
void C1_MacroAssembler::verify_not_null_oop(Register r) {
Label not_null;
br_zero(Assembler::notEqual, false, Assembler::pt, r, not_null);
br_notnull(r, false, Assembler::pt, not_null);
delayed()->nop();
stop("non-null oop required");
bind(not_null);

@ -551,6 +551,26 @@ address InterpreterGenerator::generate_accessor_entry(void) {
return NULL;
}
address InterpreterGenerator::generate_Reference_get_entry(void) {
#ifndef SERIALGC
if (UseG1GC) {
// We need to have a routine that generates code to:
// * load the value in the referent field
// * pass that value to the pre-barrier.
//
// In the case of G1 this will record the value of the
// referent in an SATB buffer if marking is active.
// This will cause concurrent marking to mark the referent
// field as live.
Unimplemented();
}
#endif // SERIALGC
// If G1 is not enabled then attempt to go through the accessor entry point
// Reference.get is an accessor
return generate_accessor_entry();
}
//
// Interpreter stub for calling a native method. (C++ interpreter)
// This sets up a somewhat different looking stack for calling the native method

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -36,6 +36,7 @@
address generate_math_entry(AbstractInterpreter::MethodKind kind);
address generate_empty_entry(void);
address generate_accessor_entry(void);
address generate_Reference_get_entry(void);
void lock_method(void);
void save_native_result(void);
void restore_native_result(void);

@ -407,6 +407,8 @@ address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter:
case Interpreter::java_lang_math_abs : break;
case Interpreter::java_lang_math_log : break;
case Interpreter::java_lang_math_log10 : break;
case Interpreter::java_lang_ref_reference_get
: entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
default : ShouldNotReachHere(); break;
}

@ -763,6 +763,87 @@ address InterpreterGenerator::generate_accessor_entry(void) {
return NULL;
}
// Method entry for java.lang.ref.Reference.get.
address InterpreterGenerator::generate_Reference_get_entry(void) {
#ifndef SERIALGC
// Code: _aload_0, _getfield, _areturn
// parameter size = 1
//
// The code that gets generated by this routine is split into 2 parts:
// 1. The "intrinsified" code for G1 (or any SATB based GC),
// 2. The slow path - which is an expansion of the regular method entry.
//
// Notes:-
// * In the G1 code we do not check whether we need to block for
// a safepoint. If G1 is enabled then we must execute the specialized
// code for Reference.get (except when the Reference object is null)
// so that we can log the value in the referent field with an SATB
// update buffer.
// If the code for the getfield template is modified so that the
// G1 pre-barrier code is executed when the current method is
// Reference.get() then going through the normal method entry
// will be fine.
// * The G1 code can, however, check the receiver object (the instance
// of java.lang.Reference) and jump to the slow path if null. If the
// Reference object is null then we obviously cannot fetch the referent
// and so we don't need to call the G1 pre-barrier. Thus we can use the
// regular method entry code to generate the NPE.
//
// This code is based on generate_accessor_entry.
address entry = __ pc();
const int referent_offset = java_lang_ref_Reference::referent_offset;
guarantee(referent_offset > 0, "referent offset not initialized");
if (UseG1GC) {
Label slow_path;
// In the G1 code we don't check if we need to reach a safepoint. We
// continue and the thread will safepoint at the next bytecode dispatch.
// Check if local 0 != NULL
// If the receiver is null then it is OK to jump to the slow path.
__ ld_ptr(Gargs, G0, Otos_i ); // get local 0
__ tst(Otos_i); // check if local 0 == NULL and go the slow path
__ brx(Assembler::zero, false, Assembler::pn, slow_path);
__ delayed()->nop();
// Load the value of the referent field.
if (Assembler::is_simm13(referent_offset)) {
__ load_heap_oop(Otos_i, referent_offset, Otos_i);
} else {
__ set(referent_offset, G3_scratch);
__ load_heap_oop(Otos_i, G3_scratch, Otos_i);
}
// Generate the G1 pre-barrier code to log the value of
// the referent field in an SATB buffer. Note with
// these parameters the pre-barrier does not generate
// the load of the previous value
__ g1_write_barrier_pre(noreg /* obj */, noreg /* index */, 0 /* offset */,
Otos_i /* pre_val */,
G3_scratch /* tmp */,
true /* preserve_o_regs */);
// _areturn
__ retl(); // return from leaf routine
__ delayed()->mov(O5_savedSP, SP);
// Generate regular method entry
__ bind(slow_path);
(void) generate_normal_entry(false);
return entry;
}
#endif // SERIALGC
// If G1 is not enabled then attempt to go through the accessor entry point
// Reference.get is an accessor
return generate_accessor_entry();
}
//
// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the native method

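A semantic model in plain Java (not JVM code) of the Reference.get entry generated above: when SATB marking is active, the loaded referent is logged before being returned, so concurrent marking treats it as live:

    import java.util.ArrayDeque;
    import java.util.Deque;

    final class RefModel<T> {
        private T referent;                     // models Reference.referent
        static volatile boolean markingActive;  // models the SATB active flag
        static final Deque<Object> satbBuffer = new ArrayDeque<>();

        T get() {
            T value = referent;                 // load the referent field
            if (markingActive && value != null) {
                satbBuffer.push(value);         // g1_write_barrier_pre with pre_val = value
            }
            return value;
        }
    }
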
@ -57,7 +57,11 @@ static void do_oop_store(InterpreterMacroAssembler* _masm,
case BarrierSet::G1SATBCT:
case BarrierSet::G1SATBCTLogging:
{
__ g1_write_barrier_pre( base, index, offset, tmp, /*preserve_o_regs*/true);
// Load and record the previous value.
__ g1_write_barrier_pre(base, index, offset,
noreg /* pre_val */,
tmp, true /*preserve_o_regs*/);
if (index == noreg ) {
assert(Assembler::is_simm13(offset), "fix this code");
__ store_heap_oop(val, base, offset);

@ -2317,7 +2317,7 @@ void Assembler::prefetchnta(Address src) {
}
void Assembler::prefetchr(Address src) {
NOT_LP64(assert(VM_Version::supports_3dnow(), "must support"));
NOT_LP64(assert(VM_Version::supports_3dnow_prefetch(), "must support"));
InstructionMark im(this);
prefetch_prefix(src);
emit_byte(0x0D);
@ -2349,7 +2349,7 @@ void Assembler::prefetcht2(Address src) {
}
void Assembler::prefetchw(Address src) {
NOT_LP64(assert(VM_Version::supports_3dnow(), "must support"));
NOT_LP64(assert(VM_Version::supports_3dnow_prefetch(), "must support"));
InstructionMark im(this);
prefetch_prefix(src);
emit_byte(0x0D);
@ -6902,26 +6902,39 @@ void MacroAssembler::testl(Register dst, AddressLiteral src) {
#ifndef SERIALGC
void MacroAssembler::g1_write_barrier_pre(Register obj,
#ifndef _LP64
Register pre_val,
Register thread,
#endif
Register tmp,
Register tmp2,
bool tosca_live) {
LP64_ONLY(Register thread = r15_thread;)
bool tosca_live,
bool expand_call) {
// If expand_call is true then we expand the call_VM_leaf macro
// directly to skip generating the check by
// InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.
#ifdef _LP64
assert(thread == r15_thread, "must be");
#endif // _LP64
Label done;
Label runtime;
assert(pre_val != noreg, "check this code");
if (obj != noreg) {
assert_different_registers(obj, pre_val, tmp);
assert(pre_val != rax, "check this code");
}
Address in_progress(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
PtrQueue::byte_offset_of_active()));
Address index(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
PtrQueue::byte_offset_of_index()));
Address buffer(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
PtrQueue::byte_offset_of_buf()));
Label done;
Label runtime;
// if (!marking_in_progress) goto done;
// Is marking active?
if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
cmpl(in_progress, 0);
} else {
@ -6930,65 +6943,92 @@ void MacroAssembler::g1_write_barrier_pre(Register obj,
}
jcc(Assembler::equal, done);
// if (x.f == NULL) goto done;
#ifdef _LP64
load_heap_oop(tmp2, Address(obj, 0));
#else
movptr(tmp2, Address(obj, 0));
#endif
cmpptr(tmp2, (int32_t) NULL_WORD);
// Do we need to load the previous value?
if (obj != noreg) {
load_heap_oop(pre_val, Address(obj, 0));
}
// Is the previous value null?
cmpptr(pre_val, (int32_t) NULL_WORD);
jcc(Assembler::equal, done);
// Can we store original value in the thread's buffer?
// Is index == 0?
// (The index field is typed as size_t.)
#ifdef _LP64
movslq(tmp, index);
cmpq(tmp, 0);
#else
cmpl(index, 0);
#endif
jcc(Assembler::equal, runtime);
#ifdef _LP64
subq(tmp, wordSize);
movl(index, tmp);
addq(tmp, buffer);
#else
subl(index, wordSize);
movl(tmp, buffer);
addl(tmp, index);
#endif
movptr(Address(tmp, 0), tmp2);
movptr(tmp, index); // tmp := *index_adr
cmpptr(tmp, 0); // tmp == 0?
jcc(Assembler::equal, runtime); // If yes, goto runtime
subptr(tmp, wordSize); // tmp := tmp - wordSize
movptr(index, tmp); // *index_adr := tmp
addptr(tmp, buffer); // tmp := tmp + *buffer_adr
// Record the previous value
movptr(Address(tmp, 0), pre_val);
jmp(done);
bind(runtime);
// save the live input values
if(tosca_live) push(rax);
push(obj);
#ifdef _LP64
call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), tmp2, r15_thread);
#else
push(thread);
call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), tmp2, thread);
pop(thread);
#endif
pop(obj);
if(tosca_live) pop(rax);
bind(done);
if (obj != noreg && obj != rax)
push(obj);
if (pre_val != rax)
push(pre_val);
// Calling the runtime using the regular call_VM_leaf mechanism generates
// code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
// that checks that the *(ebp+frame::interpreter_frame_last_sp) == NULL.
//
// If we are generating the pre-barrier without a frame (e.g. in the
// intrinsified Reference.get() routine) then ebp might be pointing to
// the caller frame and so this check will most likely fail at runtime.
//
// Expanding the call directly bypasses the generation of the check.
// So when we do not have a full interpreter frame on the stack
// expand_call should be passed true.
NOT_LP64( push(thread); )
if (expand_call) {
LP64_ONLY( assert(pre_val != c_rarg1, "smashed arg"); )
pass_arg1(this, thread);
pass_arg0(this, pre_val);
MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), 2);
} else {
call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), pre_val, thread);
}
NOT_LP64( pop(thread); )
// save the live input values
if (pre_val != rax)
pop(pre_val);
if (obj != noreg && obj != rax)
pop(obj);
if(tosca_live) pop(rax);
bind(done);
}
void MacroAssembler::g1_write_barrier_post(Register store_addr,
Register new_val,
#ifndef _LP64
Register thread,
#endif
Register tmp,
Register tmp2) {
#ifdef _LP64
assert(thread == r15_thread, "must be");
#endif // _LP64
LP64_ONLY(Register thread = r15_thread;)
Address queue_index(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
PtrQueue::byte_offset_of_index()));
Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
PtrQueue::byte_offset_of_buf()));
BarrierSet* bs = Universe::heap()->barrier_set();
CardTableModRefBS* ct = (CardTableModRefBS*)bs;
Label done;
@ -7067,7 +7107,6 @@ void MacroAssembler::g1_write_barrier_post(Register store_addr,
pop(store_addr);
bind(done);
}
#endif // SERIALGC
@ -7941,12 +7980,12 @@ void MacroAssembler::verify_oop_addr(Address addr, const char* s) {
#endif
push(rax); // save rax,
// addr may contain rsp so we will have to adjust it based on the push
// we just did
// we just did (and on 64 bit we do two pushes)
// NOTE: 64bit seemed to have had a bug in that it did movq(addr, rax); which
// stores rax into addr which is backwards of what was intended.
if (addr.uses(rsp)) {
lea(rax, addr);
pushptr(Address(rax, BytesPerWord));
pushptr(Address(rax, LP64_ONLY(2 *) BytesPerWord));
} else {
pushptr(addr);
}
@ -8396,6 +8435,17 @@ void MacroAssembler::load_heap_oop(Register dst, Address src) {
movptr(dst, src);
}
// Doesn't do verification; generates fixed-size code
void MacroAssembler::load_heap_oop_not_null(Register dst, Address src) {
#ifdef _LP64
if (UseCompressedOops) {
movl(dst, src);
decode_heap_oop_not_null(dst);
} else
#endif
movptr(dst, src);
}
void MacroAssembler::store_heap_oop(Address dst, Register src) {
#ifdef _LP64
if (UseCompressedOops) {

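A sketch of what load_heap_oop_not_null buys, under assumed compressed-oops parameters (the base and shift below are illustrative): the general decode must map a zero narrow oop back to null, while the not-null variant can skip that test and stay fixed-size:

    final class NarrowOopModel {
        static final long HEAP_BASE = 0x800000000L; // illustrative heap base
        static final int SHIFT = 3;                 // illustrative alignment shift

        static long decode(int narrow) {            // general form: preserves null
            return narrow == 0 ? 0L : HEAP_BASE + ((narrow & 0xFFFFFFFFL) << SHIFT);
        }

        static long decodeNotNull(int narrow) {     // caller guarantees non-null
            return HEAP_BASE + ((narrow & 0xFFFFFFFFL) << SHIFT);
        }
    }
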
@ -385,10 +385,18 @@ class OopAddress: public AddressLiteral {
};
class ExternalAddress: public AddressLiteral {
private:
static relocInfo::relocType reloc_for_target(address target) {
// Sometimes ExternalAddress is used for values which aren't
// exactly addresses, like the card table base.
// external_word_type can't be used for values in the first page
// so just skip the reloc in that case.
return external_word_Relocation::can_be_relocated(target) ? relocInfo::external_word_type : relocInfo::none;
}
public:
public:
ExternalAddress(address target) : AddressLiteral(target, relocInfo::external_word_type){}
ExternalAddress(address target) : AddressLiteral(target, reloc_for_target(target)) {}
};
@ -1445,6 +1453,7 @@ private:
class MacroAssembler: public Assembler {
friend class LIR_Assembler;
friend class Runtime1; // as_Address()
protected:
Address as_Address(AddressLiteral adr);
@ -1666,21 +1675,22 @@ class MacroAssembler: public Assembler {
void store_check(Register obj); // store check for obj - register is destroyed afterwards
void store_check(Register obj, Address dst); // same as above, dst is exact store location (reg. is destroyed)
#ifndef SERIALGC
void g1_write_barrier_pre(Register obj,
#ifndef _LP64
Register pre_val,
Register thread,
#endif
Register tmp,
Register tmp2,
bool tosca_live);
bool tosca_live,
bool expand_call);
void g1_write_barrier_post(Register store_addr,
Register new_val,
#ifndef _LP64
Register thread,
#endif
Register tmp,
Register tmp2);
#endif // SERIALGC
// split store_check(Register obj) to enhance instruction interleaving
void store_check_part_1(Register obj);
@ -1701,6 +1711,7 @@ class MacroAssembler: public Assembler {
void store_klass(Register dst, Register src);
void load_heap_oop(Register dst, Address src);
void load_heap_oop_not_null(Register dst, Address src);
void store_heap_oop(Address dst, Register src);
// Used for storing NULL. All other oop constants should be

@ -316,7 +316,9 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
Register tmp2 = rbx;
__ push(tmp);
__ push(tmp2);
__ load_heap_oop(tmp2, Address(_obj, java_lang_Class::klass_offset_in_bytes()));
// Load without verification to keep code size small. We need it because
// begin_initialized_entry_offset has to fit in a byte. Also, we know it's not null.
__ load_heap_oop_not_null(tmp2, Address(_obj, java_lang_Class::klass_offset_in_bytes()));
__ get_thread(tmp);
__ cmpptr(tmp, Address(tmp2, instanceKlass::init_thread_offset_in_bytes() + sizeof(klassOopDesc)));
__ pop(tmp2);
@ -464,15 +466,19 @@ void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
#ifndef SERIALGC
void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
// At this point we know that marking is in progress
// At this point we know that marking is in progress.
// If do_load() is true then we have to emit the
// load of the previous value; otherwise it has already
// been loaded into _pre_val.
__ bind(_entry);
assert(pre_val()->is_register(), "Precondition.");
Register pre_val_reg = pre_val()->as_register();
ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);
if (do_load()) {
ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);
}
__ cmpptr(pre_val_reg, (int32_t) NULL_WORD);
__ jcc(Assembler::equal, _continuation);
@ -482,6 +488,68 @@ void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
}
void G1UnsafeGetObjSATBBarrierStub::emit_code(LIR_Assembler* ce) {
// At this point we know that offset == referent_offset.
//
// So we might have to emit:
// if (src == null) goto continuation.
//
// and we definitely have to emit:
// if (klass(src).reference_type == REF_NONE) goto continuation
// if (!marking_active) goto continuation
// if (pre_val == null) goto continuation
// call pre_barrier(pre_val)
// goto continuation
//
__ bind(_entry);
assert(src()->is_register(), "sanity");
Register src_reg = src()->as_register();
if (gen_src_check()) {
// The original src operand was not a constant.
// Generate src == null?
__ cmpptr(src_reg, (int32_t) NULL_WORD);
__ jcc(Assembler::equal, _continuation);
}
// Generate src->_klass->_reference_type == REF_NONE)?
assert(tmp()->is_register(), "sanity");
Register tmp_reg = tmp()->as_register();
__ load_klass(tmp_reg, src_reg);
Address ref_type_adr(tmp_reg, instanceKlass::reference_type_offset_in_bytes() + sizeof(oopDesc));
__ cmpl(ref_type_adr, REF_NONE);
__ jcc(Assembler::equal, _continuation);
// Is marking active?
assert(thread()->is_register(), "precondition");
Register thread_reg = thread()->as_pointer_register();
Address in_progress(thread_reg, in_bytes(JavaThread::satb_mark_queue_offset() +
PtrQueue::byte_offset_of_active()));
if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
__ cmpl(in_progress, 0);
} else {
assert(in_bytes(PtrQueue::byte_width_of_active()) == 1, "Assumption");
__ cmpb(in_progress, 0);
}
__ jcc(Assembler::equal, _continuation);
// val == null?
assert(val()->is_register(), "Precondition.");
Register val_reg = val()->as_register();
__ cmpptr(val_reg, (int32_t) NULL_WORD);
__ jcc(Assembler::equal, _continuation);
ce->store_parameter(val()->as_register(), 0);
__ call(RuntimeAddress(Runtime1::entry_for(Runtime1::g1_pre_barrier_slow_id)));
__ jmp(_continuation);
}
jbyte* G1PostBarrierStub::_byte_map_base = NULL;
jbyte* G1PostBarrierStub::byte_map_base_slow() {

@ -1401,7 +1401,7 @@ void LIR_Assembler::prefetchr(LIR_Opr src) {
default:
ShouldNotReachHere(); break;
}
} else if (VM_Version::supports_3dnow()) {
} else if (VM_Version::supports_3dnow_prefetch()) {
__ prefetchr(from_addr);
}
}
@ -1424,7 +1424,7 @@ void LIR_Assembler::prefetchw(LIR_Opr src) {
default:
ShouldNotReachHere(); break;
}
} else if (VM_Version::supports_3dnow()) {
} else if (VM_Version::supports_3dnow_prefetch()) {
__ prefetchw(from_addr);
}
}
@ -3102,7 +3102,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
if (basic_type == T_ARRAY) basic_type = T_OBJECT;
// if we don't know anything or it's an object array, just go through the generic arraycopy
// if we don't know anything, just go through the generic arraycopy
if (default_type == NULL) {
Label done;
// save outgoing arguments on stack in case call to System.arraycopy is needed
@ -3123,7 +3123,9 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
store_parameter(src, 4);
NOT_LP64(assert(src == rcx && src_pos == rdx, "mismatch in calling convention");)
address entry = CAST_FROM_FN_PTR(address, Runtime1::arraycopy);
address C_entry = CAST_FROM_FN_PTR(address, Runtime1::arraycopy);
address copyfunc_addr = StubRoutines::generic_arraycopy();
// pass arguments: may push as this is not a safepoint; SP must be fixed at each safepoint
#ifdef _LP64
@ -3141,11 +3143,29 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
// Allocate abi space for args but be sure to keep stack aligned
__ subptr(rsp, 6*wordSize);
store_parameter(j_rarg4, 4);
__ call(RuntimeAddress(entry));
if (copyfunc_addr == NULL) { // Use C version if stub was not generated
__ call(RuntimeAddress(C_entry));
} else {
#ifndef PRODUCT
if (PrintC1Statistics) {
__ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt));
}
#endif
__ call(RuntimeAddress(copyfunc_addr));
}
__ addptr(rsp, 6*wordSize);
#else
__ mov(c_rarg4, j_rarg4);
__ call(RuntimeAddress(entry));
if (copyfunc_addr == NULL) { // Use C version if stub was not generated
__ call(RuntimeAddress(C_entry));
} else {
#ifndef PRODUCT
if (PrintC1Statistics) {
__ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt));
}
#endif
__ call(RuntimeAddress(copyfunc_addr));
}
#endif // _WIN64
#else
__ push(length);
@ -3153,13 +3173,28 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
__ push(dst);
__ push(src_pos);
__ push(src);
__ call_VM_leaf(entry, 5); // removes pushed parameter from the stack
if (copyfunc_addr == NULL) { // Use C version if stub was not generated
__ call_VM_leaf(C_entry, 5); // removes pushed parameter from the stack
} else {
#ifndef PRODUCT
if (PrintC1Statistics) {
__ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt));
}
#endif
__ call_VM_leaf(copyfunc_addr, 5); // removes pushed parameter from the stack
}
#endif // _LP64
__ cmpl(rax, 0);
__ jcc(Assembler::equal, *stub->continuation());
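// On failure the generic stub returns ~(number of elements copied)
// (per the StubRoutines::generic_arraycopy contract), so the xor below
// recovers the copied count; it is used further down to advance
// src_pos/dst_pos and shrink length before taking the slow stub.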
if (copyfunc_addr != NULL) {
__ mov(tmp, rax);
__ xorl(tmp, -1);
}
// Reload values from the stack so they are where the stub
// expects them.
__ movptr (dst, Address(rsp, 0*BytesPerWord));
@ -3167,6 +3202,12 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
__ movptr (length, Address(rsp, 2*BytesPerWord));
__ movptr (src_pos, Address(rsp, 3*BytesPerWord));
__ movptr (src, Address(rsp, 4*BytesPerWord));
if (copyfunc_addr != NULL) {
__ subl(length, tmp);
__ addl(src_pos, tmp);
__ addl(dst_pos, tmp);
}
__ jmp(*stub->entry());
__ bind(*stub->continuation());
@ -3226,10 +3267,6 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
__ testl(dst_pos, dst_pos);
__ jcc(Assembler::less, *stub->entry());
}
if (flags & LIR_OpArrayCopy::length_positive_check) {
__ testl(length, length);
__ jcc(Assembler::less, *stub->entry());
}
if (flags & LIR_OpArrayCopy::src_range_check) {
__ lea(tmp, Address(src_pos, length, Address::times_1, 0));
@ -3242,15 +3279,190 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
__ jcc(Assembler::above, *stub->entry());
}
if (flags & LIR_OpArrayCopy::length_positive_check) {
__ testl(length, length);
__ jcc(Assembler::less, *stub->entry());
__ jcc(Assembler::zero, *stub->continuation());
}
#ifdef _LP64
__ movl2ptr(src_pos, src_pos); // upper 32 bits must be zero
__ movl2ptr(dst_pos, dst_pos); // upper 32 bits must be zero
#endif
if (flags & LIR_OpArrayCopy::type_check) {
if (UseCompressedOops) {
__ movl(tmp, src_klass_addr);
__ cmpl(tmp, dst_klass_addr);
// We don't know whether the array types are compatible
if (basic_type != T_OBJECT) {
// Simple test for basic type arrays
if (UseCompressedOops) {
__ movl(tmp, src_klass_addr);
__ cmpl(tmp, dst_klass_addr);
} else {
__ movptr(tmp, src_klass_addr);
__ cmpptr(tmp, dst_klass_addr);
}
__ jcc(Assembler::notEqual, *stub->entry());
} else {
__ movptr(tmp, src_klass_addr);
__ cmpptr(tmp, dst_klass_addr);
// For object arrays, if src is a subclass of dst then we can
// safely do the copy.
Label cont, slow;
__ push(src);
__ push(dst);
__ load_klass(src, src);
__ load_klass(dst, dst);
__ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, NULL);
__ push(src);
__ push(dst);
__ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
__ pop(dst);
__ pop(src);
__ cmpl(src, 0);
__ jcc(Assembler::notEqual, cont);
__ bind(slow);
__ pop(dst);
__ pop(src);
address copyfunc_addr = StubRoutines::checkcast_arraycopy();
if (copyfunc_addr != NULL) { // use stub if available
// src is not a subclass of dst so we have to do a
// per-element check.
int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray;
if ((flags & mask) != mask) {
// One of the two arrays is statically known to be an object array;
// check at runtime that the other one is too.
assert(flags & mask, "one of the two should be known to be an object array");
if (!(flags & LIR_OpArrayCopy::src_objarray)) {
__ load_klass(tmp, src);
} else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
__ load_klass(tmp, dst);
}
int lh_offset = klassOopDesc::header_size() * HeapWordSize +
Klass::layout_helper_offset_in_bytes();
Address klass_lh_addr(tmp, lh_offset);
jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
__ cmpl(klass_lh_addr, objArray_lh);
__ jcc(Assembler::notEqual, *stub->entry());
}
#ifndef _LP64
// save caller save registers
store_parameter(rax, 2);
store_parameter(rcx, 1);
store_parameter(rdx, 0);
__ movptr(tmp, dst_klass_addr);
__ movptr(tmp, Address(tmp, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc)));
__ push(tmp);
__ movl(tmp, Address(tmp, Klass::super_check_offset_offset_in_bytes() + sizeof(oopDesc)));
__ push(tmp);
__ push(length);
__ lea(tmp, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
__ push(tmp);
__ lea(tmp, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
__ push(tmp);
__ call_VM_leaf(copyfunc_addr, 5);
#else
__ movl2ptr(length, length); // upper 32 bits must be zero
// save caller save registers: copy them to callee save registers
__ mov(rbx, rdx);
__ mov(r13, r8);
__ mov(r14, r9);
#ifndef _WIN64
store_parameter(rsi, 1);
store_parameter(rcx, 0);
// on WIN64 the other incoming parameters are in rdi and rsi, which are
// preserved across the call
#endif
__ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
assert_different_registers(c_rarg0, dst, dst_pos, length);
__ lea(c_rarg1, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
assert_different_registers(c_rarg1, dst, length);
__ mov(c_rarg2, length);
assert_different_registers(c_rarg2, dst);
#ifdef _WIN64
// Allocate abi space for args but be sure to keep stack aligned
__ subptr(rsp, 6*wordSize);
__ load_klass(c_rarg3, dst);
__ movptr(c_rarg3, Address(c_rarg3, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc)));
store_parameter(c_rarg3, 4);
__ movl(c_rarg3, Address(c_rarg3, Klass::super_check_offset_offset_in_bytes() + sizeof(oopDesc)));
__ call(RuntimeAddress(copyfunc_addr));
__ addptr(rsp, 6*wordSize);
#else
__ load_klass(c_rarg4, dst);
__ movptr(c_rarg4, Address(c_rarg4, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc)));
__ movl(c_rarg3, Address(c_rarg4, Klass::super_check_offset_offset_in_bytes() + sizeof(oopDesc)));
__ call(RuntimeAddress(copyfunc_addr));
#endif
#endif
#ifndef PRODUCT
if (PrintC1Statistics) {
Label failed;
__ testl(rax, rax);
__ jcc(Assembler::notZero, failed);
__ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_cnt));
__ bind(failed);
}
#endif
__ testl(rax, rax);
__ jcc(Assembler::zero, *stub->continuation());
#ifndef PRODUCT
if (PrintC1Statistics) {
__ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_attempt_cnt));
}
#endif
__ mov(tmp, rax);
__ xorl(tmp, -1);
#ifndef _LP64
// restore caller save registers
assert_different_registers(tmp, rdx, rcx, rax); // result of stub will be lost
__ movptr(rdx, Address(rsp, 0*BytesPerWord));
__ movptr(rcx, Address(rsp, 1*BytesPerWord));
__ movptr(rax, Address(rsp, 2*BytesPerWord));
#else
// restore caller save registers
__ mov(rdx, rbx);
__ mov(r8, r13);
__ mov(r9, r14);
#ifndef _WIN64
assert_different_registers(tmp, rdx, r8, r9, rcx, rsi); // result of stub will be lost
__ movptr(rcx, Address(rsp, 0*BytesPerWord));
__ movptr(rsi, Address(rsp, 1*BytesPerWord));
#else
assert_different_registers(tmp, rdx, r8, r9); // result of stub will be lost
#endif
#endif
__ subl(length, tmp);
__ addl(src_pos, tmp);
__ addl(dst_pos, tmp);
}
__ jmp(*stub->entry());
__ bind(cont);
__ pop(dst);
__ pop(src);
}
__ jcc(Assembler::notEqual, *stub->entry());
}
#ifdef ASSERT
@ -3291,16 +3503,16 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
}
#endif
if (shift_amount > 0 && basic_type != T_OBJECT) {
__ shlptr(length, shift_amount);
#ifndef PRODUCT
if (PrintC1Statistics) {
__ incrementl(ExternalAddress(Runtime1::arraycopy_count_address(basic_type)));
}
#endif
#ifdef _LP64
assert_different_registers(c_rarg0, dst, dst_pos, length);
__ movl2ptr(src_pos, src_pos); // upper 32 bits must be zero
__ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
assert_different_registers(c_rarg1, length);
__ movl2ptr(dst_pos, dst_pos); // upper 32 bits must be zero
__ lea(c_rarg1, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
__ mov(c_rarg2, length);
@ -3311,11 +3523,12 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
store_parameter(tmp, 1);
store_parameter(length, 2);
#endif // _LP64
if (basic_type == T_OBJECT) {
__ call_VM_leaf(CAST_FROM_FN_PTR(address, Runtime1::oop_arraycopy), 0);
} else {
__ call_VM_leaf(CAST_FROM_FN_PTR(address, Runtime1::primitive_arraycopy), 0);
}
bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;
bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0;
const char *name;
address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);
__ call_VM_leaf(entry, 0);
__ bind(*stub->continuation());
}

@ -326,7 +326,8 @@ void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
if (obj_store) {
// Needs GC write barriers.
pre_barrier(LIR_OprFact::address(array_addr), false, NULL);
pre_barrier(LIR_OprFact::address(array_addr), LIR_OprFact::illegalOpr /* pre_val */,
true /* do_load */, false /* patch */, NULL);
__ move(value.result(), array_addr, null_check_info);
// Seems to be a precise
post_barrier(LIR_OprFact::address(array_addr), value.result());
@ -794,7 +795,8 @@ void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
if (type == objectType) { // Write-barrier needed for Object fields.
// Do the pre-write barrier, if any.
pre_barrier(addr, false, NULL);
pre_barrier(addr, LIR_OprFact::illegalOpr /* pre_val */,
true /* do_load */, false /* patch */, NULL);
}
LIR_Opr ill = LIR_OprFact::illegalOpr; // for convenience
@ -1339,7 +1341,8 @@ void LIRGenerator::put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data,
bool is_obj = (type == T_ARRAY || type == T_OBJECT);
if (is_obj) {
// Do the pre-write barrier, if any.
pre_barrier(LIR_OprFact::address(addr), false, NULL);
pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
true /* do_load */, false /* patch */, NULL);
__ move(data, addr);
assert(src->is_register(), "must be register");
// Seems to be a precise address

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -34,6 +34,7 @@
address generate_math_entry(AbstractInterpreter::MethodKind kind);
address generate_empty_entry(void);
address generate_accessor_entry(void);
address generate_Reference_get_entry(void);
void lock_method(void);
void generate_stack_overflow_check(void);

@ -936,6 +936,26 @@ address InterpreterGenerator::generate_accessor_entry(void) {
}
address InterpreterGenerator::generate_Reference_get_entry(void) {
#ifndef SERIALGC
if (UseG1GC) {
// We need a routine that generates code to:
// * load the value in the referent field, and
// * pass that value to the pre-barrier.
//
// In the case of G1 this will record the value of the
// referent in an SATB buffer if marking is active.
// This will cause concurrent marking to treat the
// referent as live.
Unimplemented();
}
#endif // SERIALGC
// If G1 is not enabled then attempt to go through the accessor entry point
// Reference.get is an accessor
return generate_accessor_entry();
}
//
// C++ Interpreter stub for calling a native method.
// This sets up a somewhat different looking stack for calling the native method
@ -2210,6 +2230,8 @@ address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter:
case Interpreter::java_lang_math_log : // fall thru
case Interpreter::java_lang_math_log10 : // fall thru
case Interpreter::java_lang_math_sqrt : entry_point = ((InterpreterGenerator*)this)->generate_math_entry(kind); break;
case Interpreter::java_lang_ref_reference_get
: entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
default : ShouldNotReachHere(); break;
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -39,6 +39,7 @@
address generate_math_entry(AbstractInterpreter::MethodKind kind);
address generate_empty_entry(void);
address generate_accessor_entry(void);
address generate_Reference_get_entry();
void lock_method(void);
void generate_stack_overflow_check(void);

@ -776,6 +776,98 @@ address InterpreterGenerator::generate_accessor_entry(void) {
}
// Method entry for java.lang.ref.Reference.get.
address InterpreterGenerator::generate_Reference_get_entry(void) {
#ifndef SERIALGC
// Code: _aload_0, _getfield, _areturn
// parameter size = 1
//
// The code that gets generated by this routine is split into 2 parts:
// 1. The "intrinsified" code for G1 (or any SATB based GC),
// 2. The slow path - which is an expansion of the regular method entry.
//
// Notes:-
// * In the G1 code we do not check whether we need to block for
// a safepoint. If G1 is enabled then we must execute the specialized
// code for Reference.get (except when the Reference object is null)
// so that we can log the value in the referent field with an SATB
// update buffer.
// If the code for the getfield template is modified so that the
// G1 pre-barrier code is executed when the current method is
// Reference.get() then going through the normal method entry
// will be fine.
// * The G1 code below can, however, check the receiver object (the instance
// of java.lang.Reference) and jump to the slow path if null. If the
// Reference object is null then we obviously cannot fetch the referent
// and so we don't need to call the G1 pre-barrier. Thus we can use the
// regular method entry code to generate the NPE.
//
// This code is based on generate_accessor_entry.
// rbx,: methodOop
// rcx: receiver (preserve for slow entry into asm interpreter)
// rsi: senderSP, must be preserved for the slow path; set SP to it on the fast path
address entry = __ pc();
const int referent_offset = java_lang_ref_Reference::referent_offset;
guarantee(referent_offset > 0, "referent offset not initialized");
if (UseG1GC) {
Label slow_path;
// Check if local 0 != NULL
// If the receiver is null then it is OK to jump to the slow path.
__ movptr(rax, Address(rsp, wordSize));
__ testptr(rax, rax);
__ jcc(Assembler::zero, slow_path);
// rax: local 0 (must be preserved across the G1 barrier call)
//
// rbx: method (at this point it's scratch)
// rcx: receiver (at this point it's scratch)
// rdx: scratch
// rdi: scratch
//
// rsi: sender sp
// Preserve the sender sp in case the pre-barrier
// calls the runtime
__ push(rsi);
// Load the value of the referent field.
const Address field_address(rax, referent_offset);
__ movptr(rax, field_address);
// Generate the G1 pre-barrier code to log the value of
// the referent field in an SATB buffer.
__ get_thread(rcx);
__ g1_write_barrier_pre(noreg /* obj */,
rax /* pre_val */,
rcx /* thread */,
rbx /* tmp */,
true /* tosca_live */,
true /* expand_call */);
// _areturn
__ pop(rsi); // get sender sp
__ pop(rdi); // get return address
__ mov(rsp, rsi); // set sp to sender sp
__ jmp(rdi);
__ bind(slow_path);
(void) generate_normal_entry(false);
return entry;
}
#endif // SERIALGC
// If G1 is not enabled then attempt to go through the accessor entry point
// Reference.get is an accessor
return generate_accessor_entry();
}
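// For orientation, the fast path assembled above behaves like this C++
// sketch (hypothetical helper names; the real work is the hand-written
// assembly plus g1_write_barrier_pre):
//
//   oop Reference_get(oop receiver) {
//     if (receiver == NULL) return slow_path(receiver); // normal entry raises the NPE
//     oop referent = load_referent_field(receiver);     // raw load of Reference.referent
//     g1_pre_barrier_log(referent);                     // enqueue in the SATB buffer
//     return referent;                                  // _areturn
//   }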
//
// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the native method
@ -1444,6 +1536,8 @@ address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter:
case Interpreter::java_lang_math_log : // fall thru
case Interpreter::java_lang_math_log10 : // fall thru
case Interpreter::java_lang_math_sqrt : entry_point = ((InterpreterGenerator*)this)->generate_math_entry(kind); break;
case Interpreter::java_lang_ref_reference_get
: entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
default : ShouldNotReachHere(); break;
}

@ -757,6 +757,95 @@ address InterpreterGenerator::generate_accessor_entry(void) {
return entry_point;
}
// Method entry for java.lang.ref.Reference.get.
address InterpreterGenerator::generate_Reference_get_entry(void) {
#ifndef SERIALGC
// Code: _aload_0, _getfield, _areturn
// parameter size = 1
//
// The code that gets generated by this routine is split into 2 parts:
// 1. The "intrinsified" code for G1 (or any SATB based GC),
// 2. The slow path - which is an expansion of the regular method entry.
//
// Notes:-
// * In the G1 code we do not check whether we need to block for
// a safepoint. If G1 is enabled then we must execute the specialized
// code for Reference.get (except when the Reference object is null)
// so that we can log the value in the referent field with an SATB
// update buffer.
// If the code for the getfield template is modified so that the
// G1 pre-barrier code is executed when the current method is
// Reference.get() then going through the normal method entry
// will be fine.
// * The G1 code can, however, check the receiver object (the instance
// of java.lang.Reference) and jump to the slow path if null. If the
// Reference object is null then we obviously cannot fetch the referent
// and so we don't need to call the G1 pre-barrier. Thus we can use the
// regular method entry code to generate the NPE.
//
// This code is based on generate_accessor_entry.
//
// rbx: methodOop
// r13: senderSP, must be preserved for the slow path; set SP to it on the fast path
address entry = __ pc();
const int referent_offset = java_lang_ref_Reference::referent_offset;
guarantee(referent_offset > 0, "referent offset not initialized");
if (UseG1GC) {
Label slow_path;
// rbx: method
// Check if local 0 != NULL
// If the receiver is null then it is OK to jump to the slow path.
__ movptr(rax, Address(rsp, wordSize));
__ testptr(rax, rax);
__ jcc(Assembler::zero, slow_path);
// rax: local 0
// rbx: method (but can be used as scratch now)
// rdx: scratch
// rdi: scratch
// Load the value of the referent field.
const Address field_address(rax, referent_offset);
__ load_heap_oop(rax, field_address);
// Generate the G1 pre-barrier code to log the value of
// the referent field in an SATB buffer.
__ g1_write_barrier_pre(noreg /* obj */,
rax /* pre_val */,
r15_thread /* thread */,
rbx /* tmp */,
true /* tosca_live */,
true /* expand_call */);
// _areturn
__ pop(rdi); // get return address
__ mov(rsp, r13); // set sp to sender sp
__ jmp(rdi);
__ ret(0);
// generate a vanilla interpreter entry as the slow path
__ bind(slow_path);
(void) generate_normal_entry(false);
return entry;
}
#endif // SERIALGC
// If G1 is not enabled then attempt to go through the accessor entry point
// Reference.get is an accessor
return generate_accessor_entry();
}
// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the
// native method than the typical interpreter frame setup.
@ -1463,6 +1552,8 @@ address AbstractInterpreterGenerator::generate_method_entry(
case Interpreter::java_lang_math_log : // fall thru
case Interpreter::java_lang_math_log10 : // fall thru
case Interpreter::java_lang_math_sqrt : entry_point = ((InterpreterGenerator*) this)->generate_math_entry(kind); break;
case Interpreter::java_lang_ref_reference_get
: entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
default : ShouldNotReachHere(); break;
}

@ -140,7 +140,12 @@ static void do_oop_store(InterpreterMacroAssembler* _masm,
}
__ get_thread(rcx);
__ save_bcp();
__ g1_write_barrier_pre(rdx, rcx, rsi, rbx, val != noreg);
__ g1_write_barrier_pre(rdx /* obj */,
rbx /* pre_val */,
rcx /* thread */,
rsi /* tmp */,
val != noreg /* tosca_live */,
false /* expand_call */);
// Do the actual store
// noreg means NULL
@ -149,7 +154,11 @@ static void do_oop_store(InterpreterMacroAssembler* _masm,
// No post barrier for NULL
} else {
__ movl(Address(rdx, 0), val);
__ g1_write_barrier_post(rdx, rax, rcx, rbx, rsi);
__ g1_write_barrier_post(rdx /* store_adr */,
val /* new_val */,
rcx /* thread */,
rbx /* tmp */,
rsi /* tmp2 */);
}
__ restore_bcp();

@ -147,12 +147,21 @@ static void do_oop_store(InterpreterMacroAssembler* _masm,
} else {
__ leaq(rdx, obj);
}
__ g1_write_barrier_pre(rdx, r8, rbx, val != noreg);
__ g1_write_barrier_pre(rdx /* obj */,
rbx /* pre_val */,
r15_thread /* thread */,
r8 /* tmp */,
val != noreg /* tosca_live */,
false /* expand_call */);
if (val == noreg) {
__ store_heap_oop_null(Address(rdx, 0));
} else {
__ store_heap_oop(Address(rdx, 0), val);
__ g1_write_barrier_post(rdx, val, r8, rbx);
__ g1_write_barrier_post(rdx /* store_adr */,
val /* new_val */,
r15_thread /* thread */,
r8 /* tmp */,
rbx /* tmp2 */);
}
}

@ -348,7 +348,7 @@ void VM_Version::get_processor_features() {
}
char buf[256];
jio_snprintf(buf, sizeof(buf), "(%u cores per cpu, %u threads per core) family %d model %d stepping %d%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
jio_snprintf(buf, sizeof(buf), "(%u cores per cpu, %u threads per core) family %d model %d stepping %d%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
cores_per_cpu(), threads_per_core(),
cpu_family(), _model, _stepping,
(supports_cmov() ? ", cmov" : ""),
@ -363,8 +363,7 @@ void VM_Version::get_processor_features() {
(supports_sse4_2() ? ", sse4.2" : ""),
(supports_popcnt() ? ", popcnt" : ""),
(supports_mmx_ext() ? ", mmxext" : ""),
(supports_3dnow() ? ", 3dnow" : ""),
(supports_3dnow2() ? ", 3dnowext" : ""),
(supports_3dnow_prefetch() ? ", 3dnowpref" : ""),
(supports_lzcnt() ? ", lzcnt": ""),
(supports_sse4a() ? ", sse4a": ""),
(supports_ht() ? ", ht": ""));
@ -522,13 +521,13 @@ void VM_Version::get_processor_features() {
// set valid Prefetch instruction
if( ReadPrefetchInstr < 0 ) ReadPrefetchInstr = 0;
if( ReadPrefetchInstr > 3 ) ReadPrefetchInstr = 3;
if( ReadPrefetchInstr == 3 && !supports_3dnow() ) ReadPrefetchInstr = 0;
if( !supports_sse() && supports_3dnow() ) ReadPrefetchInstr = 3;
if( ReadPrefetchInstr == 3 && !supports_3dnow_prefetch() ) ReadPrefetchInstr = 0;
if( !supports_sse() && supports_3dnow_prefetch() ) ReadPrefetchInstr = 3;
if( AllocatePrefetchInstr < 0 ) AllocatePrefetchInstr = 0;
if( AllocatePrefetchInstr > 3 ) AllocatePrefetchInstr = 3;
if( AllocatePrefetchInstr == 3 && !supports_3dnow() ) AllocatePrefetchInstr=0;
if( !supports_sse() && supports_3dnow() ) AllocatePrefetchInstr = 3;
if( AllocatePrefetchInstr == 3 && !supports_3dnow_prefetch() ) AllocatePrefetchInstr=0;
if( !supports_sse() && supports_3dnow_prefetch() ) AllocatePrefetchInstr = 3;
// Allocation prefetch settings
intx cache_line_size = L1_data_cache_line_size();
@ -576,10 +575,10 @@ void VM_Version::get_processor_features() {
logical_processors_per_package());
tty->print_cr("UseSSE=%d",UseSSE);
tty->print("Allocation: ");
if (AllocatePrefetchStyle <= 0 || UseSSE == 0 && !supports_3dnow()) {
if (AllocatePrefetchStyle <= 0 || UseSSE == 0 && !supports_3dnow_prefetch()) {
tty->print_cr("no prefetching");
} else {
if (UseSSE == 0 && supports_3dnow()) {
if (UseSSE == 0 && supports_3dnow_prefetch()) {
tty->print("PREFETCHW");
} else if (UseSSE >= 1) {
if (AllocatePrefetchInstr == 0) {

@ -188,7 +188,8 @@ protected:
CPU_FXSR = (1 << 2),
CPU_HT = (1 << 3),
CPU_MMX = (1 << 4),
CPU_3DNOW = (1 << 5), // 3DNow comes from cpuid 0x80000001 (EDX)
CPU_3DNOW_PREFETCH = (1 << 5), // Processor supports 3dnow prefetch and prefetchw instructions
// but may not support other 3dnow instructions
CPU_SSE = (1 << 6),
CPU_SSE2 = (1 << 7),
CPU_SSE3 = (1 << 8), // SSE3 comes from cpuid 1 (ECX)
@ -328,8 +329,9 @@ protected:
// AMD features.
if (is_amd()) {
if (_cpuid_info.ext_cpuid1_edx.bits.tdnow != 0)
result |= CPU_3DNOW;
if ((_cpuid_info.ext_cpuid1_edx.bits.tdnow != 0) ||
(_cpuid_info.ext_cpuid1_ecx.bits.prefetchw != 0))
result |= CPU_3DNOW_PREFETCH;
if (_cpuid_info.ext_cpuid1_ecx.bits.lzcnt != 0)
result |= CPU_LZCNT;
if (_cpuid_info.ext_cpuid1_ecx.bits.sse4a != 0)
@ -446,9 +448,8 @@ public:
//
// AMD features
//
static bool supports_3dnow() { return (_cpuFeatures & CPU_3DNOW) != 0; }
static bool supports_3dnow_prefetch() { return (_cpuFeatures & CPU_3DNOW_PREFETCH) != 0; }
static bool supports_mmx_ext() { return is_amd() && _cpuid_info.ext_cpuid1_edx.bits.mmx_amd != 0; }
static bool supports_3dnow2() { return is_amd() && _cpuid_info.ext_cpuid1_edx.bits.tdnow2 != 0; }
static bool supports_lzcnt() { return (_cpuFeatures & CPU_LZCNT) != 0; }
static bool supports_sse4a() { return (_cpuFeatures & CPU_SSE4A) != 0; }

@ -3423,7 +3423,7 @@ encode %{
masm.movptr(boxReg, tmpReg); // consider: LEA box, [tmp-2]
// Using a prefetchw helps avoid later RTS->RTO upgrades and cache probes
if ((EmitSync & 2048) && VM_Version::supports_3dnow() && os::is_MP()) {
if ((EmitSync & 2048) && VM_Version::supports_3dnow_prefetch() && os::is_MP()) {
// prefetchw [eax + Offset(_owner)-2]
masm.prefetchw(Address(rax, ObjectMonitor::owner_offset_in_bytes()-2));
}
@ -3467,7 +3467,7 @@ encode %{
masm.movptr(boxReg, tmpReg) ;
// Using a prefetchw helps avoid later RTS->RTO upgrades and cache probes
if ((EmitSync & 2048) && VM_Version::supports_3dnow() && os::is_MP()) {
if ((EmitSync & 2048) && VM_Version::supports_3dnow_prefetch() && os::is_MP()) {
// prefetchw [eax + Offset(_owner)-2]
masm.prefetchw(Address(rax, ObjectMonitor::owner_offset_in_bytes()-2));
}
@ -3614,7 +3614,7 @@ encode %{
// See also http://gee.cs.oswego.edu/dl/jmm/cookbook.html.
masm.get_thread (boxReg) ;
if ((EmitSync & 4096) && VM_Version::supports_3dnow() && os::is_MP()) {
if ((EmitSync & 4096) && VM_Version::supports_3dnow_prefetch() && os::is_MP()) {
// prefetchw [ebx + Offset(_owner)-2]
masm.prefetchw(Address(rbx, ObjectMonitor::owner_offset_in_bytes()-2));
}
@ -7333,7 +7333,7 @@ instruct loadSSD(regD dst, stackSlotD src) %{
// Must be safe to execute with invalid address (cannot fault).
instruct prefetchr0( memory mem ) %{
predicate(UseSSE==0 && !VM_Version::supports_3dnow());
predicate(UseSSE==0 && !VM_Version::supports_3dnow_prefetch());
match(PrefetchRead mem);
ins_cost(0);
size(0);
@ -7343,7 +7343,7 @@ instruct prefetchr0( memory mem ) %{
%}
instruct prefetchr( memory mem ) %{
predicate(UseSSE==0 && VM_Version::supports_3dnow() || ReadPrefetchInstr==3);
predicate(UseSSE==0 && VM_Version::supports_3dnow_prefetch() || ReadPrefetchInstr==3);
match(PrefetchRead mem);
ins_cost(100);
@ -7387,7 +7387,7 @@ instruct prefetchrT2( memory mem ) %{
%}
instruct prefetchw0( memory mem ) %{
predicate(UseSSE==0 && !VM_Version::supports_3dnow());
predicate(UseSSE==0 && !VM_Version::supports_3dnow_prefetch());
match(PrefetchWrite mem);
ins_cost(0);
size(0);
@ -7397,7 +7397,7 @@ instruct prefetchw0( memory mem ) %{
%}
instruct prefetchw( memory mem ) %{
predicate(UseSSE==0 && VM_Version::supports_3dnow() || AllocatePrefetchInstr==3);
predicate(UseSSE==0 && VM_Version::supports_3dnow_prefetch() || AllocatePrefetchInstr==3);
match( PrefetchWrite mem );
ins_cost(100);

@ -1,6 +1,6 @@
/*
* Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008 Red Hat, Inc.
* Copyright 2007, 2008, 2011 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -150,4 +150,22 @@
#define SET_LOCALS_LONG_FROM_ADDR(addr, offset) (((VMJavaVal64*)&locals[-((offset)+1)])->l = \
((VMJavaVal64*)(addr))->l)
// VMSlots implementation
#define VMSLOTS_SLOT(offset) ((intptr_t*)&vmslots[(offset)])
#define VMSLOTS_ADDR(offset) ((address)vmslots[(offset)])
#define VMSLOTS_INT(offset) (*((jint*)&vmslots[(offset)]))
#define VMSLOTS_FLOAT(offset) (*((jfloat*)&vmslots[(offset)]))
#define VMSLOTS_OBJECT(offset) ((oop)vmslots[(offset)])
#define VMSLOTS_DOUBLE(offset) (((VMJavaVal64*)&vmslots[(offset) - 1])->d)
#define VMSLOTS_LONG(offset) (((VMJavaVal64*)&vmslots[(offset) - 1])->l)
#define SET_VMSLOTS_SLOT(value, offset) (*(intptr_t*)&vmslots[(offset)] = *(intptr_t *)(value))
#define SET_VMSLOTS_ADDR(value, offset) (*((address *)&vmslots[(offset)]) = (value))
#define SET_VMSLOTS_INT(value, offset) (*((jint *)&vmslots[(offset)]) = (value))
#define SET_VMSLOTS_FLOAT(value, offset) (*((jfloat *)&vmslots[(offset)]) = (value))
#define SET_VMSLOTS_OBJECT(value, offset) (*((oop *)&vmslots[(offset)]) = (value))
#define SET_VMSLOTS_DOUBLE(value, offset) (((VMJavaVal64*)&vmslots[(offset) - 1])->d = (value))
#define SET_VMSLOTS_LONG(value, offset) (((VMJavaVal64*)&vmslots[(offset) - 1])->l = (value))
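// Usage sketch (assumes a local `intptr_t* vmslots`, as in the interpreter;
// longs and doubles occupy two slots and are addressed via the higher index):
//
//   jint  i = VMSLOTS_INT(0);      // int stored in slot 0
//   jlong l = VMSLOTS_LONG(2);     // long occupying slots 1 and 2
//   SET_VMSLOTS_LONG(l + 1, 2);    // write back through the same slot pair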
#endif // CPU_ZERO_VM_BYTECODEINTERPRETER_ZERO_HPP

@ -1,6 +1,6 @@
/*
* Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
* Copyright 2007, 2008, 2009, 2010, 2011 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -56,10 +56,13 @@
#define fixup_after_potential_safepoint() \
method = istate->method()
#define CALL_VM_NOCHECK(func) \
#define CALL_VM_NOCHECK_NOFIX(func) \
thread->set_last_Java_frame(); \
func; \
thread->reset_last_Java_frame(); \
thread->reset_last_Java_frame();
#define CALL_VM_NOCHECK(func) \
CALL_VM_NOCHECK_NOFIX(func) \
fixup_after_potential_safepoint()
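// The _NOFIX variant is for the method-handle paths below, which run
// outside the bytecode interpreter loop and have no `method` local to
// refresh after a potential safepoint.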
int CppInterpreter::normal_entry(methodOop method, intptr_t UNUSED, TRAPS) {
@ -177,6 +180,25 @@ void CppInterpreter::main_loop(int recurse, TRAPS) {
method, istate->osr_entry(), istate->osr_buf(), THREAD);
return;
}
else if (istate->msg() == BytecodeInterpreter::call_method_handle) {
oop method_handle = istate->callee();
// Trim back the stack to put the parameters at the top
stack->set_sp(istate->stack() + 1);
// Make the call
process_method_handle(method_handle, THREAD);
fixup_after_potential_safepoint();
// Convert the result
istate->set_stack(stack->sp() - 1);
// Restore the stack
stack->set_sp(istate->stack_limit() + 1);
// Resume the interpreter
istate->set_msg(BytecodeInterpreter::method_resume);
}
else {
ShouldNotReachHere();
}
@ -607,6 +629,549 @@ int CppInterpreter::empty_entry(methodOop method, intptr_t UNUSED, TRAPS) {
return 0;
}
int CppInterpreter::method_handle_entry(methodOop method,
intptr_t UNUSED, TRAPS) {
JavaThread *thread = (JavaThread *) THREAD;
ZeroStack *stack = thread->zero_stack();
int argument_slots = method->size_of_parameters();
int result_slots = type2size[result_type_of(method)];
intptr_t *vmslots = stack->sp();
intptr_t *unwind_sp = vmslots + argument_slots;
// Find the MethodType
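// (method_type_offsets_chain() is a -1-terminated array of offsets; each
// step loads one pointer-sized field, walking from the method to its
// MethodType.)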
address p = (address) method;
for (jint* pc = method->method_type_offsets_chain(); (*pc) != -1; pc++) {
p = *(address*)(p + (*pc));
}
oop method_type = (oop) p;
// The MethodHandle is in the slot after the arguments
oop form = java_lang_invoke_MethodType::form(method_type);
int num_vmslots = java_lang_invoke_MethodTypeForm::vmslots(form);
assert(argument_slots == num_vmslots + 1, "should be");
oop method_handle = VMSLOTS_OBJECT(num_vmslots);
// InvokeGeneric requires some extra shuffling
oop mhtype = java_lang_invoke_MethodHandle::type(method_handle);
bool is_exact = mhtype == method_type;
if (!is_exact) {
if (method->intrinsic_id() == vmIntrinsics::_invokeExact) {
CALL_VM_NOCHECK_NOFIX(
InterpreterRuntime::throw_WrongMethodTypeException(
thread, method_type, mhtype));
// NB all oops trashed!
assert(HAS_PENDING_EXCEPTION, "should do");
stack->set_sp(unwind_sp);
return 0;
}
assert(method->intrinsic_id() == vmIntrinsics::_invokeGeneric, "should be");
// Load up an adapter from the calling type
// NB the x86 code for this (in methodHandles_x86.cpp, search for
// "genericInvoker") is really really odd. I'm hoping it's trying
// to accommodate odd VM/class library combinations I can ignore.
oop adapter = java_lang_invoke_MethodTypeForm::genericInvoker(form);
if (adapter == NULL) {
CALL_VM_NOCHECK_NOFIX(
InterpreterRuntime::throw_WrongMethodTypeException(
thread, method_type, mhtype));
// NB all oops trashed!
assert(HAS_PENDING_EXCEPTION, "should do");
stack->set_sp(unwind_sp);
return 0;
}
// Adapters are shared among form-families of method-type. The
// type being called is passed as a trusted first argument so that
// the adapter knows the actual types of its arguments and return
// values.
insert_vmslots(num_vmslots + 1, 1, THREAD);
if (HAS_PENDING_EXCEPTION) {
// NB all oops trashed!
stack->set_sp(unwind_sp);
return 0;
}
vmslots = stack->sp();
num_vmslots++;
SET_VMSLOTS_OBJECT(method_type, num_vmslots);
method_handle = adapter;
}
// Start processing
process_method_handle(method_handle, THREAD);
if (HAS_PENDING_EXCEPTION)
result_slots = 0;
// If this is an invokeExact then the eventual callee will not
// have unwound the method handle argument so we have to do it.
// If a result is being returned then it will be above the method
// handle argument we're unwinding.
if (is_exact) {
intptr_t result[2];
for (int i = 0; i < result_slots; i++)
result[i] = stack->pop();
stack->pop();
for (int i = result_slots - 1; i >= 0; i--)
stack->push(result[i]);
}
// Check
assert(stack->sp() == unwind_sp - result_slots, "should be");
// No deoptimized frames on the stack
return 0;
}
void CppInterpreter::process_method_handle(oop method_handle, TRAPS) {
JavaThread *thread = (JavaThread *) THREAD;
ZeroStack *stack = thread->zero_stack();
intptr_t *vmslots = stack->sp();
bool direct_to_method = false;
BasicType src_rtype = T_ILLEGAL;
BasicType dst_rtype = T_ILLEGAL;
MethodHandleEntry *entry =
java_lang_invoke_MethodHandle::vmentry(method_handle);
MethodHandles::EntryKind entry_kind =
(MethodHandles::EntryKind) (((intptr_t) entry) & 0xffffffff);
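// (On Zero the "entry" is the EntryKind itself -- see
// generate_method_handle_stub, which does
// init_entry(ek, (MethodHandleEntry *) ek) -- so masking the low 32 bits
// recovers the kind.)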
methodOop method = NULL;
switch (entry_kind) {
case MethodHandles::_invokestatic_mh:
direct_to_method = true;
break;
case MethodHandles::_invokespecial_mh:
case MethodHandles::_invokevirtual_mh:
case MethodHandles::_invokeinterface_mh:
{
oop receiver =
VMSLOTS_OBJECT(
java_lang_invoke_MethodHandle::vmslots(method_handle) - 1);
if (receiver == NULL) {
stack->set_sp(calculate_unwind_sp(stack, method_handle));
CALL_VM_NOCHECK_NOFIX(
throw_exception(
thread, vmSymbols::java_lang_NullPointerException()));
// NB all oops trashed!
assert(HAS_PENDING_EXCEPTION, "should do");
return;
}
if (entry_kind != MethodHandles::_invokespecial_mh) {
int index = java_lang_invoke_DirectMethodHandle::vmindex(method_handle);
instanceKlass* rcvrKlass =
(instanceKlass *) receiver->klass()->klass_part();
if (entry_kind == MethodHandles::_invokevirtual_mh) {
method = (methodOop) rcvrKlass->start_of_vtable()[index];
}
else {
oop iclass = java_lang_invoke_MethodHandle::vmtarget(method_handle);
itableOffsetEntry* ki =
(itableOffsetEntry *) rcvrKlass->start_of_itable();
int i, length = rcvrKlass->itable_length();
for (i = 0; i < length; i++, ki++ ) {
if (ki->interface_klass() == iclass)
break;
}
if (i == length) {
stack->set_sp(calculate_unwind_sp(stack, method_handle));
CALL_VM_NOCHECK_NOFIX(
throw_exception(
thread, vmSymbols::java_lang_IncompatibleClassChangeError()));
// NB all oops trashed!
assert(HAS_PENDING_EXCEPTION, "should do");
return;
}
itableMethodEntry* im = ki->first_method_entry(receiver->klass());
method = im[index].method();
if (method == NULL) {
stack->set_sp(calculate_unwind_sp(stack, method_handle));
CALL_VM_NOCHECK_NOFIX(
throw_exception(
thread, vmSymbols::java_lang_AbstractMethodError()));
// NB all oops trashed!
assert(HAS_PENDING_EXCEPTION, "should do");
return;
}
}
}
}
direct_to_method = true;
break;
case MethodHandles::_bound_ref_direct_mh:
case MethodHandles::_bound_int_direct_mh:
case MethodHandles::_bound_long_direct_mh:
direct_to_method = true;
// fall through
case MethodHandles::_bound_ref_mh:
case MethodHandles::_bound_int_mh:
case MethodHandles::_bound_long_mh:
{
BasicType arg_type = T_ILLEGAL;
int arg_mask = -1;
int arg_slots = -1;
MethodHandles::get_ek_bound_mh_info(
entry_kind, arg_type, arg_mask, arg_slots);
int arg_slot =
java_lang_invoke_BoundMethodHandle::vmargslot(method_handle);
// Create the new slot(s)
intptr_t *unwind_sp = calculate_unwind_sp(stack, method_handle);
insert_vmslots(arg_slot, arg_slots, THREAD);
if (HAS_PENDING_EXCEPTION) {
// all oops trashed
stack->set_sp(unwind_sp);
return;
}
vmslots = stack->sp();
// Store bound argument into new stack slot
oop arg = java_lang_invoke_BoundMethodHandle::argument(method_handle);
if (arg_type == T_OBJECT) {
assert(arg_slots == 1, "should be");
SET_VMSLOTS_OBJECT(arg, arg_slot);
}
else {
jvalue arg_value;
arg_type = java_lang_boxing_object::get_value(arg, &arg_value);
switch (arg_type) {
case T_BOOLEAN:
SET_VMSLOTS_INT(arg_value.z, arg_slot);
break;
case T_CHAR:
SET_VMSLOTS_INT(arg_value.c, arg_slot);
break;
case T_BYTE:
SET_VMSLOTS_INT(arg_value.b, arg_slot);
break;
case T_SHORT:
SET_VMSLOTS_INT(arg_value.s, arg_slot);
break;
case T_INT:
SET_VMSLOTS_INT(arg_value.i, arg_slot);
break;
case T_FLOAT:
SET_VMSLOTS_FLOAT(arg_value.f, arg_slot);
break;
case T_LONG:
SET_VMSLOTS_LONG(arg_value.j, arg_slot + 1);
break;
case T_DOUBLE:
SET_VMSLOTS_DOUBLE(arg_value.d, arg_slot + 1);
break;
default:
tty->print_cr("unhandled type %s", type2name(arg_type));
ShouldNotReachHere();
}
}
}
break;
case MethodHandles::_adapter_retype_only:
case MethodHandles::_adapter_retype_raw:
src_rtype = result_type_of_handle(
java_lang_invoke_MethodHandle::vmtarget(method_handle));
dst_rtype = result_type_of_handle(method_handle);
break;
case MethodHandles::_adapter_check_cast:
{
int arg_slot =
java_lang_invoke_AdapterMethodHandle::vmargslot(method_handle);
oop arg = VMSLOTS_OBJECT(arg_slot);
if (arg != NULL) {
klassOop objKlassOop = arg->klass();
klassOop klassOf = java_lang_Class::as_klassOop(
java_lang_invoke_AdapterMethodHandle::argument(method_handle));
if (objKlassOop != klassOf &&
!objKlassOop->klass_part()->is_subtype_of(klassOf)) {
ResourceMark rm(THREAD);
const char* objName = Klass::cast(objKlassOop)->external_name();
const char* klassName = Klass::cast(klassOf)->external_name();
char* message = SharedRuntime::generate_class_cast_message(
objName, klassName);
stack->set_sp(calculate_unwind_sp(stack, method_handle));
CALL_VM_NOCHECK_NOFIX(
throw_exception(
thread, vmSymbols::java_lang_ClassCastException(), message));
// NB all oops trashed!
assert(HAS_PENDING_EXCEPTION, "should do");
return;
}
}
}
break;
case MethodHandles::_adapter_dup_args:
{
int arg_slot =
java_lang_invoke_AdapterMethodHandle::vmargslot(method_handle);
int conv =
java_lang_invoke_AdapterMethodHandle::conversion(method_handle);
int num_slots = -MethodHandles::adapter_conversion_stack_move(conv);
assert(num_slots > 0, "should be");
// Create the new slot(s)
intptr_t *unwind_sp = calculate_unwind_sp(stack, method_handle);
stack->overflow_check(num_slots, THREAD);
if (HAS_PENDING_EXCEPTION) {
// all oops trashed
stack->set_sp(unwind_sp);
return;
}
// Duplicate the arguments
for (int i = num_slots - 1; i >= 0; i--)
stack->push(*VMSLOTS_SLOT(arg_slot + i));
vmslots = stack->sp(); // unused, but let the compiler figure that out
}
break;
case MethodHandles::_adapter_drop_args:
{
int arg_slot =
java_lang_invoke_AdapterMethodHandle::vmargslot(method_handle);
int conv =
java_lang_invoke_AdapterMethodHandle::conversion(method_handle);
int num_slots = MethodHandles::adapter_conversion_stack_move(conv);
assert(num_slots > 0, "should be");
remove_vmslots(arg_slot, num_slots, THREAD); // doesn't trap
vmslots = stack->sp(); // unused, but let the compiler figure that out
}
break;
case MethodHandles::_adapter_opt_swap_1:
case MethodHandles::_adapter_opt_swap_2:
case MethodHandles::_adapter_opt_rot_1_up:
case MethodHandles::_adapter_opt_rot_1_down:
case MethodHandles::_adapter_opt_rot_2_up:
case MethodHandles::_adapter_opt_rot_2_down:
{
int arg1 =
java_lang_invoke_AdapterMethodHandle::vmargslot(method_handle);
int conv =
java_lang_invoke_AdapterMethodHandle::conversion(method_handle);
int arg2 = MethodHandles::adapter_conversion_vminfo(conv);
int swap_bytes = 0, rotate = 0;
MethodHandles::get_ek_adapter_opt_swap_rot_info(
entry_kind, swap_bytes, rotate);
int swap_slots = swap_bytes >> LogBytesPerWord;
intptr_t tmp;
switch (rotate) {
case 0: // swap
for (int i = 0; i < swap_slots; i++) {
tmp = *VMSLOTS_SLOT(arg1 + i);
SET_VMSLOTS_SLOT(VMSLOTS_SLOT(arg2 + i), arg1 + i);
SET_VMSLOTS_SLOT(&tmp, arg2 + i);
}
break;
case 1: // up
assert(arg1 - swap_slots > arg2, "should be");
tmp = *VMSLOTS_SLOT(arg1);
for (int i = arg1 - swap_slots; i >= arg2; i--)
SET_VMSLOTS_SLOT(VMSLOTS_SLOT(i), i + swap_slots);
SET_VMSLOTS_SLOT(&tmp, arg2);
break;
case -1: // down
assert(arg2 - swap_slots > arg1, "should be");
tmp = *VMSLOTS_SLOT(arg1);
for (int i = arg1 + swap_slots; i <= arg2; i++)
SET_VMSLOTS_SLOT(VMSLOTS_SLOT(i), i - swap_slots);
SET_VMSLOTS_SLOT(&tmp, arg2);
break;
default:
ShouldNotReachHere();
}
}
break;
case MethodHandles::_adapter_opt_i2l:
{
int arg_slot =
java_lang_invoke_AdapterMethodHandle::vmargslot(method_handle);
int arg = VMSLOTS_INT(arg_slot);
intptr_t *unwind_sp = calculate_unwind_sp(stack, method_handle);
insert_vmslots(arg_slot, 1, THREAD);
if (HAS_PENDING_EXCEPTION) {
// all oops trashed
stack->set_sp(unwind_sp);
return;
}
vmslots = stack->sp();
arg_slot++;
SET_VMSLOTS_LONG(arg, arg_slot);
}
break;
case MethodHandles::_adapter_opt_unboxi:
case MethodHandles::_adapter_opt_unboxl:
{
int arg_slot =
java_lang_invoke_AdapterMethodHandle::vmargslot(method_handle);
oop arg = VMSLOTS_OBJECT(arg_slot);
jvalue arg_value;
BasicType arg_type = java_lang_boxing_object::get_value(arg, &arg_value);
if (arg_type == T_LONG || arg_type == T_DOUBLE) {
intptr_t *unwind_sp = calculate_unwind_sp(stack, method_handle);
insert_vmslots(arg_slot, 1, THREAD);
if (HAS_PENDING_EXCEPTION) {
// all oops trashed
stack->set_sp(unwind_sp);
return;
}
vmslots = stack->sp();
arg_slot++;
}
switch (arg_type) {
case T_BOOLEAN:
SET_VMSLOTS_INT(arg_value.z, arg_slot);
break;
case T_CHAR:
SET_VMSLOTS_INT(arg_value.c, arg_slot);
break;
case T_BYTE:
SET_VMSLOTS_INT(arg_value.b, arg_slot);
break;
case T_SHORT:
SET_VMSLOTS_INT(arg_value.s, arg_slot);
break;
case T_INT:
SET_VMSLOTS_INT(arg_value.i, arg_slot);
break;
case T_FLOAT:
SET_VMSLOTS_FLOAT(arg_value.f, arg_slot);
break;
case T_LONG:
SET_VMSLOTS_LONG(arg_value.j, arg_slot);
break;
case T_DOUBLE:
SET_VMSLOTS_DOUBLE(arg_value.d, arg_slot);
break;
default:
tty->print_cr("unhandled type %s", type2name(arg_type));
ShouldNotReachHere();
}
}
break;
default:
tty->print_cr("unhandled entry_kind %s",
MethodHandles::entry_name(entry_kind));
ShouldNotReachHere();
}
// Continue along the chain
if (direct_to_method) {
if (method == NULL) {
method =
(methodOop) java_lang_invoke_MethodHandle::vmtarget(method_handle);
}
address entry_point = method->from_interpreted_entry();
Interpreter::invoke_method(method, entry_point, THREAD);
}
else {
process_method_handle(
java_lang_invoke_MethodHandle::vmtarget(method_handle), THREAD);
}
// NB all oops now trashed
// Adapt the result type, if necessary
if (src_rtype != dst_rtype && !HAS_PENDING_EXCEPTION) {
switch (dst_rtype) {
case T_VOID:
for (int i = 0; i < type2size[src_rtype]; i++)
stack->pop();
return;
case T_INT:
switch (src_rtype) {
case T_VOID:
stack->overflow_check(1, CHECK);
stack->push(0);
return;
case T_BOOLEAN:
case T_CHAR:
case T_BYTE:
case T_SHORT:
return;
}
}
tty->print_cr("unhandled conversion:");
tty->print_cr("src_rtype = %s", type2name(src_rtype));
tty->print_cr("dst_rtype = %s", type2name(dst_rtype));
ShouldNotReachHere();
}
}
// The new slots will be inserted before slot insert_before.
// Slots < insert_before will have the same slot number after the insert.
// Slots >= insert_before will become old_slot + num_slots.
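// Example: with slots [s0, s1, s2] (s0 at index 0), insert_vmslots(1, 1)
// yields [s0, <new>, s1, s2]: s0 keeps index 0 while s1 and s2 become
// indices 2 and 3; remove_vmslots(1, 1) on that layout restores [s0, s1, s2].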
void CppInterpreter::insert_vmslots(int insert_before, int num_slots, TRAPS) {
JavaThread *thread = (JavaThread *) THREAD;
ZeroStack *stack = thread->zero_stack();
// Allocate the space
stack->overflow_check(num_slots, CHECK);
stack->alloc(num_slots * wordSize);
intptr_t *vmslots = stack->sp();
// Shuffle everything up
for (int i = 0; i < insert_before; i++)
SET_VMSLOTS_SLOT(VMSLOTS_SLOT(i + num_slots), i);
}
void CppInterpreter::remove_vmslots(int first_slot, int num_slots, TRAPS) {
JavaThread *thread = (JavaThread *) THREAD;
ZeroStack *stack = thread->zero_stack();
intptr_t *vmslots = stack->sp();
// Move everything down
for (int i = first_slot - 1; i >= 0; i--)
SET_VMSLOTS_SLOT(VMSLOTS_SLOT(i), i + num_slots);
// Deallocate the space
stack->set_sp(stack->sp() + num_slots);
}
BasicType CppInterpreter::result_type_of_handle(oop method_handle) {
oop method_type = java_lang_invoke_MethodHandle::type(method_handle);
oop return_type = java_lang_invoke_MethodType::rtype(method_type);
return java_lang_Class::as_BasicType(return_type, (klassOop *) NULL);
}
intptr_t* CppInterpreter::calculate_unwind_sp(ZeroStack* stack,
oop method_handle) {
oop method_type = java_lang_invoke_MethodHandle::type(method_handle);
oop form = java_lang_invoke_MethodType::form(method_type);
int argument_slots = java_lang_invoke_MethodTypeForm::vmslots(form);
return stack->sp() + argument_slots;
}
IRT_ENTRY(void, CppInterpreter::throw_exception(JavaThread* thread,
Symbol* name,
char* message))
THROW_MSG(name, message);
IRT_END
InterpreterFrame *InterpreterFrame::build(const methodOop method, TRAPS) {
JavaThread *thread = (JavaThread *) THREAD;
ZeroStack *stack = thread->zero_stack();
@ -737,6 +1302,26 @@ address InterpreterGenerator::generate_accessor_entry() {
return generate_entry((address) CppInterpreter::accessor_entry);
}
address InterpreterGenerator::generate_Reference_get_entry(void) {
#ifndef SERIALGC
if (UseG1GC) {
// We need a routine that generates code to:
// * load the value in the referent field, and
// * pass that value to the pre-barrier.
//
// In the case of G1 this will record the value of the
// referent in an SATB buffer if marking is active.
// This will cause concurrent marking to treat the
// referent as live.
Unimplemented();
}
#endif // SERIALGC
// If G1 is not enabled then attempt to go through the accessor entry point
// Reference.get is an accessor
return generate_accessor_entry();
}
address InterpreterGenerator::generate_native_entry(bool synchronized) {
assert(synchronized == false, "should be");
@ -792,6 +1377,10 @@ address AbstractInterpreterGenerator::generate_method_entry(
entry_point = ((InterpreterGenerator*) this)->generate_math_entry(kind);
break;
case Interpreter::java_lang_ref_reference_get:
entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry();
break;
default:
ShouldNotReachHere();
}

@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2010 Red Hat, Inc.
* Copyright 2007, 2008, 2010, 2011 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -36,11 +36,21 @@
static int native_entry(methodOop method, intptr_t UNUSED, TRAPS);
static int accessor_entry(methodOop method, intptr_t UNUSED, TRAPS);
static int empty_entry(methodOop method, intptr_t UNUSED, TRAPS);
static int method_handle_entry(methodOop method, intptr_t UNUSED, TRAPS);
public:
// Main loop of normal_entry
static void main_loop(int recurse, TRAPS);
private:
// Helpers for method_handle_entry
static void process_method_handle(oop method_handle, TRAPS);
static void insert_vmslots(int insert_before, int num_slots, TRAPS);
static void remove_vmslots(int first_slot, int num_slots, TRAPS);
static BasicType result_type_of_handle(oop method_handle);
static intptr_t* calculate_unwind_sp(ZeroStack* stack, oop method_handle);
static void throw_exception(JavaThread* thread, Symbol* name,char *msg=NULL);
private:
// Fast result type determination
static BasicType result_type_of(methodOop method);

@ -1,6 +1,6 @@
/*
* Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
* Copyright 2007, 2008, 2009, 2010, 2011 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -54,4 +54,6 @@ define_pd_global(bool, RewriteFrequentPairs, true);
define_pd_global(bool, UseMembar, false);
// GC Ergo Flags
define_pd_global(intx, CMSYoungGenPerWorker, 16*M); // default max size of CMS young gen, per GC worker thread
#endif // CPU_ZERO_VM_GLOBALS_ZERO_HPP

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -37,6 +37,7 @@
address generate_math_entry(AbstractInterpreter::MethodKind kind);
address generate_empty_entry();
address generate_accessor_entry();
address generate_Reference_get_entry();
address generate_method_handle_entry();
#endif // CPU_ZERO_VM_INTERPRETERGENERATOR_ZERO_HPP

@ -1,6 +1,6 @@
/*
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
* Copyright 2007, 2008, 2009, 2010, 2011 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -49,6 +49,9 @@
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef CC_INTERP
#include "interpreter/cppInterpreter.hpp"
#endif
address AbstractInterpreterGenerator::generate_slow_signature_handler() {
_masm->advance(1);
@ -64,11 +67,15 @@ address InterpreterGenerator::generate_math_entry(
}
address InterpreterGenerator::generate_abstract_entry() {
return ShouldNotCallThisEntry();
return generate_entry((address) ShouldNotCallThisEntry());
}
address InterpreterGenerator::generate_method_handle_entry() {
return ShouldNotCallThisEntry();
#ifdef CC_INTERP
return generate_entry((address) CppInterpreter::method_handle_entry);
#else
return generate_entry((address) ShouldNotCallThisEntry());
#endif // CC_INTERP
}
bool AbstractInterpreter::can_be_compiled(methodHandle m) {

@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2009, 2010 Red Hat, Inc.
* Copyright 2009, 2010, 2011 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,10 +29,21 @@
#include "prims/methodHandles.hpp"
int MethodHandles::adapter_conversion_ops_supported_mask() {
ShouldNotCallThis();
return ((1<<java_lang_invoke_AdapterMethodHandle::OP_RETYPE_ONLY)
|(1<<java_lang_invoke_AdapterMethodHandle::OP_RETYPE_RAW)
|(1<<java_lang_invoke_AdapterMethodHandle::OP_CHECK_CAST)
|(1<<java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_PRIM)
|(1<<java_lang_invoke_AdapterMethodHandle::OP_REF_TO_PRIM)
|(1<<java_lang_invoke_AdapterMethodHandle::OP_SWAP_ARGS)
|(1<<java_lang_invoke_AdapterMethodHandle::OP_ROT_ARGS)
|(1<<java_lang_invoke_AdapterMethodHandle::OP_DUP_ARGS)
|(1<<java_lang_invoke_AdapterMethodHandle::OP_DROP_ARGS)
//|(1<<java_lang_invoke_AdapterMethodHandle::OP_SPREAD_ARGS) //BUG!
);
// FIXME: MethodHandlesTest gets a crash if we enable OP_SPREAD_ARGS.
}
void MethodHandles::generate_method_handle_stub(MacroAssembler* masm,
MethodHandles::EntryKind ek) {
ShouldNotCallThis();
init_entry(ek, (MethodHandleEntry *) ek);
}

@ -1,6 +1,6 @@
/*
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2009 Red Hat, Inc.
* Copyright 2007, 2009, 2010, 2011 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -31,7 +31,7 @@
#include "oops/oop.inline.hpp"
#include "runtime/safepoint.hpp"
void Relocation::pd_set_data_value(address x, intptr_t o) {
void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {
ShouldNotCallThis();
}

@ -1,6 +1,6 @@
/*
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
* Copyright 2007, 2008, 2009, 2010, 2011 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -78,15 +78,17 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(
nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
methodHandle method,
int total_in_args,
int comp_args_on_stack,
BasicType *in_sig_bt,
VMRegPair *in_regs,
int compile_id,
int total_args_passed,
int max_arg,
BasicType *sig_bt,
VMRegPair *regs,
BasicType ret_type) {
#ifdef SHARK
return SharkCompiler::compiler()->generate_native_wrapper(masm,
method,
in_sig_bt,
compile_id,
sig_bt,
ret_type);
#else
ShouldNotCallThis();

@ -2648,45 +2648,39 @@ bool os::uncommit_memory(char* addr, size_t size) {
// writing thread stacks don't use growable mappings (i.e. those
// created with MAP_GROWSDOWN), and aren't marked "[stack]", so this
// only applies to the main thread.
static bool
get_stack_bounds(uintptr_t *bottom, uintptr_t *top)
{
FILE *f = fopen("/proc/self/maps", "r");
if (f == NULL)
static
bool get_stack_bounds(uintptr_t *bottom, uintptr_t *top) {
char buf[128];
int fd, sz;
if ((fd = ::open("/proc/self/maps", O_RDONLY)) < 0) {
return false;
while (!feof(f)) {
size_t dummy;
char *str = NULL;
ssize_t len = getline(&str, &dummy, f);
if (len == -1) {
fclose(f);
return false;
}
if (len > 0 && str[len-1] == '\n') {
str[len-1] = 0;
len--;
}
static const char *stack_str = "[stack]";
if (len > (ssize_t)strlen(stack_str)
&& (strcmp(str + len - strlen(stack_str), stack_str) == 0)) {
if (sscanf(str, "%" SCNxPTR "-%" SCNxPTR, bottom, top) == 2) {
uintptr_t sp = (uintptr_t)__builtin_frame_address(0);
if (sp >= *bottom && sp <= *top) {
free(str);
fclose(f);
return true;
}
}
}
free(str);
}
fclose(f);
const char kw[] = "[stack]";
const int kwlen = sizeof(kw)-1;
// The address part of a /proc/self/maps line cannot exceed 128 bytes
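// An illustrative matching line (addresses vary):
//   7ffc7a4f0000-7ffc7a511000 rw-p 00000000 00:00 0          [stack]
// sscanf() extracts bottom and top from the leading hex range and memcmp()
// keys on the trailing "[stack]".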
while ((sz = os::get_line_chars(fd, buf, sizeof(buf))) > 0) {
if (sz > kwlen && ::memcmp(buf+sz-kwlen, kw, kwlen) == 0) {
// Extract addresses
if (sscanf(buf, "%" SCNxPTR "-%" SCNxPTR, bottom, top) == 2) {
uintptr_t sp = (uintptr_t) __builtin_frame_address(0);
if (sp >= *bottom && sp <= *top) {
::close(fd);
return true;
}
}
}
}
::close(fd);
return false;
}
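For reference, a minimal Java sketch (illustrative only, not part of this changeset) of the same scan; the sample line in the comment shows the format the keyword match and address parse rely on:

import java.nio.charset.StandardCharsets;
import java.nio.file.*;

class StackBounds {
    // Illustrative: locate the main thread's stack, e.g. the maps line
    //   7ffd1c5e0000-7ffd1c601000 rw-p 00000000 00:00 0   [stack]
    static long[] stackBounds() throws Exception {
        for (String line : Files.readAllLines(Paths.get("/proc/self/maps"),
                                              StandardCharsets.US_ASCII)) {
            if (line.endsWith("[stack]")) {
                String[] range = line.split("\\s+", 2)[0].split("-");
                return new long[] { Long.parseLong(range[0], 16),    // bottom
                                    Long.parseLong(range[1], 16) };  // top
            }
        }
        return null;  // no [stack] entry found
    }
}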
// If the (growable) stack mapping already extends beyond the point
// where we're going to put our guard pages, truncate the mapping at
// that point by munmap()ping it. This ensures that when we later

@ -921,6 +921,8 @@ void os::check_or_create_dump(void* exceptionRecord, void* contextRecord, char*
HINSTANCE dbghelp;
EXCEPTION_POINTERS ep;
MINIDUMP_EXCEPTION_INFORMATION mei;
MINIDUMP_EXCEPTION_INFORMATION* pmei;
HANDLE hProcess = GetCurrentProcess();
DWORD processId = GetCurrentProcessId();
HANDLE dumpFile;
@ -971,17 +973,22 @@ void os::check_or_create_dump(void* exceptionRecord, void* contextRecord, char*
VMError::report_coredump_status("Failed to create file for dumping", false);
return;
}
if (exceptionRecord != NULL && contextRecord != NULL) {
ep.ContextRecord = (PCONTEXT) contextRecord;
ep.ExceptionRecord = (PEXCEPTION_RECORD) exceptionRecord;
ep.ContextRecord = (PCONTEXT) contextRecord;
ep.ExceptionRecord = (PEXCEPTION_RECORD) exceptionRecord;
mei.ThreadId = GetCurrentThreadId();
mei.ExceptionPointers = &ep;
pmei = &mei;
} else {
pmei = NULL;
}
mei.ThreadId = GetCurrentThreadId();
mei.ExceptionPointers = &ep;
// Older versions of dbghelp.dll (the one shipped with Win2003, for example) may not support
// all the dump types we want. If the first call fails, fall back to MiniDumpWithFullMemory.
if (_MiniDumpWriteDump(hProcess, processId, dumpFile, dumpType, &mei, NULL, NULL) == false &&
_MiniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, &mei, NULL, NULL) == false) {
if (_MiniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) == false &&
_MiniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL) == false) {
VMError::report_coredump_status("Call to MiniDumpWriteDump() failed", false);
} else {
VMError::report_coredump_status(buffer, true);

@ -497,6 +497,9 @@ class CompilerInterfaceVC10 extends CompilerInterface {
addAttr(rv, "TargetMachine", "MachineX64");
}
// We always want the /DEBUG option to get full symbol information in the pdb files
addAttr(rv, "GenerateDebugInformation", "true");
return rv;
}
@ -504,8 +507,7 @@ class CompilerInterfaceVC10 extends CompilerInterface {
Vector getDebugLinkerFlags() {
Vector rv = new Vector();
// /DEBUG option
addAttr(rv, "GenerateDebugInformation", "true");
// Empty now that /DEBUG option is used by all configs
return rv;
}

@ -519,42 +519,126 @@ class ArrayCopyStub: public CodeStub {
// Code stubs for Garbage-First barriers.
class G1PreBarrierStub: public CodeStub {
private:
bool _do_load;
LIR_Opr _addr;
LIR_Opr _pre_val;
LIR_PatchCode _patch_code;
CodeEmitInfo* _info;
public:
// pre_val (a temporary register) must be a register;
// Version that _does_ generate a load of the previous value from addr.
// addr (the address of the field to be read) must be a LIR_Address
// pre_val (a temporary register) must be a register;
G1PreBarrierStub(LIR_Opr addr, LIR_Opr pre_val, LIR_PatchCode patch_code, CodeEmitInfo* info) :
_addr(addr), _pre_val(pre_val), _patch_code(patch_code), _info(info)
_addr(addr), _pre_val(pre_val), _do_load(true),
_patch_code(patch_code), _info(info)
{
assert(_pre_val->is_register(), "should be temporary register");
assert(_addr->is_address(), "should be the address of the field");
}
// Version that _does not_ generate load of the previous value; the
// previous value is assumed to have already been loaded into pre_val.
G1PreBarrierStub(LIR_Opr pre_val) :
_addr(LIR_OprFact::illegalOpr), _pre_val(pre_val), _do_load(false),
_patch_code(lir_patch_none), _info(NULL)
{
assert(_pre_val->is_register(), "should be a register");
}
LIR_Opr addr() const { return _addr; }
LIR_Opr pre_val() const { return _pre_val; }
LIR_PatchCode patch_code() const { return _patch_code; }
CodeEmitInfo* info() const { return _info; }
bool do_load() const { return _do_load; }
virtual void emit_code(LIR_Assembler* e);
virtual void visit(LIR_OpVisitState* visitor) {
// don't pass in the code emit info since it's processed in the fast
// path
if (_info != NULL)
visitor->do_slow_case(_info);
else
if (_do_load) {
// don't pass in the code emit info since it's processed in the fast
// path
if (_info != NULL)
visitor->do_slow_case(_info);
else
visitor->do_slow_case();
visitor->do_input(_addr);
visitor->do_temp(_pre_val);
} else {
visitor->do_slow_case();
visitor->do_input(_addr);
visitor->do_temp(_pre_val);
visitor->do_input(_pre_val);
}
}
#ifndef PRODUCT
virtual void print_name(outputStream* out) const { out->print("G1PreBarrierStub"); }
#endif // PRODUCT
};
// This G1 barrier code stub is used in Unsafe.getObject.
// It generates a sequence of guards around the SATB
// barrier code that detect when we are reading the
// referent field of a Reference object.
// The first check is assumed to have been emitted
// by the code generated for Unsafe.getObject().
class G1UnsafeGetObjSATBBarrierStub: public CodeStub {
private:
LIR_Opr _val;
LIR_Opr _src;
LIR_Opr _tmp;
LIR_Opr _thread;
bool _gen_src_check;
public:
// A G1 barrier that is guarded by generated guards that determine whether
// val (which is the result of Unsafe.getObject()) should be recorded in an
// SATB log buffer. We could be reading the referent field of a Reference object
// using Unsafe.getObject() and we need to record the referent.
//
// * val is the operand returned by the unsafe.getObject routine.
// * src is the base object
// * tmp is a temp used to load the klass of src, and then its reference type
// * thread is the thread object.
G1UnsafeGetObjSATBBarrierStub(LIR_Opr val, LIR_Opr src,
LIR_Opr tmp, LIR_Opr thread,
bool gen_src_check) :
_val(val), _src(src),
_tmp(tmp), _thread(thread),
_gen_src_check(gen_src_check)
{
assert(_val->is_register(), "should have already been loaded");
assert(_src->is_register(), "should have already been loaded");
assert(_tmp->is_register(), "should be a temporary register");
}
LIR_Opr val() const { return _val; }
LIR_Opr src() const { return _src; }
LIR_Opr tmp() const { return _tmp; }
LIR_Opr thread() const { return _thread; }
bool gen_src_check() const { return _gen_src_check; }
virtual void emit_code(LIR_Assembler* e);
virtual void visit(LIR_OpVisitState* visitor) {
visitor->do_slow_case();
visitor->do_input(_val);
visitor->do_input(_src);
visitor->do_input(_thread);
visitor->do_temp(_tmp);
}
#ifndef PRODUCT
virtual void print_name(outputStream* out) const { out->print("G1UnsafeGetObjSATBBarrierStub"); }
#endif // PRODUCT
};
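A hedged Java illustration (not from this changeset; the reflective field lookup and Unsafe usage are assumptions for the sketch) of the access pattern this stub guards:

import java.lang.ref.Reference;
import java.lang.reflect.Field;
import sun.misc.Unsafe;

class ReferentRead {
    // Illustrative: this read is equivalent to ref.get(), so under G1 the
    // returned value must be recorded in an SATB log buffer to keep the
    // referent alive, exactly as the Reference.get() intrinsic does.
    static Object readReferent(Unsafe unsafe, Reference<?> ref) throws Exception {
        Field f = Reference.class.getDeclaredField("referent");
        long off = unsafe.objectFieldOffset(f);  // java_lang_ref_Reference::referent_offset
        return unsafe.getObject(ref, off);       // the guards above decide if the stub runs
    }
}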
class G1PostBarrierStub: public CodeStub {
private:
LIR_Opr _addr;

@ -2824,7 +2824,7 @@ ValueStack* GraphBuilder::state_at_entry() {
int idx = 0;
if (!method()->is_static()) {
// we should always see the receiver
state->store_local(idx, new Local(objectType, idx));
state->store_local(idx, new Local(method()->holder(), objectType, idx));
idx = 1;
}
@ -2836,7 +2836,7 @@ ValueStack* GraphBuilder::state_at_entry() {
// don't allow T_ARRAY to propagate into locals types
if (basic_type == T_ARRAY) basic_type = T_OBJECT;
ValueType* vt = as_ValueType(basic_type);
state->store_local(idx, new Local(vt, idx));
state->store_local(idx, new Local(type, vt, idx));
idx += type->size();
}
@ -2913,6 +2913,46 @@ GraphBuilder::GraphBuilder(Compilation* compilation, IRScope* scope)
block()->set_end(end);
break;
}
case vmIntrinsics::_Reference_get:
{
if (UseG1GC) {
// With java.lang.ref.reference.get() we must go through the
// intrinsic - when G1 is enabled - even when get() is the root
// method of the compile so that, if necessary, the value in
// the referent field of the reference object gets recorded by
// the pre-barrier code.
// Specifically, if G1 is enabled, the value in the referent
// field is recorded by the G1 SATB pre barrier. This will
// result in the referent being marked live and the reference
// object removed from the list of discovered references during
// reference processing.
// Set up a stream so that appending instructions works properly.
ciBytecodeStream s(scope->method());
s.reset_to_bci(0);
scope_data()->set_stream(&s);
s.next();
// setup the initial block state
_block = start_block;
_state = start_block->state()->copy_for_parsing();
_last = start_block;
load_local(objectType, 0);
// Emit the intrinsic node.
bool result = try_inline_intrinsics(scope->method());
if (!result) BAILOUT("failed to inline intrinsic");
method_return(apop());
// connect the begin and end blocks and we're all done.
BlockEnd* end = last()->as_BlockEnd();
block()->set_end(end);
break;
}
// Otherwise, fall thru
}
default:
scope_data()->add_to_work_list(start_block);
iterate_all_blocks();
@ -3150,6 +3190,15 @@ bool GraphBuilder::try_inline_intrinsics(ciMethod* callee) {
append_unsafe_CAS(callee);
return true;
case vmIntrinsics::_Reference_get:
// It is only when G1 is enabled that we absolutely
// need to use the intrinsic version of Reference.get()
// so that the value in the referent field, if necessary,
// can be registered by the pre-barrier code.
if (!UseG1GC) return false;
preserves_state = true;
break;
default : return false; // do not inline
}
// create intrinsic node

@ -135,6 +135,33 @@ bool AccessIndexed::compute_needs_range_check() {
}
ciType* Local::exact_type() const {
ciType* type = declared_type();
// for primitive arrays, the declared type is the exact type
if (type->is_type_array_klass()) {
return type;
} else if (type->is_instance_klass()) {
ciInstanceKlass* ik = (ciInstanceKlass*)type;
if (ik->is_loaded() && ik->is_final() && !ik->is_interface()) {
return type;
}
} else if (type->is_obj_array_klass()) {
ciObjArrayKlass* oak = (ciObjArrayKlass*)type;
ciType* base = oak->base_element_type();
if (base->is_instance_klass()) {
ciInstanceKlass* ik = base->as_instance_klass();
if (ik->is_loaded() && ik->is_final()) {
return type;
}
} else if (base->is_primitive_type()) {
return type;
}
}
return NULL;
}
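A small Java illustration (assumed example, not from this changeset) of when a local's declared type is already exact under the rules above:

class ExactTypes {
    // Illustrative: String is a loaded final class and int[] is a primitive
    // array, so both parameters have exact types; CharSequence is an
    // interface, so its exact type stays unknown (exact_type() returns NULL).
    static int sizes(String s, int[] a, CharSequence c) {
        return s.length() + a.length + c.length();
    }
}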
ciType* LoadIndexed::exact_type() const {
ciType* array_type = array()->exact_type();
if (array_type == NULL) {
@ -189,16 +216,21 @@ ciType* NewTypeArray::exact_type() const {
return ciTypeArrayKlass::make(elt_type());
}
ciType* NewObjectArray::exact_type() const {
return ciObjArrayKlass::make(klass());
}
ciType* NewArray::declared_type() const {
return exact_type();
}
ciType* NewInstance::exact_type() const {
return klass();
}
ciType* NewInstance::declared_type() const {
return exact_type();
}
ciType* CheckCast::declared_type() const {
return klass();
@ -349,6 +381,11 @@ void Invoke::state_values_do(ValueVisitor* f) {
if (state() != NULL) state()->values_do(f);
}
ciType* Invoke::declared_type() const {
ciType *t = _target->signature()->return_type();
assert(t->basic_type() != T_VOID, "need return value of void method?");
return t;
}
// Implementation of Constant
intx Constant::hash() const {
@ -559,7 +596,7 @@ void BlockBegin::substitute_sux(BlockBegin* old_sux, BlockBegin* new_sux) {
// of the inserted block, without recomputing the values of the other blocks
// in the CFG. Therefore the value of "depth_first_number" in BlockBegin becomes meaningless.
BlockBegin* BlockBegin::insert_block_between(BlockBegin* sux) {
BlockBegin* new_sux = new BlockBegin(-99);
BlockBegin* new_sux = new BlockBegin(end()->state()->bci());
// mark this block (special treatment when block order is computed)
new_sux->set(critical_edge_split_flag);

@ -621,16 +621,21 @@ LEAF(Phi, Instruction)
LEAF(Local, Instruction)
private:
int _java_index; // the local index within the method to which the local belongs
ciType* _declared_type;
public:
// creation
Local(ValueType* type, int index)
Local(ciType* declared, ValueType* type, int index)
: Instruction(type)
, _java_index(index)
, _declared_type(declared)
{}
// accessors
int java_index() const { return _java_index; }
ciType* declared_type() const { return _declared_type; }
ciType* exact_type() const;
// generic
virtual void input_values_do(ValueVisitor* f) { /* no values */ }
};
@ -1146,6 +1151,8 @@ LEAF(Invoke, StateSplit)
BasicTypeList* signature() const { return _signature; }
ciMethod* target() const { return _target; }
ciType* declared_type() const;
// Returns false if target is not loaded
bool target_is_final() const { return check_flag(TargetIsFinalFlag); }
bool target_is_loaded() const { return check_flag(TargetIsLoadedFlag); }
@ -1187,6 +1194,7 @@ LEAF(NewInstance, StateSplit)
// generic
virtual bool can_trap() const { return true; }
ciType* exact_type() const;
ciType* declared_type() const;
};
@ -1208,6 +1216,8 @@ BASE(NewArray, StateSplit)
virtual bool needs_exception_state() const { return false; }
ciType* declared_type() const;
// generic
virtual bool can_trap() const { return true; }
virtual void input_values_do(ValueVisitor* f) { StateSplit::input_values_do(f); f->visit(&_length); }
@ -1397,6 +1407,7 @@ LEAF(Intrinsic, StateSplit)
vmIntrinsics::ID _id;
Values* _args;
Value _recv;
int _nonnull_state; // mask identifying which args are nonnull
public:
// preserves_state can be set to true for Intrinsics
@ -1417,6 +1428,7 @@ LEAF(Intrinsic, StateSplit)
, _id(id)
, _args(args)
, _recv(NULL)
, _nonnull_state(AllBits)
{
assert(args != NULL, "args must exist");
ASSERT_VALUES
@ -1442,6 +1454,23 @@ LEAF(Intrinsic, StateSplit)
Value receiver() const { assert(has_receiver(), "must have receiver"); return _recv; }
bool preserves_state() const { return check_flag(PreservesStateFlag); }
bool arg_needs_null_check(int i) {
if (i >= 0 && i < (int)sizeof(_nonnull_state) * BitsPerByte) {
return is_set_nth_bit(_nonnull_state, i);
}
return true;
}
void set_arg_needs_null_check(int i, bool check) {
if (i >= 0 && i < (int)sizeof(_nonnull_state) * BitsPerByte) {
if (check) {
_nonnull_state |= nth_bit(i);
} else {
_nonnull_state &= ~(nth_bit(i));
}
}
}
// generic
virtual bool can_trap() const { return check_flag(CanTrapFlag); }
virtual void input_values_do(ValueVisitor* f) {

@ -1215,7 +1215,11 @@ public:
src_range_check = 1 << 5,
dst_range_check = 1 << 6,
type_check = 1 << 7,
all_flags = (1 << 8) - 1
overlapping = 1 << 8,
unaligned = 1 << 9,
src_objarray = 1 << 10,
dst_objarray = 1 << 11,
all_flags = (1 << 12) - 1
};
LIR_OpArrayCopy(LIR_Opr src, LIR_Opr src_pos, LIR_Opr dst, LIR_Opr dst_pos, LIR_Opr length, LIR_Opr tmp,

@ -836,6 +836,9 @@ void LIR_Assembler::verify_oop_map(CodeEmitInfo* info) {
_masm->verify_stack_oop(r->reg2stack() * VMRegImpl::stack_slot_size);
}
}
check_codespace();
CHECK_BAILOUT();
s.next();
}
VerifyOops = v;

@ -706,6 +706,38 @@ static ciArrayKlass* as_array_klass(ciType* type) {
}
}
static Value maxvalue(IfOp* ifop) {
switch (ifop->cond()) {
case If::eql: return NULL;
case If::neq: return NULL;
case If::lss: // x < y ? x : y
case If::leq: // x <= y ? x : y
if (ifop->x() == ifop->tval() &&
ifop->y() == ifop->fval()) return ifop->y();
return NULL;
case If::gtr: // x > y ? y : x
case If::geq: // x >= y ? y : x
if (ifop->x() == ifop->fval() &&
ifop->y() == ifop->tval()) return ifop->y();
return NULL;
}
}
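A hedged Java example (illustrative only) of the min-style ternary that maxvalue and the length-limit matcher below recognize:

class MinPattern {
    // Illustrative: the ternary produces an IfOp whose value is bounded
    // above by src.length, which is what lets the copy's source range
    // check be elided when the copy starts at position 0.
    static void copyPrefix(int[] src, int[] dst, int count) {
        int n = count < src.length ? count : src.length;  // x < y ? x : y
        System.arraycopy(src, 0, dst, 0, n);
    }
}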
static ciType* phi_declared_type(Phi* phi) {
ciType* t = phi->operand_at(0)->declared_type();
if (t == NULL) {
return NULL;
}
for(int i = 1; i < phi->operand_count(); i++) {
if (t != phi->operand_at(i)->declared_type()) {
return NULL;
}
}
return t;
}
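An assumed Java illustration of the phi case phi_declared_type handles: both inputs share a declared type even though neither value is known exactly:

class PhiDeclared {
    // Illustrative: 'src' is a phi of two values, but both operands are
    // declared String[], so arraycopy_helper can still pick an expected type.
    static void copy(boolean flag, String[] a, String[] b, String[] dst, int n) {
        String[] src = flag ? a : b;          // phi with a common declared type
        System.arraycopy(src, 0, dst, 0, n);
    }
}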
void LIRGenerator::arraycopy_helper(Intrinsic* x, int* flagsp, ciArrayKlass** expected_typep) {
Instruction* src = x->argument_at(0);
Instruction* src_pos = x->argument_at(1);
@ -715,12 +747,20 @@ void LIRGenerator::arraycopy_helper(Intrinsic* x, int* flagsp, ciArrayKlass** ex
// first try to identify the likely type of the arrays involved
ciArrayKlass* expected_type = NULL;
bool is_exact = false;
bool is_exact = false, src_objarray = false, dst_objarray = false;
{
ciArrayKlass* src_exact_type = as_array_klass(src->exact_type());
ciArrayKlass* src_declared_type = as_array_klass(src->declared_type());
Phi* phi;
if (src_declared_type == NULL && (phi = src->as_Phi()) != NULL) {
src_declared_type = as_array_klass(phi_declared_type(phi));
}
ciArrayKlass* dst_exact_type = as_array_klass(dst->exact_type());
ciArrayKlass* dst_declared_type = as_array_klass(dst->declared_type());
if (dst_declared_type == NULL && (phi = dst->as_Phi()) != NULL) {
dst_declared_type = as_array_klass(phi_declared_type(phi));
}
if (src_exact_type != NULL && src_exact_type == dst_exact_type) {
// the types exactly match so the type is fully known
is_exact = true;
@ -744,17 +784,60 @@ void LIRGenerator::arraycopy_helper(Intrinsic* x, int* flagsp, ciArrayKlass** ex
if (expected_type == NULL) expected_type = dst_exact_type;
if (expected_type == NULL) expected_type = src_declared_type;
if (expected_type == NULL) expected_type = dst_declared_type;
src_objarray = (src_exact_type && src_exact_type->is_obj_array_klass()) || (src_declared_type && src_declared_type->is_obj_array_klass());
dst_objarray = (dst_exact_type && dst_exact_type->is_obj_array_klass()) || (dst_declared_type && dst_declared_type->is_obj_array_klass());
}
// if a probable array type has been identified, figure out if any
// of the required checks for a fast case can be elided.
int flags = LIR_OpArrayCopy::all_flags;
if (!src_objarray)
flags &= ~LIR_OpArrayCopy::src_objarray;
if (!dst_objarray)
flags &= ~LIR_OpArrayCopy::dst_objarray;
if (!x->arg_needs_null_check(0))
flags &= ~LIR_OpArrayCopy::src_null_check;
if (!x->arg_needs_null_check(2))
flags &= ~LIR_OpArrayCopy::dst_null_check;
if (expected_type != NULL) {
// try to skip null checks
if (src->as_NewArray() != NULL)
Value length_limit = NULL;
IfOp* ifop = length->as_IfOp();
if (ifop != NULL) {
// look for expressions like min(v, a.length) which ends up as
// x > y ? y : x or x >= y ? y : x
if ((ifop->cond() == If::gtr || ifop->cond() == If::geq) &&
ifop->x() == ifop->fval() &&
ifop->y() == ifop->tval()) {
length_limit = ifop->y();
}
}
// try to skip null checks and range checks
NewArray* src_array = src->as_NewArray();
if (src_array != NULL) {
flags &= ~LIR_OpArrayCopy::src_null_check;
if (dst->as_NewArray() != NULL)
if (length_limit != NULL &&
src_array->length() == length_limit &&
is_constant_zero(src_pos)) {
flags &= ~LIR_OpArrayCopy::src_range_check;
}
}
NewArray* dst_array = dst->as_NewArray();
if (dst_array != NULL) {
flags &= ~LIR_OpArrayCopy::dst_null_check;
if (length_limit != NULL &&
dst_array->length() == length_limit &&
is_constant_zero(dst_pos)) {
flags &= ~LIR_OpArrayCopy::dst_range_check;
}
}
// check from incoming constant values
if (positive_constant(src_pos))
@ -788,6 +871,28 @@ void LIRGenerator::arraycopy_helper(Intrinsic* x, int* flagsp, ciArrayKlass** ex
}
}
IntConstant* src_int = src_pos->type()->as_IntConstant();
IntConstant* dst_int = dst_pos->type()->as_IntConstant();
if (src_int && dst_int) {
int s_offs = src_int->value();
int d_offs = dst_int->value();
if (src_int->value() >= dst_int->value()) {
flags &= ~LIR_OpArrayCopy::overlapping;
}
if (expected_type != NULL) {
BasicType t = expected_type->element_type()->basic_type();
int element_size = type2aelembytes(t);
if (((arrayOopDesc::base_offset_in_bytes(t) + s_offs * element_size) % HeapWordSize == 0) &&
((arrayOopDesc::base_offset_in_bytes(t) + d_offs * element_size) % HeapWordSize == 0)) {
flags &= ~LIR_OpArrayCopy::unaligned;
}
}
} else if (src_pos == dst_pos || is_constant_zero(dst_pos)) {
// src and dest positions are the same, or dst is zero so assume
// nonoverlapping copy.
flags &= ~LIR_OpArrayCopy::overlapping;
}
if (src == dst) {
// moving within a single array so no type checks are needed
if (flags & LIR_OpArrayCopy::type_check) {
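A worked example of the alignment test in the hunk above (illustrative Java; the 16-byte int[] base offset and 8-byte HeapWordSize are VM-configuration assumptions, matching compressed oops on a 64-bit VM):

class AlignmentCheck {
    // Illustrative arithmetic only: mirrors
    //   (base_offset_in_bytes(t) + offs * element_size) % HeapWordSize == 0
    static boolean wordAligned(int baseOffset, int offs, int elementSize, int heapWordSize) {
        return (baseOffset + offs * elementSize) % heapWordSize == 0;
    }
    public static void main(String[] args) {
        // int[] copy from offset 2: 16 + 2*4 = 24, 24 % 8 == 0 -> aligned
        System.out.println(wordAligned(16, 2, 4, 8));  // true
        // from offset 1: 16 + 1*4 = 20, 20 % 8 != 0 -> 'unaligned' flag stays set
        System.out.println(wordAligned(16, 1, 4, 8));  // false
    }
}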
@ -1104,6 +1209,38 @@ void LIRGenerator::do_Return(Return* x) {
set_no_result(x);
}
// Example: ref.get()
// Combination of LoadField and G1 pre-write barrier
void LIRGenerator::do_Reference_get(Intrinsic* x) {
const int referent_offset = java_lang_ref_Reference::referent_offset;
guarantee(referent_offset > 0, "referent offset not initialized");
assert(x->number_of_arguments() == 1, "wrong type");
LIRItem reference(x->argument_at(0), this);
reference.load_item();
// need to perform the null check on the reference object
CodeEmitInfo* info = NULL;
if (x->needs_null_check()) {
info = state_for(x);
}
LIR_Address* referent_field_adr =
new LIR_Address(reference.result(), referent_offset, T_OBJECT);
LIR_Opr result = rlock_result(x);
__ load(referent_field_adr, result, info);
// Register the value in the referent field with the pre-barrier
pre_barrier(LIR_OprFact::illegalOpr /* addr_opr */,
result /* pre_val */,
false /* do_load */,
false /* patch */,
NULL /* info */);
}
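A short Java illustration (not from this changeset) of the call this intrinsic compiles:

import java.lang.ref.WeakReference;

class ReferentGet {
    // Illustrative: under G1, ref.get() compiles to the referent-field load
    // above plus a pre-barrier on the loaded value, so the referent is marked
    // live and the Reference is removed from the discovered list.
    static Object peek(WeakReference<Object> ref) {
        return ref.get();
    }
}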
// Example: object.getClass ()
void LIRGenerator::do_getClass(Intrinsic* x) {
@ -1246,13 +1383,14 @@ LIR_Opr LIRGenerator::load_constant(LIR_Const* c) {
// Various barriers
void LIRGenerator::pre_barrier(LIR_Opr addr_opr, bool patch, CodeEmitInfo* info) {
void LIRGenerator::pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
bool do_load, bool patch, CodeEmitInfo* info) {
// Do the pre-write barrier, if any.
switch (_bs->kind()) {
#ifndef SERIALGC
case BarrierSet::G1SATBCT:
case BarrierSet::G1SATBCTLogging:
G1SATBCardTableModRef_pre_barrier(addr_opr, patch, info);
G1SATBCardTableModRef_pre_barrier(addr_opr, pre_val, do_load, patch, info);
break;
#endif // SERIALGC
case BarrierSet::CardTableModRef:
@ -1293,9 +1431,8 @@ void LIRGenerator::post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
////////////////////////////////////////////////////////////////////////
#ifndef SERIALGC
void LIRGenerator::G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, bool patch, CodeEmitInfo* info) {
if (G1DisablePreBarrier) return;
void LIRGenerator::G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
bool do_load, bool patch, CodeEmitInfo* info) {
// First we test whether marking is in progress.
BasicType flag_type;
if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
@ -1314,26 +1451,40 @@ void LIRGenerator::G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, bool patc
// Read the marking-in-progress flag.
LIR_Opr flag_val = new_register(T_INT);
__ load(mark_active_flag_addr, flag_val);
LIR_PatchCode pre_val_patch_code =
patch ? lir_patch_normal : lir_patch_none;
LIR_Opr pre_val = new_register(T_OBJECT);
__ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));
if (!addr_opr->is_address()) {
assert(addr_opr->is_register(), "must be");
addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, T_OBJECT));
LIR_PatchCode pre_val_patch_code = lir_patch_none;
CodeStub* slow;
if (do_load) {
assert(pre_val == LIR_OprFact::illegalOpr, "sanity");
assert(addr_opr != LIR_OprFact::illegalOpr, "sanity");
if (patch)
pre_val_patch_code = lir_patch_normal;
pre_val = new_register(T_OBJECT);
if (!addr_opr->is_address()) {
assert(addr_opr->is_register(), "must be");
addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, T_OBJECT));
}
slow = new G1PreBarrierStub(addr_opr, pre_val, pre_val_patch_code, info);
} else {
assert(addr_opr == LIR_OprFact::illegalOpr, "sanity");
assert(pre_val->is_register(), "must be");
assert(pre_val->type() == T_OBJECT, "must be an object");
assert(info == NULL, "sanity");
slow = new G1PreBarrierStub(pre_val);
}
CodeStub* slow = new G1PreBarrierStub(addr_opr, pre_val, pre_val_patch_code,
info);
__ branch(lir_cond_notEqual, T_INT, slow);
__ branch_destination(slow->continuation());
}
void LIRGenerator::G1SATBCardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
if (G1DisablePostBarrier) return;
// If the "new_val" is a constant NULL, no barrier is necessary.
if (new_val->is_constant() &&
new_val->as_constant_ptr()->as_jobject() == NULL) return;
@ -1351,7 +1502,7 @@ void LIRGenerator::G1SATBCardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_Opr
if (addr->is_address()) {
LIR_Address* address = addr->as_address_ptr();
LIR_Opr ptr = new_register(T_OBJECT);
LIR_Opr ptr = new_pointer_register();
if (!address->index()->is_valid() && address->disp() == 0) {
__ move(address->base(), ptr);
} else {
@ -1403,7 +1554,9 @@ void LIRGenerator::CardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc*
LIR_Const* card_table_base = new LIR_Const(((CardTableModRefBS*)_bs)->byte_map_base);
if (addr->is_address()) {
LIR_Address* address = addr->as_address_ptr();
LIR_Opr ptr = new_register(T_OBJECT);
// ptr cannot be an object because we use this barrier for array card marks
// and addr can point in the middle of an array.
LIR_Opr ptr = new_pointer_register();
if (!address->index()->is_valid() && address->disp() == 0) {
__ move(address->base(), ptr);
} else {
@ -1555,6 +1708,8 @@ void LIRGenerator::do_StoreField(StoreField* x) {
if (is_oop) {
// Do the pre-write barrier, if any.
pre_barrier(LIR_OprFact::address(address),
LIR_OprFact::illegalOpr /* pre_val */,
true /* do_load*/,
needs_patching,
(info ? new CodeEmitInfo(info) : NULL));
}
@ -1984,9 +2139,144 @@ void LIRGenerator::do_UnsafeGetObject(UnsafeGetObject* x) {
off.load_item();
src.load_item();
LIR_Opr reg = reg = rlock_result(x, x->basic_type());
LIR_Opr reg = rlock_result(x, x->basic_type());
get_Object_unsafe(reg, src.result(), off.result(), type, x->is_volatile());
#ifndef SERIALGC
// We might be reading the value of the referent field of a
// Reference object in order to attach it back to the live
// object graph. If G1 is enabled then we need to record
// the value that is being returned in an SATB log buffer.
//
// We need to generate code similar to the following...
//
// if (offset == java_lang_ref_Reference::referent_offset) {
// if (src != NULL) {
// if (klass(src)->reference_type() != REF_NONE) {
// pre_barrier(..., reg, ...);
// }
// }
// }
//
// The first non-constant check of either the offset or
// the src operand will be done here; the remainder
// will take place in the generated code stub.
if (UseG1GC && type == T_OBJECT) {
bool gen_code_stub = true; // Assume we need to generate the slow code stub.
bool gen_offset_check = true; // Assume the code stub has to generate the offset guard.
bool gen_source_check = true; // Assume the code stub has to check the src object for null.
if (off.is_constant()) {
jlong off_con = (off.type()->is_int() ?
(jlong) off.get_jint_constant() :
off.get_jlong_constant());
if (off_con != (jlong) java_lang_ref_Reference::referent_offset) {
// The constant offset is something other than referent_offset.
// We can skip generating/checking the remaining guards and
// skip generation of the code stub.
gen_code_stub = false;
} else {
// The constant offset is the same as referent_offset -
// we do not need to generate a runtime offset check.
gen_offset_check = false;
}
}
// We don't need to generate the stub if the source object is an array
if (gen_code_stub && src.type()->is_array()) {
gen_code_stub = false;
}
if (gen_code_stub) {
// We still need to continue with the checks.
if (src.is_constant()) {
ciObject* src_con = src.get_jobject_constant();
if (src_con->is_null_object()) {
// The constant src object is null - we can skip
// generating the code stub.
gen_code_stub = false;
} else {
// Non-null constant source object. We still have to generate
// the slow stub - but we don't need to generate the runtime
// null object check.
gen_source_check = false;
}
}
}
if (gen_code_stub) {
// Temporaries.
LIR_Opr src_klass = new_register(T_OBJECT);
// Get the thread pointer for the pre-barrier
LIR_Opr thread = getThreadPointer();
CodeStub* stub;
// We can generate at most one runtime check here. Let's start with
// the offset check.
if (gen_offset_check) {
// if (offset == referent_offset) -> slow code stub
// If offset is an int then we can do the comparison with the
// referent_offset constant; otherwise we need to move
// referent_offset into a temporary register and generate
// a reg-reg compare.
LIR_Opr referent_off;
if (off.type()->is_int()) {
referent_off = LIR_OprFact::intConst(java_lang_ref_Reference::referent_offset);
} else {
assert(off.type()->is_long(), "what else?");
referent_off = new_register(T_LONG);
__ move(LIR_OprFact::longConst(java_lang_ref_Reference::referent_offset), referent_off);
}
__ cmp(lir_cond_equal, off.result(), referent_off);
// Optionally generate "src == null" check.
stub = new G1UnsafeGetObjSATBBarrierStub(reg, src.result(),
src_klass, thread,
gen_source_check);
__ branch(lir_cond_equal, as_BasicType(off.type()), stub);
} else {
if (gen_source_check) {
// offset is a const and equals referent offset
// if (source != null) -> slow code stub
__ cmp(lir_cond_notEqual, src.result(), LIR_OprFact::oopConst(NULL));
// Since we are generating the "if src == null" guard here,
// there is no need to generate the "src == null" check again.
stub = new G1UnsafeGetObjSATBBarrierStub(reg, src.result(),
src_klass, thread,
false);
__ branch(lir_cond_notEqual, T_OBJECT, stub);
} else {
// We have statically determined that offset == referent_offset
// && src != null, so we unconditionally branch to the code stub
// to perform the guards and record reg in the SATB log buffer.
stub = new G1UnsafeGetObjSATBBarrierStub(reg, src.result(),
src_klass, thread,
false);
__ branch(lir_cond_always, T_ILLEGAL, stub);
}
}
// Continuation point
__ branch_destination(stub->continuation());
}
}
#endif // SERIALGC
if (x->is_volatile() && os::is_MP()) __ membar_acquire();
}
@ -2652,6 +2942,10 @@ void LIRGenerator::do_Intrinsic(Intrinsic* x) {
do_AttemptUpdate(x);
break;
case vmIntrinsics::_Reference_get:
do_Reference_get(x);
break;
default: ShouldNotReachHere(); break;
}
}

@ -246,6 +246,7 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure {
void do_AttemptUpdate(Intrinsic* x);
void do_NIOCheckIndex(Intrinsic* x);
void do_FPIntrinsics(Intrinsic* x);
void do_Reference_get(Intrinsic* x);
void do_UnsafePrefetch(UnsafePrefetch* x, bool is_store);
@ -260,13 +261,14 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure {
// generic interface
void pre_barrier(LIR_Opr addr_opr, bool patch, CodeEmitInfo* info);
void pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val, bool do_load, bool patch, CodeEmitInfo* info);
void post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val);
// specific implementations
// pre barriers
void G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, bool patch, CodeEmitInfo* info);
void G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
bool do_load, bool patch, CodeEmitInfo* info);
// post barriers

@ -644,7 +644,7 @@ void NullCheckVisitor::do_CheckCast (CheckCast* x) {}
void NullCheckVisitor::do_InstanceOf (InstanceOf* x) {}
void NullCheckVisitor::do_MonitorEnter (MonitorEnter* x) { nce()->handle_AccessMonitor(x); }
void NullCheckVisitor::do_MonitorExit (MonitorExit* x) { nce()->handle_AccessMonitor(x); }
void NullCheckVisitor::do_Intrinsic (Intrinsic* x) { nce()->clear_last_explicit_null_check(); }
void NullCheckVisitor::do_Intrinsic (Intrinsic* x) { nce()->handle_Intrinsic(x); }
void NullCheckVisitor::do_BlockBegin (BlockBegin* x) {}
void NullCheckVisitor::do_Goto (Goto* x) {}
void NullCheckVisitor::do_If (If* x) {}
@ -1023,6 +1023,12 @@ void NullCheckEliminator::handle_AccessMonitor(AccessMonitor* x) {
void NullCheckEliminator::handle_Intrinsic(Intrinsic* x) {
if (!x->has_receiver()) {
if (x->id() == vmIntrinsics::_arraycopy) {
for (int i = 0; i < x->number_of_arguments(); i++) {
x->set_arg_needs_null_check(i, !set_contains(x->argument_at(i)));
}
}
// Be conservative
clear_last_explicit_null_check();
return;
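A hedged Java illustration of what the arraycopy null-check elision above keys on:

class CopyGuard {
    // Illustrative: the explicit guards put src and dst into the eliminator's
    // non-null set, so arraycopy's own src/dst null checks can be dropped.
    static void copy(int[] src, int[] dst, int n) {
        if (src == null || dst == null) throw new NullPointerException();
        System.arraycopy(src, 0, dst, 0, n);
    }
}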

@ -103,7 +103,10 @@ const char *Runtime1::_blob_names[] = {
int Runtime1::_generic_arraycopy_cnt = 0;
int Runtime1::_primitive_arraycopy_cnt = 0;
int Runtime1::_oop_arraycopy_cnt = 0;
int Runtime1::_generic_arraycopystub_cnt = 0;
int Runtime1::_arraycopy_slowcase_cnt = 0;
int Runtime1::_arraycopy_checkcast_cnt = 0;
int Runtime1::_arraycopy_checkcast_attempt_cnt = 0;
int Runtime1::_new_type_array_slowcase_cnt = 0;
int Runtime1::_new_object_array_slowcase_cnt = 0;
int Runtime1::_new_instance_slowcase_cnt = 0;
@ -119,6 +122,32 @@ int Runtime1::_throw_class_cast_exception_count = 0;
int Runtime1::_throw_incompatible_class_change_error_count = 0;
int Runtime1::_throw_array_store_exception_count = 0;
int Runtime1::_throw_count = 0;
static int _byte_arraycopy_cnt = 0;
static int _short_arraycopy_cnt = 0;
static int _int_arraycopy_cnt = 0;
static int _long_arraycopy_cnt = 0;
static int _oop_arraycopy_cnt = 0;
address Runtime1::arraycopy_count_address(BasicType type) {
switch (type) {
case T_BOOLEAN:
case T_BYTE: return (address)&_byte_arraycopy_cnt;
case T_CHAR:
case T_SHORT: return (address)&_short_arraycopy_cnt;
case T_FLOAT:
case T_INT: return (address)&_int_arraycopy_cnt;
case T_DOUBLE:
case T_LONG: return (address)&_long_arraycopy_cnt;
case T_ARRAY:
case T_OBJECT: return (address)&_oop_arraycopy_cnt;
default:
ShouldNotReachHere();
return NULL;
}
}
#endif
// Simple helper to see if the caller of a runtime stub which
@ -1229,9 +1258,17 @@ void Runtime1::print_statistics() {
tty->print_cr(" _handle_wrong_method_cnt: %d", SharedRuntime::_wrong_method_ctr);
tty->print_cr(" _ic_miss_cnt: %d", SharedRuntime::_ic_miss_ctr);
tty->print_cr(" _generic_arraycopy_cnt: %d", _generic_arraycopy_cnt);
tty->print_cr(" _generic_arraycopystub_cnt: %d", _generic_arraycopystub_cnt);
tty->print_cr(" _byte_arraycopy_cnt: %d", _byte_arraycopy_cnt);
tty->print_cr(" _short_arraycopy_cnt: %d", _short_arraycopy_cnt);
tty->print_cr(" _int_arraycopy_cnt: %d", _int_arraycopy_cnt);
tty->print_cr(" _long_arraycopy_cnt: %d", _long_arraycopy_cnt);
tty->print_cr(" _primitive_arraycopy_cnt: %d", _primitive_arraycopy_cnt);
tty->print_cr(" _oop_arraycopy_cnt: %d", _oop_arraycopy_cnt);
tty->print_cr(" _oop_arraycopy_cnt (C): %d", Runtime1::_oop_arraycopy_cnt);
tty->print_cr(" _oop_arraycopy_cnt (stub): %d", _oop_arraycopy_cnt);
tty->print_cr(" _arraycopy_slowcase_cnt: %d", _arraycopy_slowcase_cnt);
tty->print_cr(" _arraycopy_checkcast_cnt: %d", _arraycopy_checkcast_cnt);
tty->print_cr(" _arraycopy_checkcast_attempt_cnt:%d", _arraycopy_checkcast_attempt_cnt);
tty->print_cr(" _new_type_array_slowcase_cnt: %d", _new_type_array_slowcase_cnt);
tty->print_cr(" _new_object_array_slowcase_cnt: %d", _new_object_array_slowcase_cnt);

@ -94,7 +94,10 @@ class Runtime1: public AllStatic {
static int _generic_arraycopy_cnt;
static int _primitive_arraycopy_cnt;
static int _oop_arraycopy_cnt;
static int _generic_arraycopystub_cnt;
static int _arraycopy_slowcase_cnt;
static int _arraycopy_checkcast_cnt;
static int _arraycopy_checkcast_attempt_cnt;
static int _new_type_array_slowcase_cnt;
static int _new_object_array_slowcase_cnt;
static int _new_instance_slowcase_cnt;
@ -174,7 +177,8 @@ class Runtime1: public AllStatic {
static void trace_block_entry(jint block_id);
#ifndef PRODUCT
static address throw_count_address() { return (address)&_throw_count; }
static address throw_count_address() { return (address)&_throw_count; }
static address arraycopy_count_address(BasicType type);
#endif
// directly accessible leaf routine

@ -66,8 +66,8 @@ ciConstant ciInstance::field_value(ciField* field) {
"invalid access");
VM_ENTRY_MARK;
ciConstant result;
oop obj = get_oop();
assert(obj != NULL, "bad oop");
Handle obj = get_oop();
assert(!obj.is_null(), "bad oop");
BasicType field_btype = field->type()->basic_type();
int offset = field->offset();

@ -42,9 +42,20 @@ ciMethod* ciMethodHandle::get_adapter(bool is_invokedynamic) const {
methodHandle callee(_callee->get_methodOop());
// We catch all exceptions here that could happen in the method
// handle compiler and stop the VM.
MethodHandleCompiler mhc(h, callee, is_invokedynamic, CATCH);
methodHandle m = mhc.compile(CATCH);
return CURRENT_ENV->get_object(m())->as_method();
MethodHandleCompiler mhc(h, callee, is_invokedynamic, THREAD);
if (!HAS_PENDING_EXCEPTION) {
methodHandle m = mhc.compile(THREAD);
if (!HAS_PENDING_EXCEPTION) {
return CURRENT_ENV->get_object(m())->as_method();
}
}
if (PrintMiscellaneous && (Verbose || WizardMode)) {
tty->print("*** ciMethodHandle::get_adapter => ");
PENDING_EXCEPTION->print();
tty->print("*** get_adapter (%s): ", is_invokedynamic ? "indy" : "mh"); ((ciObject*)this)->print(); //@@
}
CLEAR_PENDING_EXCEPTION;
return NULL;
}

@ -34,6 +34,7 @@
#include "ci/ciEnv.hpp"
#include "ci/ciKlass.hpp"
#include "ci/ciMethodBlocks.hpp"
#include "shark/shark_globals.hpp"
#endif

@ -170,7 +170,6 @@ void ClassFileParser::parse_constant_pool_entries(constantPoolHandle cp, int len
ShouldNotReachHere();
}
break;
case JVM_CONSTANT_InvokeDynamicTrans : // this tag appears only in old classfiles
case JVM_CONSTANT_InvokeDynamic :
{
if (_major_version < Verifier::INVOKEDYNAMIC_MAJOR_VERSION) {
@ -186,14 +185,6 @@ void ClassFileParser::parse_constant_pool_entries(constantPoolHandle cp, int len
cfs->guarantee_more(5, CHECK); // bsm_index, nt, tag/access_flags
u2 bootstrap_specifier_index = cfs->get_u2_fast();
u2 name_and_type_index = cfs->get_u2_fast();
if (tag == JVM_CONSTANT_InvokeDynamicTrans) {
if (!AllowTransitionalJSR292)
classfile_parse_error(
"This JVM does not support transitional InvokeDynamic tag %u in class file %s",
tag, CHECK);
cp->invoke_dynamic_trans_at_put(index, bootstrap_specifier_index, name_and_type_index);
break;
}
if (_max_bootstrap_specifier_index < (int) bootstrap_specifier_index)
_max_bootstrap_specifier_index = (int) bootstrap_specifier_index; // collect for later
cp->invoke_dynamic_at_put(index, bootstrap_specifier_index, name_and_type_index);
@ -492,7 +483,6 @@ constantPoolHandle ClassFileParser::parse_constant_pool(TRAPS) {
ref_index, CHECK_(nullHandle));
}
break;
case JVM_CONSTANT_InvokeDynamicTrans :
case JVM_CONSTANT_InvokeDynamic :
{
int name_and_type_ref_index = cp->invoke_dynamic_name_and_type_ref_index_at(index);
@ -501,14 +491,6 @@ constantPoolHandle ClassFileParser::parse_constant_pool(TRAPS) {
"Invalid constant pool index %u in class file %s",
name_and_type_ref_index,
CHECK_(nullHandle));
if (tag == JVM_CONSTANT_InvokeDynamicTrans) {
int bootstrap_method_ref_index = cp->invoke_dynamic_bootstrap_method_ref_index_at(index);
check_property(valid_cp_range(bootstrap_method_ref_index, length) &&
cp->tag_at(bootstrap_method_ref_index).is_method_handle(),
"Invalid constant pool index %u in class file %s",
bootstrap_method_ref_index,
CHECK_(nullHandle));
}
// bootstrap specifier index must be checked later, when BootstrapMethods attr is available
break;
}
@ -578,6 +560,7 @@ constantPoolHandle ClassFileParser::parse_constant_pool(TRAPS) {
}
break;
}
case JVM_CONSTANT_InvokeDynamic:
case JVM_CONSTANT_Fieldref:
case JVM_CONSTANT_Methodref:
case JVM_CONSTANT_InterfaceMethodref: {
@ -2213,11 +2196,12 @@ typeArrayHandle ClassFileParser::sort_methods(objArrayHandle methods,
TRAPS) {
typeArrayHandle nullHandle;
int length = methods()->length();
// If JVMTI original method ordering is enabled we have to
// If JVMTI original method ordering or sharing is enabled we have to
// remember the original class file ordering.
// We temporarily use the vtable_index field in the methodOop to store the
// class file index, so we can read it back after calling qsort.
if (JvmtiExport::can_maintain_original_method_order()) {
// Put the method ordering in the shared archive.
if (JvmtiExport::can_maintain_original_method_order() || DumpSharedSpaces) {
for (int index = 0; index < length; index++) {
methodOop m = methodOop(methods->obj_at(index));
assert(!m->valid_vtable_index(), "vtable index should not be set");
@ -2231,8 +2215,9 @@ typeArrayHandle ClassFileParser::sort_methods(objArrayHandle methods,
methods_parameter_annotations(),
methods_default_annotations());
// If JVMTI original method ordering is enabled construct int array remembering the original ordering
if (JvmtiExport::can_maintain_original_method_order()) {
// If JVMTI original method ordering or sharing is enabled construct int
// array remembering the original ordering
if (JvmtiExport::can_maintain_original_method_order() || DumpSharedSpaces) {
typeArrayOop new_ordering = oopFactory::new_permanent_intArray(length, CHECK_(nullHandle));
typeArrayHandle method_ordering(THREAD, new_ordering);
for (int index = 0; index < length; index++) {
@ -2783,7 +2768,6 @@ void ClassFileParser::java_lang_invoke_MethodHandle_fix_pre(constantPoolHandle c
}
}
if (AllowTransitionalJSR292 && word_sig_index == 0) return;
if (word_sig_index == 0)
THROW_MSG(vmSymbols::java_lang_VirtualMachineError(),
"missing I or J signature (for vmentry) in java.lang.invoke.MethodHandle");
@ -2823,7 +2807,6 @@ void ClassFileParser::java_lang_invoke_MethodHandle_fix_pre(constantPoolHandle c
}
}
if (AllowTransitionalJSR292 && !found_vmentry) return;
if (!found_vmentry)
THROW_MSG(vmSymbols::java_lang_VirtualMachineError(),
"missing vmentry byte field in java.lang.invoke.MethodHandle");
@ -3194,15 +3177,6 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name,
if (EnableInvokeDynamic && class_name == vmSymbols::java_lang_invoke_MethodHandle() && class_loader.is_null()) {
java_lang_invoke_MethodHandle_fix_pre(cp, fields, &fac, CHECK_(nullHandle));
}
if (AllowTransitionalJSR292 &&
EnableInvokeDynamic && class_name == vmSymbols::java_dyn_MethodHandle() && class_loader.is_null()) {
java_lang_invoke_MethodHandle_fix_pre(cp, fields, &fac, CHECK_(nullHandle));
}
if (AllowTransitionalJSR292 &&
EnableInvokeDynamic && class_name == vmSymbols::sun_dyn_MethodHandleImpl() && class_loader.is_null()) {
// allow vmentry field in MethodHandleImpl also
java_lang_invoke_MethodHandle_fix_pre(cp, fields, &fac, CHECK_(nullHandle));
}
// Add a fake "discovered" field if it is not present
// for compatibility with earlier jdk's.

@ -67,28 +67,6 @@ static bool find_field(instanceKlass* ik,
return ik->find_local_field(name_symbol, signature_symbol, fd);
}
static bool find_hacked_field(instanceKlass* ik,
Symbol* name_symbol, Symbol* signature_symbol,
fieldDescriptor* fd,
bool allow_super = false) {
bool found = find_field(ik, name_symbol, signature_symbol, fd, allow_super);
if (!found && AllowTransitionalJSR292) {
Symbol* backup_sig = SystemDictionary::find_backup_signature(signature_symbol);
if (backup_sig != NULL) {
found = find_field(ik, name_symbol, backup_sig, fd, allow_super);
if (TraceMethodHandles) {
ResourceMark rm;
tty->print_cr("MethodHandles: %s.%s: backup for %s => %s%s",
ik->name()->as_C_string(), name_symbol->as_C_string(),
signature_symbol->as_C_string(), backup_sig->as_C_string(),
(found ? "" : " (NOT FOUND)"));
}
}
}
return found;
}
#define find_field find_hacked_field /* remove after AllowTransitionalJSR292 */
// Helpful routine for computing field offsets at run time rather than hardcoding them
static void
compute_offset(int &dest_offset,
@ -1453,32 +1431,41 @@ void java_lang_Throwable::fill_in_stack_trace(Handle throwable, TRAPS) {
}
}
#ifdef ASSERT
assert(st_method() == method && st.bci() == bci,
"Wrong stack trace");
st.next();
// vframeStream::method isn't GC-safe so store off a copy
// of the methodOop in case we GC.
if (!st.at_end()) {
st_method = st.method();
}
assert(st_method() == method && st.bci() == bci,
"Wrong stack trace");
st.next();
// vframeStream::method isn't GC-safe so store off a copy
// of the methodOop in case we GC.
if (!st.at_end()) {
st_method = st.method();
}
#endif
// the format of the stacktrace will be:
// - 1 or more fillInStackTrace frames for the exception class (skipped)
// - 0 or more <init> methods for the exception class (skipped)
// - rest of the stack
if (!skip_fillInStackTrace_check) {
// check "fillInStackTrace" only once, so we negate the flag
// after the first time check.
skip_fillInStackTrace_check = true;
if (method->name() == vmSymbols::fillInStackTrace_name()) {
if ((method->name() == vmSymbols::fillInStackTrace_name() ||
method->name() == vmSymbols::fillInStackTrace0_name()) &&
throwable->is_a(method->method_holder())) {
continue;
}
else {
skip_fillInStackTrace_check = true; // gone past them all
}
}
// skip <init> methods of the exception class. If there are <init> methods
// that belong to a superclass of the exception, we skip them in the
// stack trace. This is similar to the classic VM.
if (!skip_throwableInit_check) {
assert(skip_fillInStackTrace_check, "logic error in backtrace filtering");
// skip <init> methods of the exception class and superclasses
// This is similar to the classic VM.
if (method->name() == vmSymbols::object_initializer_name() &&
throwable->is_a(method->method_holder())) {
continue;
} else {
// if no "Throwable.init()" method found, we stop checking it next time.
// there are none or we've seen them all - either way stop checking
skip_throwableInit_check = true;
}
}
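An assumed Java illustration of the filtering above: the printed trace starts at the throw site, not at fillInStackTrace or the exception's constructors:

class TraceSkip {
    public static void main(String[] args) {
        try {
            throw new IllegalStateException("boom");   // top printed frame is main()
        } catch (IllegalStateException e) {
            // Frames for fillInStackTrace() and IllegalStateException.<init>
            // are skipped while the backtrace is recorded.
            e.printStackTrace();
        }
    }
}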
@ -2333,7 +2320,6 @@ void java_lang_invoke_MethodHandle::compute_offsets() {
klassOop k = SystemDictionary::MethodHandle_klass();
if (k != NULL && EnableInvokeDynamic) {
bool allow_super = false;
if (AllowTransitionalJSR292) allow_super = true; // temporary, to access java.dyn.MethodHandleImpl
compute_offset(_type_offset, k, vmSymbols::type_name(), vmSymbols::java_lang_invoke_MethodType_signature(), allow_super);
compute_offset(_vmtarget_offset, k, vmSymbols::vmtarget_name(), vmSymbols::object_signature(), allow_super);
compute_offset(_vmentry_offset, k, vmSymbols::vmentry_name(), vmSymbols::machine_word_signature(), allow_super);

@ -208,8 +208,10 @@ bool StackMapFrame::has_flag_match_exception(
return true;
}
bool StackMapFrame::is_assignable_to(const StackMapFrame* target, TRAPS) const {
if (_max_locals != target->max_locals() || _stack_size != target->stack_size()) {
bool StackMapFrame::is_assignable_to(
const StackMapFrame* target, bool is_exception_handler, TRAPS) const {
if (_max_locals != target->max_locals() ||
_stack_size != target->stack_size()) {
return false;
}
// Only need to compare type elements up to target->locals() or target->stack().
@ -222,7 +224,7 @@ bool StackMapFrame::is_assignable_to(const StackMapFrame* target, TRAPS) const {
bool match_flags = (_flags | target->flags()) == target->flags();
return match_locals && match_stack &&
(match_flags || has_flag_match_exception(target));
(match_flags || (is_exception_handler && has_flag_match_exception(target)));
}
VerificationType StackMapFrame::pop_stack_ex(VerificationType type, TRAPS) {

@ -134,7 +134,8 @@ class StackMapFrame : public ResourceObj {
void copy_stack(const StackMapFrame* src);
// Return true if this stack map frame is assignable to target.
bool is_assignable_to(const StackMapFrame* target, TRAPS) const;
bool is_assignable_to(const StackMapFrame* target,
bool is_exception_handler, TRAPS) const;
// Push type into stack type array.
inline void push_stack(VerificationType type, TRAPS) {

@ -98,10 +98,13 @@ bool StackMapTable::match_stackmap(
bool result = true;
StackMapFrame *stackmap_frame = _frame_array[frame_index];
if (match) {
// when checking handler target, match == true && update == false
bool is_exception_handler = !update;
// Has direct control flow from last instruction, need to match the two
// frames.
result = frame->is_assignable_to(
stackmap_frame, CHECK_VERIFY_(frame->verifier(), false));
stackmap_frame, is_exception_handler,
CHECK_VERIFY_(frame->verifier(), false));
}
if (update) {
// Use the frame in stackmap table as current frame

@ -1255,6 +1255,16 @@ instanceKlassHandle SystemDictionary::load_shared_class(
methodHandle m(THREAD, methodOop(methods->obj_at(index2)));
m()->link_method(m, CHECK_(nh));
}
if (JvmtiExport::has_redefined_a_class()) {
// Reinitialize vtable because RedefineClasses may have changed some
// entries in this vtable for super classes so the CDS vtable might
// point to old or obsolete entries. RedefineClasses doesn't fix up
// vtables in the shared system dictionary, only the main one.
// It also redefines the itable, so fix that as well.
ResourceMark rm(THREAD);
ik->vtable()->initialize_vtable(false, CHECK_(nh));
ik->itable()->initialize_itable(false, CHECK_(nh));
}
}
if (TraceClassLoading) {
@ -1887,99 +1897,27 @@ static const short wk_init_info[] = {
0
};
Symbol* SystemDictionary::find_backup_symbol(Symbol* symbol,
const char* from_prefix,
const char* to_prefix) {
assert(AllowTransitionalJSR292, ""); // delete this subroutine
Symbol* backup_symbol = NULL;
size_t from_len = strlen(from_prefix);
if (strncmp((const char*) symbol->base(), from_prefix, from_len) != 0)
return NULL;
char buf[100];
size_t to_len = strlen(to_prefix);
size_t tail_len = symbol->utf8_length() - from_len;
size_t new_len = to_len + tail_len;
guarantee(new_len < sizeof(buf), "buf too small");
memcpy(buf, to_prefix, to_len);
memcpy(buf + to_len, symbol->base() + from_len, tail_len);
buf[new_len] = '\0';
vmSymbols::SID backup_sid = vmSymbols::find_sid(buf);
if (backup_sid != vmSymbols::NO_SID) {
backup_symbol = vmSymbols::symbol_at(backup_sid);
}
return backup_symbol;
}
Symbol* SystemDictionary::find_backup_class_name(Symbol* symbol) {
assert(AllowTransitionalJSR292, ""); // delete this subroutine
if (symbol == NULL) return NULL;
Symbol* backup_symbol = find_backup_symbol(symbol, "java/lang/invoke/", "java/dyn/"); // AllowTransitionalJSR292 ONLY
if (backup_symbol == NULL)
backup_symbol = find_backup_symbol(symbol, "java/dyn/", "sun/dyn/"); // AllowTransitionalJSR292 ONLY
return backup_symbol;
}
Symbol* SystemDictionary::find_backup_signature(Symbol* symbol) {
assert(AllowTransitionalJSR292, ""); // delete this subroutine
if (symbol == NULL) return NULL;
return find_backup_symbol(symbol, "Ljava/lang/invoke/", "Ljava/dyn/");
}
bool SystemDictionary::initialize_wk_klass(WKID id, int init_opt, TRAPS) {
assert(id >= (int)FIRST_WKID && id < (int)WKID_LIMIT, "oob");
int info = wk_init_info[id - FIRST_WKID];
int sid = (info >> CEIL_LG_OPTION_LIMIT);
Symbol* symbol = vmSymbols::symbol_at((vmSymbols::SID)sid);
klassOop* klassp = &_well_known_klasses[id];
bool pre_load = (init_opt < SystemDictionary::Opt);
bool try_load = true;
bool must_load = (init_opt < SystemDictionary::Opt);
bool try_load = true;
if (init_opt == SystemDictionary::Opt_Kernel) {
#ifndef KERNEL
try_load = false;
#endif //KERNEL
}
Symbol* backup_symbol = NULL; // symbol to try if the current symbol fails
if (init_opt == SystemDictionary::Pre_JSR292) {
if (!EnableInvokeDynamic) try_load = false; // do not bother to load such classes
if (AllowTransitionalJSR292) {
backup_symbol = find_backup_class_name(symbol);
if (try_load && PreferTransitionalJSR292) {
while (backup_symbol != NULL) {
(*klassp) = resolve_or_null(backup_symbol, CHECK_0); // try backup early
if (TraceMethodHandles) {
ResourceMark rm;
tty->print_cr("MethodHandles: try backup first for %s => %s (%s)",
symbol->as_C_string(), backup_symbol->as_C_string(),
((*klassp) == NULL) ? "no such class" : "backup load succeeded");
}
if ((*klassp) != NULL) return true;
backup_symbol = find_backup_class_name(backup_symbol); // find next backup
}
}
}
}
if ((*klassp) != NULL) return true;
if (!try_load) return false;
while (symbol != NULL) {
bool must_load = (pre_load && (backup_symbol == NULL));
if ((*klassp) == NULL && try_load) {
if (must_load) {
(*klassp) = resolve_or_fail(symbol, true, CHECK_0); // load required class
} else {
(*klassp) = resolve_or_null(symbol, CHECK_0); // load optional klass
}
if ((*klassp) != NULL) return true;
// Go around again. Example of long backup sequence:
// java.lang.invoke.MemberName, java.dyn.MemberName, sun.dyn.MemberName, ONLY if AllowTransitionalJSR292
if (TraceMethodHandles && (backup_symbol != NULL)) {
ResourceMark rm;
tty->print_cr("MethodHandles: backup for %s => %s",
symbol->as_C_string(), backup_symbol->as_C_string());
}
symbol = backup_symbol;
if (AllowTransitionalJSR292)
backup_symbol = find_backup_class_name(symbol);
}
return false;
return ((*klassp) != NULL);
}
void SystemDictionary::initialize_wk_klasses_until(WKID limit_id, WKID &start_id, TRAPS) {
@ -2409,9 +2347,7 @@ methodOop SystemDictionary::find_method_handle_invoke(Symbol* name,
// Must create lots of stuff here, but outside of the SystemDictionary lock.
if (THREAD->is_Compiler_thread())
return NULL; // do not attempt from within compiler
bool for_invokeGeneric = (name_id == vmSymbols::VM_SYMBOL_ENUM_NAME(invokeGeneric_name));
if (AllowInvokeForInvokeGeneric && name_id == vmSymbols::VM_SYMBOL_ENUM_NAME(invoke_name))
for_invokeGeneric = true;
bool for_invokeGeneric = (name_id != vmSymbols::VM_SYMBOL_ENUM_NAME(invokeExact_name));
bool found_on_bcp = false;
Handle mt = find_method_handle_type(signature, accessing_klass,
for_invokeGeneric,
@ -2498,14 +2434,10 @@ Handle SystemDictionary::find_method_handle_type(Symbol* signature,
JavaCallArguments args(Handle(THREAD, rt()));
args.push_oop(pts());
JavaValue result(T_OBJECT);
Symbol* findMethodHandleType_signature = vmSymbols::findMethodHandleType_signature();
if (AllowTransitionalJSR292 && SystemDictionaryHandles::MethodType_klass()->name() == vmSymbols::java_dyn_MethodType()) {
findMethodHandleType_signature = vmSymbols::findMethodHandleType_TRANS_signature();
}
JavaCalls::call_static(&result,
SystemDictionary::MethodHandleNatives_klass(),
vmSymbols::findMethodHandleType_name(),
findMethodHandleType_signature,
vmSymbols::findMethodHandleType_signature(),
&args, CHECK_(empty));
Handle method_type(THREAD, (oop) result.get_jobject());
@ -2513,14 +2445,10 @@ Handle SystemDictionary::find_method_handle_type(Symbol* signature,
// call java.lang.invoke.MethodHandleNatives::notifyGenericMethodType(MethodType) -> void
JavaCallArguments args(Handle(THREAD, method_type()));
JavaValue no_result(T_VOID);
Symbol* notifyGenericMethodType_signature = vmSymbols::notifyGenericMethodType_signature();
if (AllowTransitionalJSR292 && SystemDictionaryHandles::MethodType_klass()->name() == vmSymbols::java_dyn_MethodType()) {
notifyGenericMethodType_signature = vmSymbols::notifyGenericMethodType_TRANS_signature();
}
JavaCalls::call_static(&no_result,
SystemDictionary::MethodHandleNatives_klass(),
vmSymbols::notifyGenericMethodType_name(),
notifyGenericMethodType_signature,
vmSymbols::notifyGenericMethodType_signature(),
&args, THREAD);
if (HAS_PENDING_EXCEPTION) {
// If the notification fails, just kill it.
@ -2569,14 +2497,10 @@ Handle SystemDictionary::link_method_handle_constant(KlassHandle caller,
args.push_oop(name());
args.push_oop(type());
JavaValue result(T_OBJECT);
Symbol* linkMethodHandleConstant_signature = vmSymbols::linkMethodHandleConstant_signature();
if (AllowTransitionalJSR292 && SystemDictionaryHandles::MethodHandle_klass()->name() == vmSymbols::java_dyn_MethodHandle()) {
linkMethodHandleConstant_signature = vmSymbols::linkMethodHandleConstant_TRANS_signature();
}
JavaCalls::call_static(&result,
SystemDictionary::MethodHandleNatives_klass(),
vmSymbols::linkMethodHandleConstant_name(),
linkMethodHandleConstant_signature,
vmSymbols::linkMethodHandleConstant_signature(),
&args, CHECK_(empty));
return Handle(THREAD, (oop) result.get_jobject());
}
@ -2607,17 +2531,10 @@ Handle SystemDictionary::make_dynamic_call_site(Handle bootstrap_method,
args.push_oop(caller_mname());
args.push_int(caller_bci);
JavaValue result(T_OBJECT);
Symbol* makeDynamicCallSite_signature = vmSymbols::makeDynamicCallSite_signature();
if (AllowTransitionalJSR292 && SystemDictionaryHandles::MethodHandleNatives_klass()->name() == vmSymbols::sun_dyn_MethodHandleNatives()) {
makeDynamicCallSite_signature = vmSymbols::makeDynamicCallSite_TRANS_signature();
}
if (AllowTransitionalJSR292 && SystemDictionaryHandles::MethodHandleNatives_klass()->name() == vmSymbols::java_dyn_MethodHandleNatives()) {
makeDynamicCallSite_signature = vmSymbols::makeDynamicCallSite_TRANS2_signature();
}
JavaCalls::call_static(&result,
SystemDictionary::MethodHandleNatives_klass(),
vmSymbols::makeDynamicCallSite_name(),
makeDynamicCallSite_signature,
vmSymbols::makeDynamicCallSite_signature(),
&args, CHECK_(empty));
oop call_site_oop = (oop) result.get_jobject();
assert(call_site_oop->is_oop()
@ -2698,28 +2615,10 @@ Handle SystemDictionary::find_bootstrap_method(methodHandle caller_method, int c
argument_info_result = argument_info; // return argument_info to caller
return bsm;
}
// else null BSM; fall through
} else if (tag.is_name_and_type()) {
// JSR 292 EDR does not have JVM_CONSTANT_InvokeDynamic
// a bare name&type defaults its BSM to null, so fall through...
} else {
ShouldNotReachHere(); // verifier does not allow this
}
// Fall through to pick up the per-class bootstrap method.
// This mechanism may go away in the PFD.
assert(AllowTransitionalJSR292, "else the verifier should have stopped us already");
argument_info_result = empty; // return no argument_info to caller
oop bsm_oop = instanceKlass::cast(caller_method->method_holder())->bootstrap_method();
if (bsm_oop != NULL) {
if (TraceMethodHandles) {
tty->print_cr("bootstrap method for "PTR_FORMAT" registered as "PTR_FORMAT":",
(intptr_t) caller_method(), (intptr_t) bsm_oop);
}
assert(bsm_oop->is_oop(), "must be sane");
return Handle(THREAD, bsm_oop);
}
return empty;
}

@ -146,7 +146,6 @@ class SymbolPropertyTable;
/* support for dynamic typing; it's OK if these are NULL in earlier JDKs */ \
template(MethodHandle_klass, java_lang_invoke_MethodHandle, Pre_JSR292) \
template(MemberName_klass, java_lang_invoke_MemberName, Pre_JSR292) \
template(MethodHandleImpl_klass, sun_dyn_MethodHandleImpl, Opt) /* AllowTransitionalJSR292 ONLY */ \
template(MethodHandleNatives_klass, java_lang_invoke_MethodHandleNatives, Pre_JSR292) \
template(AdapterMethodHandle_klass, java_lang_invoke_AdapterMethodHandle, Pre_JSR292) \
template(BoundMethodHandle_klass, java_lang_invoke_BoundMethodHandle, Pre_JSR292) \
@ -154,7 +153,6 @@ class SymbolPropertyTable;
template(MethodType_klass, java_lang_invoke_MethodType, Pre_JSR292) \
template(MethodTypeForm_klass, java_lang_invoke_MethodTypeForm, Pre_JSR292) \
template(WrongMethodTypeException_klass, java_lang_invoke_WrongMethodTypeException, Pre_JSR292) \
template(Linkage_klass, java_lang_invoke_Linkage, Opt) /* AllowTransitionalJSR292 ONLY */ \
template(CallSite_klass, java_lang_invoke_CallSite, Pre_JSR292) \
/* Note: MethodHandle must be first, and CallSite last in group */ \
\
@ -422,8 +420,6 @@ public:
initialize_wk_klasses_until((WKID) limit, start_id, THREAD);
}
static Symbol* find_backup_symbol(Symbol* symbol, const char* from_prefix, const char* to_prefix);
public:
#define WK_KLASS_DECLARE(name, ignore_symbol, option) \
static klassOop name() { return check_klass_##option(_well_known_klasses[WK_KLASS_ENUM_NAME(name)]); }
@ -445,9 +441,6 @@ public:
static void load_abstract_ownable_synchronizer_klass(TRAPS);
static Symbol* find_backup_class_name(Symbol* class_name_symbol);
static Symbol* find_backup_signature(Symbol* signature_symbol);
private:
// Tells whether ClassLoader.loadClassInternal is present
static bool has_loadClassInternal() { return _has_loadClassInternal; }

@ -1671,19 +1671,13 @@ void ClassVerifier::verify_ldc(
VerificationType::long_type(),
VerificationType::long2_type(), CHECK_VERIFY(this));
} else if (tag.is_method_handle()) {
Symbol* methodHandle_name = vmSymbols::java_lang_invoke_MethodHandle();
if (AllowTransitionalJSR292 && !Universe::is_bootstrapping())
methodHandle_name = SystemDictionaryHandles::MethodHandle_klass()->name();
current_frame->push_stack(
VerificationType::reference_type(
methodHandle_name), CHECK_VERIFY(this));
vmSymbols::java_lang_invoke_MethodHandle()), CHECK_VERIFY(this));
} else if (tag.is_method_type()) {
Symbol* methodType_name = vmSymbols::java_lang_invoke_MethodType();
if (AllowTransitionalJSR292 && !Universe::is_bootstrapping())
methodType_name = SystemDictionaryHandles::MethodType_klass()->name();
current_frame->push_stack(
VerificationType::reference_type(
methodType_name), CHECK_VERIFY(this));
vmSymbols::java_lang_invoke_MethodType()), CHECK_VERIFY(this));
} else {
verify_error(bci, "Invalid index in ldc");
return;
@ -1950,8 +1944,7 @@ void ClassVerifier::verify_invoke_instructions(
unsigned int types = (opcode == Bytecodes::_invokeinterface
? 1 << JVM_CONSTANT_InterfaceMethodref
: opcode == Bytecodes::_invokedynamic
? ((AllowTransitionalJSR292 ? 1 << JVM_CONSTANT_NameAndType : 0)
|1 << JVM_CONSTANT_InvokeDynamic)
? 1 << JVM_CONSTANT_InvokeDynamic
: 1 << JVM_CONSTANT_Methodref);
verify_cp_type(index, cp, types, CHECK_VERIFY(this));
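
The `types` expression above builds a bitmask of the constant-pool tags the instruction may reference; after the change, invokedynamic accepts only JVM_CONSTANT_InvokeDynamic and the transitional JVM_CONSTANT_NameAndType bit is gone. A small self-contained sketch of the same membership test, using the standard class-file tag values:

#include <cstdio>

// Standard class-file tag values (JVMS); each allowed tag contributes
// one bit to the mask.
enum {
  JVM_CONSTANT_Methodref          = 10,
  JVM_CONSTANT_InterfaceMethodref = 11,
  JVM_CONSTANT_InvokeDynamic      = 18
};

static bool tag_allowed(unsigned int types, int tag) {
  return (types & (1u << tag)) != 0;
}

int main() {
  // The invokedynamic case: exactly one tag bit is set.
  unsigned int types = 1u << JVM_CONSTANT_InvokeDynamic;
  std::printf("%d %d\n",
              tag_allowed(types, JVM_CONSTANT_InvokeDynamic),  // 1
              tag_allowed(types, JVM_CONSTANT_Methodref));     // 0
  return 0;
}
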

@ -245,44 +245,15 @@
template(java_lang_invoke_AdapterMethodHandle, "java/lang/invoke/AdapterMethodHandle") \
template(java_lang_invoke_BoundMethodHandle, "java/lang/invoke/BoundMethodHandle") \
template(java_lang_invoke_DirectMethodHandle, "java/lang/invoke/DirectMethodHandle") \
/* temporary transitional public names from 6839872: */ \
template(java_dyn_InvokeDynamic, "java/dyn/InvokeDynamic") /* AllowTransitionalJSR292 ONLY */ \
template(java_dyn_Linkage, "java/dyn/Linkage") /* AllowTransitionalJSR292 ONLY */ \
template(java_dyn_CallSite, "java/dyn/CallSite") /* AllowTransitionalJSR292 ONLY */ \
template(java_dyn_MethodHandle, "java/dyn/MethodHandle") /* AllowTransitionalJSR292 ONLY */ \
template(java_dyn_MethodType, "java/dyn/MethodType") /* AllowTransitionalJSR292 ONLY */ \
template(java_dyn_WrongMethodTypeException, "java/dyn/WrongMethodTypeException") /* AllowTransitionalJSR292 ONLY */ \
template(java_dyn_MethodType_signature, "Ljava/dyn/MethodType;") /* AllowTransitionalJSR292 ONLY */ \
template(java_dyn_MethodHandle_signature, "Ljava/dyn/MethodHandle;") /* AllowTransitionalJSR292 ONLY */ \
/* temporary transitional internal names from 6839872: */ \
template(java_dyn_MethodTypeForm, "java/dyn/MethodTypeForm") /* AllowTransitionalJSR292 ONLY */ \
template(java_dyn_MethodTypeForm_signature, "Ljava/dyn/MethodTypeForm;") /* AllowTransitionalJSR292 ONLY */ \
template(java_dyn_MemberName, "java/dyn/MemberName") /* AllowTransitionalJSR292 ONLY */ \
template(java_dyn_MethodHandleNatives, "java/dyn/MethodHandleNatives") /* AllowTransitionalJSR292 ONLY */ \
template(java_dyn_AdapterMethodHandle, "java/dyn/AdapterMethodHandle") /* AllowTransitionalJSR292 ONLY */ \
template(java_dyn_BoundMethodHandle, "java/dyn/BoundMethodHandle") /* AllowTransitionalJSR292 ONLY */ \
template(java_dyn_DirectMethodHandle, "java/dyn/DirectMethodHandle") /* AllowTransitionalJSR292 ONLY */ \
/* temporary transitional internal names from EDR: */ \
template(sun_dyn_MemberName, "sun/dyn/MemberName") /* AllowTransitionalJSR292 ONLY */ \
template(sun_dyn_MethodHandleImpl, "sun/dyn/MethodHandleImpl") /* AllowTransitionalJSR292 ONLY */ \
template(sun_dyn_MethodHandleNatives, "sun/dyn/MethodHandleNatives") /* AllowTransitionalJSR292 ONLY */ \
template(sun_dyn_AdapterMethodHandle, "sun/dyn/AdapterMethodHandle") /* AllowTransitionalJSR292 ONLY */ \
template(sun_dyn_BoundMethodHandle, "sun/dyn/BoundMethodHandle") /* AllowTransitionalJSR292 ONLY */ \
template(sun_dyn_DirectMethodHandle, "sun/dyn/DirectMethodHandle") /* AllowTransitionalJSR292 ONLY */ \
/* internal up-calls made only by the JVM, via class sun.invoke.MethodHandleNatives: */ \
template(findMethodHandleType_name, "findMethodHandleType") \
template(findMethodHandleType_signature, "(Ljava/lang/Class;[Ljava/lang/Class;)Ljava/lang/invoke/MethodType;") \
template(findMethodHandleType_TRANS_signature, "(Ljava/lang/Class;[Ljava/lang/Class;)Ljava/dyn/MethodType;") /* AllowTransitionalJSR292 ONLY */ \
template(notifyGenericMethodType_name, "notifyGenericMethodType") \
template(notifyGenericMethodType_signature, "(Ljava/lang/invoke/MethodType;)V") \
template(notifyGenericMethodType_TRANS_signature, "(Ljava/dyn/MethodType;)V") /* AllowTransitionalJSR292 ONLY */ \
template(linkMethodHandleConstant_name, "linkMethodHandleConstant") \
template(linkMethodHandleConstant_signature, "(Ljava/lang/Class;ILjava/lang/Class;Ljava/lang/String;Ljava/lang/Object;)Ljava/lang/invoke/MethodHandle;") \
template(linkMethodHandleConstant_TRANS_signature, "(Ljava/lang/Class;ILjava/lang/Class;Ljava/lang/String;Ljava/lang/Object;)Ljava/dyn/MethodHandle;") /* AllowTransitionalJSR292 ONLY */ \
template(makeDynamicCallSite_name, "makeDynamicCallSite") \
template(makeDynamicCallSite_signature, "(Ljava/lang/invoke/MethodHandle;Ljava/lang/String;Ljava/lang/invoke/MethodType;Ljava/lang/Object;Ljava/lang/invoke/MemberName;I)Ljava/lang/invoke/CallSite;") \
template(makeDynamicCallSite_TRANS_signature, "(Ljava/dyn/MethodHandle;Ljava/lang/String;Ljava/dyn/MethodType;Ljava/lang/Object;Lsun/dyn/MemberName;I)Ljava/dyn/CallSite;") /* AllowTransitionalJSR292 ONLY */ \
template(makeDynamicCallSite_TRANS2_signature, "(Ljava/dyn/MethodHandle;Ljava/lang/String;Ljava/dyn/MethodType;Ljava/lang/Object;Ljava/dyn/MemberName;I)Ljava/dyn/CallSite;") /* AllowTransitionalJSR292 ONLY */ \
NOT_LP64( do_alias(machine_word_signature, int_signature) ) \
LP64_ONLY( do_alias(machine_word_signature, long_signature) ) \
\
@ -330,6 +301,7 @@
template(dispatch_name, "dispatch") \
template(getSystemClassLoader_name, "getSystemClassLoader") \
template(fillInStackTrace_name, "fillInStackTrace") \
template(fillInStackTrace0_name, "fillInStackTrace0") \
template(getCause_name, "getCause") \
template(initCause_name, "initCause") \
template(setProperty_name, "setProperty") \
@ -706,6 +678,10 @@
do_intrinsic(_checkIndex, java_nio_Buffer, checkIndex_name, int_int_signature, F_R) \
do_name( checkIndex_name, "checkIndex") \
\
/* java/lang/ref/Reference */ \
do_intrinsic(_Reference_get, java_lang_ref_Reference, get_name, void_object_signature, F_R) \
\
\
do_class(sun_misc_AtomicLongCSImpl, "sun/misc/AtomicLongCSImpl") \
do_intrinsic(_get_AtomicLong, sun_misc_AtomicLongCSImpl, get_name, void_long_signature, F_R) \
/* (symbols get_name and void_long_signature defined above) */ \
@ -910,8 +886,6 @@
do_intrinsic(_invoke, java_lang_reflect_Method, invoke_name, object_object_array_object_signature, F_R) \
/* (symbols invoke_name and invoke_signature defined above) */ \
do_intrinsic(_checkSpreadArgument, java_lang_invoke_MethodHandleNatives, checkSpreadArgument_name, checkSpreadArgument_signature, F_S) \
do_intrinsic(_checkSpreadArgument_TRANS,sun_dyn_MethodHandleImpl, checkSpreadArgument_name, checkSpreadArgument_signature, F_S) /* AllowTransitionalJSR292 ONLY */ \
do_intrinsic(_checkSpreadArgument_TRANS2,java_dyn_MethodHandleNatives, checkSpreadArgument_name, checkSpreadArgument_signature, F_S) /* AllowTransitionalJSR292 ONLY */ \
do_name( checkSpreadArgument_name, "checkSpreadArgument") \
do_name( checkSpreadArgument_signature, "(Ljava/lang/Object;I)V") \
do_intrinsic(_invokeExact, java_lang_invoke_MethodHandle, invokeExact_name, object_array_object_signature, F_RN) \

@ -964,3 +964,14 @@ void CodeCache::log_state(outputStream* st) {
nof_blobs(), nof_nmethods(), nof_adapters(),
unallocated_capacity(), largest_free_block());
}
size_t CodeCache::largest_free_block() {
// This is called both with and without CodeCache_lock held, so
// handle both cases.
if (CodeCache_lock->owned_by_self()) {
return _heap->largest_free_block();
} else {
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
return _heap->largest_free_block();
}
}
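
The new wrapper applies the usual "lock only if not already held" idiom. A standalone sketch of the same shape, assuming invented names throughout: std::mutex plus an explicit owner record stands in for HotSpot's Mutex and its owned_by_self(), and Heap stands in for the CodeCache _heap:

#include <atomic>
#include <cstddef>
#include <mutex>
#include <thread>

struct Heap { size_t largest_free_block() const { return 4096; } };

class CodeCacheSketch {
  std::mutex lock_;
  std::atomic<std::thread::id> owner_;  // which thread holds lock_, if any
  Heap heap_;

  bool owned_by_self() const {
    return owner_.load() == std::this_thread::get_id();
  }

public:
  CodeCacheSketch() : owner_(std::thread::id()) {}

  size_t largest_free_block() {
    if (owned_by_self()) {
      // Caller already holds the lock; taking it again would deadlock.
      return heap_.largest_free_block();
    }
    std::lock_guard<std::mutex> g(lock_);
    owner_.store(std::this_thread::get_id());
    size_t result = heap_.largest_free_block();
    owner_.store(std::thread::id());  // clear ownership before unlocking
    return result;
  }
};

int main() {
  CodeCacheSketch cc;
  return cc.largest_free_block() == 4096 ? 0 : 1;
}
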

@ -160,7 +160,7 @@ class CodeCache : AllStatic {
static size_t capacity() { return _heap->capacity(); }
static size_t max_capacity() { return _heap->max_capacity(); }
static size_t unallocated_capacity() { return _heap->unallocated_capacity(); }
static size_t largest_free_block() { return _heap->largest_free_block(); }
static size_t largest_free_block();
static bool needs_flushing() { return largest_free_block() < CodeCacheFlushingMinimumFreeSpace; }
static bool needs_cache_clean() { return _needs_cache_clean; }

@ -472,20 +472,14 @@ RelocationHolder Relocation::spec_simple(relocInfo::relocType rtype) {
return itr._rh;
}
static inline bool is_index(intptr_t index) {
return 0 < index && index < os::vm_page_size();
}
int32_t Relocation::runtime_address_to_index(address runtime_address) {
assert(!is_index((intptr_t)runtime_address), "must not look like an index");
assert(!is_reloc_index((intptr_t)runtime_address), "must not look like an index");
if (runtime_address == NULL) return 0;
StubCodeDesc* p = StubCodeDesc::desc_for(runtime_address);
if (p != NULL && p->begin() == runtime_address) {
assert(is_index(p->index()), "there must not be too many stubs");
assert(is_reloc_index(p->index()), "there must not be too many stubs");
return (int32_t)p->index();
} else {
// Known "miscellaneous" non-stub pointers:
@ -506,7 +500,7 @@ int32_t Relocation::runtime_address_to_index(address runtime_address) {
address Relocation::index_to_runtime_address(int32_t index) {
if (index == 0) return NULL;
if (is_index(index)) {
if (is_reloc_index(index)) {
StubCodeDesc* p = StubCodeDesc::desc_for_index(index);
assert(p != NULL, "there must be a stub for this index");
return p->begin();
@ -634,7 +628,7 @@ void external_word_Relocation::pack_data_to(CodeSection* dest) {
#ifndef _LP64
p = pack_1_int_to(p, index);
#else
if (is_index(index)) {
if (is_reloc_index(index)) {
p = pack_2_ints_to(p, index, 0);
} else {
jlong t = (jlong) _target;
@ -642,7 +636,7 @@ void external_word_Relocation::pack_data_to(CodeSection* dest) {
int32_t hi = high(t);
p = pack_2_ints_to(p, lo, hi);
DEBUG_ONLY(jlong t1 = jlong_from(hi, lo));
assert(!is_index(t1) && (address) t1 == _target, "not symmetric");
assert(!is_reloc_index(t1) && (address) t1 == _target, "not symmetric");
}
#endif /* _LP64 */
dest->set_locs_end((relocInfo*) p);
@ -656,7 +650,7 @@ void external_word_Relocation::unpack_data() {
int32_t lo, hi;
unpack_2_ints(lo, hi);
jlong t = jlong_from(hi, lo);
if (is_index(t)) {
if (is_reloc_index(t)) {
_target = index_to_runtime_address(t);
} else {
_target = (address) t;
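
On LP64 the packer stores either a small stub index or the target address split into two 32-bit halves, and the "not symmetric" assert checks that the split is reversible. The round trip, with hypothetical standalone versions of the low/high/jlong_from helpers the hunk uses:

#include <cassert>
#include <cstdint>

static int32_t low (int64_t t) { return (int32_t) t; }
static int32_t high(int64_t t) { return (int32_t)(t >> 32); }
static int64_t jlong_from(int32_t hi, int32_t lo) {
  return ((int64_t)hi << 32) | (uint32_t)lo;
}

int main() {
  int64_t target = 0x123456789abcdef0LL;       // a 64-bit runtime address
  int32_t lo = low(target), hi = high(target); // packed as two 32-bit ints
  // The assert in pack_data_to() checks exactly this round trip:
  assert(jlong_from(hi, lo) == target);
  return 0;
}
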

@ -703,6 +703,10 @@ class Relocation VALUE_OBJ_CLASS_SPEC {
assert(datalen()==0 || type()==relocInfo::none, "no data here");
}
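// A reloc index is a small positive stub index, strictly less than one
// VM page; any value that large is taken to be a real address instead.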
static bool is_reloc_index(intptr_t index) {
return 0 < index && index < os::vm_page_size();
}
protected:
// Helper functions for pack_data_to() and unpack_data().
@ -1127,6 +1131,12 @@ class external_word_Relocation : public DataRelocation {
return rh;
}
// Some address-looking values aren't safe to treat as relocations
// and should just be treated as constants.
static bool can_be_relocated(address target) {
return target != NULL && !is_reloc_index((intptr_t)target);
}
private:
address _target; // address in runtime

@ -847,9 +847,9 @@ CompilerThread* CompileBroker::make_compiler_thread(const char* name, CompileQue
// Initialize the compilation queue
void CompileBroker::init_compiler_threads(int c1_compiler_count, int c2_compiler_count) {
EXCEPTION_MARK;
#ifndef ZERO
#if !defined(ZERO) && !defined(SHARK)
assert(c2_compiler_count > 0 || c1_compiler_count > 0, "No compilers?");
#endif // !ZERO
#endif // !ZERO && !SHARK
if (c2_compiler_count > 0) {
_c2_method_queue = new CompileQueue("C2MethodQueue", MethodCompileQueue_lock);
}
@ -1118,7 +1118,7 @@ nmethod* CompileBroker::compile_method(methodHandle method, int osr_bci,
assert(!HAS_PENDING_EXCEPTION, "No exception should be present");
// some prerequisites that are compiler specific
if (compiler(comp_level)->is_c2()) {
if (compiler(comp_level)->is_c2() || compiler(comp_level)->is_shark()) {
method->constants()->resolve_string_constants(CHECK_0);
// Resolve all classes seen in the signature of the method
// we are compiling.
@ -1736,8 +1736,14 @@ void CompileBroker::handle_full_code_cache() {
UseInterpreter = true;
if (UseCompiler || AlwaysCompileLoopMethods ) {
if (xtty != NULL) {
stringStream s;
// Dump the code cache state into a buffer before locking the tty,
// because log_state() takes locks of its own, which would conflict
// with the tty lock.
CodeCache::log_state(&s);
// Lock to prevent tearing
ttyLocker ttyl;
xtty->begin_elem("code_cache_full");
CodeCache::log_state(xtty);
xtty->print(s.as_string());
xtty->stamp();
xtty->end_elem();
}
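
The fix buffers the code-cache report before taking the tty lock, so nothing that acquires other locks runs while the lock is held. The same ordering in a minimal standalone form, assuming invented names, with std::ostringstream playing the stringStream role:

#include <cstddef>
#include <iostream>
#include <mutex>
#include <sstream>

std::mutex tty_lock;  // stands in for ttyLocker

// Format first, lock second: any work that might take other locks
// happens while tty_lock is still free.
void report_code_cache_full(size_t used, size_t capacity) {
  std::ostringstream s;                     // the stringStream analogue
  s << "code_cache_full: " << used << '/' << capacity;
  std::lock_guard<std::mutex> g(tty_lock);  // now safe: only I/O below
  std::cout << s.str() << std::endl;
}

int main() { report_code_cache_full(48 << 20, 48 << 20); return 0; }
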

@ -172,7 +172,7 @@ class ConcurrentG1Refine: public CHeapObj {
// hash a given key (index of card_ptr) with the specified size
static unsigned int hash(size_t key, size_t size) {
return (unsigned int) key % size;
return (unsigned int) (key % size);
}
// hash a given key (index of card_ptr)
@ -180,11 +180,11 @@ class ConcurrentG1Refine: public CHeapObj {
return hash(key, _n_card_counts);
}
unsigned ptr_2_card_num(jbyte* card_ptr) {
return (unsigned) (card_ptr - _ct_bot);
unsigned int ptr_2_card_num(jbyte* card_ptr) {
return (unsigned int) (card_ptr - _ct_bot);
}
jbyte* card_num_2_ptr(unsigned card_num) {
jbyte* card_num_2_ptr(unsigned int card_num) {
return (jbyte*) (_ct_bot + card_num);
}
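
The hunk above only makes the `unsigned int` types explicit; the underlying mapping is plain pointer arithmetic against the card-table base. A compilable sketch of the three helpers, wrapped in an invented struct:

#include <cassert>
#include <cstddef>

typedef signed char jbyte;  // card-table entries are bytes

// A card's number is its offset from the card-table base, and its
// counts-table slot is that number reduced modulo the table size.
struct CardCounts {
  jbyte*  ct_bot;          // bottom of the card table (_ct_bot)
  size_t  n_card_counts;   // size of the counts table (_n_card_counts)

  unsigned int ptr_2_card_num(jbyte* card_ptr) const {
    return (unsigned int)(card_ptr - ct_bot);
  }
  jbyte* card_num_2_ptr(unsigned int card_num) const {
    return ct_bot + card_num;
  }
  unsigned int hash(size_t key) const {
    return (unsigned int)(key % n_card_counts);
  }
};

int main() {
  static jbyte table[1024];
  CardCounts cc = { table, 256 };
  unsigned int n = cc.ptr_2_card_num(&table[300]);
  assert(cc.card_num_2_ptr(n) == &table[300]);  // round trip
  assert(cc.hash(n) == 300 % 256);              // counts-table slot
  return 0;
}
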

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -47,7 +47,9 @@ G1SATBCardTableModRefBS::G1SATBCardTableModRefBS(MemRegion whole_heap,
void G1SATBCardTableModRefBS::enqueue(oop pre_val) {
assert(pre_val->is_oop_or_null(true), "Error");
// Nulls should already have been filtered.
assert(pre_val->is_oop(true), "Error");
if (!JavaThread::satb_mark_queue_set().is_active()) return;
Thread* thr = Thread::current();
if (thr->is_Java_thread()) {
@ -59,20 +61,6 @@ void G1SATBCardTableModRefBS::enqueue(oop pre_val) {
}
}
// When we know the current java thread:
template <class T> void
G1SATBCardTableModRefBS::write_ref_field_pre_static(T* field,
oop new_val,
JavaThread* jt) {
if (!JavaThread::satb_mark_queue_set().is_active()) return;
T heap_oop = oopDesc::load_heap_oop(field);
if (!oopDesc::is_null(heap_oop)) {
oop pre_val = oopDesc::decode_heap_oop_not_null(heap_oop);
assert(pre_val->is_oop(true /* ignore mark word */), "Error");
jt->satb_mark_queue().enqueue(pre_val);
}
}
template <class T> void
G1SATBCardTableModRefBS::write_ref_array_pre_work(T* dst, int count) {
if (!JavaThread::satb_mark_queue_set().is_active()) return;
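
The deleted write_ref_field_pre_static() shows the SATB pre-barrier shape: before a reference field is overwritten, its old value is enqueued so concurrent marking still traverses the snapshot-at-the-beginning object graph. A minimal model of that shape, with invented types and no real G1 code:

#include <cstddef>
#include <vector>

typedef void* oop;                    // invented stand-in for an oop
static std::vector<oop> satb_queue;   // stand-in for the SATB mark queue
static bool marking_active = false;   // the queue set's "is_active" flag

static void write_ref_field_pre(oop* field) {
  if (!marking_active) return;        // barrier is a no-op outside marking
  oop pre_val = *field;
  if (pre_val != NULL)
    satb_queue.push_back(pre_val);    // record the about-to-be-lost edge
}

static void write_ref_field(oop* field, oop new_val) {
  write_ref_field_pre(field);         // pre-barrier sees the old value
  *field = new_val;                   // then the store happens
}

int main() {
  int a = 1, b = 2;
  oop field = &a;
  marking_active = true;
  write_ref_field(&field, &b);        // enqueues &a, then stores &b
  return satb_queue.size() == 1 ? 0 : 1;
}
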

@ -37,12 +37,11 @@ class DirtyCardQueueSet;
// snapshot-at-the-beginning marking.
class G1SATBCardTableModRefBS: public CardTableModRefBSForCTRS {
private:
public:
// Add "pre_val" to a set of objects that may have been disconnected from the
// pre-marking object graph.
static void enqueue(oop pre_val);
public:
G1SATBCardTableModRefBS(MemRegion whole_heap,
int max_covered_regions);
@ -61,10 +60,6 @@ public:
}
}
// When we know the current java thread:
template <class T> static void write_ref_field_pre_static(T* field, oop newVal,
JavaThread* jt);
// We export this to make it available in cases where the static
// type of the barrier set is known. Note that it is non-virtual.
template <class T> inline void inline_write_ref_field_pre(T* field, oop newVal) {

@ -89,13 +89,9 @@
"The number of discovered reference objects to process before " \
"draining concurrent marking work queues.") \
\
experimental(bool, G1UseConcMarkReferenceProcessing, false, \
experimental(bool, G1UseConcMarkReferenceProcessing, true, \
"If true, enable reference discovery during concurrent " \
"marking and reference processing at the end of remark " \
"(unsafe).") \
\
develop(bool, G1SATBBarrierPrintNullPreVals, false, \
"If true, count frac of ptr writes with null pre-vals.") \
"marking and reference processing at the end of remark.") \
\
product(intx, G1SATBBufferSize, 1*K, \
"Number of entries in an SATB log buffer.") \
@ -150,12 +146,6 @@
develop(bool, G1PrintParCleanupStats, false, \
"When true, print extra stats about parallel cleanup.") \
\
develop(bool, G1DisablePreBarrier, false, \
"Disable generation of pre-barrier (i.e., marking barrier) ") \
\
develop(bool, G1DisablePostBarrier, false, \
"Disable generation of post-barrier (i.e., RS barrier) ") \
\
product(intx, G1UpdateBufferSize, 256, \
"Size of an update buffer") \
\

@ -1,5 +1,5 @@
/*
* Copyright (c) 2007, 2010 Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2007, 2011 Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -36,7 +36,6 @@
void CardTableModRefBS::par_non_clean_card_iterate_work(Space* sp, MemRegion mr,
DirtyCardToOopClosure* dcto_cl,
MemRegionClosure* cl,
bool clear,
int n_threads) {
if (n_threads > 0) {
assert((n_threads == 1 && ParallelGCThreads == 0) ||
@ -57,7 +56,7 @@ void CardTableModRefBS::par_non_clean_card_iterate_work(Space* sp, MemRegion mr,
int stride = 0;
while (!pst->is_task_claimed(/* reference */ stride)) {
process_stride(sp, mr, stride, n_strides, dcto_cl, cl, clear,
process_stride(sp, mr, stride, n_strides, dcto_cl, cl,
lowest_non_clean,
lowest_non_clean_base_chunk_index,
lowest_non_clean_chunk_size);
@ -83,7 +82,6 @@ process_stride(Space* sp,
jint stride, int n_strides,
DirtyCardToOopClosure* dcto_cl,
MemRegionClosure* cl,
bool clear,
jbyte** lowest_non_clean,
uintptr_t lowest_non_clean_base_chunk_index,
size_t lowest_non_clean_chunk_size) {
@ -129,7 +127,7 @@ process_stride(Space* sp,
lowest_non_clean_base_chunk_index,
lowest_non_clean_chunk_size);
non_clean_card_iterate_work(chunk_mr, cl, clear);
non_clean_card_iterate_work(chunk_mr, cl);
// Find the next chunk of the stride.
chunk_card_start += CardsPerStrideChunk * n_strides;
