commit 0b9dfebfa4
Merge

@@ -58,6 +58,7 @@ sun.jvm.hotspot.debugger.cdbg.basic.x86 \
sun.jvm.hotspot.debugger.dummy \
sun.jvm.hotspot.debugger.linux \
sun.jvm.hotspot.debugger.linux.amd64 \
sun.jvm.hotspot.debugger.linux.aarch64 \
sun.jvm.hotspot.debugger.linux.ppc64 \
sun.jvm.hotspot.debugger.linux.x86 \
sun.jvm.hotspot.debugger.posix \
@@ -65,6 +66,7 @@ sun.jvm.hotspot.debugger.posix.elf \
sun.jvm.hotspot.debugger.ppc64 \
sun.jvm.hotspot.debugger.proc \
sun.jvm.hotspot.debugger.proc.amd64 \
sun.jvm.hotspot.debugger.proc.aarch64 \
sun.jvm.hotspot.debugger.proc.ppc64 \
sun.jvm.hotspot.debugger.proc.sparc \
sun.jvm.hotspot.debugger.proc.x86 \
@@ -91,11 +93,13 @@ sun.jvm.hotspot.oops \
sun.jvm.hotspot.prims \
sun.jvm.hotspot.runtime \
sun.jvm.hotspot.runtime.amd64 \
sun.jvm.hotspot.runtime.aarch64 \
sun.jvm.hotspot.runtime.bsd \
sun.jvm.hotspot.runtime.bsd_amd64 \
sun.jvm.hotspot.runtime.bsd_x86 \
sun.jvm.hotspot.runtime.linux \
sun.jvm.hotspot.runtime.linux_amd64 \
sun.jvm.hotspot.runtime.linux_aarch64 \
sun.jvm.hotspot.runtime.linux_ppc64 \
sun.jvm.hotspot.runtime.linux_sparc \
sun.jvm.hotspot.runtime.linux_x86 \
@@ -149,16 +153,19 @@ sun/jvm/hotspot/debugger/dummy/*.java \
sun/jvm/hotspot/debugger/linux/*.java \
sun/jvm/hotspot/debugger/linux/ppc64/*.java \
sun/jvm/hotspot/debugger/linux/x86/*.java \
sun/jvm/hotspot/debugger/linux/aarch64/*.java \
sun/jvm/hotspot/debugger/posix/*.java \
sun/jvm/hotspot/debugger/posix/elf/*.java \
sun/jvm/hotspot/debugger/ppc64/*.java \
sun/jvm/hotspot/debugger/proc/*.java \
sun/jvm/hotspot/debugger/proc/amd64/*.java \
sun/jvm/hotspot/debugger/proc/aarch64/*.java \
sun/jvm/hotspot/debugger/proc/ppc64/*.java \
sun/jvm/hotspot/debugger/proc/sparc/*.java \
sun/jvm/hotspot/debugger/proc/x86/*.java \
sun/jvm/hotspot/debugger/remote/*.java \
sun/jvm/hotspot/debugger/remote/amd64/*.java \
sun/jvm/hotspot/debugger/remote/aarch64/*.java \
sun/jvm/hotspot/debugger/remote/ppc64/*.java \
sun/jvm/hotspot/debugger/remote/sparc/*.java \
sun/jvm/hotspot/debugger/remote/x86/*.java \
@@ -178,11 +185,13 @@ sun/jvm/hotspot/opto/*.java \
sun/jvm/hotspot/prims/*.java \
sun/jvm/hotspot/runtime/*.java \
sun/jvm/hotspot/runtime/amd64/*.java \
sun/jvm/hotspot/runtime/aarch64/*.java \
sun/jvm/hotspot/runtime/bsd/*.java \
sun/jvm/hotspot/runtime/bsd_amd64/*.java \
sun/jvm/hotspot/runtime/bsd_x86/*.java \
sun/jvm/hotspot/runtime/linux/*.java \
sun/jvm/hotspot/runtime/linux_amd64/*.java \
sun/jvm/hotspot/runtime/linux_aarch64/*.java \
sun/jvm/hotspot/runtime/linux_ppc64/*.java \
sun/jvm/hotspot/runtime/linux_sparc/*.java \
sun/jvm/hotspot/runtime/linux_x86/*.java \

@@ -53,6 +53,10 @@
#include "sun_jvm_hotspot_debugger_ppc64_PPC64ThreadContext.h"
#endif

#ifdef aarch64
#include "sun_jvm_hotspot_debugger_aarch64_AARCH64ThreadContext.h"
#endif

static jfieldID p_ps_prochandle_ID = 0;
static jfieldID threadList_ID = 0;
static jfieldID loadObjectList_ID = 0;
@@ -368,7 +372,7 @@ JNIEXPORT jlongArray JNICALL Java_sun_jvm_hotspot_debugger_linux_LinuxDebuggerLo
#define NPRGREG sun_jvm_hotspot_debugger_amd64_AMD64ThreadContext_NPRGREG
#endif
#ifdef aarch64
#define NPRGREG 32
#define NPRGREG sun_jvm_hotspot_debugger_aarch64_AARCH64ThreadContext_NPRGREG
#endif
#if defined(sparc) || defined(sparcv9)
#define NPRGREG sun_jvm_hotspot_debugger_sparc_SPARCThreadContext_NPRGREG
@@ -473,6 +477,13 @@ JNIEXPORT jlongArray JNICALL Java_sun_jvm_hotspot_debugger_linux_LinuxDebuggerLo

#define REG_INDEX(reg) sun_jvm_hotspot_debugger_aarch64_AARCH64ThreadContext_##reg

  {
    int i;
    for (i = 0; i < 31; i++)
      regs[i] = gregs.regs[i];
    regs[REG_INDEX(SP)] = gregs.sp;
    regs[REG_INDEX(PC)] = gregs.pc;
  }
#endif /* aarch64 */

#ifdef ppc64

@@ -53,14 +53,15 @@ $(ARCH)/LinuxDebuggerLocal.o: LinuxDebuggerLocal.c
	$(JAVAH) -jni -classpath ../../../build/classes -d $(ARCH) \
		sun.jvm.hotspot.debugger.x86.X86ThreadContext \
		sun.jvm.hotspot.debugger.sparc.SPARCThreadContext \
		sun.jvm.hotspot.debugger.amd64.AMD64ThreadContext
		sun.jvm.hotspot.debugger.amd64.AMD64ThreadContext \
		sun.jvm.hotspot.debugger.aarch64.AARCH64ThreadContext
	$(GCC) $(CFLAGS) $< -o $@

$(ARCH)/sadis.o: ../../share/native/sadis.c
	$(JAVAH) -jni -classpath ../../../build/classes -d $(ARCH) \
		sun.jvm.hotspot.asm.Disassembler
	$(GCC) $(CFLAGS) $< -o $@


$(ARCH)/%.o: %.c
	$(GCC) $(CFLAGS) $< -o $@

@@ -72,6 +72,7 @@ combination of ptrace and /proc calls.
#define user_regs_struct pt_regs
#endif
#if defined(aarch64)
#include <asm/ptrace.h>
#define user_regs_struct user_pt_regs
#endif

@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2003, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -30,7 +30,7 @@

// Linux does not have the proc service library, though it does provide the
// thread_db library which can be used to manipulate threads without having
// to know the details of LinuxThreads or NPTL
// to know the details of NPTL

// copied from Solaris "proc_service.h"
typedef enum {

@@ -983,19 +983,15 @@ public class HSDB implements ObjectHistogramPanel.Listener, SAListener {
                                                     curFrame.getFP(),
                                                     anno));
            } else {
              if (VM.getVM().getCPU().equals("x86") || VM.getVM().getCPU().equals("amd64")) {
                // For C2, which has null frame pointers on x86/amd64
                CodeBlob cb = VM.getVM().getCodeCache().findBlob(curFrame.getPC());
                Address sp = curFrame.getSP();
                if (Assert.ASSERTS_ENABLED) {
                  Assert.that(cb.getFrameSize() > 0, "CodeBlob must have non-zero frame size");
                }
                annoPanel.addAnnotation(new Annotation(sp,
                                                       sp.addOffsetTo(cb.getFrameSize()),
                                                       anno));
              } else {
                Assert.that(VM.getVM().getCPU().equals("ia64"), "only ia64 should reach here");
              // For C2, which has null frame pointers on x86/amd64/aarch64
              CodeBlob cb = VM.getVM().getCodeCache().findBlob(curFrame.getPC());
              Address sp = curFrame.getSP();
              if (Assert.ASSERTS_ENABLED) {
                Assert.that(cb.getFrameSize() > 0, "CodeBlob must have non-zero frame size");
              }
              annoPanel.addAnnotation(new Annotation(sp,
                                                     sp.addOffsetTo(cb.getFrameSize()),
                                                     anno));
            }

            // Add interpreter frame annotations
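The HSDB hunk above replaces the x86/amd64-only special case with a generic path: for compiled (C2) frames, which may carry a null frame pointer, the annotation span is derived from the stack pointer and the code blob's frame size instead of from FP. A minimal, self-contained sketch of that interval computation (the class and its numeric inputs are hypothetical; the real code obtains frameSize from VM.getVM().getCodeCache().findBlob(pc).getFrameSize()):

    public class C2FrameAnnotationDemo {
        // frameSize is a stand-in for CodeBlob.getFrameSize() in the real SA code
        static long[] annotationSpan(long sp, long frameSize) {
            if (frameSize <= 0) {
                // mirrors the Assert in the hunk above
                throw new IllegalStateException("CodeBlob must have non-zero frame size");
            }
            return new long[] { sp, sp + frameSize };  // [start, end) of the annotation
        }

        public static void main(String[] args) {
            long[] span = annotationSpan(0x7000L, 0x40L);  // hypothetical SP and frame size
            System.out.printf("annotate [0x%x, 0x%x)%n", span[0], span[1]);
        }
    }
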
@@ -0,0 +1,123 @@
/*
 * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, Red Hat Inc.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

package sun.jvm.hotspot.debugger.aarch64;

import java.lang.annotation.Native;

import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.debugger.cdbg.*;

/** Specifies the thread context on aarch64 platforms; only a sub-portion
 *  of the context is guaranteed to be present on all operating
 *  systems. */

public abstract class AARCH64ThreadContext implements ThreadContext {
  // Taken from /usr/include/asm/sigcontext.h on Linux/AARCH64.

  // NOTE: the indices for the various registers must be maintained as
  // listed across various operating systems. However, only a small
  // subset of the registers' values are guaranteed to be present (and
  // must be present for the SA's stack walking to work)

  // One instance of the Native annotation is enough to trigger header generation
  // for this file.
  @Native
  public static final int R0 = 0;
  public static final int R1 = 1;
  public static final int R2 = 2;
  public static final int R3 = 3;
  public static final int R4 = 4;
  public static final int R5 = 5;
  public static final int R6 = 6;
  public static final int R7 = 7;
  public static final int R8 = 8;
  public static final int R9 = 9;
  public static final int R10 = 10;
  public static final int R11 = 11;
  public static final int R12 = 12;
  public static final int R13 = 13;
  public static final int R14 = 14;
  public static final int R15 = 15;
  public static final int R16 = 16;
  public static final int R17 = 17;
  public static final int R18 = 18;
  public static final int R19 = 19;
  public static final int R20 = 20;
  public static final int R21 = 21;
  public static final int R22 = 22;
  public static final int R23 = 23;
  public static final int R24 = 24;
  public static final int R25 = 25;
  public static final int R26 = 26;
  public static final int R27 = 27;
  public static final int R28 = 28;
  public static final int FP = 29;
  public static final int LR = 30;
  public static final int SP = 31;
  public static final int PC = 32;

  public static final int NPRGREG = 33;

  private long[] data;

  public AARCH64ThreadContext() {
    data = new long[NPRGREG];
  }

  public int getNumRegisters() {
    return NPRGREG;
  }

  public String getRegisterName(int index) {
    switch (index) {
    case LR: return "lr";
    case SP: return "sp";
    case PC: return "pc";
    default:
      return "r" + index;
    }
  }

  public void setRegister(int index, long value) {
    data[index] = value;
  }

  public long getRegister(int index) {
    return data[index];
  }

  public CFrame getTopFrame(Debugger dbg) {
    return null;
  }

  /** This can't be implemented in this class since we would have to
   *  tie the implementation to, for example, the debugging system */
  public abstract void setRegisterAsAddress(int index, Address value);

  /** This can't be implemented in this class since we would have to
   *  tie the implementation to, for example, the debugging system */
  public abstract Address getRegisterAsAddress(int index);
}
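The register indices above are the fixed contract between the native debugger glue and the Java side: r0 through r28 at their own numbers, then FP=29, LR=30, SP=31, PC=32, with NPRGREG=33 as the total. A tiny stand-alone sketch (a hypothetical class with no SA dependencies) of the same naming rule that getRegisterName applies; note that index 29 prints as plain "r29" because only LR, SP, and PC are special-cased:

    public class Aarch64RegisterNamesDemo {
        // Same constants as AARCH64ThreadContext above
        static final int FP = 29, LR = 30, SP = 31, PC = 32, NPRGREG = 33;

        static String registerName(int index) {
            switch (index) {
            case LR: return "lr";
            case SP: return "sp";
            case PC: return "pc";
            default:
                return "r" + index;  // r0..r28, and r29 (the FP alias)
            }
        }

        public static void main(String[] args) {
            for (int i = 0; i < NPRGREG; i++) {
                System.out.println(i + " -> " + registerName(i));
            }
        }
    }
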

@@ -1,5 +1,6 @@
/*
 * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, Red Hat Inc.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -31,12 +32,14 @@ import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.debugger.cdbg.*;
import sun.jvm.hotspot.debugger.x86.*;
import sun.jvm.hotspot.debugger.amd64.*;
import sun.jvm.hotspot.debugger.aarch64.*;
import sun.jvm.hotspot.debugger.sparc.*;
import sun.jvm.hotspot.debugger.ppc64.*;
import sun.jvm.hotspot.debugger.linux.x86.*;
import sun.jvm.hotspot.debugger.linux.amd64.*;
import sun.jvm.hotspot.debugger.linux.sparc.*;
import sun.jvm.hotspot.debugger.linux.ppc64.*;
import sun.jvm.hotspot.debugger.linux.aarch64.*;
import sun.jvm.hotspot.utilities.*;

class LinuxCDebugger implements CDebugger {
@@ -106,6 +109,13 @@ class LinuxCDebugger implements CDebugger {
      Address pc = context.getRegisterAsAddress(PPC64ThreadContext.PC);
      if (pc == null) return null;
      return new LinuxPPC64CFrame(dbg, sp, pc, LinuxDebuggerLocal.getAddressSize());
    } else if (cpu.equals("aarch64")) {
      AARCH64ThreadContext context = (AARCH64ThreadContext) thread.getContext();
      Address fp = context.getRegisterAsAddress(AARCH64ThreadContext.FP);
      if (fp == null) return null;
      Address pc = context.getRegisterAsAddress(AARCH64ThreadContext.PC);
      if (pc == null) return null;
      return new LinuxAARCH64CFrame(dbg, fp, pc);
    } else {
      // Runtime exception thrown by LinuxThreadContextFactory if unknown cpu
      ThreadContext context = (ThreadContext) thread.getContext();

@@ -0,0 +1,86 @@
/*
 * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, Red Hat Inc.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

package sun.jvm.hotspot.debugger.linux.aarch64;

import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.debugger.aarch64.*;
import sun.jvm.hotspot.debugger.linux.*;
import sun.jvm.hotspot.debugger.cdbg.*;
import sun.jvm.hotspot.debugger.cdbg.basic.*;

final public class LinuxAARCH64CFrame extends BasicCFrame {
  public LinuxAARCH64CFrame(LinuxDebugger dbg, Address fp, Address pc) {
    super(dbg.getCDebugger());
    this.fp = fp;
    this.pc = pc;
    this.dbg = dbg;
  }

  // override base class impl to avoid ELF parsing
  public ClosestSymbol closestSymbolToPC() {
    // try native lookup in debugger.
    return dbg.lookup(dbg.getAddressValue(pc()));
  }

  public Address pc() {
    return pc;
  }

  public Address localVariableBase() {
    return fp;
  }

  public CFrame sender(ThreadProxy thread) {
    AARCH64ThreadContext context = (AARCH64ThreadContext) thread.getContext();
    Address rsp = context.getRegisterAsAddress(AARCH64ThreadContext.SP);

    if ((fp == null) || fp.lessThan(rsp)) {
      return null;
    }

    // Check alignment of fp
    if (dbg.getAddressValue(fp) % (2 * ADDRESS_SIZE) != 0) {
      return null;
    }

    Address nextFP = fp.getAddressAt(0 * ADDRESS_SIZE);
    if (nextFP == null || nextFP.lessThanOrEqual(fp)) {
      return null;
    }
    Address nextPC = fp.getAddressAt(1 * ADDRESS_SIZE);
    if (nextPC == null) {
      return null;
    }
    return new LinuxAARCH64CFrame(dbg, nextFP, nextPC);
  }

  // package/class internals only
  private static final int ADDRESS_SIZE = 8;
  private Address pc;
  private Address sp;
  private Address fp;
  private LinuxDebugger dbg;
}
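sender() above walks the native frame-pointer chain: on AArch64 each frame stores the pair [saved FP, saved LR] at the frame pointer, so the caller's FP sits at offset 0 and the caller's PC (the saved LR) at offset 8, and a sane chain is 16-byte aligned, at or above SP, and strictly ascending. The following self-contained sketch models that walk; MemoryReader and the sample stack image are hypothetical stand-ins for the SA's Address and LinuxDebugger:

    import java.util.HashMap;
    import java.util.Map;

    public class FpChainWalkDemo {
        interface MemoryReader {
            long readWord(long addr); // returns 0 when unmapped, like a null Address
        }

        static final int ADDRESS_SIZE = 8;

        // Mirrors the sanity checks in sender(): FP at or above SP,
        // 16-byte aligned, and each saved FP strictly higher than the last.
        static void walk(MemoryReader mem, long fp, long pc, long sp) {
            while (fp != 0 && fp >= sp && fp % (2 * ADDRESS_SIZE) == 0) {
                System.out.printf("frame fp=0x%x pc=0x%x%n", fp, pc);
                long nextFP = mem.readWord(fp);                 // caller's FP at [fp]
                long nextPC = mem.readWord(fp + ADDRESS_SIZE);  // caller's PC (saved LR) at [fp + 8]
                if (nextFP <= fp || nextPC == 0) {
                    return;                                     // chain must move strictly up the stack
                }
                fp = nextFP;
                pc = nextPC;
            }
        }

        public static void main(String[] args) {
            // Two fake frames: [fp, lr] pairs stored at 0x7000 and 0x7100.
            Map<Long, Long> stack = new HashMap<>();
            stack.put(0x7000L, 0x7100L);   stack.put(0x7008L, 0x400800L);
            stack.put(0x7100L, 0x0L);      stack.put(0x7108L, 0x400900L);
            MemoryReader mem = a -> stack.getOrDefault(a, 0L);
            walk(mem, 0x7000L, 0x400700L, 0x6ff0L);
        }
    }
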

@@ -0,0 +1,47 @@
/*
 * Copyright (c) 2003, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, Red Hat Inc.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

package sun.jvm.hotspot.debugger.linux.aarch64;

import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.debugger.aarch64.*;
import sun.jvm.hotspot.debugger.linux.*;

public class LinuxAARCH64ThreadContext extends AARCH64ThreadContext {
  private LinuxDebugger debugger;

  public LinuxAARCH64ThreadContext(LinuxDebugger debugger) {
    super();
    this.debugger = debugger;
  }

  public void setRegisterAsAddress(int index, Address value) {
    setRegister(index, debugger.getAddressValue(value));
  }

  public Address getRegisterAsAddress(int index) {
    return debugger.newAddress(getRegister(index));
  }
}

@@ -31,11 +31,13 @@ import java.lang.reflect.*;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.debugger.cdbg.*;
import sun.jvm.hotspot.debugger.proc.amd64.*;
import sun.jvm.hotspot.debugger.proc.aarch64.*;
import sun.jvm.hotspot.debugger.proc.sparc.*;
import sun.jvm.hotspot.debugger.proc.ppc64.*;
import sun.jvm.hotspot.debugger.proc.x86.*;
import sun.jvm.hotspot.debugger.ppc64.*;
import sun.jvm.hotspot.debugger.amd64.*;
import sun.jvm.hotspot.debugger.aarch64.*;
import sun.jvm.hotspot.debugger.sparc.*;
import sun.jvm.hotspot.debugger.x86.*;
import sun.jvm.hotspot.utilities.*;
@@ -88,6 +90,10 @@ public class ProcDebuggerLocal extends DebuggerBase implements ProcDebugger {
      threadFactory = new ProcAMD64ThreadFactory(this);
      pcRegIndex = AMD64ThreadContext.RIP;
      fpRegIndex = AMD64ThreadContext.RBP;
    } else if (cpu.equals("aarch64")) {
      threadFactory = new ProcAARCH64ThreadFactory(this);
      pcRegIndex = AARCH64ThreadContext.PC;
      fpRegIndex = AARCH64ThreadContext.FP;
    } else if (cpu.equals("ppc64")) {
      threadFactory = new ProcPPC64ThreadFactory(this);
      pcRegIndex = PPC64ThreadContext.PC;

@@ -0,0 +1,87 @@
/*
 * Copyright (c) 2004, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, Red Hat Inc.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

package sun.jvm.hotspot.debugger.proc.aarch64;

import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.debugger.aarch64.*;
import sun.jvm.hotspot.debugger.proc.*;
import sun.jvm.hotspot.utilities.*;

public class ProcAARCH64Thread implements ThreadProxy {
  private ProcDebugger debugger;
  private int id;

  public ProcAARCH64Thread(ProcDebugger debugger, Address addr) {
    this.debugger = debugger;

    // FIXME: the size here should be configurable. However, making it
    // so would produce a dependency on the "types" package from the
    // debugger package, which is not desired.
    this.id = (int) addr.getCIntegerAt(0, 4, true);
  }

  public ProcAARCH64Thread(ProcDebugger debugger, long id) {
    this.debugger = debugger;
    this.id = (int) id;
  }

  public ThreadContext getContext() throws IllegalThreadStateException {
    ProcAARCH64ThreadContext context = new ProcAARCH64ThreadContext(debugger);
    long[] regs = debugger.getThreadIntegerRegisterSet(id);
    if (Assert.ASSERTS_ENABLED) {
      Assert.that(regs.length == AARCH64ThreadContext.NPRGREG, "size mismatch");
    }
    for (int i = 0; i < regs.length; i++) {
      context.setRegister(i, regs[i]);
    }
    return context;
  }

  public boolean canSetContext() throws DebuggerException {
    return false;
  }

  public void setContext(ThreadContext context)
    throws IllegalThreadStateException, DebuggerException {
    throw new DebuggerException("Unimplemented");
  }

  public String toString() {
    return "t@" + id;
  }

  public boolean equals(Object obj) {
    if ((obj == null) || !(obj instanceof ProcAARCH64Thread)) {
      return false;
    }

    return (((ProcAARCH64Thread) obj).id == id);
  }

  public int hashCode() {
    return id;
  }
}
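getContext() above relies on a simple contract: getThreadIntegerRegisterSet must return exactly NPRGREG (33) longs ordered r0..r28, fp, lr, sp, pc, so a straight index-for-index copy into the context is correct (RemoteAARCH64Thread below depends on the same layout). A small sketch with hypothetical values, with plain long arrays standing in for the debugger reply and the context's register store:

    public class RegisterSetCopyDemo {
        static final int NPRGREG = 33, SP = 31, PC = 32;

        public static void main(String[] args) {
            long[] regs = new long[NPRGREG];      // stand-in for the debugger's reply
            regs[SP] = 0x0000007fc0001000L;       // made-up stack pointer
            regs[PC] = 0x0000007f80000040L;       // made-up program counter

            long[] context = new long[NPRGREG];   // stand-in for AARCH64ThreadContext.data
            for (int i = 0; i < regs.length; i++) {
                context[i] = regs[i];             // same loop as getContext() above
            }
            System.out.printf("sp=0x%x pc=0x%x%n", context[SP], context[PC]);
        }
    }
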

@@ -0,0 +1,47 @@
/*
 * Copyright (c) 2004, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, Red Hat Inc.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

package sun.jvm.hotspot.debugger.proc.aarch64;

import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.debugger.aarch64.*;
import sun.jvm.hotspot.debugger.proc.*;

public class ProcAARCH64ThreadContext extends AARCH64ThreadContext {
  private ProcDebugger debugger;

  public ProcAARCH64ThreadContext(ProcDebugger debugger) {
    super();
    this.debugger = debugger;
  }

  public void setRegisterAsAddress(int index, Address value) {
    setRegister(index, debugger.getAddressValue(value));
  }

  public Address getRegisterAsAddress(int index) {
    return debugger.newAddress(getRegister(index));
  }
}

@@ -0,0 +1,45 @@
/*
 * Copyright (c) 2004, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, Red Hat Inc.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

package sun.jvm.hotspot.debugger.proc.aarch64;

import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.debugger.proc.*;

public class ProcAARCH64ThreadFactory implements ProcThreadFactory {
  private ProcDebugger debugger;

  public ProcAARCH64ThreadFactory(ProcDebugger debugger) {
    this.debugger = debugger;
  }

  public ThreadProxy createThreadWrapper(Address threadIdentifierAddr) {
    return new ProcAARCH64Thread(debugger, threadIdentifierAddr);
  }

  public ThreadProxy createThreadWrapper(long id) {
    return new ProcAARCH64Thread(debugger, id);
  }
}

@@ -0,0 +1,54 @@
/*
 * Copyright (c) 2004, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, Red Hat Inc.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

package sun.jvm.hotspot.debugger.remote.aarch64;

import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.debugger.aarch64.*;
import sun.jvm.hotspot.debugger.remote.*;
import sun.jvm.hotspot.utilities.*;

public class RemoteAARCH64Thread extends RemoteThread {
  public RemoteAARCH64Thread(RemoteDebuggerClient debugger, Address addr) {
    super(debugger, addr);
  }

  public RemoteAARCH64Thread(RemoteDebuggerClient debugger, long id) {
    super(debugger, id);
  }

  public ThreadContext getContext() throws IllegalThreadStateException {
    RemoteAARCH64ThreadContext context = new RemoteAARCH64ThreadContext(debugger);
    long[] regs = (addr != null)? debugger.getThreadIntegerRegisterSet(addr) :
                                  debugger.getThreadIntegerRegisterSet(id);
    if (Assert.ASSERTS_ENABLED) {
      Assert.that(regs.length == AARCH64ThreadContext.NPRGREG, "size of register set must match");
    }
    for (int i = 0; i < regs.length; i++) {
      context.setRegister(i, regs[i]);
    }
    return context;
  }
}

@@ -0,0 +1,47 @@
/*
 * Copyright (c) 2004, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, Red Hat Inc.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

package sun.jvm.hotspot.debugger.remote.aarch64;

import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.debugger.aarch64.*;
import sun.jvm.hotspot.debugger.remote.*;

public class RemoteAARCH64ThreadContext extends AARCH64ThreadContext {
  private RemoteDebuggerClient debugger;

  public RemoteAARCH64ThreadContext(RemoteDebuggerClient debugger) {
    super();
    this.debugger = debugger;
  }

  public void setRegisterAsAddress(int index, Address value) {
    setRegister(index, debugger.getAddressValue(value));
  }

  public Address getRegisterAsAddress(int index) {
    return debugger.newAddress(getRegister(index));
  }
}

@@ -0,0 +1,45 @@
/*
 * Copyright (c) 2004, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, Red Hat Inc.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

package sun.jvm.hotspot.debugger.remote.aarch64;

import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.debugger.remote.*;

public class RemoteAARCH64ThreadFactory implements RemoteThreadFactory {
  private RemoteDebuggerClient debugger;

  public RemoteAARCH64ThreadFactory(RemoteDebuggerClient debugger) {
    this.debugger = debugger;
  }

  public ThreadProxy createThreadWrapper(Address threadIdentifierAddr) {
    return new RemoteAARCH64Thread(debugger, threadIdentifierAddr);
  }

  public ThreadProxy createThreadWrapper(long id) {
    return new RemoteAARCH64Thread(debugger, id);
  }
}

@@ -49,7 +49,6 @@ import sun.jvm.hotspot.types.*;
public abstract class Generation extends VMObject {
  private static long reservedFieldOffset;
  private static long virtualSpaceFieldOffset;
  private static CIntegerField levelField;
  protected static final int K = 1024;
  // Fields for class StatRecord
  private static Field statRecordField;
@@ -75,7 +74,6 @@ public abstract class Generation extends VMObject {

    reservedFieldOffset = type.getField("_reserved").getOffset();
    virtualSpaceFieldOffset = type.getField("_virtual_space").getOffset();
    levelField = type.getCIntegerField("_level");
    // StatRecord
    statRecordField = type.getField("_stat_record");
    type = db.lookupType("Generation::StatRecord");
@@ -130,14 +128,6 @@ public abstract class Generation extends VMObject {
    }
  }

  public GenerationSpec spec() {
    return ((GenCollectedHeap) VM.getVM().getUniverse().heap()).spec(level());
  }

  public int level() {
    return (int) levelField.getValue(addr);
  }

  public int invocations() {
    return getStatRecord().getInvocations();
  }

@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -357,12 +357,6 @@ public abstract class Frame implements Cloneable {
  // FIXME: avoiding implementing this for now if possible
  // public void interpreter_frame_set_monitor_end(BasicObjectLock* value);
  // public void interpreter_frame_verify_monitor(BasicObjectLock* value) const;
  //
  // Tells whether the current interpreter_frame frame pointer
  // corresponds to the old compiled/deoptimized fp
  // The receiver used to be a top level frame
  // public boolean interpreter_frame_equals_unpacked_fp(intptr_t* fp);

  //--------------------------------------------------------------------------------
  // Method and constant pool cache:
  //

@@ -35,6 +35,7 @@ import sun.jvm.hotspot.runtime.win32_amd64.Win32AMD64JavaThreadPDAccess;
import sun.jvm.hotspot.runtime.win32_x86.Win32X86JavaThreadPDAccess;
import sun.jvm.hotspot.runtime.linux_x86.LinuxX86JavaThreadPDAccess;
import sun.jvm.hotspot.runtime.linux_amd64.LinuxAMD64JavaThreadPDAccess;
import sun.jvm.hotspot.runtime.linux_aarch64.LinuxAARCH64JavaThreadPDAccess;
import sun.jvm.hotspot.runtime.linux_ppc64.LinuxPPC64JavaThreadPDAccess;
import sun.jvm.hotspot.runtime.linux_sparc.LinuxSPARCJavaThreadPDAccess;
import sun.jvm.hotspot.runtime.bsd_x86.BsdX86JavaThreadPDAccess;
@@ -91,6 +92,8 @@ public class Threads {
        access = new LinuxSPARCJavaThreadPDAccess();
      } else if (cpu.equals("ppc64")) {
        access = new LinuxPPC64JavaThreadPDAccess();
      } else if (cpu.equals("aarch64")) {
        access = new LinuxAARCH64JavaThreadPDAccess();
      } else {
        try {
          access = (JavaThreadPDAccess)

@@ -0,0 +1,244 @@
/*
 * Copyright (c) 2003, 2006, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, Red Hat Inc.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

package sun.jvm.hotspot.runtime.aarch64;

import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.debugger.aarch64.*;
import sun.jvm.hotspot.code.*;
import sun.jvm.hotspot.interpreter.*;
import sun.jvm.hotspot.runtime.*;
import sun.jvm.hotspot.runtime.aarch64.*;

/** <P> Should be able to be used on all aarch64 platforms we support
    (Linux/aarch64) to implement JavaThread's "currentFrameGuess()"
    functionality. Input is an AARCH64ThreadContext; output is SP, FP,
    and PC for an AARCH64Frame. Instantiation of the AARCH64Frame is
    left to the caller, since we may need to subclass AARCH64Frame to
    support signal handler frames on Unix platforms. </P>

    <P> Algorithm is to walk up the stack within a given range (say,
    512K at most) looking for a plausible PC and SP for a Java frame,
    also considering those coming in from the context. If we find a PC
    that belongs to the VM (i.e., in generated code like the
    interpreter or CodeCache) then we try to find an associated FP.
    We repeat this until we either find a complete frame or run out of
    stack to look at. </P> */

public class AARCH64CurrentFrameGuess {
  private AARCH64ThreadContext context;
  private JavaThread thread;
  private Address spFound;
  private Address fpFound;
  private Address pcFound;

  private static final boolean DEBUG = System.getProperty("sun.jvm.hotspot.runtime.aarch64.AARCH64Frame.DEBUG")
    != null;

  public AARCH64CurrentFrameGuess(AARCH64ThreadContext context,
                                  JavaThread thread) {
    this.context = context;
    this.thread = thread;
  }

  /** Returns false if not able to find a frame within a reasonable range. */
  public boolean run(long regionInBytesToSearch) {
    Address sp = context.getRegisterAsAddress(AARCH64ThreadContext.SP);
    Address pc = context.getRegisterAsAddress(AARCH64ThreadContext.PC);
    Address fp = context.getRegisterAsAddress(AARCH64ThreadContext.FP);
    if (sp == null) {
      // Bail out if no last java frame either
      if (thread.getLastJavaSP() != null) {
        setValues(thread.getLastJavaSP(), thread.getLastJavaFP(), null);
        return true;
      }
      return false;
    }
    Address end = sp.addOffsetTo(regionInBytesToSearch);
    VM vm = VM.getVM();

    setValues(null, null, null); // Assume we're not going to find anything

    if (vm.isJavaPCDbg(pc)) {
      if (vm.isClientCompiler()) {
        // If the topmost frame is a Java frame, we are (pretty much)
        // guaranteed to have a viable FP. We should be more robust
        // than this (we have the potential for losing entire threads'
        // stack traces) but need to see how much work we really have
        // to do here. Searching the stack for an (SP, FP) pair is
        // hard since it's easy to misinterpret inter-frame stack
        // pointers as base-of-frame pointers; we also don't know the
        // sizes of C1 frames (not registered in the nmethod) so can't
        // derive them from SP.

        setValues(sp, fp, pc);
        return true;
      } else {
        if (vm.getInterpreter().contains(pc)) {
          if (DEBUG) {
            System.out.println("CurrentFrameGuess: choosing interpreter frame: sp = " +
                               sp + ", fp = " + fp + ", pc = " + pc);
          }
          setValues(sp, fp, pc);
          return true;
        }

        // For the server compiler, FP is not guaranteed to be valid
        // for compiled code. In addition, an earlier attempt at a
        // non-searching algorithm (see below) failed because the
        // stack pointer from the thread context was pointing
        // (considerably) beyond the ostensible end of the stack, into
        // garbage; walking from the topmost frame back caused a crash.
        //
        // This algorithm takes the current PC as a given and tries to
        // find the correct corresponding SP by walking up the stack
        // and repeatedly performing stackwalks (very inefficient).
        //
        // FIXME: there is something wrong with stackwalking across
        // adapter frames...this is likely to be the root cause of the
        // failure with the simpler algorithm below.

        for (long offset = 0;
             offset < regionInBytesToSearch;
             offset += vm.getAddressSize()) {
          try {
            Address curSP = sp.addOffsetTo(offset);
            Frame frame = new AARCH64Frame(curSP, null, pc);
            RegisterMap map = thread.newRegisterMap(false);
            while (frame != null) {
              if (frame.isEntryFrame() && frame.entryFrameIsFirst()) {
                // We were able to traverse all the way to the
                // bottommost Java frame.
                // This sp looks good. Keep it.
                if (DEBUG) {
                  System.out.println("CurrentFrameGuess: Choosing sp = " + curSP + ", pc = " + pc);
                }
                setValues(curSP, null, pc);
                return true;
              }
              frame = frame.sender(map);
            }
          } catch (Exception e) {
            if (DEBUG) {
              System.out.println("CurrentFrameGuess: Exception " + e + " at offset " + offset);
            }
            // Bad SP. Try another.
          }
        }

        // Were not able to find a plausible SP to go with this PC.
        // Bail out.
        return false;

        /*
        // Original algorithm which does not work because SP was
        // pointing beyond where it should have:

        // For the server compiler, FP is not guaranteed to be valid
        // for compiled code. We see whether the PC is in the
        // interpreter and take care of that, otherwise we run code
        // (unfortunately) duplicated from AARCH64Frame.senderForCompiledFrame.

        CodeCache cc = vm.getCodeCache();
        if (cc.contains(pc)) {
          CodeBlob cb = cc.findBlob(pc);

          // See if we can derive a frame pointer from SP and PC
          // NOTE: This is the code duplicated from AARCH64Frame
          Address saved_fp = null;
          int llink_offset = cb.getLinkOffset();
          if (llink_offset >= 0) {
            // Restore base-pointer, since next frame might be an interpreter frame.
            Address fp_addr = sp.addOffsetTo(VM.getVM().getAddressSize() * llink_offset);
            saved_fp = fp_addr.getAddressAt(0);
          }

          setValues(sp, saved_fp, pc);
          return true;
        }
        */
      }
    } else {
      // If the current program counter was not known to us as a Java
      // PC, we currently assume that we are in the run-time system
      // and attempt to look to thread-local storage for saved SP and
      // FP. Note that if these are null (because we were, in fact,
      // in Java code, i.e., vtable stubs or similar, and the SA
      // didn't have enough insight into the target VM to understand
      // that) then we are going to lose the entire stack trace for
      // the thread, which is sub-optimal. FIXME.

      if (DEBUG) {
        System.out.println("CurrentFrameGuess: choosing last Java frame: sp = " +
                           thread.getLastJavaSP() + ", fp = " + thread.getLastJavaFP());
      }
      if (thread.getLastJavaSP() == null) {
        return false; // No known Java frames on stack
      }

      // The runtime has a nasty habit of not saving fp in the frame
      // anchor, leaving us to grovel about in the stack to find a
      // plausible address. Fortunately, this only happens in
      // compiled code; there we always have a valid PC, and we always
      // push LR and FP onto the stack as a pair, with FP at the lower
      // address.
      pc = thread.getLastJavaPC();
      fp = thread.getLastJavaFP();
      sp = thread.getLastJavaSP();

      if (fp == null) {
        CodeCache cc = vm.getCodeCache();
        if (cc.contains(pc)) {
          CodeBlob cb = cc.findBlob(pc);
          if (DEBUG) {
            System.out.println("FP is null. Found blob frame size " + cb.getFrameSize());
          }
          // See if we can derive a frame pointer from SP and PC
          long link_offset = cb.getFrameSize() - 2 * VM.getVM().getAddressSize();
          if (link_offset >= 0) {
            fp = sp.addOffsetTo(link_offset);
          }
        }
      }

      setValues(sp, fp, null);

      return true;
    }
  }

  public Address getSP() { return spFound; }
  public Address getFP() { return fpFound; }
  /** May be null if getting values from thread-local storage; take
      care to call the correct AARCH64Frame constructor to recover this if
      necessary */
  public Address getPC() { return pcFound; }

  private void setValues(Address sp, Address fp, Address pc) {
    spFound = sp;
    fpFound = fp;
    pcFound = pc;
  }
}
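The search loop in run() above is the core of the heuristic: slide a candidate SP up the stack one word at a time, attempt a full stack walk from (candidate SP, PC), and keep the first candidate that walks all the way down to the bottommost entry frame. A self-contained sketch of that strategy, with FrameWalker as a hypothetical stand-in for the AARCH64Frame/RegisterMap machinery:

    public class SpSearchDemo {
        interface FrameWalker {
            boolean walksToEntryFrame(long sp, long pc); // true if the walk completes
        }

        static final int ADDRESS_SIZE = 8;

        static long findPlausibleSP(FrameWalker walker, long sp, long pc, long regionInBytesToSearch) {
            for (long offset = 0; offset < regionInBytesToSearch; offset += ADDRESS_SIZE) {
                long curSP = sp + offset;
                try {
                    if (walker.walksToEntryFrame(curSP, pc)) {
                        return curSP;             // this SP looks good; keep it
                    }
                } catch (RuntimeException e) {
                    // Bad SP. Try another, exactly as run() does.
                }
            }
            return -1;                            // no plausible SP within the region
        }

        public static void main(String[] args) {
            // Fake target: only SP == 0x7010 yields a complete walk.
            FrameWalker walker = (sp, pc) -> sp == 0x7010L;
            long found = findPlausibleSP(walker, 0x7000L, 0x400700L, 512 * 1024);
            System.out.printf("found sp=0x%x%n", found);
        }
    }
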
|
@ -0,0 +1,555 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2015, Red Hat Inc.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
package sun.jvm.hotspot.runtime.aarch64;
|
||||
|
||||
import java.util.*;
|
||||
import sun.jvm.hotspot.code.*;
|
||||
import sun.jvm.hotspot.compiler.*;
|
||||
import sun.jvm.hotspot.debugger.*;
|
||||
import sun.jvm.hotspot.oops.*;
|
||||
import sun.jvm.hotspot.runtime.*;
|
||||
import sun.jvm.hotspot.types.*;
|
||||
import sun.jvm.hotspot.utilities.*;
|
||||
|
||||
/** Specialization of and implementation of abstract methods of the
|
||||
Frame class for the aarch64 family of CPUs. */
|
||||
|
||||
public class AARCH64Frame extends Frame {
|
||||
private static final boolean DEBUG;
|
||||
static {
|
||||
DEBUG = System.getProperty("sun.jvm.hotspot.runtime.aarch64.AARCH64Frame.DEBUG") != null;
|
||||
}
|
||||
|
||||
// All frames
|
||||
private static final int LINK_OFFSET = 0;
|
||||
private static final int RETURN_ADDR_OFFSET = 1;
|
||||
private static final int SENDER_SP_OFFSET = 2;
|
||||
|
||||
// Interpreter frames
|
||||
private static final int INTERPRETER_FRAME_MIRROR_OFFSET = 2; // for native calls only
|
||||
private static final int INTERPRETER_FRAME_SENDER_SP_OFFSET = -1;
|
||||
private static final int INTERPRETER_FRAME_LAST_SP_OFFSET = INTERPRETER_FRAME_SENDER_SP_OFFSET - 1;
|
||||
private static final int INTERPRETER_FRAME_METHOD_OFFSET = INTERPRETER_FRAME_LAST_SP_OFFSET - 1;
|
||||
private static int INTERPRETER_FRAME_MDX_OFFSET; // Non-core builds only
|
||||
private static int INTERPRETER_FRAME_CACHE_OFFSET;
|
||||
private static int INTERPRETER_FRAME_LOCALS_OFFSET;
|
||||
private static int INTERPRETER_FRAME_BCX_OFFSET;
|
||||
private static int INTERPRETER_FRAME_INITIAL_SP_OFFSET;
|
||||
private static int INTERPRETER_FRAME_MONITOR_BLOCK_TOP_OFFSET;
|
||||
private static int INTERPRETER_FRAME_MONITOR_BLOCK_BOTTOM_OFFSET;
|
||||
|
||||
// Entry frames
|
||||
private static int ENTRY_FRAME_CALL_WRAPPER_OFFSET = -8;
|
||||
|
||||
// Native frames
|
||||
private static final int NATIVE_FRAME_INITIAL_PARAM_OFFSET = 2;
|
||||
|
||||
private static VMReg fp = new VMReg(29);
|
||||
|
||||
static {
|
||||
VM.registerVMInitializedObserver(new Observer() {
|
||||
public void update(Observable o, Object data) {
|
||||
initialize(VM.getVM().getTypeDataBase());
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
private static synchronized void initialize(TypeDataBase db) {
|
||||
INTERPRETER_FRAME_MDX_OFFSET = INTERPRETER_FRAME_METHOD_OFFSET - 1;
|
||||
INTERPRETER_FRAME_CACHE_OFFSET = INTERPRETER_FRAME_MDX_OFFSET - 1;
|
||||
INTERPRETER_FRAME_LOCALS_OFFSET = INTERPRETER_FRAME_CACHE_OFFSET - 1;
|
||||
INTERPRETER_FRAME_BCX_OFFSET = INTERPRETER_FRAME_LOCALS_OFFSET - 1;
|
||||
INTERPRETER_FRAME_INITIAL_SP_OFFSET = INTERPRETER_FRAME_BCX_OFFSET - 1;
|
||||
INTERPRETER_FRAME_MONITOR_BLOCK_TOP_OFFSET = INTERPRETER_FRAME_INITIAL_SP_OFFSET;
|
||||
INTERPRETER_FRAME_MONITOR_BLOCK_BOTTOM_OFFSET = INTERPRETER_FRAME_INITIAL_SP_OFFSET;
|
||||
}
|
||||
|
||||
|
||||
// an additional field beyond sp and pc:
|
||||
Address raw_fp; // frame pointer
|
||||
private Address raw_unextendedSP;
|
||||
|
||||
private AARCH64Frame() {
|
||||
}
|
||||
|
||||
private void adjustForDeopt() {
|
||||
if ( pc != null) {
|
||||
// Look for a deopt pc and if it is deopted convert to original pc
|
||||
CodeBlob cb = VM.getVM().getCodeCache().findBlob(pc);
|
||||
if (cb != null && cb.isJavaMethod()) {
|
||||
NMethod nm = (NMethod) cb;
|
||||
if (pc.equals(nm.deoptHandlerBegin())) {
|
||||
if (Assert.ASSERTS_ENABLED) {
|
||||
Assert.that(this.getUnextendedSP() != null, "null SP in Java frame");
|
||||
}
|
||||
// adjust pc if frame is deoptimized.
|
||||
pc = this.getUnextendedSP().getAddressAt(nm.origPCOffset());
|
||||
deoptimized = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public AARCH64Frame(Address raw_sp, Address raw_fp, Address pc) {
|
||||
this.raw_sp = raw_sp;
|
||||
this.raw_unextendedSP = raw_sp;
|
||||
this.raw_fp = raw_fp;
|
||||
this.pc = pc;
|
||||
adjustUnextendedSP();
|
||||
|
||||
// Frame must be fully constructed before this call
|
||||
adjustForDeopt();
|
||||
|
||||
if (DEBUG) {
|
||||
System.out.println("AARCH64Frame(sp, fp, pc): " + this);
|
||||
dumpStack();
|
||||
}
|
||||
}
|
||||
|
||||
public AARCH64Frame(Address raw_sp, Address raw_fp) {
|
||||
this.raw_sp = raw_sp;
|
||||
this.raw_unextendedSP = raw_sp;
|
||||
this.raw_fp = raw_fp;
|
||||
this.pc = raw_sp.getAddressAt(-1 * VM.getVM().getAddressSize());
|
||||
adjustUnextendedSP();
|
||||
|
||||
// Frame must be fully constructed before this call
|
||||
adjustForDeopt();
|
||||
|
||||
if (DEBUG) {
|
||||
System.out.println("AARCH64Frame(sp, fp): " + this);
|
||||
dumpStack();
|
||||
}
|
||||
}
|
||||
|
||||
public AARCH64Frame(Address raw_sp, Address raw_unextendedSp, Address raw_fp, Address pc) {
|
||||
this.raw_sp = raw_sp;
|
||||
this.raw_unextendedSP = raw_unextendedSp;
|
||||
this.raw_fp = raw_fp;
|
||||
this.pc = pc;
|
||||
adjustUnextendedSP();
|
||||
|
||||
// Frame must be fully constructed before this call
|
||||
adjustForDeopt();
|
||||
|
||||
if (DEBUG) {
|
||||
System.out.println("AARCH64Frame(sp, unextendedSP, fp, pc): " + this);
|
||||
dumpStack();
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
public Object clone() {
|
||||
AARCH64Frame frame = new AARCH64Frame();
|
||||
frame.raw_sp = raw_sp;
|
||||
frame.raw_unextendedSP = raw_unextendedSP;
|
||||
frame.raw_fp = raw_fp;
|
||||
frame.pc = pc;
|
||||
frame.deoptimized = deoptimized;
|
||||
return frame;
|
||||
}
|
||||
|
||||
public boolean equals(Object arg) {
|
||||
if (arg == null) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!(arg instanceof AARCH64Frame)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
AARCH64Frame other = (AARCH64Frame) arg;
|
||||
|
||||
return (AddressOps.equal(getSP(), other.getSP()) &&
|
||||
AddressOps.equal(getUnextendedSP(), other.getUnextendedSP()) &&
|
||||
AddressOps.equal(getFP(), other.getFP()) &&
|
||||
AddressOps.equal(getPC(), other.getPC()));
|
||||
}
|
||||
|
||||
public int hashCode() {
|
||||
if (raw_sp == null) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
return raw_sp.hashCode();
|
||||
}
|
||||
|
||||
public String toString() {
|
||||
return "sp: " + (getSP() == null? "null" : getSP().toString()) +
|
||||
", unextendedSP: " + (getUnextendedSP() == null? "null" : getUnextendedSP().toString()) +
|
||||
", fp: " + (getFP() == null? "null" : getFP().toString()) +
|
||||
", pc: " + (pc == null? "null" : pc.toString());
|
||||
}
|
||||
|
||||
// accessors for the instance variables
|
||||
public Address getFP() { return raw_fp; }
|
||||
public Address getSP() { return raw_sp; }
|
||||
public Address getID() { return raw_sp; }
|
||||
|
||||
// FIXME: not implemented yet
|
||||
public boolean isSignalHandlerFrameDbg() { return false; }
|
||||
public int getSignalNumberDbg() { return 0; }
|
||||
public String getSignalNameDbg() { return null; }
|
||||
|
||||
public boolean isInterpretedFrameValid() {
|
||||
if (Assert.ASSERTS_ENABLED) {
|
||||
Assert.that(isInterpretedFrame(), "Not an interpreted frame");
|
||||
}
|
||||
|
||||
// These are reasonable sanity checks
|
||||
if (getFP() == null || getFP().andWithMask(0x3) != null) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (getSP() == null || getSP().andWithMask(0x3) != null) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (getFP().addOffsetTo(INTERPRETER_FRAME_INITIAL_SP_OFFSET * VM.getVM().getAddressSize()).lessThan(getSP())) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// These are hacks to keep us out of trouble.
|
||||
// The problem with these is that they mask other problems
|
||||
if (getFP().lessThanOrEqual(getSP())) {
|
||||
// this attempts to deal with unsigned comparison above
|
||||
return false;
|
||||
}
|
||||
|
||||
if (getFP().minus(getSP()) > 4096 * VM.getVM().getAddressSize()) {
|
||||
// stack frames shouldn't be large.
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
// FIXME: not applicable in current system
|
||||
// void patch_pc(Thread* thread, address pc);
|
||||
|
||||
  public Frame sender(RegisterMap regMap, CodeBlob cb) {
    AARCH64RegisterMap map = (AARCH64RegisterMap) regMap;

    if (Assert.ASSERTS_ENABLED) {
      Assert.that(map != null, "map must be set");
    }

    // Default is we don't have to follow them. The sender_for_xxx methods
    // will update this accordingly.
    map.setIncludeArgumentOops(false);

    if (isEntryFrame())       return senderForEntryFrame(map);
    if (isInterpretedFrame()) return senderForInterpreterFrame(map);

    if (cb == null) {
      cb = VM.getVM().getCodeCache().findBlob(getPC());
    } else {
      if (Assert.ASSERTS_ENABLED) {
        Assert.that(cb.equals(VM.getVM().getCodeCache().findBlob(getPC())), "Must be the same");
      }
    }

    if (cb != null) {
      return senderForCompiledFrame(map, cb);
    }

    // Must be native-compiled frame, i.e. the marshaling code for native
    // methods that exists in the core system.
    return new AARCH64Frame(getSenderSP(), getLink(), getSenderPC());
  }

  private Frame senderForEntryFrame(AARCH64RegisterMap map) {
    if (DEBUG) {
      System.out.println("senderForEntryFrame");
    }
    if (Assert.ASSERTS_ENABLED) {
      Assert.that(map != null, "map must be set");
    }
    // Java frame called from C; skip all C frames and return top C
    // frame of that chunk as the sender
    AARCH64JavaCallWrapper jcw = (AARCH64JavaCallWrapper) getEntryFrameCallWrapper();
    if (Assert.ASSERTS_ENABLED) {
      Assert.that(!entryFrameIsFirst(), "next Java fp must be non zero");
      Assert.that(jcw.getLastJavaSP().greaterThan(getSP()), "must be above this frame on stack");
    }
    AARCH64Frame fr;
    if (jcw.getLastJavaPC() != null) {
      fr = new AARCH64Frame(jcw.getLastJavaSP(), jcw.getLastJavaFP(), jcw.getLastJavaPC());
    } else {
      fr = new AARCH64Frame(jcw.getLastJavaSP(), jcw.getLastJavaFP());
    }
    map.clear();
    if (Assert.ASSERTS_ENABLED) {
      Assert.that(map.getIncludeArgumentOops(), "should be set by clear");
    }
    return fr;
  }

  //------------------------------------------------------------------------------
  // frame::adjust_unextended_sp
  private void adjustUnextendedSP() {
    // If we are returning to a compiled MethodHandle call site, the
    // saved_fp will in fact be a saved value of the unextended SP. The
    // simplest way to tell whether we are returning to such a call site
    // is as follows:

    CodeBlob cb = cb();
    NMethod senderNm = (cb == null) ? null : cb.asNMethodOrNull();
    if (senderNm != null) {
      // If the sender PC is a deoptimization point, get the original
      // PC. For MethodHandle call site the unextended_sp is stored in
      // saved_fp.
      if (senderNm.isDeoptMhEntry(getPC())) {
        // DEBUG_ONLY(verifyDeoptMhOriginalPc(senderNm, getFP()));
        raw_unextendedSP = getFP();
      }
      else if (senderNm.isDeoptEntry(getPC())) {
        // DEBUG_ONLY(verifyDeoptOriginalPc(senderNm, raw_unextendedSp));
      }
      else if (senderNm.isMethodHandleReturn(getPC())) {
        raw_unextendedSP = getFP();
      }
    }
  }

  private Frame senderForInterpreterFrame(AARCH64RegisterMap map) {
    if (DEBUG) {
      System.out.println("senderForInterpreterFrame");
    }
    Address unextendedSP = addressOfStackSlot(INTERPRETER_FRAME_SENDER_SP_OFFSET).getAddressAt(0);
    Address sp = addressOfStackSlot(SENDER_SP_OFFSET);
    // We do not need to update the callee-save register mapping because above
    // us is either another interpreter frame or a converter-frame, but never
    // directly a compiled frame.
    // 11/24/04 SFG. With the removal of adapter frames this is no longer true.
    // However, C2 no longer uses callee-save registers for Java calls, so
    // there are no callee registers to find.

    if (map.getUpdateMap())
      updateMapWithSavedLink(map, addressOfStackSlot(LINK_OFFSET));

    return new AARCH64Frame(sp, unextendedSP, getLink(), getSenderPC());
  }

  private void updateMapWithSavedLink(RegisterMap map, Address savedFPAddr) {
    map.setLocation(fp, savedFPAddr);
  }

  private Frame senderForCompiledFrame(AARCH64RegisterMap map, CodeBlob cb) {
    if (DEBUG) {
      System.out.println("senderForCompiledFrame");
    }

    //
    // NOTE: some of this code is (unfortunately) duplicated in AARCH64CurrentFrameGuess
    //

    if (Assert.ASSERTS_ENABLED) {
      Assert.that(map != null, "map must be set");
    }

    // frame owned by optimizing compiler
    if (Assert.ASSERTS_ENABLED) {
      Assert.that(cb.getFrameSize() >= 0, "must have non-zero frame size");
    }
    Address senderSP = getUnextendedSP().addOffsetTo(cb.getFrameSize());

    // The return address is always the word on the stack just below the sender's SP
    Address senderPC = senderSP.getAddressAt(-1 * VM.getVM().getAddressSize());

    // This is the saved value of FP which may or may not really be an FP.
    // It is only an FP if the sender is an interpreter frame.
    Address savedFPAddr = senderSP.addOffsetTo(-SENDER_SP_OFFSET * VM.getVM().getAddressSize());

    if (map.getUpdateMap()) {
      // Tell GC to use argument oopmaps for some runtime stubs that need it.
      // For C1, the runtime stub might not have oop maps, so set this flag
      // outside of update_register_map.
      map.setIncludeArgumentOops(cb.callerMustGCArguments());

      if (cb.getOopMaps() != null) {
        ImmutableOopMapSet.updateRegisterMap(this, cb, map, true);
      }

      // Since the prolog does the save and restore of FP there is no oopmap
      // for it, so we must fill in its location as if there were an oopmap
      // entry, since if our caller was compiled code there could be live JVM
      // state in it.
      updateMapWithSavedLink(map, savedFPAddr);
    }

    return new AARCH64Frame(senderSP, savedFPAddr.getAddressAt(0), senderPC);
  }

  protected boolean hasSenderPD() {
    return true;
  }

  public long frameSize() {
    return (getSenderSP().minus(getSP()) / VM.getVM().getAddressSize());
  }

  public Address getLink() {
    try {
      if (DEBUG) {
        System.out.println("Reading link at " + addressOfStackSlot(LINK_OFFSET)
                           + " = " + addressOfStackSlot(LINK_OFFSET).getAddressAt(0));
      }
      return addressOfStackSlot(LINK_OFFSET).getAddressAt(0);
    } catch (Exception e) {
      if (DEBUG)
        System.out.println("Returning null");
      return null;
    }
  }

  // FIXME: not implementable yet
  // inline void frame::set_link(intptr_t* addr) { *(intptr_t **)addr_at(link_offset) = addr; }

  public Address getUnextendedSP() { return raw_unextendedSP; }

  // Return address:
  public Address getSenderPCAddr() { return addressOfStackSlot(RETURN_ADDR_OFFSET); }
  public Address getSenderPC()     { return getSenderPCAddr().getAddressAt(0); }

  // return address of param, zero origin index.
  public Address getNativeParamAddr(int idx) {
    return addressOfStackSlot(NATIVE_FRAME_INITIAL_PARAM_OFFSET + idx);
  }

  public Address getSenderSP() { return addressOfStackSlot(SENDER_SP_OFFSET); }

  public Address addressOfInterpreterFrameLocals() {
    return addressOfStackSlot(INTERPRETER_FRAME_LOCALS_OFFSET);
  }

  private Address addressOfInterpreterFrameBCX() {
    return addressOfStackSlot(INTERPRETER_FRAME_BCX_OFFSET);
  }

  public int getInterpreterFrameBCI() {
    // FIXME: this is not atomic with respect to GC and is unsuitable
    // for use in a non-debugging, or reflective, system. Need to
    // figure out how to express this.
    Address bcp = addressOfInterpreterFrameBCX().getAddressAt(0);
    Address methodHandle = addressOfInterpreterFrameMethod().getAddressAt(0);
    Method method = (Method)Metadata.instantiateWrapperFor(methodHandle);
    return bcpToBci(bcp, method);
  }

  public Address addressOfInterpreterFrameMDX() {
    return addressOfStackSlot(INTERPRETER_FRAME_MDX_OFFSET);
  }

  // FIXME
  //inline int frame::interpreter_frame_monitor_size() {
  //  return BasicObjectLock::size();
  //}

  // expression stack
  // (the max_stack arguments are used by the GC; see class FrameClosure)

  public Address addressOfInterpreterFrameExpressionStack() {
    Address monitorEnd = interpreterFrameMonitorEnd().address();
    return monitorEnd.addOffsetTo(-1 * VM.getVM().getAddressSize());
  }

  public int getInterpreterFrameExpressionStackDirection() { return -1; }

  // top of expression stack
  public Address addressOfInterpreterFrameTOS() {
    return getSP();
  }

  /** Expression stack from top down */
  public Address addressOfInterpreterFrameTOSAt(int slot) {
    return addressOfInterpreterFrameTOS().addOffsetTo(slot * VM.getVM().getAddressSize());
  }

  public Address getInterpreterFrameSenderSP() {
    if (Assert.ASSERTS_ENABLED) {
      Assert.that(isInterpretedFrame(), "interpreted frame expected");
    }
    return addressOfStackSlot(INTERPRETER_FRAME_SENDER_SP_OFFSET).getAddressAt(0);
  }

  // Monitors
  public BasicObjectLock interpreterFrameMonitorBegin() {
    return new BasicObjectLock(addressOfStackSlot(INTERPRETER_FRAME_MONITOR_BLOCK_BOTTOM_OFFSET));
  }

  public BasicObjectLock interpreterFrameMonitorEnd() {
    Address result = addressOfStackSlot(INTERPRETER_FRAME_MONITOR_BLOCK_TOP_OFFSET).getAddressAt(0);
    if (Assert.ASSERTS_ENABLED) {
      // make sure the pointer points inside the frame
      Assert.that(AddressOps.gt(getFP(), result), "result must be below the frame pointer");
      Assert.that(AddressOps.lte(getSP(), result), "result must be at or above the stack pointer");
    }
    return new BasicObjectLock(result);
  }

  public int interpreterFrameMonitorSize() {
    return BasicObjectLock.size();
  }

  // Method
  public Address addressOfInterpreterFrameMethod() {
    return addressOfStackSlot(INTERPRETER_FRAME_METHOD_OFFSET);
  }

  // Constant pool cache
  public Address addressOfInterpreterFrameCPCache() {
    return addressOfStackSlot(INTERPRETER_FRAME_CACHE_OFFSET);
  }

  // Entry frames
  public JavaCallWrapper getEntryFrameCallWrapper() {
    return new AARCH64JavaCallWrapper(addressOfStackSlot(ENTRY_FRAME_CALL_WRAPPER_OFFSET).getAddressAt(0));
  }

  protected Address addressOfSavedOopResult() {
    // offset is 2 for compiler2 and 3 for compiler1
    return getSP().addOffsetTo((VM.getVM().isClientCompiler() ? 2 : 3) *
                               VM.getVM().getAddressSize());
  }

  protected Address addressOfSavedReceiver() {
    return getSP().addOffsetTo(-4 * VM.getVM().getAddressSize());
  }

  private void dumpStack() {
    for (Address addr = getSP().addOffsetTo(-4 * VM.getVM().getAddressSize());
         AddressOps.lt(addr, getSP());
         addr = addr.addOffsetTo(VM.getVM().getAddressSize())) {
      System.out.println(addr + ": " + addr.getAddressAt(0));
    }
    System.out.println("-----------------------");
    for (Address addr = getSP();
         AddressOps.lte(addr, getSP().addOffsetTo(20 * VM.getVM().getAddressSize()));
         addr = addr.addOffsetTo(VM.getVM().getAddressSize())) {
      System.out.println(addr + ": " + addr.getAddressAt(0));
    }
  }
}
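For orientation: SA clients normally reach this class indirectly through the generic frame-walking API, with sender() above doing the per-frame work on aarch64. A minimal sketch of such a walk (assumes an agent already attached to a target VM; the class and method names in the sketch itself are illustrative, not part of this change):

    import sun.jvm.hotspot.runtime.*;

    public class StackWalkSketch {
        // Assumes VM.getVM() was initialized by an earlier HotSpotAgent.attach(pid).
        public static void dumpJavaStacks() {
            for (JavaThread t = VM.getVM().getThreads().first(); t != null; t = t.next()) {
                // getLastJavaVFrameDbg() bottoms out in AARCH64Frame.sender() on aarch64.
                for (JavaVFrame vf = t.getLastJavaVFrameDbg(); vf != null; vf = vf.javaSender()) {
                    System.out.println(vf.getMethod().getName().asString());
                }
            }
        }
    }
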
@ -0,0 +1,57 @@
/*
 * Copyright (c) 2003, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, Red Hat Inc.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

package sun.jvm.hotspot.runtime.aarch64;

import java.util.*;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.types.*;
import sun.jvm.hotspot.runtime.*;

public class AARCH64JavaCallWrapper extends JavaCallWrapper {
  private static AddressField lastJavaFPField;

  static {
    VM.registerVMInitializedObserver(new Observer() {
      public void update(Observable o, Object data) {
        initialize(VM.getVM().getTypeDataBase());
      }
    });
  }

  private static synchronized void initialize(TypeDataBase db) {
    Type type = db.lookupType("JavaFrameAnchor");

    lastJavaFPField = type.getAddressField("_last_Java_fp");
  }

  public AARCH64JavaCallWrapper(Address addr) {
    super(addr);
  }

  public Address getLastJavaFP() {
    return lastJavaFPField.getValue(addr.addOffsetTo(anchorField.getOffset()));
  }
}
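The static block above is the stock SA initialization idiom: field offsets cannot be resolved until the VM's type database has been read, so the lookup is parked behind a VM-initialized observer. The same idiom, reduced to a template (the type and field names in this sketch are placeholders, not part of this change):

    import java.util.*;
    import sun.jvm.hotspot.runtime.VM;
    import sun.jvm.hotspot.types.*;

    class LazyVMFieldLookup {
        private static AddressField someField; // hypothetical field

        static {
            // Defer lookupType() until the VM structs are available.
            VM.registerVMInitializedObserver(new Observer() {
                public void update(Observable o, Object data) {
                    Type t = VM.getVM().getTypeDataBase().lookupType("SomeVMType"); // hypothetical
                    someField = t.getAddressField("_some_field");                   // hypothetical
                }
            });
        }
    }
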
@ -0,0 +1,52 @@
/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, Red Hat Inc.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

package sun.jvm.hotspot.runtime.aarch64;

import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.runtime.*;

public class AARCH64RegisterMap extends RegisterMap {

  /** This is the only public constructor */
  public AARCH64RegisterMap(JavaThread thread, boolean updateMap) {
    super(thread, updateMap);
  }

  protected AARCH64RegisterMap(RegisterMap map) {
    super(map);
  }

  public Object clone() {
    AARCH64RegisterMap retval = new AARCH64RegisterMap(this);
    return retval;
  }

  // no PD state to clear or copy:
  protected void clearPD() {}
  protected void initializePD() {}
  protected void initializeFromPD(RegisterMap map) {}
  protected Address getLocationPD(VMReg reg) { return null; }
}
@ -0,0 +1,132 @@
/*
 * Copyright (c) 2003, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, Red Hat Inc.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

package sun.jvm.hotspot.runtime.linux_aarch64;

import java.io.*;
import java.util.*;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.debugger.aarch64.*;
import sun.jvm.hotspot.runtime.*;
import sun.jvm.hotspot.runtime.aarch64.*;
import sun.jvm.hotspot.types.*;
import sun.jvm.hotspot.utilities.*;

public class LinuxAARCH64JavaThreadPDAccess implements JavaThreadPDAccess {
  private static AddressField lastJavaFPField;
  private static AddressField osThreadField;

  // Field from OSThread
  private static CIntegerField osThreadThreadIDField;

  // This is currently unneeded but is being kept in case we change
  // the currentFrameGuess algorithm
  private static final long GUESS_SCAN_RANGE = 128 * 1024;

  static {
    VM.registerVMInitializedObserver(new Observer() {
      public void update(Observable o, Object data) {
        initialize(VM.getVM().getTypeDataBase());
      }
    });
  }

  private static synchronized void initialize(TypeDataBase db) {
    Type type = db.lookupType("JavaThread");
    osThreadField = type.getAddressField("_osthread");

    Type anchorType = db.lookupType("JavaFrameAnchor");
    lastJavaFPField = anchorType.getAddressField("_last_Java_fp");

    Type osThreadType = db.lookupType("OSThread");
    osThreadThreadIDField = osThreadType.getCIntegerField("_thread_id");
  }

  public Address getLastJavaFP(Address addr) {
    return lastJavaFPField.getValue(addr.addOffsetTo(sun.jvm.hotspot.runtime.JavaThread.getAnchorField().getOffset()));
  }

  public Address getLastJavaPC(Address addr) {
    return null;
  }

  public Address getBaseOfStackPointer(Address addr) {
    return null;
  }

  public Frame getLastFramePD(JavaThread thread, Address addr) {
    Address fp = thread.getLastJavaFP();
    if (fp == null) {
      return null; // no information
    }
    return new AARCH64Frame(thread.getLastJavaSP(), fp);
  }

  public RegisterMap newRegisterMap(JavaThread thread, boolean updateMap) {
    return new AARCH64RegisterMap(thread, updateMap);
  }

  public Frame getCurrentFrameGuess(JavaThread thread, Address addr) {
    ThreadProxy t = getThreadProxy(addr);
    AARCH64ThreadContext context = (AARCH64ThreadContext) t.getContext();
    AARCH64CurrentFrameGuess guesser = new AARCH64CurrentFrameGuess(context, thread);
    if (!guesser.run(GUESS_SCAN_RANGE)) {
      return null;
    }
    if (guesser.getPC() == null) {
      return new AARCH64Frame(guesser.getSP(), guesser.getFP());
    } else {
      return new AARCH64Frame(guesser.getSP(), guesser.getFP(), guesser.getPC());
    }
  }

  public void printThreadIDOn(Address addr, PrintStream tty) {
    tty.print(getThreadProxy(addr));
  }

  public void printInfoOn(Address threadAddr, PrintStream tty) {
    tty.print("Thread id: ");
    printThreadIDOn(threadAddr, tty);
    // tty.println("\nPostJavaState: " + getPostJavaState(threadAddr));
  }

  public Address getLastSP(Address addr) {
    ThreadProxy t = getThreadProxy(addr);
    AARCH64ThreadContext context = (AARCH64ThreadContext) t.getContext();
    return context.getRegisterAsAddress(AARCH64ThreadContext.SP);
  }

  public ThreadProxy getThreadProxy(Address addr) {
    // Addr is the address of the JavaThread.
    // Fetch the OSThread (for now and for simplicity, not making a
    // separate "OSThread" class in this package)
    Address osThreadAddr = osThreadField.getValue(addr);
    // Get the address of the _thread_id from the OSThread
    Address threadIdAddr = osThreadAddr.addOffsetTo(osThreadThreadIDField.getOffset());

    JVMDebugger debugger = VM.getVM().getDebugger();
    return debugger.getThreadForIdentifierAddress(threadIdAddr);
  }
}
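How this class is found at runtime: the generic sun.jvm.hotspot.runtime.Threads code derives the platform-access class name from the OS and CPU strings and loads it reflectively, so "linux" plus "aarch64" resolves to the class above. A rough sketch of that resolution (simplified; the real code takes os/cpu from the attached debugger rather than constants, and handles several OS prefixes):

    // Simplified name construction; throws if the class is absent.
    static JavaThreadPDAccess lookupPDAccess() throws Exception {
        String os = "linux", cpu = "aarch64";
        String name = "sun.jvm.hotspot.runtime." + os + "_" + cpu +
                      ".Linux" + cpu.toUpperCase() + "JavaThreadPDAccess";
        return (JavaThreadPDAccess) Class.forName(name).newInstance();
    }
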
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -25,7 +25,10 @@
package sun.jvm.hotspot.utilities;

public interface AltPlatformInfo {
  // Additional cpu types can be tested via this interface

  // Additional cpu types can be tested via this interface
  public boolean knownCPU(String cpu);
}

  // Mangle a cpu name if necessary
  public String getCPU(String cpu);
}

@ -1,5 +1,5 @@
/*
 * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -52,27 +52,54 @@ public class PlatformInfo {
    }
  }

  /* Returns "sparc" for SPARC based platforms and "x86" for x86 based
     platforms. Otherwise returns the value of os.arch. If the value
     is not recognized as supported, an exception is thrown instead. */
  public static boolean knownCPU(String cpu) {
    final String[] KNOWN =
        new String[] {"i386", "x86", "x86_64", "amd64", "sparc", "sparcv9", "ppc64", "aarch64"};

    for (String s : KNOWN) {
      if (s.equals(cpu))
        return true;
    }

    return false;
  }

  /* Returns "sparc" for SPARC-based platforms, "x86" for x86-based
     platforms, and "x86_64" for 64-bit x86-based platforms. Otherwise
     returns the value of os.arch. If the value is not recognized as
     supported, an exception is thrown instead. */

  public static String getCPU() throws UnsupportedPlatformException {
    String cpu = System.getProperty("os.arch");
    if (cpu.equals("i386") || cpu.equals("x86")) {
      return "x86";
    } else if (cpu.equals("sparc") || cpu.equals("sparcv9")) {
      return "sparc";
    } else if (cpu.equals("ia64") || cpu.equals("amd64") || cpu.equals("x86_64") || cpu.equals("ppc64") || cpu.equals("aarch64")) {
      return cpu;
    } else {
      try {
        Class pic = Class.forName("sun.jvm.hotspot.utilities.PlatformInfoClosed");
        AltPlatformInfo api = (AltPlatformInfo) pic.newInstance();
        if (api.knownCPU(cpu)) {
          return cpu;
        }
      } catch (Exception e) {}
      throw new UnsupportedPlatformException("CPU type " + cpu + " not yet supported");

    // Let any additional CPU mangling fire first
    try {
      Class pic = Class.forName("sun.jvm.hotspot.utilities.PlatformInfoClosed");
      AltPlatformInfo api = (AltPlatformInfo) pic.newInstance();
      if (api.knownCPU(cpu)) {
        return api.getCPU(cpu);
      }
    } catch (Exception e) {
      // Ignored
    }

    // Check that CPU is supported
    if (!knownCPU(cpu)) {
      throw new UnsupportedPlatformException("CPU type " + cpu + " not yet supported");
    }

    // Tweaks
    if (cpu.equals("i386"))
      return "x86";

    if (cpu.equals("sparcv9"))
      return "sparc";

    if (cpu.equals("x86_64"))
      return "amd64";

    return cpu;

  }

  // this main is invoked from Makefile to make platform specific agent Makefile(s).

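A quick way to see what the reworked lookup yields (hypothetical test class; output depends on the os.arch of the running JVM):

    import sun.jvm.hotspot.utilities.PlatformInfo;

    public class CPUProbe {
        public static void main(String[] args) throws Exception {
            // knownCPU() gates getCPU(); getCPU() then mangles "i386" to "x86",
            // "sparcv9" to "sparc" and "x86_64" to "amd64".
            System.out.println(PlatformInfo.knownCPU("aarch64")); // true
            System.out.println(PlatformInfo.getCPU());            // e.g. "aarch64" on an AArch64 host
        }
    }
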
@ -84,11 +84,11 @@ public class PointerLocation {
  }

  public boolean isInNewGen() {
    return ((gen != null) && (gen.level() == 0));
    return ((gen != null) && (gen == ((GenCollectedHeap)heap).getGen(0)));
  }

  public boolean isInOldGen() {
    return ((gen != null) && (gen.level() == 1));
    return ((gen != null) && (gen == ((GenCollectedHeap)heap).getGen(1)));
  }

  public boolean inOtherGen() {
@ -207,8 +207,6 @@ public class PointerLocation {
      tty.print("In new generation ");
    } else if (isInOldGen()) {
      tty.print("In old generation ");
    } else if (gen != null) {
      tty.print("In Generation " + getGeneration().level());
    } else {
      tty.print("In unknown section of Java heap");
    }

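The two rewritten predicates stop depending on the numeric Generation.level() and instead compare pointer identity against the heap's generations. Spelled out (sketch only; heap and gen are the surrounding class's fields):

    // New shape of the tests: identity against GenCollectedHeap's generations
    // rather than the removed level() accessor.
    GenCollectedHeap gch = (GenCollectedHeap) heap;
    boolean inNew = (gen != null) && (gen == gch.getGen(0)); // young generation
    boolean inOld = (gen != null) && (gen == gch.getGen(1)); // old generation
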
@ -263,14 +263,19 @@ endif
$(DtraceOutDir):
	mkdir $(DtraceOutDir)

# When building using a devkit, dtrace cannot find the correct preprocessor so
# we run it explicitly before running dtrace.
$(DtraceOutDir)/hotspot.h: $(DTRACE_COMMON_SRCDIR)/hotspot.d | $(DtraceOutDir)
	$(QUIETLY) $(DTRACE_PROG) $(DTRACE_OPTS) -C -I. -h -o $@ -s $(DTRACE_COMMON_SRCDIR)/hotspot.d
	$(QUIETLY) $(CC) -E $(DTRACE_OPTS) -I. -x c $(DTRACE_COMMON_SRCDIR)/hotspot.d > $(DtraceOutDir)/hotspot.d
	$(QUIETLY) $(DTRACE_PROG) -h -o $@ -s $(DtraceOutDir)/hotspot.d

$(DtraceOutDir)/hotspot_jni.h: $(DTRACE_COMMON_SRCDIR)/hotspot_jni.d | $(DtraceOutDir)
	$(QUIETLY) $(DTRACE_PROG) $(DTRACE_OPTS) -C -I. -h -o $@ -s $(DTRACE_COMMON_SRCDIR)/hotspot_jni.d
	$(QUIETLY) $(CC) -E $(DTRACE_OPTS) -I. -x c $(DTRACE_COMMON_SRCDIR)/hotspot_jni.d > $(DtraceOutDir)/hotspot_jni.d
	$(QUIETLY) $(DTRACE_PROG) -h -o $@ -s $(DtraceOutDir)/hotspot_jni.d

$(DtraceOutDir)/hs_private.h: $(DTRACE_COMMON_SRCDIR)/hs_private.d | $(DtraceOutDir)
	$(QUIETLY) $(DTRACE_PROG) $(DTRACE_OPTS) -C -I. -h -o $@ -s $(DTRACE_COMMON_SRCDIR)/hs_private.d
	$(QUIETLY) $(CC) -E $(DTRACE_OPTS) -I. -x c $(DTRACE_COMMON_SRCDIR)/hs_private.d > $(DtraceOutDir)/hs_private.d
	$(QUIETLY) $(DTRACE_PROG) -h -o $@ -s $(DtraceOutDir)/hs_private.d

dtrace_gen_headers: $(DtraceOutDir)/hotspot.h $(DtraceOutDir)/hotspot_jni.h $(DtraceOutDir)/hs_private.h

@ -56,13 +56,14 @@ all_debug_universal:
universalize: $(UNIVERSAL_LIPO_LIST) $(UNIVERSAL_COPY_LIST)
	$(RM) -r $(EXPORT_PATH)/lib/{i386,amd64}

LIPO ?= lipo

# Package built libraries in a universal binary
$(UNIVERSAL_LIPO_LIST):
	BUILT_LIPO_FILES="`find $(EXPORT_LIB_DIR)/{i386,amd64}/$(subst $(EXPORT_LIB_DIR)/,,$@) 2>/dev/null`" || test $$? = "1"; \
	if [ -n "$${BUILT_LIPO_FILES}" ]; then \
	  $(MKDIR) -p $(shell dirname $@); \
	  lipo -create -output $@ $${BUILT_LIPO_FILES}; \
	  $(LIPO) -create -output $@ $${BUILT_LIPO_FILES}; \
	fi

@ -44,6 +44,7 @@ $(AGENT_SRC_DIR)/sun/jvm/hotspot/code/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/compiler/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/amd64/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/aarch64/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/bsd/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/bsd/amd64/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/bsd/x86/*.java \
@ -55,6 +56,7 @@ $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/linux/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/linux/amd64/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/linux/ia64/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/linux/ppc64/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/linux/aarch64/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/linux/x86/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/linux/sparc/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/posix/*.java \
@ -63,6 +65,7 @@ $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/ppc64/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/proc/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/proc/amd64/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/proc/ppc64/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/proc/aarch64/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/proc/sparc/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/proc/x86/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/remote/*.java \
@ -70,6 +73,7 @@ $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/remote/amd64/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/remote/ppc64/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/remote/sparc/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/remote/x86/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/remote/aarch64/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/sparc/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/win32/coff/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/windbg/*.java \
@ -92,11 +96,13 @@ $(AGENT_SRC_DIR)/sun/jvm/hotspot/opto/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/prims/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/amd64/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/aarch64/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/bsd/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/bsd_amd64/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/bsd_x86/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/linux/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/linux_amd64/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/linux_aarch64/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/linux_x86/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/linux_sparc/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/linux_ppc64/*.java \

@ -1,5 +1,5 @@
/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
@ -526,16 +526,6 @@ frame frame::sender(RegisterMap* map) const {
  return frame(sender_sp(), link(), sender_pc());
}

bool frame::interpreter_frame_equals_unpacked_fp(intptr_t* fp) {
  assert(is_interpreted_frame(), "must be interpreter frame");
  Method* method = interpreter_frame_method();
  // When unpacking an optimized frame the frame pointer is
  // adjusted with:
  int diff = (method->max_locals() - method->size_of_parameters()) *
             Interpreter::stackElementWords;
  return _fp == (fp - diff);
}

bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
  // QQQ
#ifdef CC_INTERP

@ -1,6 +1,6 @@
/*
 * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -84,7 +84,7 @@ define_pd_global(intx, InlineSmallCode, 1000);

#ifdef BUILTIN_SIM
#define UseBuiltinSim true
#define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct) \
#define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct, range, constraint) \
\
  product(bool, NotifySimulator, UseBuiltinSim, \
          "tell the AArch64 sim where we are in method code") \
@ -112,7 +112,7 @@ define_pd_global(intx, InlineSmallCode, 1000);
#define NotifySimulator false
#define UseSimulatorCache false
#define DisableBCCheck true
#define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct) \
#define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct, range, constraint) \
\
  product(bool, NearCpool, true, \
          "constant pool is close to instructions") \
|
||||
|
@ -2888,41 +2888,40 @@ void MacroAssembler::cmpptr(Register src1, Address src2) {
|
||||
cmp(src1, rscratch1);
|
||||
}
|
||||
|
||||
void MacroAssembler::store_check(Register obj) {
|
||||
// Does a store check for the oop in register obj. The content of
|
||||
// register obj is destroyed afterwards.
|
||||
store_check_part_1(obj);
|
||||
store_check_part_2(obj);
|
||||
}
|
||||
|
||||
void MacroAssembler::store_check(Register obj, Address dst) {
|
||||
store_check(obj);
|
||||
}
|
||||
|
||||
void MacroAssembler::store_check(Register obj) {
|
||||
// Does a store check for the oop in register obj. The content of
|
||||
// register obj is destroyed afterwards.
|
||||
|
||||
// split the store check operation so that other instructions can be scheduled inbetween
|
||||
void MacroAssembler::store_check_part_1(Register obj) {
|
||||
BarrierSet* bs = Universe::heap()->barrier_set();
|
||||
assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
|
||||
lsr(obj, obj, CardTableModRefBS::card_shift);
|
||||
}
|
||||
|
||||
void MacroAssembler::store_check_part_2(Register obj) {
|
||||
BarrierSet* bs = Universe::heap()->barrier_set();
|
||||
assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
|
||||
CardTableModRefBS* ct = (CardTableModRefBS*)bs;
|
||||
CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
|
||||
assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
|
||||
|
||||
// The calculation for byte_map_base is as follows:
|
||||
// byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
|
||||
// So this essentially converts an address to a displacement and
|
||||
// it will never need to be relocated.
|
||||
lsr(obj, obj, CardTableModRefBS::card_shift);
|
||||
|
||||
// FIXME: It's not likely that disp will fit into an offset so we
|
||||
// don't bother to check, but it could save an instruction.
|
||||
intptr_t disp = (intptr_t) ct->byte_map_base;
|
||||
mov(rscratch1, disp);
|
||||
strb(zr, Address(obj, rscratch1));
|
||||
assert(CardTableModRefBS::dirty_card_val() == 0, "must be");
|
||||
|
||||
{
|
||||
ExternalAddress cardtable((address) ct->byte_map_base);
|
||||
unsigned long offset;
|
||||
adrp(rscratch1, cardtable, offset);
|
||||
assert(offset == 0, "byte_map_base is misaligned");
|
||||
}
|
||||
|
||||
if (UseCondCardMark) {
|
||||
Label L_already_dirty;
|
||||
ldrb(rscratch2, Address(obj, rscratch1));
|
||||
cbz(rscratch2, L_already_dirty);
|
||||
strb(zr, Address(obj, rscratch1));
|
||||
bind(L_already_dirty);
|
||||
} else {
|
||||
strb(zr, Address(obj, rscratch1));
|
||||
}
|
||||
}
|
||||
|
||||
void MacroAssembler::load_klass(Register dst, Register src) {
|
||||
|
@ -756,10 +756,6 @@ public:
|
||||
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
|
||||
// split store_check(Register obj) to enhance instruction interleaving
|
||||
void store_check_part_1(Register obj);
|
||||
void store_check_part_2(Register obj);
|
||||
|
||||
// oop manipulations
|
||||
void load_klass(Register dst, Register src);
|
||||
void store_klass(Register dst, Register src);
|
||||
|
@ -63,7 +63,7 @@ define_pd_global(size_t, CMSYoungGenPerWorker, 16*M); // Default max size of CM
|
||||
define_pd_global(uintx, TypeProfileLevel, 111);
|
||||
|
||||
// Platform dependent flag handling: flags only defined on this platform.
|
||||
#define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct) \
|
||||
#define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct, range, constraint) \
|
||||
\
|
||||
/* Load poll address from thread. This is used to implement per-thread */ \
|
||||
/* safepoints on platforms != IA64. */ \
|
||||
|
@ -510,7 +510,8 @@ void VM_Version::determine_section_size() {
|
||||
|
||||
void VM_Version::determine_features() {
|
||||
#if defined(ABI_ELFv2)
|
||||
const int code_size = (num_features+1+2*7)*BytesPerInstWord; // TODO(asmundak): calculation is incorrect.
|
||||
// 1 InstWord per call for the blr instruction.
|
||||
const int code_size = (num_features+1+2*1)*BytesPerInstWord;
|
||||
#else
|
||||
// 7 InstWords for each call (function descriptor + blr instruction).
|
||||
const int code_size = (num_features+1+2*7)*BytesPerInstWord;
|
||||
@ -545,7 +546,8 @@ void VM_Version::determine_features() {
|
||||
a->popcntw(R7, R5); // code[6] -> popcntw
|
||||
a->fcfids(F3, F4); // code[7] -> fcfids
|
||||
a->vand(VR0, VR0, VR0); // code[8] -> vand
|
||||
a->lqarx_unchecked(R7, R3_ARG1, R4_ARG2, 1); // code[9] -> lqarx_m
|
||||
// arg0 of lqarx must be an even register, (arg1 + arg2) must be a multiple of 16
|
||||
a->lqarx_unchecked(R6, R3_ARG1, R4_ARG2, 1); // code[9] -> lqarx_m
|
||||
a->vcipher(VR0, VR1, VR2); // code[10] -> vcipher
|
||||
a->vpmsumb(VR0, VR1, VR2); // code[11] -> vpmsumb
|
||||
a->tcheck(0); // code[12] -> tcheck
|
||||
@ -577,7 +579,8 @@ void VM_Version::determine_features() {
|
||||
|
||||
// Execute code. Illegal instructions will be replaced by 0 in the signal handler.
|
||||
VM_Version::_is_determine_features_test_running = true;
|
||||
(*test)((address)mid_of_test_area, (uint64_t)0);
|
||||
// We must align the first argument to 16 bytes because of the lqarx check.
|
||||
(*test)((address)align_size_up((intptr_t)mid_of_test_area, 16), (uint64_t)0);
|
||||
VM_Version::_is_determine_features_test_running = false;
|
||||
|
||||
// determine which instructions are legal.
|
||||
@ -619,12 +622,12 @@ void VM_Version::config_dscr() {
|
||||
MacroAssembler* a = new MacroAssembler(&cb);
|
||||
|
||||
// Emit code.
|
||||
uint64_t (*get_dscr)() = (uint64_t(*)())(void *)a->emit_fd();
|
||||
uint64_t (*get_dscr)() = (uint64_t(*)())(void *)a->function_entry();
|
||||
uint32_t *code = (uint32_t *)a->pc();
|
||||
a->mfdscr(R3);
|
||||
a->blr();
|
||||
|
||||
void (*set_dscr)(long) = (void(*)(long))(void *)a->emit_fd();
|
||||
void (*set_dscr)(long) = (void(*)(long))(void *)a->function_entry();
|
||||
a->mtdscr(R3);
|
||||
a->blr();
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -599,12 +599,6 @@ bool frame::is_valid_stack_pointer(intptr_t* valid_sp, intptr_t* sp) {
|
||||
return next_younger_sp_or_null(valid_sp, sp) != NULL;
|
||||
}
|
||||
|
||||
|
||||
bool frame::interpreter_frame_equals_unpacked_fp(intptr_t* fp) {
|
||||
assert(is_interpreted_frame(), "must be interpreter frame");
|
||||
return this->fp() == fp;
|
||||
}
|
||||
|
||||
bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
|
||||
#ifdef CC_INTERP
|
||||
// Is there anything to do?
|
||||
|
@ -81,7 +81,7 @@ define_pd_global(size_t, CMSYoungGenPerWorker, 16*M); // default max size of CM
|
||||
|
||||
define_pd_global(uintx, TypeProfileLevel, 111);
|
||||
|
||||
#define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct) \
|
||||
#define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct, range, constraint) \
|
||||
\
|
||||
product(intx, UseVIS, 99, \
|
||||
"Highest supported VIS instructions set on Sparc") \
|
||||
|
@ -524,17 +524,6 @@ frame frame::sender(RegisterMap* map) const {
|
||||
return frame(sender_sp(), link(), sender_pc());
|
||||
}
|
||||
|
||||
|
||||
bool frame::interpreter_frame_equals_unpacked_fp(intptr_t* fp) {
|
||||
assert(is_interpreted_frame(), "must be interpreter frame");
|
||||
Method* method = interpreter_frame_method();
|
||||
// When unpacking an optimized frame the frame pointer is
|
||||
// adjusted with:
|
||||
int diff = (method->max_locals() - method->size_of_parameters()) *
|
||||
Interpreter::stackElementWords;
|
||||
return _fp == (fp - diff);
|
||||
}
|
||||
|
||||
bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
|
||||
// QQQ
|
||||
#ifdef CC_INTERP
|
||||
|
@ -84,7 +84,7 @@ define_pd_global(uintx, TypeProfileLevel, 111);
|
||||
|
||||
define_pd_global(bool, PreserveFramePointer, false);
|
||||
|
||||
#define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct) \
|
||||
#define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct, range, constraint) \
|
||||
\
|
||||
develop(bool, IEEEPrecision, true, \
|
||||
"Enables IEEE precision (for INTEL only)") \
|
||||
|
@ -4260,31 +4260,24 @@ void MacroAssembler::g1_write_barrier_post(Register store_addr,
|
||||
//////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
|
||||
void MacroAssembler::store_check(Register obj) {
|
||||
// Does a store check for the oop in register obj. The content of
|
||||
// register obj is destroyed afterwards.
|
||||
store_check_part_1(obj);
|
||||
store_check_part_2(obj);
|
||||
}
|
||||
|
||||
void MacroAssembler::store_check(Register obj, Address dst) {
|
||||
store_check(obj);
|
||||
}
|
||||
|
||||
void MacroAssembler::store_check(Register obj) {
|
||||
// Does a store check for the oop in register obj. The content of
|
||||
// register obj is destroyed afterwards.
|
||||
|
||||
// split the store check operation so that other instructions can be scheduled inbetween
|
||||
void MacroAssembler::store_check_part_1(Register obj) {
|
||||
BarrierSet* bs = Universe::heap()->barrier_set();
|
||||
assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
|
||||
shrptr(obj, CardTableModRefBS::card_shift);
|
||||
}
|
||||
|
||||
void MacroAssembler::store_check_part_2(Register obj) {
|
||||
BarrierSet* bs = Universe::heap()->barrier_set();
|
||||
assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
|
||||
CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
|
||||
assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
|
||||
|
||||
shrptr(obj, CardTableModRefBS::card_shift);
|
||||
|
||||
Address card_addr;
|
||||
|
||||
// The calculation for byte_map_base is as follows:
|
||||
// byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
|
||||
// So this essentially converts an address to a displacement and it will
|
||||
@ -4292,8 +4285,7 @@ void MacroAssembler::store_check_part_2(Register obj) {
|
||||
// large for a 32bit displacement.
|
||||
intptr_t disp = (intptr_t) ct->byte_map_base;
|
||||
if (is_simm32(disp)) {
|
||||
Address cardtable(noreg, obj, Address::times_1, disp);
|
||||
movb(cardtable, 0);
|
||||
card_addr = Address(noreg, obj, Address::times_1, disp);
|
||||
} else {
|
||||
// By doing it as an ExternalAddress 'disp' could be converted to a rip-relative
|
||||
// displacement and done in a single instruction given favorable mapping and a
|
||||
@ -4301,7 +4293,21 @@ void MacroAssembler::store_check_part_2(Register obj) {
|
||||
// entry and that entry is not properly handled by the relocation code.
|
||||
AddressLiteral cardtable((address)ct->byte_map_base, relocInfo::none);
|
||||
Address index(noreg, obj, Address::times_1);
|
||||
movb(as_Address(ArrayAddress(cardtable, index)), 0);
|
||||
card_addr = as_Address(ArrayAddress(cardtable, index));
|
||||
}
|
||||
|
||||
int dirty = CardTableModRefBS::dirty_card_val();
|
||||
if (UseCondCardMark) {
|
||||
Label L_already_dirty;
|
||||
if (UseConcMarkSweepGC) {
|
||||
membar(Assembler::StoreLoad);
|
||||
}
|
||||
cmpb(card_addr, dirty);
|
||||
jcc(Assembler::equal, L_already_dirty);
|
||||
movb(card_addr, dirty);
|
||||
bind(L_already_dirty);
|
||||
} else {
|
||||
movb(card_addr, dirty);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -315,10 +315,6 @@ class MacroAssembler: public Assembler {
|
||||
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
|
||||
// split store_check(Register obj) to enhance instruction interleaving
|
||||
void store_check_part_1(Register obj);
|
||||
void store_check_part_2(Register obj);
|
||||
|
||||
// C 'boolean' to Java boolean: x == 0 ? 0 : 1
|
||||
void c2bool(Register x);
|
||||
|
||||
|
@ -63,7 +63,8 @@ define_pd_global(uintx, TypeProfileLevel, 0);
|
||||
|
||||
define_pd_global(bool, PreserveFramePointer, false);
|
||||
|
||||
#define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct) \
|
||||
#define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct, range, constraint) \
|
||||
\
|
||||
product(bool, UseFastEmptyMethods, true, \
|
||||
"Use fast method entry code for empty methods") \
|
||||
\
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright 2013 SAP AG. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
@ -38,8 +38,8 @@ class AIXDecoder: public AbstractDecoder {
|
||||
|
||||
virtual bool demangle(const char* symbol, char* buf, int buflen) { return false; } // demangled by getFuncName
|
||||
|
||||
virtual bool decode(address addr, char* buf, int buflen, int* offset, const char* modulepath) {
|
||||
return (::getFuncName((codeptr_t)addr, buf, buflen, offset, 0, 0, 0) == 0);
|
||||
virtual bool decode(address addr, char* buf, int buflen, int* offset, const char* modulepath, bool demangle) {
|
||||
return (::getFuncName((codeptr_t)addr, buf, buflen, offset, 0, 0, 0, demangle) == 0);
|
||||
}
|
||||
virtual bool decode(address addr, char *buf, int buflen, int* offset, const void *base) {
|
||||
ShouldNotReachHere();
|
||||
|
@ -29,7 +29,7 @@
|
||||
//
|
||||
// Defines Aix specific flags. They are not available on other platforms.
|
||||
//
|
||||
#define RUNTIME_OS_FLAGS(develop, develop_pd, product, product_pd, diagnostic, notproduct) \
|
||||
#define RUNTIME_OS_FLAGS(develop, develop_pd, product, product_pd, diagnostic, notproduct, range, constraint) \
|
||||
\
|
||||
/* Use 64K pages for virtual memory (shmat). */ \
|
||||
product(bool, Use64KPages, true, \
|
||||
|
@ -1439,7 +1439,8 @@ static address resolve_function_descriptor_to_code_pointer(address p) {
|
||||
}
|
||||
|
||||
bool os::dll_address_to_function_name(address addr, char *buf,
|
||||
int buflen, int *offset) {
|
||||
int buflen, int *offset,
|
||||
bool demangle) {
|
||||
if (offset) {
|
||||
*offset = -1;
|
||||
}
|
||||
@ -1454,7 +1455,7 @@ bool os::dll_address_to_function_name(address addr, char *buf,
|
||||
}
|
||||
|
||||
// Go through Decoder::decode to call getFuncName which reads the name from the traceback table.
|
||||
return Decoder::decode(addr, buf, buflen, offset);
|
||||
return Decoder::decode(addr, buf, buflen, offset, demangle);
|
||||
}
|
||||
|
||||
static int getModuleName(codeptr_t pc, // [in] program counter
|
||||
@ -1653,7 +1654,7 @@ void os::print_memory_info(outputStream* st) {
|
||||
}
|
||||
}
|
||||
|
||||
void os::pd_print_cpu_info(outputStream* st) {
|
||||
void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
|
||||
// cpu
|
||||
st->print("CPU:");
|
||||
st->print("total %d", os::processor_count());
|
||||
@ -3761,10 +3762,6 @@ ExtendedPC os::get_thread_pc(Thread* thread) {
|
||||
return fetcher.result();
|
||||
}
|
||||
|
||||
// Not neede on Aix.
|
||||
// int os::Aix::safe_cond_timedwait(pthread_cond_t *_cond, pthread_mutex_t *_mutex, const struct timespec *_abstime) {
|
||||
// }
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
// debug support
|
||||
|
||||
|
@ -114,7 +114,8 @@ extern "C" int getFuncName(
|
||||
int* p_displacement, // [out] optional: displacement (-1 if not available)
|
||||
const struct tbtable** p_tb, // [out] optional: ptr to traceback table to get further
|
||||
// information (NULL if not available)
|
||||
char* p_errmsg, size_t errmsglen // [out] optional: user provided buffer for error messages
|
||||
char* p_errmsg, size_t errmsglen,// [out] optional: user provided buffer for error messages
|
||||
bool demangle // [in] whether to demangle the name
|
||||
) {
|
||||
struct tbtable* tb = 0;
|
||||
unsigned int searchcount = 0;
|
||||
@ -216,15 +217,17 @@ extern "C" int getFuncName(
|
||||
p_name[0] = '\0';
|
||||
|
||||
// If it is a C++ name, try and demangle it using the Demangle interface (see demangle.h).
|
||||
char* rest;
|
||||
Name* const name = Demangle(buf, rest);
|
||||
if (name) {
|
||||
const char* const demangled_name = name->Text();
|
||||
if (demangled_name) {
|
||||
strncpy(p_name, demangled_name, namelen-1);
|
||||
p_name[namelen-1] = '\0';
|
||||
if (demangle) {
|
||||
char* rest;
|
||||
Name* const name = Demangle(buf, rest);
|
||||
if (name) {
|
||||
const char* const demangled_name = name->Text();
|
||||
if (demangled_name) {
|
||||
strncpy(p_name, demangled_name, namelen-1);
|
||||
p_name[namelen-1] = '\0';
|
||||
}
|
||||
delete name;
|
||||
}
|
||||
delete name;
|
||||
}
|
||||
|
||||
// Fallback: if demangling did not work, just provide the unmangled name.
|
||||
@ -325,7 +328,7 @@ int dladdr(void* addr, Dl_info* info) {
|
||||
int displacement = 0;
|
||||
|
||||
if (getFuncName((codeptr_t) p, funcname, sizeof(funcname), &displacement,
|
||||
NULL, NULL, 0) == 0) {
|
||||
NULL, NULL, 0, true /* demangle */) == 0) {
|
||||
if (funcname[0] != '\0') {
|
||||
const char* const interned = dladdr_fixed_strings.intern(funcname);
|
||||
info->dli_sname = interned;
|
||||
|
@ -87,7 +87,8 @@ int getFuncName(
|
||||
char* p_name, size_t namelen, // [out] optional: user provided buffer for the function name
|
||||
int* p_displacement, // [out] optional: displacement
|
||||
const struct tbtable** p_tb, // [out] optional: ptr to traceback table to get further information
|
||||
char* p_errmsg, size_t errmsglen // [out] optional: user provided buffer for error messages
|
||||
char* p_errmsg, size_t errmsglen,// [out] optional: user provided buffer for error messages
|
||||
bool demangle = true // [in] whether to demangle the name
|
||||
);
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -42,7 +42,7 @@ class MachODecoder : public AbstractDecoder {
|
||||
virtual bool decode(address pc, char* buf, int buflen, int* offset,
|
||||
const void* base);
|
||||
virtual bool decode(address pc, char* buf, int buflen, int* offset,
|
||||
const char* module_path = NULL) {
|
||||
const char* module_path, bool demangle) {
|
||||
ShouldNotReachHere();
|
||||
return false;
|
||||
}
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -28,19 +28,20 @@
|
||||
//
|
||||
// Defines Bsd specific flags. They are not available on other platforms.
|
||||
//
|
||||
#define RUNTIME_OS_FLAGS(develop, develop_pd, product, product_pd, diagnostic, notproduct) \
|
||||
product(bool, UseOprofile, false, \
|
||||
"enable support for Oprofile profiler") \
|
||||
\
|
||||
product(bool, UseBsdPosixThreadCPUClocks, true, \
|
||||
"enable fast Bsd Posix clocks where available") \
|
||||
/* NB: The default value of UseBsdPosixThreadCPUClocks may be \
|
||||
overridden in Arguments::parse_each_vm_init_arg. */ \
|
||||
\
|
||||
product(bool, UseHugeTLBFS, false, \
|
||||
"Use MAP_HUGETLB for large pages") \
|
||||
\
|
||||
product(bool, UseSHM, false, \
|
||||
#define RUNTIME_OS_FLAGS(develop, develop_pd, product, product_pd, diagnostic, notproduct, range, constraint) \
|
||||
\
|
||||
product(bool, UseOprofile, false, \
|
||||
"enable support for Oprofile profiler") \
|
||||
\
|
||||
/* NB: The default value of UseBsdPosixThreadCPUClocks may be */ \
|
||||
/* overridden in Arguments::parse_each_vm_init_arg. */ \
|
||||
product(bool, UseBsdPosixThreadCPUClocks, true, \
|
||||
"enable fast Bsd Posix clocks where available") \
|
||||
\
|
||||
product(bool, UseHugeTLBFS, false, \
|
||||
"Use MAP_HUGETLB for large pages") \
|
||||
\
|
||||
product(bool, UseSHM, false, \
|
||||
"Use SYSV shared memory for large pages")
|
||||
|
||||
//
|
||||
|
@ -637,11 +637,6 @@ void os::Bsd::hotspot_sigmask(Thread* thread) {
//////////////////////////////////////////////////////////////////////////////
// create new thread

// check if it's safe to start a new thread
static bool _thread_safety_check(Thread* thread) {
return true;
}

#ifdef __APPLE__
// library handle for calling objc_registerThreadWithCollector()
// without static linking to the libobjc library
@ -681,15 +676,6 @@ static void *java_start(Thread *thread) {
OSThread* osthread = thread->osthread();
Monitor* sync = osthread->startThread_lock();

// non floating stack BsdThreads needs extra check, see above
if (!_thread_safety_check(thread)) {
// notify parent thread
MutexLockerEx ml(sync, Mutex::_no_safepoint_check_flag);
osthread->set_state(ZOMBIE);
sync->notify_all();
return NULL;
}

osthread->set_thread_id(os::Bsd::gettid());

#ifdef __APPLE__
@ -1339,7 +1325,8 @@ bool os::address_is_in_vm(address addr) {
#define MACH_MAXSYMLEN 256

bool os::dll_address_to_function_name(address addr, char *buf,
int buflen, int *offset) {
int buflen, int *offset,
bool demangle) {
// buf is not optional, but offset is optional
assert(buf != NULL, "sanity check");

@ -1349,7 +1336,7 @@ bool os::dll_address_to_function_name(address addr, char *buf,
if (dladdr((void*)addr, &dlinfo) != 0) {
// see if we have a matching symbol
if (dlinfo.dli_saddr != NULL && dlinfo.dli_sname != NULL) {
if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
if (!(demangle && Decoder::demangle(dlinfo.dli_sname, buf, buflen))) {
jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
}
if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
@ -1358,15 +1345,16 @@ bool os::dll_address_to_function_name(address addr, char *buf,
// no matching symbol so try for just file info
if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
buf, buflen, offset, dlinfo.dli_fname)) {
buf, buflen, offset, dlinfo.dli_fname, demangle)) {
return true;
}
}

// Handle non-dynamic manually:
if (dlinfo.dli_fbase != NULL &&
Decoder::decode(addr, localbuf, MACH_MAXSYMLEN, offset, dlinfo.dli_fbase)) {
if (!Decoder::demangle(localbuf, buf, buflen)) {
Decoder::decode(addr, localbuf, MACH_MAXSYMLEN, offset,
dlinfo.dli_fbase)) {
if (!(demangle && Decoder::demangle(localbuf, buf, buflen))) {
jio_snprintf(buf, buflen, "%s", localbuf);
}
return true;
@ -1706,7 +1694,7 @@ void os::print_os_info(outputStream* st) {
os::Posix::print_load_average(st);
}

void os::pd_print_cpu_info(outputStream* st) {
void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
// Nothing to do for now.
}

@ -2276,8 +2264,6 @@ bool os::remove_stack_guard_pages(char* addr, size_t size) {
return os::uncommit_memory(addr, size);
}

static address _highest_vm_reserved_address = NULL;

// If 'fixed' is true, anon_mmap() will attempt to reserve anonymous memory
// at 'requested_addr'. If there are existing memory mappings at the same
// location, however, they will be overwritten. If 'fixed' is false,
@ -2300,23 +2286,9 @@ static char* anon_mmap(char* requested_addr, size_t bytes, bool fixed) {
addr = (char*)::mmap(requested_addr, bytes, PROT_NONE,
flags, -1, 0);

if (addr != MAP_FAILED) {
// anon_mmap() should only get called during VM initialization,
// don't need lock (actually we can skip locking even it can be called
// from multiple threads, because _highest_vm_reserved_address is just a
// hint about the upper limit of non-stack memory regions.)
if ((address)addr + bytes > _highest_vm_reserved_address) {
_highest_vm_reserved_address = (address)addr + bytes;
}
}

return addr == MAP_FAILED ? NULL : addr;
}

// Don't update _highest_vm_reserved_address, because there might be memory
// regions above addr + size. If so, releasing a memory region only creates
// a hole in the address space, it doesn't help prevent heap-stack collision.
//
static int anon_munmap(char * addr, size_t size) {
return ::munmap(addr, size) == 0;
}
@ -2490,15 +2462,7 @@ char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block");

// Repeatedly allocate blocks until the block is allocated at the
// right spot. Give up after max_tries. Note that reserve_memory() will
// automatically update _highest_vm_reserved_address if the call is
// successful. The variable tracks the highest memory address every reserved
// by JVM. It is used to detect heap-stack collision if running with
// fixed-stack BsdThreads. Because here we may attempt to reserve more
// space than needed, it could confuse the collision detecting code. To
// solve the problem, save current _highest_vm_reserved_address and
// calculate the correct value before return.
address old_highest = _highest_vm_reserved_address;
// right spot.

// Bsd mmap allows caller to pass an address as hint; give it a try first,
// if kernel honors the hint then we can return immediately.
@ -2552,10 +2516,8 @@ char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
}

if (i < max_tries) {
_highest_vm_reserved_address = MAX2(old_highest, (address)requested_addr + bytes);
return requested_addr;
} else {
_highest_vm_reserved_address = old_highest;
return NULL;
}
}
@ -3715,12 +3677,6 @@ ExtendedPC os::get_thread_pc(Thread* thread) {
return fetcher.result();
}

int os::Bsd::safe_cond_timedwait(pthread_cond_t *_cond,
pthread_mutex_t *_mutex,
const struct timespec *_abstime) {
return pthread_cond_timedwait(_cond, _mutex, _abstime);
}

////////////////////////////////////////////////////////////////////////////////
// debug support

@ -4286,7 +4242,7 @@ int os::PlatformEvent::park(jlong millis) {
// In that case, we should propagate the notify to another waiter.

while (_Event < 0) {
status = os::Bsd::safe_cond_timedwait(_cond, _mutex, &abst);
status = pthread_cond_timedwait(_cond, _mutex, &abst);
if (status != 0 && WorkAroundNPTLTimedWaitHang) {
pthread_cond_destroy(_cond);
pthread_cond_init(_cond, NULL);
@ -4492,7 +4448,7 @@ void Parker::park(bool isAbsolute, jlong time) {
if (time == 0) {
status = pthread_cond_wait(_cond, _mutex);
} else {
status = os::Bsd::safe_cond_timedwait(_cond, _mutex, &absTime);
status = pthread_cond_timedwait(_cond, _mutex, &absTime);
if (status != 0 && WorkAroundNPTLTimedWaitHang) {
pthread_cond_destroy(_cond);
pthread_cond_init(_cond, NULL);
|
@ -30,9 +30,6 @@
// Information about the protection of the page at address '0' on this os.
static bool zero_page_read_protected() { return true; }

// pthread_getattr_np comes with BsdThreads-0.9-7 on RedHat 7.1
typedef int (*pthread_getattr_func_type)(pthread_t, pthread_attr_t *);

#ifdef __APPLE__
// Mac OS X doesn't support clock_gettime. Stub out the type, it is
// unused
@ -145,9 +142,6 @@ class Bsd {

// none present

// BsdThreads work-around for 6292965
static int safe_cond_timedwait(pthread_cond_t *_cond, pthread_mutex_t *_mutex, const struct timespec *_abstime);

private:
typedef int (*sched_getcpu_func_t)(void);
typedef int (*numa_node_to_cpus_func_t)(int node, unsigned long *buffer, int bufferlen);
|
@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,14 +28,15 @@
//
// Defines Linux specific flags. They are not available on other platforms.
//
#define RUNTIME_OS_FLAGS(develop, develop_pd, product, product_pd, diagnostic, notproduct) \
#define RUNTIME_OS_FLAGS(develop, develop_pd, product, product_pd, diagnostic, notproduct, range, constraint) \
\
product(bool, UseOprofile, false, \
"enable support for Oprofile profiler") \
\
/* NB: The default value of UseLinuxPosixThreadCPUClocks may be */ \
/* overridden in Arguments::parse_each_vm_init_arg. */ \
product(bool, UseLinuxPosixThreadCPUClocks, true, \
"enable fast Linux Posix clocks where available") \
/* NB: The default value of UseLinuxPosixThreadCPUClocks may be \
overridden in Arguments::parse_each_vm_init_arg. */ \
\
product(bool, UseHugeTLBFS, false, \
"Use MAP_HUGETLB for large pages") \
|
@ -135,8 +135,6 @@ Mutex* os::Linux::_createThread_lock = NULL;
pthread_t os::Linux::_main_thread;
int os::Linux::_page_size = -1;
const int os::Linux::_vm_default_page_size = (8 * K);
bool os::Linux::_is_floating_stack = false;
bool os::Linux::_is_NPTL = false;
bool os::Linux::_supports_fast_thread_cpu_time = false;
const char * os::Linux::_glibc_version = NULL;
const char * os::Linux::_libpthread_version = NULL;
@ -150,8 +148,6 @@ static int clock_tics_per_sec = 100;
static sigset_t check_signal_done;
static bool check_signals = true;

static pid_t _initial_pid = 0;

// Signal number used to suspend/resume a thread

// do not use any signal number less than SIGSEGV, see 4355769
@ -223,18 +219,10 @@ static char cpu_arch[] = HOTSPOT_LIB_ARCH;
//
// Returns the kernel thread id of the currently running thread. Kernel
// thread id is used to access /proc.
//
// (Note that getpid() on LinuxThreads returns kernel thread id too; but
// on NPTL, it returns the same pid for all threads, as required by POSIX.)
//
pid_t os::Linux::gettid() {
int rslt = syscall(SYS_gettid);
if (rslt == -1) {
// old kernel, no NPTL support
return getpid();
} else {
return (pid_t)rslt;
}
assert(rslt != -1, "must be."); // old linuxthreads implementation?
return (pid_t)rslt;
}

// Most versions of linux have a bug where the number of processors are
@ -508,68 +496,48 @@ void os::Linux::hotspot_sigmask(Thread* thread) {
// detecting pthread library

void os::Linux::libpthread_init() {
// Save glibc and pthread version strings. Note that _CS_GNU_LIBC_VERSION
// and _CS_GNU_LIBPTHREAD_VERSION are supported in glibc >= 2.3.2. Use a
// generic name for earlier versions.
// Define macros here so we can build HotSpot on old systems.
#ifndef _CS_GNU_LIBC_VERSION
#define _CS_GNU_LIBC_VERSION 2
#endif
#ifndef _CS_GNU_LIBPTHREAD_VERSION
#define _CS_GNU_LIBPTHREAD_VERSION 3
// Save glibc and pthread version strings.
#if !defined(_CS_GNU_LIBC_VERSION) || \
!defined(_CS_GNU_LIBPTHREAD_VERSION)
#error "glibc too old (< 2.3.2)"
#endif

size_t n = confstr(_CS_GNU_LIBC_VERSION, NULL, 0);
if (n > 0) {
char *str = (char *)malloc(n, mtInternal);
confstr(_CS_GNU_LIBC_VERSION, str, n);
os::Linux::set_glibc_version(str);
} else {
// _CS_GNU_LIBC_VERSION is not supported, try gnu_get_libc_version()
static char _gnu_libc_version[32];
jio_snprintf(_gnu_libc_version, sizeof(_gnu_libc_version),
"glibc %s %s", gnu_get_libc_version(), gnu_get_libc_release());
os::Linux::set_glibc_version(_gnu_libc_version);
}
assert(n > 0, "cannot retrieve glibc version");
char *str = (char *)malloc(n, mtInternal);
confstr(_CS_GNU_LIBC_VERSION, str, n);
os::Linux::set_glibc_version(str);

n = confstr(_CS_GNU_LIBPTHREAD_VERSION, NULL, 0);
if (n > 0) {
char *str = (char *)malloc(n, mtInternal);
confstr(_CS_GNU_LIBPTHREAD_VERSION, str, n);
// Vanilla RH-9 (glibc 2.3.2) has a bug that confstr() always tells
// us "NPTL-0.29" even we are running with LinuxThreads. Check if this
// is the case. LinuxThreads has a hard limit on max number of threads.
// So sysconf(_SC_THREAD_THREADS_MAX) will return a positive value.
// On the other hand, NPTL does not have such a limit, sysconf()
// will return -1 and errno is not changed. Check if it is really NPTL.
if (strcmp(os::Linux::glibc_version(), "glibc 2.3.2") == 0 &&
strstr(str, "NPTL") &&
sysconf(_SC_THREAD_THREADS_MAX) > 0) {
free(str);
os::Linux::set_libpthread_version("linuxthreads");
} else {
os::Linux::set_libpthread_version(str);
}
} else {
// glibc before 2.3.2 only has LinuxThreads.
os::Linux::set_libpthread_version("linuxthreads");
}

if (strstr(libpthread_version(), "NPTL")) {
os::Linux::set_is_NPTL();
} else {
os::Linux::set_is_LinuxThreads();
}

// LinuxThreads have two flavors: floating-stack mode, which allows variable
// stack size; and fixed-stack mode. NPTL is always floating-stack.
if (os::Linux::is_NPTL() || os::Linux::supports_variable_stack_size()) {
os::Linux::set_is_floating_stack();
}
assert(n > 0, "cannot retrieve pthread version");
str = (char *)malloc(n, mtInternal);
confstr(_CS_GNU_LIBPTHREAD_VERSION, str, n);
os::Linux::set_libpthread_version(str);
}

/////////////////////////////////////////////////////////////////////////////
// thread stack
// thread stack expansion

// os::Linux::manually_expand_stack() takes care of expanding the thread
// stack. Note that this is normally not needed: pthread stacks allocate
// thread stack using mmap() without MAP_NORESERVE, so the stack is already
// committed. Therefore it is not necessary to expand the stack manually.
//
// Manually expanding the stack was historically needed on LinuxThreads
// thread stacks, which were allocated with mmap(MAP_GROWSDOWN). Nowadays
// it is kept to deal with very rare corner cases:
//
// For one, user may run the VM on an own implementation of threads
// whose stacks are - like the old LinuxThreads - implemented using
// mmap(MAP_GROWSDOWN).
//
// Also, this coding may be needed if the VM is running on the primordial
// thread. Normally we avoid running on the primordial thread; however,
// user may still invoke the VM on the primordial thread.
//
// The following historical comment describes the details about running
// on a thread stack allocated with mmap(MAP_GROWSDOWN):


// Force Linux kernel to expand current thread stack. If "bottom" is close
// to the stack guard, caller should block all signals.
@ -593,10 +561,7 @@ void os::Linux::libpthread_init() {
// stack overflow detection.
//
// Newer version of LinuxThreads (since glibc-2.2, or, RH-7.x) and NPTL do
// not use this flag. However, the stack of initial thread is not created
// by pthread, it is still MAP_GROWSDOWN. Also it's possible (though
// unlikely) that user code can create a thread with MAP_GROWSDOWN stack
// and then attach the thread to JVM.
// not use MAP_GROWSDOWN.
//
// To get around the problem and allow stack banging on Linux, we need to
// manually expand thread stack after receiving the SIGSEGV.
@ -671,45 +636,6 @@ bool os::Linux::manually_expand_stack(JavaThread * t, address addr) {
//////////////////////////////////////////////////////////////////////////////
// create new thread

static address highest_vm_reserved_address();

// check if it's safe to start a new thread
static bool _thread_safety_check(Thread* thread) {
if (os::Linux::is_LinuxThreads() && !os::Linux::is_floating_stack()) {
// Fixed stack LinuxThreads (SuSE Linux/x86, and some versions of Redhat)
// Heap is mmap'ed at lower end of memory space. Thread stacks are
// allocated (MAP_FIXED) from high address space. Every thread stack
// occupies a fixed size slot (usually 2Mbytes, but user can change
// it to other values if they rebuild LinuxThreads).
//
// Problem with MAP_FIXED is that mmap() can still succeed even part of
// the memory region has already been mmap'ed. That means if we have too
// many threads and/or very large heap, eventually thread stack will
// collide with heap.
//
// Here we try to prevent heap/stack collision by comparing current
// stack bottom with the highest address that has been mmap'ed by JVM
// plus a safety margin for memory maps created by native code.
//
// This feature can be disabled by setting ThreadSafetyMargin to 0
//
if (ThreadSafetyMargin > 0) {
address stack_bottom = os::current_stack_base() - os::current_stack_size();

// not safe if our stack extends below the safety margin
return stack_bottom - ThreadSafetyMargin >= highest_vm_reserved_address();
} else {
return true;
}
} else {
// Floating stack LinuxThreads or NPTL:
// Unlike fixed stack LinuxThreads, thread stacks are not MAP_FIXED. When
// there's not enough space left, pthread_create() will fail. If we come
// here, that means enough space has been reserved for stack.
return true;
}
}

// Thread start routine for all newly created threads
static void *java_start(Thread *thread) {
// Try to randomize the cache line index of hot stack frames.
@ -726,15 +652,6 @@ static void *java_start(Thread *thread) {
OSThread* osthread = thread->osthread();
Monitor* sync = osthread->startThread_lock();

// non floating stack LinuxThreads needs extra check, see above
if (!_thread_safety_check(thread)) {
// notify parent thread
MutexLockerEx ml(sync, Mutex::_no_safepoint_check_flag);
osthread->set_state(ZOMBIE);
sync->notify_all();
return NULL;
}

// thread_id is kernel thread id (similar to Solaris LWP id)
osthread->set_thread_id(os::Linux::gettid());

@ -833,12 +750,6 @@ bool os::create_thread(Thread* thread, ThreadType thr_type,
ThreadState state;

{
// Serialize thread creation if we are running with fixed stack LinuxThreads
bool lock = os::Linux::is_LinuxThreads() && !os::Linux::is_floating_stack();
if (lock) {
os::Linux::createThread_lock()->lock_without_safepoint_check();
}

pthread_t tid;
int ret = pthread_create(&tid, &attr, (void* (*)(void*)) java_start, thread);

@ -851,7 +762,6 @@ bool os::create_thread(Thread* thread, ThreadType thr_type,
// Need to clean up stuff we've allocated so far
thread->set_osthread(NULL);
delete osthread;
if (lock) os::Linux::createThread_lock()->unlock();
return false;
}

@ -866,10 +776,6 @@ bool os::create_thread(Thread* thread, ThreadType thr_type,
sync_with_child->wait(Mutex::_no_safepoint_check_flag);
}
}

if (lock) {
os::Linux::createThread_lock()->unlock();
}
}

// Aborted due to thread limit being reached
@ -1497,7 +1403,6 @@ void os::abort(bool dump_core, void* siginfo, void* context) {

// Die immediately, no exit hook, no abort hook, no cleanup.
void os::die() {
// _exit() on LinuxThreads only kills current thread
::abort();
}

@ -1520,24 +1425,7 @@ size_t os::lasterror(char *buf, size_t len) {

intx os::current_thread_id() { return (intx)pthread_self(); }
int os::current_process_id() {

// Under the old linux thread library, linux gives each thread
// its own process id. Because of this each thread will return
// a different pid if this method were to return the result
// of getpid(2). Linux provides no api that returns the pid
// of the launcher thread for the vm. This implementation
// returns a unique pid, the pid of the launcher thread
// that starts the vm 'process'.

// Under the NPTL, getpid() returns the same pid as the
// launcher thread rather than a unique pid per thread.
// Use gettid() if you want the old pre NPTL behaviour.

// if you are looking for the result of a call to getpid() that
// returns a unique pid for the calling thread, then look at the
// OSThread::thread_id() method in osThread_linux.hpp file

return (int)(_initial_pid ? _initial_pid : getpid());
return ::getpid();
}

// DLL functions
@ -1623,7 +1511,8 @@ bool os::address_is_in_vm(address addr) {
}

bool os::dll_address_to_function_name(address addr, char *buf,
int buflen, int *offset) {
int buflen, int *offset,
bool demangle) {
// buf is not optional, but offset is optional
assert(buf != NULL, "sanity check");

@ -1632,7 +1521,7 @@ bool os::dll_address_to_function_name(address addr, char *buf,
if (dladdr((void*)addr, &dlinfo) != 0) {
// see if we have a matching symbol
if (dlinfo.dli_saddr != NULL && dlinfo.dli_sname != NULL) {
if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
if (!(demangle && Decoder::demangle(dlinfo.dli_sname, buf, buflen))) {
jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
}
if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
@ -1641,7 +1530,7 @@ bool os::dll_address_to_function_name(address addr, char *buf,
// no matching symbol so try for just file info
if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
buf, buflen, offset, dlinfo.dli_fname)) {
buf, buflen, offset, dlinfo.dli_fname, demangle)) {
return true;
}
}
@ -2183,9 +2072,6 @@ void os::Linux::print_libversion_info(outputStream* st) {
st->print("libc:");
st->print("%s ", os::Linux::glibc_version());
st->print("%s ", os::Linux::libpthread_version());
if (os::Linux::is_LinuxThreads()) {
st->print("(%s stack)", os::Linux::is_floating_stack() ? "floating" : "fixed");
}
st->cr();
}

@ -2215,12 +2101,52 @@ void os::print_memory_info(outputStream* st) {
st->cr();
}

void os::pd_print_cpu_info(outputStream* st) {
st->print("\n/proc/cpuinfo:\n");
if (!_print_ascii_file("/proc/cpuinfo", st)) {
st->print(" <Not Available>");
// Print the first "model name" line and the first "flags" line
// that we find and nothing more. We assume "model name" comes
// before "flags" so if we find a second "model name", then the
// "flags" field is considered missing.
static bool print_model_name_and_flags(outputStream* st, char* buf, size_t buflen) {
#if defined(IA32) || defined(AMD64)
// Other platforms have less repetitive cpuinfo files
FILE *fp = fopen("/proc/cpuinfo", "r");
if (fp) {
while (!feof(fp)) {
if (fgets(buf, buflen, fp)) {
// Assume model name comes before flags
bool model_name_printed = false;
if (strstr(buf, "model name") != NULL) {
if (!model_name_printed) {
st->print_raw("\nCPU Model and flags from /proc/cpuinfo:\n");
st->print_raw(buf);
model_name_printed = true;
} else {
// model name printed but not flags? Odd, just return
fclose(fp);
return true;
}
}
// print the flags line too
if (strstr(buf, "flags") != NULL) {
st->print_raw(buf);
fclose(fp);
return true;
}
}
}
fclose(fp);
}
#endif // x86 platforms
return false;
}

void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
// Only print the model name if the platform provides this as a summary
if (!print_model_name_and_flags(st, buf, buflen)) {
st->print("\n/proc/cpuinfo:\n");
if (!_print_ascii_file("/proc/cpuinfo", st)) {
st->print_cr(" <Not Available>");
}
}
st->cr();
}

void os::print_siginfo(outputStream* st, void* siginfo) {
@ -3044,8 +2970,6 @@ bool os::remove_stack_guard_pages(char* addr, size_t size) {
return os::uncommit_memory(addr, size);
}

static address _highest_vm_reserved_address = NULL;

// If 'fixed' is true, anon_mmap() will attempt to reserve anonymous memory
// at 'requested_addr'. If there are existing memory mappings at the same
// location, however, they will be overwritten. If 'fixed' is false,
@ -3068,23 +2992,9 @@ static char* anon_mmap(char* requested_addr, size_t bytes, bool fixed) {
addr = (char*)::mmap(requested_addr, bytes, PROT_NONE,
flags, -1, 0);

if (addr != MAP_FAILED) {
// anon_mmap() should only get called during VM initialization,
// don't need lock (actually we can skip locking even it can be called
// from multiple threads, because _highest_vm_reserved_address is just a
// hint about the upper limit of non-stack memory regions.)
if ((address)addr + bytes > _highest_vm_reserved_address) {
_highest_vm_reserved_address = (address)addr + bytes;
}
}

return addr == MAP_FAILED ? NULL : addr;
}

// Don't update _highest_vm_reserved_address, because there might be memory
// regions above addr + size. If so, releasing a memory region only creates
// a hole in the address space, it doesn't help prevent heap-stack collision.
//
static int anon_munmap(char * addr, size_t size) {
return ::munmap(addr, size) == 0;
}
@ -3098,10 +3008,6 @@ bool os::pd_release_memory(char* addr, size_t size) {
return anon_munmap(addr, size);
}

static address highest_vm_reserved_address() {
return _highest_vm_reserved_address;
}

static bool linux_mprotect(char* addr, size_t size, int prot) {
// Linux wants the mprotect address argument to be page aligned.
char* bottom = (char*)align_size_down((intptr_t)addr, os::Linux::page_size());
@ -3718,15 +3624,7 @@ char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block");

// Repeatedly allocate blocks until the block is allocated at the
// right spot. Give up after max_tries. Note that reserve_memory() will
// automatically update _highest_vm_reserved_address if the call is
// successful. The variable tracks the highest memory address every reserved
// by JVM. It is used to detect heap-stack collision if running with
// fixed-stack LinuxThreads. Because here we may attempt to reserve more
// space than needed, it could confuse the collision detecting code. To
// solve the problem, save current _highest_vm_reserved_address and
// calculate the correct value before return.
address old_highest = _highest_vm_reserved_address;
// right spot.

// Linux mmap allows caller to pass an address as hint; give it a try first,
// if kernel honors the hint then we can return immediately.
@ -3780,10 +3678,8 @@ char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
}

if (i < max_tries) {
_highest_vm_reserved_address = MAX2(old_highest, (address)requested_addr + bytes);
return requested_addr;
} else {
_highest_vm_reserved_address = old_highest;
return NULL;
}
}
@ -4627,16 +4523,6 @@ void os::init(void) {
char dummy; // used to get a guess on initial stack address
// first_hrtime = gethrtime();

// With LinuxThreads the JavaMain thread pid (primordial thread)
// is different than the pid of the java launcher thread.
// So, on Linux, the launcher thread pid is passed to the VM
// via the sun.java.launcher.pid property.
// Use this property instead of getpid() if it was correctly passed.
// See bug 6351349.
pid_t java_launcher_pid = (pid_t) Arguments::sun_java_launcher_pid();

_initial_pid = (java_launcher_pid > 0) ? java_launcher_pid : getpid();

clock_tics_per_sec = sysconf(_SC_CLK_TCK);

init_random(1234567);
@ -4769,9 +4655,8 @@ jint os::init_2(void) {

Linux::libpthread_init();
if (PrintMiscellaneous && (Verbose || WizardMode)) {
tty->print_cr("[HotSpot is running with %s, %s(%s)]\n",
Linux::glibc_version(), Linux::libpthread_version(),
Linux::is_floating_stack() ? "floating stack" : "fixed stack");
tty->print_cr("[HotSpot is running with %s, %s]\n",
Linux::glibc_version(), Linux::libpthread_version());
}

if (UseNUMA) {
@ -4946,22 +4831,6 @@ ExtendedPC os::get_thread_pc(Thread* thread) {
return fetcher.result();
}

int os::Linux::safe_cond_timedwait(pthread_cond_t *_cond,
pthread_mutex_t *_mutex,
const struct timespec *_abstime) {
if (is_NPTL()) {
return pthread_cond_timedwait(_cond, _mutex, _abstime);
} else {
// 6292965: LinuxThreads pthread_cond_timedwait() resets FPU control
// word back to default 64bit precision if condvar is signaled. Java
// wants 53bit precision. Save and restore current value.
int fpu = get_fpu_control_word();
int status = pthread_cond_timedwait(_cond, _mutex, _abstime);
set_fpu_control_word(fpu);
return status;
}
}

////////////////////////////////////////////////////////////////////////////////
// debug support

@ -5585,7 +5454,7 @@ int os::PlatformEvent::park(jlong millis) {
// In that case, we should propagate the notify to another waiter.

while (_Event < 0) {
status = os::Linux::safe_cond_timedwait(_cond, _mutex, &abst);
status = pthread_cond_timedwait(_cond, _mutex, &abst);
if (status != 0 && WorkAroundNPTLTimedWaitHang) {
pthread_cond_destroy(_cond);
pthread_cond_init(_cond, os::Linux::condAttr());
@ -5813,7 +5682,7 @@ void Parker::park(bool isAbsolute, jlong time) {
status = pthread_cond_wait(&_cond[_cur_index], _mutex);
} else {
_cur_index = isAbsolute ? ABS_INDEX : REL_INDEX;
status = os::Linux::safe_cond_timedwait(&_cond[_cur_index], _mutex, &absTime);
status = pthread_cond_timedwait(&_cond[_cur_index], _mutex, &absTime);
if (status != 0 && WorkAroundNPTLTimedWaitHang) {
pthread_cond_destroy(&_cond[_cur_index]);
pthread_cond_init(&_cond[_cur_index], isAbsolute ? NULL : os::Linux::condAttr());
|
@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,9 +27,6 @@

// Linux_OS defines the interface to Linux operating systems

// pthread_getattr_np comes with LinuxThreads-0.9-7 on RedHat 7.1
typedef int (*pthread_getattr_func_type)(pthread_t, pthread_attr_t *);

// Information about the protection of the page at address '0' on this os.
static bool zero_page_read_protected() { return true; }

@ -63,8 +60,6 @@ class Linux {
static const char *_glibc_version;
static const char *_libpthread_version;

static bool _is_floating_stack;
static bool _is_NPTL;
static bool _supports_fast_thread_cpu_time;

static GrowableArray<int>* _cpu_to_node;
@ -90,10 +85,6 @@ class Linux {

static bool supports_variable_stack_size();

static void set_is_NPTL() { _is_NPTL = true; }
static void set_is_LinuxThreads() { _is_NPTL = false; }
static void set_is_floating_stack() { _is_floating_stack = true; }

static void rebuild_cpu_to_node_map();
static GrowableArray<int>* cpu_to_node() { return _cpu_to_node; }

@ -178,14 +169,6 @@ class Linux {
static const char *glibc_version() { return _glibc_version; }
static const char *libpthread_version() { return _libpthread_version; }

// NPTL or LinuxThreads?
static bool is_LinuxThreads() { return !_is_NPTL; }
static bool is_NPTL() { return _is_NPTL; }

// NPTL is always floating stack. LinuxThreads could be using floating
// stack or fixed stack.
static bool is_floating_stack() { return _is_floating_stack; }

static void libpthread_init();
static bool libnuma_init();
static void* libnuma_dlsym(void* handle, const char* name);
@ -234,9 +217,6 @@ class Linux {

// none present

// LinuxThreads work-around for 6292965
static int safe_cond_timedwait(pthread_cond_t *_cond, pthread_mutex_t *_mutex, const struct timespec *_abstime);

private:
typedef int (*sched_getcpu_func_t)(void);
typedef int (*numa_node_to_cpus_func_t)(int node, unsigned long *buffer, int bufferlen);
|
@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,7 +28,7 @@
//
// Defines Solaris specific flags. They are not available on other platforms.
//
#define RUNTIME_OS_FLAGS(develop, develop_pd, product, product_pd, diagnostic, notproduct) \
#define RUNTIME_OS_FLAGS(develop, develop_pd, product, product_pd, diagnostic, notproduct, range, constraint) \
\
product(bool, UseExtendedFileIO, true, \
"Enable workaround for limitations of stdio FILE structure")
|
@ -1627,7 +1627,8 @@ typedef int (*dladdr1_func_type)(void *, Dl_info *, void **, int);
static dladdr1_func_type dladdr1_func = NULL;

bool os::dll_address_to_function_name(address addr, char *buf,
int buflen, int * offset) {
int buflen, int * offset,
bool demangle) {
// buf is not optional, but offset is optional
assert(buf != NULL, "sanity check");

@ -1655,7 +1656,7 @@ bool os::dll_address_to_function_name(address addr, char *buf,
if (dlinfo.dli_saddr != NULL &&
(char *)dlinfo.dli_saddr + info->st_size > (char *)addr) {
if (dlinfo.dli_sname != NULL) {
if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
if (!(demangle && Decoder::demangle(dlinfo.dli_sname, buf, buflen))) {
jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
}
if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
@ -1665,7 +1666,7 @@ bool os::dll_address_to_function_name(address addr, char *buf,
// no matching symbol so try for just file info
if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
buf, buflen, offset, dlinfo.dli_fname)) {
buf, buflen, offset, dlinfo.dli_fname, demangle)) {
return true;
}
}
@ -1679,7 +1680,7 @@ bool os::dll_address_to_function_name(address addr, char *buf,
if (dladdr((void *)addr, &dlinfo) != 0) {
// see if we have a matching symbol
if (dlinfo.dli_saddr != NULL && dlinfo.dli_sname != NULL) {
if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
if (!(demangle && Decoder::demangle(dlinfo.dli_sname, buf, buflen))) {
jio_snprintf(buf, buflen, dlinfo.dli_sname);
}
if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
@ -1688,7 +1689,7 @@ bool os::dll_address_to_function_name(address addr, char *buf,
// no matching symbol so try for just file info
if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
buf, buflen, offset, dlinfo.dli_fname)) {
buf, buflen, offset, dlinfo.dli_fname, demangle)) {
return true;
}
}
@ -1996,7 +1997,7 @@ static bool check_addr0(outputStream* st) {
return status;
}

void os::pd_print_cpu_info(outputStream* st) {
void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
// Nothing to do for now.
}

|
@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -162,7 +162,7 @@ void WindowsDecoder::initialize() {
// current function and comparing the result
address addr = (address)Decoder::demangle;
char buf[MAX_PATH];
if (decode(addr, buf, sizeof(buf), NULL)) {
if (decode(addr, buf, sizeof(buf), NULL, NULL, true /* demangle */)) {
_can_decode_in_vm = !strcmp(buf, "Decoder::demangle");
}
}
@ -187,7 +187,7 @@ bool WindowsDecoder::can_decode_C_frame_in_vm() const {
}


bool WindowsDecoder::decode(address addr, char *buf, int buflen, int* offset, const char* modulepath) {
bool WindowsDecoder::decode(address addr, char *buf, int buflen, int* offset, const char* modulepath, bool demangle_name) {
if (_pfnSymGetSymFromAddr64 != NULL) {
PIMAGEHLP_SYMBOL64 pSymbol;
char symbolInfo[MAX_PATH + sizeof(IMAGEHLP_SYMBOL64)];
@ -197,7 +197,7 @@ bool WindowsDecoder::decode(address addr, char *buf, int buflen, int* offset, co
DWORD64 displacement;
if (_pfnSymGetSymFromAddr64(::GetCurrentProcess(), (DWORD64)addr, &displacement, pSymbol)) {
if (buf != NULL) {
if (demangle(pSymbol->Name, buf, buflen)) {
if (!(demangle_name && demangle(pSymbol->Name, buf, buflen))) {
jio_snprintf(buf, buflen, "%s", pSymbol->Name);
}
}
|
@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -60,7 +60,7 @@ public:

bool can_decode_C_frame_in_vm() const;
bool demangle(const char* symbol, char *buf, int buflen);
bool decode(address addr, char *buf, int buflen, int* offset, const char* modulepath = NULL);
bool decode(address addr, char *buf, int buflen, int* offset, const char* modulepath, bool demangle);
bool decode(address addr, char *buf, int buflen, int* offset, const void* base) {
ShouldNotReachHere();
return false;
|
@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,8 +28,7 @@
//
// Defines Windows specific flags. They are not available on other platforms.
//
#define RUNTIME_OS_FLAGS(develop, develop_pd, product, product_pd, \
diagnostic, notproduct) \
#define RUNTIME_OS_FLAGS(develop, develop_pd, product, product_pd, diagnostic, notproduct, range, constraint) \
\
product(bool, UseUTCFileTimestamp, true, \
"Adjust the timestamp returned from stat() to be UTC")
|
@ -1369,11 +1369,12 @@ bool os::dll_address_to_library_name(address addr, char* buf,
}

bool os::dll_address_to_function_name(address addr, char *buf,
int buflen, int *offset) {
int buflen, int *offset,
bool demangle) {
// buf is not optional, but offset is optional
assert(buf != NULL, "sanity check");

if (Decoder::decode(addr, buf, buflen, offset)) {
if (Decoder::decode(addr, buf, buflen, offset, demangle)) {
return true;
}
if (offset != NULL) *offset = -1;
@ -1732,7 +1733,7 @@ void os::win32::print_windows_version(outputStream* st) {
st->cr();
}

void os::pd_print_cpu_info(outputStream* st) {
void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
// Nothing to do for now.
}

|
@ -10,7 +10,7 @@
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file hat
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
|
@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -619,54 +619,15 @@ bool os::is_allocatable(size_t bytes) {

#ifdef AMD64
size_t os::Linux::min_stack_allowed = 64 * K;

// amd64: pthread on amd64 is always in floating stack mode
bool os::Linux::supports_variable_stack_size() { return true; }
#else
size_t os::Linux::min_stack_allowed = (48 DEBUG_ONLY(+4))*K;

#ifdef __GNUC__
#define GET_GS() ({int gs; __asm__ volatile("movw %%gs, %w0":"=q"(gs)); gs&0xffff;})
#endif

// Test if pthread library can support variable thread stack size. LinuxThreads
// in fixed stack mode allocates 2M fixed slot for each thread. LinuxThreads
// in floating stack mode and NPTL support variable stack size.
bool os::Linux::supports_variable_stack_size() {
if (os::Linux::is_NPTL()) {
// NPTL, yes
return true;

} else {
// Note: We can't control default stack size when creating a thread.
// If we use non-default stack size (pthread_attr_setstacksize), both
// floating stack and non-floating stack LinuxThreads will return the
// same value. This makes it impossible to implement this function by
// detecting thread stack size directly.
//
// An alternative approach is to check %gs. Fixed-stack LinuxThreads
// do not use %gs, so its value is 0. Floating-stack LinuxThreads use
// %gs (either as LDT selector or GDT selector, depending on kernel)
// to access thread specific data.
//
// Note that %gs is a reserved glibc register since early 2001, so
// applications are not allowed to change its value (Ulrich Drepper from
// Redhat confirmed that all known offenders have been modified to use
// either %fs or TSD). In the worst case scenario, when VM is embedded in
// a native application that plays with %gs, we might see non-zero %gs
// even LinuxThreads is running in fixed stack mode. As the result, we'll
// return true and skip _thread_safety_check(), so we may not be able to
// detect stack-heap collisions. But otherwise it's harmless.
//
#ifdef __GNUC__
return (GET_GS() != 0);
#else
return false;
#endif
}
}
#endif // AMD64

// Test if pthread library can support variable thread stack size.
bool os::Linux::supports_variable_stack_size() {
return true;
}

// return default stack size for thr_type
size_t os::Linux::default_stack_size(os::ThreadType thr_type) {
// default stack size (compiler thread needs larger stack)
|
@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -40,8 +40,7 @@
// actual memory pages are committed on demand.
//
// If an application creates and destroys a lot of threads, usually the
// stack space freed by a thread will soon get reused by new thread
// (this is especially true in NPTL or LinuxThreads in fixed-stack mode).
// stack space freed by a thread will soon get reused by new thread.
// No memory page in _sp_map is wasted.
//
// However, it's still possible that we might end up populating &
|
@ -363,9 +363,6 @@ class CompilerInterfaceVC10 extends CompilerInterface {

// Set /On option
addAttr(rv, "Optimization", opt);
// Set /FR option.
addAttr(rv, "BrowseInformation", "true");
addAttr(rv, "BrowseInformationFile", "$(IntDir)");
// Set /MD option.
addAttr(rv, "RuntimeLibrary", "MultiThreadedDLL");
// Set /Oy- option
|
@ -1619,6 +1619,9 @@ void LIRGenerator::CardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc*
LIR_Opr dirty = LIR_OprFact::intConst(CardTableModRefBS::dirty_card_val());
if (UseCondCardMark) {
LIR_Opr cur_value = new_register(T_INT);
if (UseConcMarkSweepGC) {
__ membar_storeload();
}
__ move(card_addr, cur_value);

LabelObj* L_already_dirty = new LabelObj();
|
@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,4 +25,4 @@
#include "precompiled.hpp"
#include "c1/c1_globals.hpp"

C1_FLAGS(MATERIALIZE_DEVELOPER_FLAG, MATERIALIZE_PD_DEVELOPER_FLAG, MATERIALIZE_PRODUCT_FLAG, MATERIALIZE_PD_PRODUCT_FLAG, MATERIALIZE_DIAGNOSTIC_FLAG, MATERIALIZE_NOTPRODUCT_FLAG)
C1_FLAGS(MATERIALIZE_DEVELOPER_FLAG, MATERIALIZE_PD_DEVELOPER_FLAG, MATERIALIZE_PRODUCT_FLAG, MATERIALIZE_PD_PRODUCT_FLAG, MATERIALIZE_DIAGNOSTIC_FLAG, MATERIALIZE_NOTPRODUCT_FLAG, IGNORE_RANGE, IGNORE_CONSTRAINT)
|
@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -60,7 +60,7 @@
//
// Defines all global flags used by the client compiler.
//
#define C1_FLAGS(develop, develop_pd, product, product_pd, diagnostic, notproduct) \
#define C1_FLAGS(develop, develop_pd, product, product_pd, diagnostic, notproduct, range, constraint) \
\
/* Printing */ \
notproduct(bool, PrintC1Statistics, false, \
@ -148,6 +148,7 @@
\
product(intx, ValueMapInitialSize, 11, \
"Initial size of a value map") \
range(1, NOT_LP64(1*K) LP64_ONLY(32*K)) \
\
product(intx, ValueMapMaxLoopSize, 8, \
"maximum size of a loop optimized by global value numbering") \
@ -191,6 +192,7 @@
\
develop(intx, NestedInliningSizeRatio, 90, \
"Percentage of prev. allowed inline size in recursive inlining") \
range(0, 100) \
\
notproduct(bool, PrintIRWithLIR, false, \
"Print IR instructions with generated LIR") \
@ -338,10 +340,15 @@
diagnostic(bool, C1PatchInvokeDynamic, true, \
"Patch invokedynamic appendix not known at compile time") \
\


// Read default values for c1 globals

C1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_NOTPRODUCT_FLAG)
C1_FLAGS(DECLARE_DEVELOPER_FLAG, \
DECLARE_PD_DEVELOPER_FLAG, \
DECLARE_PRODUCT_FLAG, \
DECLARE_PD_PRODUCT_FLAG, \
DECLARE_DIAGNOSTIC_FLAG, \
DECLARE_NOTPRODUCT_FLAG, \
IGNORE_RANGE, \
IGNORE_CONSTRAINT)

#endif // SHARE_VM_C1_C1_GLOBALS_HPP
|
@ -949,8 +949,7 @@ void ClassFileParser::parse_field_attributes(u2 attributes_count,
assert(runtime_visible_annotations != NULL, "null visible annotations");
parse_annotations(runtime_visible_annotations,
runtime_visible_annotations_length,
parsed_annotations,
CHECK);
parsed_annotations);
cfs->skip_u1(runtime_visible_annotations_length, CHECK);
} else if (attribute_name == vmSymbols::tag_runtime_invisible_annotations()) {
if (runtime_invisible_annotations_exists) {
@ -1643,7 +1642,6 @@ int ClassFileParser::skip_annotation_value(u1* buffer, int limit, int index) {
index = skip_annotation(buffer, limit, index);
break;
default:
assert(false, "annotation tag");
return limit; // bad tag byte
}
return index;
@ -1651,8 +1649,7 @@ int ClassFileParser::skip_annotation_value(u1* buffer, int limit, int index) {

// Sift through annotations, looking for those significant to the VM:
void ClassFileParser::parse_annotations(u1* buffer, int limit,
ClassFileParser::AnnotationCollector* coll,
TRAPS) {
ClassFileParser::AnnotationCollector* coll) {
// annotations := do(nann:u2) {annotation}
int index = 0;
if ((index += 2) >= limit) return; // read nann
@ -2286,8 +2283,7 @@ methodHandle ClassFileParser::parse_method(bool is_interface,
runtime_visible_annotations = cfs->get_u1_buffer();
assert(runtime_visible_annotations != NULL, "null visible annotations");
parse_annotations(runtime_visible_annotations,
runtime_visible_annotations_length, &parsed_annotations,
CHECK_(nullHandle));
runtime_visible_annotations_length, &parsed_annotations);
cfs->skip_u1(runtime_visible_annotations_length, CHECK_(nullHandle));
} else if (method_attribute_name == vmSymbols::tag_runtime_invisible_annotations()) {
if (runtime_invisible_annotations_exists) {
@ -2951,8 +2947,7 @@ void ClassFileParser::parse_classfile_attributes(ClassFileParser::ClassAnnotatio
assert(runtime_visible_annotations != NULL, "null visible annotations");
parse_annotations(runtime_visible_annotations,
runtime_visible_annotations_length,
parsed_annotations,
CHECK);
parsed_annotations);
cfs->skip_u1(runtime_visible_annotations_length, CHECK);
} else if (tag == vmSymbols::tag_runtime_invisible_annotations()) {
if (runtime_invisible_annotations_exists) {
|
@ -295,8 +295,7 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
int skip_annotation_value(u1* buffer, int limit, int index);
void parse_annotations(u1* buffer, int limit,
/* Results (currently, only one result is supported): */
AnnotationCollector* result,
TRAPS);
AnnotationCollector* result);

// Final setup
unsigned int compute_oop_map_count(instanceKlassHandle super,
|
@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "memory/metaspaceShared.hpp"
#include "prims/jvm.h"
#include "utilities/numberSeq.hpp"
#include <sys/stat.h>

@ -32,11 +33,11 @@
//
// The compact hash table writer implementations
//
CompactHashtableWriter::CompactHashtableWriter(const char* table_name,
CompactHashtableWriter::CompactHashtableWriter(int table_type,
int num_entries,
CompactHashtableStats* stats) {
assert(DumpSharedSpaces, "dump-time only");
_table_name = table_name;
_type = table_type;
_num_entries = num_entries;
_num_buckets = number_of_buckets(_num_entries);
_buckets = NEW_C_HEAP_ARRAY(Entry*, _num_buckets, mtSymbol);
@ -99,7 +100,7 @@ juint* CompactHashtableWriter::dump_table(juint* p, juint** first_bucket,
NumberSeq* summary) {
int index;
juint* compact_table = p;
// Find the start of the buckets, skip the compact_bucket_infos table
// Compute the start of the buckets, include the compact_bucket_infos table
// and the table end offset.
juint offset = _num_buckets + 1;
*first_bucket = compact_table + offset;
@ -130,10 +131,17 @@ juint* CompactHashtableWriter::dump_table(juint* p, juint** first_bucket,
// Write the compact table's entries
juint* CompactHashtableWriter::dump_buckets(juint* compact_table, juint* p,
NumberSeq* summary) {
uintx base_address = uintx(MetaspaceShared::shared_rs()->base());
uintx max_delta = uintx(MetaspaceShared::shared_rs()->size());
assert(max_delta <= 0x7fffffff, "range check");
uintx base_address = 0;
uintx max_delta = 0;
int num_compact_buckets = 0;
if (_type == CompactHashtable<Symbol*, char>::_symbol_table) {
base_address = uintx(MetaspaceShared::shared_rs()->base());
max_delta = uintx(MetaspaceShared::shared_rs()->size());
assert(max_delta <= 0x7fffffff, "range check");
} else {
assert((_type == CompactHashtable<oop, char>::_string_table), "unknown table");
assert(UseCompressedOops, "UseCompressedOops is required");
}

assert(p != NULL, "sanity");
for (int index = 0; index < _num_buckets; index++) {
@ -148,12 +156,16 @@ juint* CompactHashtableWriter::dump_buckets(juint* compact_table, juint* p,
for (Entry* tent = _buckets[index]; tent;
tent = tent->next()) {
if (bucket_type == REGULAR_BUCKET_TYPE) {
*p++ = juint(tent->hash()); // write symbol hash
*p++ = juint(tent->hash()); // write entry hash
}
if (_type == CompactHashtable<Symbol*, char>::_symbol_table) {
uintx deltax = uintx(tent->value()) - base_address;
assert(deltax < max_delta, "range check");
juint delta = juint(deltax);
*p++ = delta; // write entry offset
} else {
*p++ = oopDesc::encode_heap_oop(tent->string());
}
uintx deltax = uintx(tent->value()) - base_address;
assert(deltax < max_delta, "range check");
juint delta = juint(deltax);
*p++ = delta; // write symbol offset
count ++;
}
assert(count == _bucket_sizes[index], "sanity");
@ -174,6 +186,10 @@ void CompactHashtableWriter::dump(char** top, char* end) {

uintx base_address = uintx(MetaspaceShared::shared_rs()->base());

// Now write the following at the beginning of the table:
// base_address (uintx)
// num_entries (juint)
// num_buckets (juint)
*p++ = high(base_address);
*p++ = low (base_address); // base address
*p++ = _num_entries; // number of entries in the table
@ -191,7 +207,8 @@ void CompactHashtableWriter::dump(char** top, char* end) {
if (_num_entries > 0) {
avg_cost = double(_required_bytes)/double(_num_entries);
}
tty->print_cr("Shared %s table stats -------- base: " PTR_FORMAT, _table_name, (intptr_t)base_address);
tty->print_cr("Shared %s table stats -------- base: " PTR_FORMAT,
table_name(), (intptr_t)base_address);
tty->print_cr("Number of entries : %9d", _num_entries);
tty->print_cr("Total bytes used : %9d", (int)((*top) - old_top));
tty->print_cr("Average bytes per entry : %9.3f", avg_cost);
@ -202,12 +219,24 @@ void CompactHashtableWriter::dump(char** top, char* end) {
}
}

const char* CompactHashtableWriter::table_name() {
switch (_type) {
case CompactHashtable<Symbol*, char>::_symbol_table: return "symbol";
case CompactHashtable<oop, char>::_string_table: return "string";
default:
;
}
return "unknown";
}

/////////////////////////////////////////////////////////////
//
// The CompactHashtable implementation
//
template <class T, class N> const char* CompactHashtable<T, N>::init(const char* buffer) {
template <class T, class N> const char* CompactHashtable<T, N>::init(
CompactHashtableType type, const char* buffer) {
assert(!DumpSharedSpaces, "run-time only");
_type = type;
juint*p = (juint*)buffer;
juint upper = *p++;
juint lower = *p++;
@ -245,8 +274,34 @@ template <class T, class N> void CompactHashtable<T, N>::symbols_do(SymbolClosur
}
}

template <class T, class N> void CompactHashtable<T, N>::oops_do(OopClosure* f) {
assert(!DumpSharedSpaces, "run-time only");
assert(_type == _string_table || _bucket_count == 0, "sanity");
for (juint i = 0; i < _bucket_count; i ++) {
juint bucket_info = _buckets[i];
juint bucket_offset = BUCKET_OFFSET(bucket_info);
int bucket_type = BUCKET_TYPE(bucket_info);
juint* bucket = _buckets + bucket_offset;
juint* bucket_end = _buckets;

narrowOop o;
if (bucket_type == COMPACT_BUCKET_TYPE) {
o = (narrowOop)bucket[0];
f->do_oop(&o);
} else {
bucket_end += BUCKET_OFFSET(_buckets[i + 1]);
while (bucket < bucket_end) {
o = (narrowOop)bucket[1];
f->do_oop(&o);
bucket += 2;
}
}
}
}

// Explicitly instantiate these types
template class CompactHashtable<Symbol*, char>;
template class CompactHashtable<oop, char>;

#ifndef O_BINARY // if defined (Win32) use binary files.
#define O_BINARY 0 // otherwise do nothing.
@ -273,6 +328,8 @@ HashtableTextDump::HashtableTextDump(const char* filename) : _fd(-1) {
_p = _base;
_end = _base + st.st_size;
_filename = filename;
_prefix_type = Unknown;
_line_no = 1;
}

HashtableTextDump::~HashtableTextDump() {
@ -286,9 +343,11 @@ void HashtableTextDump::quit(const char* err, const char* msg) {
vm_exit_during_initialization(err, msg);
}

void HashtableTextDump::corrupted(const char *p) {
char info[60];
sprintf(info, "corrupted at pos %d", (int)(p - _base));
void HashtableTextDump::corrupted(const char *p, const char* msg) {
char info[100];
jio_snprintf(info, sizeof(info),
"%s. Corrupted at line %d (file pos %d)",
msg, _line_no, (int)(p - _base));
quit(info, _filename);
}

@ -298,8 +357,9 @@ bool HashtableTextDump::skip_newline() {
} else if (_p[0] == '\n') {
_p += 1;
} else {
corrupted(_p);
corrupted(_p, "Unexpected character");
}
_line_no ++;
return true;
}

@ -328,26 +388,60 @@ void HashtableTextDump::check_version(const char* ver) {
skip_newline();
}

void HashtableTextDump::scan_prefix_type() {
_p ++;
if (strncmp(_p, "SECTION: String", 15) == 0) {
_p += 15;
_prefix_type = StringPrefix;
} else if (strncmp(_p, "SECTION: Symbol", 15) == 0) {
_p += 15;
_prefix_type = SymbolPrefix;
} else {
_prefix_type = Unknown;
}
skip_newline();
}

int HashtableTextDump::scan_prefix() {
int HashtableTextDump::scan_prefix(int* utf8_length) {
if (*_p == '@') {
scan_prefix_type();
}

switch (_prefix_type) {
case SymbolPrefix:
*utf8_length = scan_symbol_prefix(); break;
case StringPrefix:
*utf8_length = scan_string_prefix(); break;
||||
default:
|
||||
tty->print_cr("Shared input data type: Unknown.");
|
||||
corrupted(_p, "Unknown data type");
|
||||
}
|
||||
|
||||
return _prefix_type;
|
||||
}
|
||||
|
||||
int HashtableTextDump::scan_string_prefix() {
|
||||
// Expect /[0-9]+: /
|
||||
int utf8_length = get_num(':');
|
||||
int utf8_length;
|
||||
get_num(':', &utf8_length);
|
||||
if (*_p != ' ') {
|
||||
corrupted(_p);
|
||||
corrupted(_p, "Wrong prefix format for string");
|
||||
}
|
||||
_p++;
|
||||
return utf8_length;
|
||||
}
|
||||
|
||||
int HashtableTextDump::scan_prefix2() {
|
||||
int HashtableTextDump::scan_symbol_prefix() {
|
||||
// Expect /[0-9]+ (-|)[0-9]+: /
|
||||
int utf8_length = get_num(' ');
|
||||
if (*_p == '-') {
|
||||
_p++;
|
||||
int utf8_length;
|
||||
get_num(' ', &utf8_length);
|
||||
if (*_p == '-') {
|
||||
_p++;
|
||||
}
|
||||
(void)get_num(':');
|
||||
int ref_num;
|
||||
(void)get_num(':', &ref_num);
|
||||
if (*_p != ' ') {
|
||||
corrupted(_p);
|
||||
corrupted(_p, "Wrong prefix format for symbol");
|
||||
}
|
||||
_p++;
|
||||
return utf8_length;
|
||||
@ -408,7 +502,7 @@ void HashtableTextDump::get_utf8(char* utf8_buffer, int utf8_length) {
|
||||
case 'r': *to++ = '\r'; break;
|
||||
case '\\': *to++ = '\\'; break;
|
||||
default:
|
||||
ShouldNotReachHere();
|
||||
corrupted(_p, "Unsupported character");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
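Aside: dump() above stores the 64-bit base_address as two consecutive juints (high word first), and init() reads them back as 'upper' and 'lower'. A standalone sketch of that round trip on a 64-bit build follows; the address value is invented for illustration, and high()/low() are the HotSpot helpers the patch calls.

#include <cstdint>
#include <cstdio>

int main() {
  uint64_t base_address = 0x00007f2a40000000ull;   // invented example value
  uint32_t upper = (uint32_t)(base_address >> 32); // what high(base_address) yields
  uint32_t lower = (uint32_t)base_address;         // what low(base_address) yields
  uint64_t rebuilt = ((uint64_t)upper << 32) | lower;
  printf("%s\n", rebuilt == base_address ? "ok" : "mismatch"); // prints ok
  return 0;
}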
@ -28,6 +28,7 @@
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "memory/allocation.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/symbol.hpp"
#include "services/diagnosticCommand.hpp"
#include "utilities/hashtable.hpp"
@ -49,7 +50,7 @@ public:
// the compact table to the shared archive.
//
// At dump time, the CompactHashtableWriter obtains all entries from the
// symbol table and adds them to a new temporary hash table. The hash
// symbol/string table and adds them to a new temporary hash table. The hash
// table size (number of buckets) is calculated using
// '(num_entries + bucket_size - 1) / bucket_size'. The default bucket
// size is 4 and can be changed by -XX:SharedSymbolTableBucketSize option.
@ -57,14 +58,14 @@ public:
// faster lookup. It also has relatively small number of empty buckets and
// good distribution of the entries.
//
// We use a simple hash function (symbol_hash % num_bucket) for the table.
// We use a simple hash function (hash % num_bucket) for the table.
// The new table is compacted when written out. Please see comments
// above the CompactHashtable class for the table layout detail. The bucket
// offsets are written to the archive as part of the compact table. The
// bucket offset is encoded in the low 30-bit (0-29) and the bucket type
// (regular or compact) are encoded in bit[31, 30]. For buckets with more
// than one entry, both symbol hash and symbol offset are written to the
// table. For buckets with only one entry, only the symbol offset is written
// than one entry, both hash and entry offset are written to the
// table. For buckets with only one entry, only the entry offset is written
// to the table and the buckets are tagged as compact in their type bits.
// Buckets without entry are skipped from the table. Their offsets are
// still written out for faster lookup.
@ -78,6 +79,7 @@ public:

public:
Entry(unsigned int hash, Symbol *symbol) : _next(NULL), _hash(hash), _literal(symbol) {}
Entry(unsigned int hash, oop string) : _next(NULL), _hash(hash), _literal(string) {}

void *value() {
return _literal;
@ -85,6 +87,9 @@ public:
Symbol *symbol() {
return (Symbol*)_literal;
}
oop string() {
return (oop)_literal;
}
unsigned int hash() {
return _hash;
}
@ -95,7 +100,7 @@ public:
private:
static int number_of_buckets(int num_entries);

const char* _table_name;
int _type;
int _num_entries;
int _num_buckets;
juint* _bucket_sizes;
@ -105,7 +110,7 @@ private:

public:
// This is called at dump-time only
CompactHashtableWriter(const char* table_name, int num_entries, CompactHashtableStats* stats);
CompactHashtableWriter(int table_type, int num_entries, CompactHashtableStats* stats);
~CompactHashtableWriter();

int get_required_bytes() {
@ -116,6 +121,10 @@ public:
add(hash, new Entry(hash, symbol));
}

void add(unsigned int hash, oop string) {
add(hash, new Entry(hash, string));
}

private:
void add(unsigned int hash, Entry* entry);
juint* dump_table(juint* p, juint** first_bucket, NumberSeq* summary);
@ -123,6 +132,7 @@ private:

public:
void dump(char** top, char* end);
const char* table_name();
};

#define REGULAR_BUCKET_TYPE 0
@ -136,23 +146,23 @@ public:

/////////////////////////////////////////////////////////////////////////////
//
// CompactHashtable is used to stored the CDS archive's symbol table. Used
// CompactHashtable is used to stored the CDS archive's symbol/string table. Used
// at runtime only to access the compact table from the archive.
//
// Because these tables are read-only (no entries can be added/deleted) at run-time
// and tend to have large number of entries, we try to minimize the footprint
// cost per entry.
//
// Layout of compact symbol table in the shared archive:
// Layout of compact table in the shared archive:
//
// uintx base_address;
// juint num_symbols;
// juint num_entries;
// juint num_buckets;
// juint bucket_infos[num_buckets+1]; // bit[31,30]: type; bit[29-0]: offset
// juint table[]
//
// -----------------------------------
// | base_address | num_symbols |
// | base_address | num_entries |
// |---------------------------------|
// | num_buckets | bucket_info0 |
// |---------------------------------|
@ -177,9 +187,13 @@ public:
// compact buckets have '01' in their highest 2-bit, and regular buckets have
// '00' in their highest 2-bit.
//
// For normal buckets, each symbol's entry is 8 bytes in the table[]:
// juint hash; /* symbol hash */
// juint offset; /* Symbol* sym = (Symbol*)(base_address + offset) */
// For normal buckets, each entry is 8 bytes in the table[]:
// juint hash; /* symbol/string hash */
// union {
// juint offset; /* Symbol* sym = (Symbol*)(base_address + offset) */
// narrowOop str; /* String narrowOop encoding */
// }
//
//
// For compact buckets, each entry has only the 4-byte 'offset' in the table[].
//
@ -189,19 +203,41 @@ public:
//
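The bucket_info word described above packs a 30-bit table offset and a 2-bit bucket type into a single juint; REGULAR_BUCKET_TYPE is 0 and, per the '01' note, COMPACT_BUCKET_TYPE is presumably 1. A standalone sketch of the decode that the BUCKET_OFFSET/BUCKET_TYPE macros are expected to perform (mask and shift are inferred from the comment, not copied from the patch):

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t bucket_info = (1u << 30) | 0x1234u; // compact bucket at offset 0x1234
  uint32_t offset = bucket_info & 0x3fffffffu; // bit[29-0], as BUCKET_OFFSET()
  uint32_t type   = bucket_info >> 30;         // bit[31,30], as BUCKET_TYPE()
  printf("type=%u offset=0x%x\n", type, offset); // prints: type=1 offset=0x1234
  return 0;
}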
template <class T, class N> class CompactHashtable VALUE_OBJ_CLASS_SPEC {
friend class VMStructs;

public:
enum CompactHashtableType {
_symbol_table = 0,
_string_table = 1
};

private:
CompactHashtableType _type;
uintx _base_address;
juint _entry_count;
juint _bucket_count;
juint _table_end_offset;
juint* _buckets;

inline bool equals(T entry, const char* name, int len) {
if (entry->equals(name, len)) {
assert(entry->refcount() == -1, "must be shared");
return true;
} else {
return false;
inline Symbol* lookup_entry(CompactHashtable<Symbol*, char>* const t,
juint* addr, const char* name, int len) {
Symbol* sym = (Symbol*)((void*)(_base_address + *addr));
if (sym->equals(name, len)) {
assert(sym->refcount() == -1, "must be shared");
return sym;
}

return NULL;
}

inline oop lookup_entry(CompactHashtable<oop, char>* const t,
juint* addr, const char* name, int len) {
narrowOop obj = (narrowOop)(*addr);
oop string = oopDesc::decode_heap_oop(obj);
if (java_lang_String::equals(string, (jchar*)name, len)) {
return string;
}

return NULL;
}

public:
@ -211,7 +247,14 @@ public:
_table_end_offset = 0;
_buckets = 0;
}
const char* init(const char *buffer);
const char* init(CompactHashtableType type, const char *buffer);

void reset() {
_entry_count = 0;
_bucket_count = 0;
_table_end_offset = 0;
_buckets = 0;
}

// Lookup an entry from the compact table
inline T lookup(const N* name, unsigned int hash, int len) {
@ -225,23 +268,22 @@ public:
juint* bucket_end = _buckets;

if (bucket_type == COMPACT_BUCKET_TYPE) {
// the compact bucket has one entry with symbol offset only
T entry = (T)((void*)(_base_address + bucket[0]));
if (equals(entry, name, len)) {
return entry;
// the compact bucket has one entry with entry offset only
T res = lookup_entry(this, &bucket[0], name, len);
if (res != NULL) {
return res;
}
} else {
// This is a regular bucket, which has more than one
// entries. Each entry is a pair of symbol (hash, offset).
// entries. Each entry is a pair of entry (hash, offset).
// Seek until the end of the bucket.
bucket_end += BUCKET_OFFSET(_buckets[index + 1]);
while (bucket < bucket_end) {
unsigned int h = (unsigned int)(bucket[0]);
if (h == hash) {
juint offset = bucket[1];
T entry = (T)((void*)(_base_address + offset));
if (equals(entry, name, len)) {
return entry;
T res = lookup_entry(this, &bucket[1], name, len);
if (res != NULL) {
return res;
}
}
bucket += 2;
@ -253,12 +295,15 @@ public:

// iterate over symbols
void symbols_do(SymbolClosure *cl);

// iterate over strings
void oops_do(OopClosure* f);
};

////////////////////////////////////////////////////////////////////////
//
// Read/Write the contents of a hashtable textual dump (created by
// SymbolTable::dump).
// SymbolTable::dump and StringTable::dump).
// Because the dump file may be big (hundred of MB in extreme cases),
// we use mmap for fast access when reading it.
//
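For orientation, here is a hypothetical fragment of the text dump this class parses, pieced together from the scan_prefix_type(), scan_string_prefix() and scan_symbol_prefix() code in compactHashtable.cpp above; the entries themselves are invented:

  @SECTION: Symbol              <- switches _prefix_type to SymbolPrefix
  16 -1: java/lang/String       <- /[0-9]+ (-|)[0-9]+: / then 16 utf8 bytes
  @SECTION: String              <- switches _prefix_type to StringPrefix
  5: hello                      <- /[0-9]+: / then 5 utf8 bytes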
@ -269,21 +314,29 @@ class HashtableTextDump VALUE_OBJ_CLASS_SPEC {
const char* _end;
const char* _filename;
size_t _size;
int _prefix_type;
int _line_no;
public:
HashtableTextDump(const char* filename);
~HashtableTextDump();

enum {
SymbolPrefix = 1 << 0,
StringPrefix = 1 << 1,
Unknown = 1 << 2
};

void quit(const char* err, const char* msg);

inline int remain() {
return (int)(_end - _p);
}

void corrupted(const char *p);
void corrupted(const char *p, const char *msg);

inline void corrupted_if(bool cond) {
if (cond) {
corrupted(_p);
corrupted(_p, NULL);
}
}

@ -292,7 +345,7 @@ public:
void skip_past(char c);
void check_version(const char* ver);

inline int get_num(char delim) {
inline bool get_num(char delim, int *utf8_length) {
const char* p = _p;
const char* end = _end;
int num = 0;
@ -303,18 +356,22 @@ public:
num = num * 10 + (c - '0');
} else if (c == delim) {
_p = p;
return num;
*utf8_length = num;
return true;
} else {
corrupted(p-1);
// Not [0-9], not 'delim'
return false;
}
}
corrupted(_end);
corrupted(_end, "Incorrect format");
ShouldNotReachHere();
return 0;
return false;
}

int scan_prefix();
int scan_prefix2();
void scan_prefix_type();
int scan_prefix(int* utf8_length);
int scan_string_prefix();
int scan_symbol_prefix();

jchar unescape(const char* from, const char* end, int count);
void get_utf8(char* utf8_buffer, int utf8_length);

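get_num() now returns the parsed value through an out-parameter and signals malformed input with its bool result instead of quitting on the spot. A minimal sketch of one way a caller could act on that result; the message text is hypothetical, and the scanners in this patch currently ignore the return value:

int utf8_length;
if (!get_num(':', &utf8_length)) {
  corrupted(_p, "Expected a decimal length");  // hypothetical handling
}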
@ -118,6 +118,10 @@ class java_lang_String : AllStatic {
return hash_offset;
}

static void set_value_raw(oop string, typeArrayOop buffer) {
assert(initialized, "Must be initialized");
string->obj_field_put_raw(value_offset, buffer);
}
static void set_value(oop string, typeArrayOop buffer) {
assert(initialized && (value_offset > 0), "Must be initialized");
string->obj_field_put(value_offset, (oop)buffer);
@ -210,6 +214,7 @@ class java_lang_String : AllStatic {
// Debugging
static void print(oop java_string, outputStream* st);
friend class JavaClasses;
friend class StringTable;
};

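The two setters above differ in write-barrier treatment: obj_field_put() stores through the GC write barrier, while obj_field_put_raw() performs a plain store. The raw variant is what the string-sharing code below uses when filling in archived String copies, presumably because those stores happen outside normal heap mutation. Comment-only sketch:

// set_value(s, buf):     s->obj_field_put(value_offset, buf);      // store + barrier
// set_value_raw(s, buf): s->obj_field_put_raw(value_offset, buf);  // plain store, no barrier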
@ -38,6 +38,7 @@
#include "utilities/hashtable.inline.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/g1StringDedup.hpp"
#endif
@ -87,19 +88,28 @@ class StableMemoryChecker : public StackObj {

// --------------------------------------------------------------------------
StringTable* StringTable::_the_table = NULL;

bool StringTable::_ignore_shared_strings = false;
bool StringTable::_needs_rehashing = false;

volatile int StringTable::_parallel_claimed_idx = 0;

CompactHashtable<oop, char> StringTable::_shared_table;

// Pick hashing algorithm
unsigned int StringTable::hash_string(const jchar* s, int len) {
return use_alternate_hashcode() ? AltHashing::murmur3_32(seed(), s, len) :
java_lang_String::hash_code(s, len);
}

oop StringTable::lookup(int index, jchar* name,
int len, unsigned int hash) {
oop StringTable::lookup_shared(jchar* name, int len) {
// java_lang_String::hash_code() was used to compute hash values in the shared table. Don't
// use the hash value from StringTable::hash_string() as it might use alternate hashcode.
return _shared_table.lookup((const char*)name,
java_lang_String::hash_code(name, len), len);
}

oop StringTable::lookup_in_main_table(int index, jchar* name,
int len, unsigned int hash) {
int count = 0;
for (HashtableEntry<oop, mtSymbol>* l = bucket(index); l != NULL; l = l->next()) {
count++;
@ -140,7 +150,8 @@ oop StringTable::basic_add(int index_arg, Handle string, jchar* name,
// Since look-up was done lock-free, we need to check if another
// thread beat us in the race to insert the symbol.

oop test = lookup(index, name, len, hashValue); // calls lookup(u1*, int)
// No need to lookup the shared table from here since the caller (intern()) already did
oop test = lookup_in_main_table(index, name, len, hashValue); // calls lookup(u1*, int)
if (test != NULL) {
// Entry already added
return test;
@ -172,9 +183,14 @@ static void ensure_string_alive(oop string) {
}

oop StringTable::lookup(jchar* name, int len) {
oop string = lookup_shared(name, len);
if (string != NULL) {
return string;
}

unsigned int hash = hash_string(name, len);
int index = the_table()->hash_to_index(hash);
oop string = the_table()->lookup(index, name, len, hash);
string = the_table()->lookup_in_main_table(index, name, len, hash);

ensure_string_alive(string);

@ -184,9 +200,14 @@ oop StringTable::lookup(jchar* name, int len) {

oop StringTable::intern(Handle string_or_null, jchar* name,
int len, TRAPS) {
oop found_string = lookup_shared(name, len);
if (found_string != NULL) {
return found_string;
}

unsigned int hashValue = hash_string(name, len);
int index = the_table()->hash_to_index(hashValue);
oop found_string = the_table()->lookup(index, name, len, hashValue);
found_string = the_table()->lookup_in_main_table(index, name, len, hashValue);

// Found
if (found_string != NULL) {
@ -611,3 +632,131 @@ int StringtableDCmd::num_arguments() {
return 0;
}
}

// Sharing
bool StringTable::copy_shared_string(GrowableArray<MemRegion> *string_space,
CompactHashtableWriter* ch_table) {
#if INCLUDE_CDS && INCLUDE_ALL_GCS && defined(_LP64) && !defined(_WINDOWS)
assert(UseG1GC, "Only support G1 GC");
assert(UseCompressedOops && UseCompressedClassPointers,
"Only support UseCompressedOops and UseCompressedClassPointers enabled");

Thread* THREAD = Thread::current();
G1CollectedHeap::heap()->begin_archive_alloc_range();
for (int i = 0; i < the_table()->table_size(); ++i) {
HashtableEntry<oop, mtSymbol>* bucket = the_table()->bucket(i);
for ( ; bucket != NULL; bucket = bucket->next()) {
oop s = bucket->literal();
unsigned int hash = java_lang_String::hash_code(s);
if (hash == 0) {
continue;
}

// allocate the new 'value' array first
typeArrayOop v = java_lang_String::value(s);
int v_len = v->size();
typeArrayOop new_v;
if (G1CollectedHeap::heap()->is_archive_alloc_too_large(v_len)) {
continue; // skip the current String. The 'value' array is too large to handle
} else {
new_v = (typeArrayOop)G1CollectedHeap::heap()->archive_mem_allocate(v_len);
if (new_v == NULL) {
return false; // allocation failed
}
}
// now allocate the new String object
int s_len = s->size();
oop new_s = (oop)G1CollectedHeap::heap()->archive_mem_allocate(s_len);
if (new_s == NULL) {
return false;
}

s->identity_hash();
v->identity_hash();

// copy the objects' data
Copy::aligned_disjoint_words((HeapWord*)s, (HeapWord*)new_s, s_len);
Copy::aligned_disjoint_words((HeapWord*)v, (HeapWord*)new_v, v_len);

// adjust the pointer to the 'value' field in the new String oop. Also pre-compute and set the
// 'hash' field. That avoids "write" to the shared strings at runtime by the deduplication process.
java_lang_String::set_value_raw(new_s, new_v);
if (java_lang_String::hash(new_s) == 0) {
java_lang_String::set_hash(new_s, hash);
}

// add to the compact table
ch_table->add(hash, new_s);
}
}

G1CollectedHeap::heap()->end_archive_alloc_range(string_space, os::vm_allocation_granularity());
assert(string_space->length() <= 2, "sanity");
#endif
return true;
}

bool StringTable::copy_compact_table(char** top, char *end, GrowableArray<MemRegion> *string_space,
size_t* space_size) {
#if INCLUDE_CDS && defined(_LP64) && !defined(_WINDOWS)
if (!(UseG1GC && UseCompressedOops && UseCompressedClassPointers)) {
if (PrintSharedSpaces) {
tty->print_cr("Shared strings are excluded from the archive as UseG1GC, "
"UseCompressedOops and UseCompressedClassPointers are required.");
}
return true;
}

CompactHashtableWriter ch_table(CompactHashtable<oop, char>::_string_table,
the_table()->number_of_entries(),
&MetaspaceShared::stats()->string);

// Copy the interned strings into the "string space" within the java heap
if (!copy_shared_string(string_space, &ch_table)) {
return false;
}

for (int i = 0; i < string_space->length(); i++) {
*space_size += string_space->at(i).byte_size();
}

// Now dump the compact table
if (*top + ch_table.get_required_bytes() > end) {
// not enough space left
return false;
}
ch_table.dump(top, end);
*top = (char*)align_pointer_up(*top, sizeof(void*));

#endif
return true;
}

void StringTable::shared_oops_do(OopClosure* f) {
#if INCLUDE_CDS && defined(_LP64) && !defined(_WINDOWS)
_shared_table.oops_do(f);
#endif
}

const char* StringTable::init_shared_table(FileMapInfo *mapinfo, char *buffer) {
#if INCLUDE_CDS && defined(_LP64) && !defined(_WINDOWS)
if (mapinfo->space_capacity(MetaspaceShared::first_string) == 0) {
// no shared string data
return buffer;
}

// initialize the shared table
juint *p = (juint*)buffer;
const char* end = _shared_table.init(
CompactHashtable<oop, char>::_string_table, (char*)p);
const char* aligned_end = (const char*)align_pointer_up(end, sizeof(void*));

if (_ignore_shared_strings) {
_shared_table.reset();
}

return aligned_end;
#endif

return buffer;
}

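Condensed sketch of the lookup order the changes above establish: the read-only shared table is probed first with the plain java_lang_String::hash_code() hash (the only hash the archived table was built with), and only on a miss does the query fall through to the mutable main table, whose hash_string() may be using the alternate murmur3 hash:

// Sketch only; mirrors StringTable::lookup(jchar*, int) above.
oop s = lookup_shared(name, len);              // CDS table, hash_code()
if (s == NULL) {
  unsigned int hash = hash_string(name, len);  // possibly murmur3_32
  int index = the_table()->hash_to_index(hash);
  s = the_table()->lookup_in_main_table(index, name, len, hash);
}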
@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,6 +28,10 @@
#include "memory/allocation.inline.hpp"
#include "utilities/hashtable.hpp"

template <class T, class N> class CompactHashtable;
class CompactHashtableWriter;
class FileMapInfo;

class StringTable : public RehashableHashtable<oop, mtSymbol> {
friend class VMStructs;
friend class Symbol;
@ -36,6 +40,10 @@ private:
// The string table
static StringTable* _the_table;

// Shared string table
static CompactHashtable<oop, char> _shared_table;
static bool _ignore_shared_strings;

// Set if one bucket is out of balance due to hash algorithm deficiency
static bool _needs_rehashing;

@ -46,7 +54,8 @@ private:
oop basic_add(int index, Handle string_or_null, jchar* name, int len,
unsigned int hashValue, TRAPS);

oop lookup(int index, jchar* chars, int length, unsigned int hashValue);
oop lookup_in_main_table(int index, jchar* chars, int length, unsigned int hashValue);
static oop lookup_shared(jchar* name, int len);

// Apply the give oop closure to the entries to the buckets
// in the range [start_idx, end_idx).
@ -141,12 +150,14 @@ public:
static int verify_and_compare_entries();

// Sharing
static void copy_buckets(char** top, char*end) {
the_table()->Hashtable<oop, mtSymbol>::copy_buckets(top, end);
}
static void copy_table(char** top, char*end) {
the_table()->Hashtable<oop, mtSymbol>::copy_table(top, end);
}
static void ignore_shared_strings(bool v) { _ignore_shared_strings = v; }
static bool shared_string_ignored() { return _ignore_shared_strings; }
static void shared_oops_do(OopClosure* f);
static bool copy_shared_string(GrowableArray<MemRegion> *string_space,
CompactHashtableWriter* ch_table);
static bool copy_compact_table(char** top, char* end, GrowableArray<MemRegion> *string_space,
size_t* space_size);
static const char* init_shared_table(FileMapInfo *mapinfo, char* buffer);
static void reverse() {
the_table()->Hashtable<oop, mtSymbol>::reverse();
}

@ -539,7 +539,8 @@ void SymbolTable::dump(outputStream* st, bool verbose) {

bool SymbolTable::copy_compact_table(char** top, char*end) {
#if INCLUDE_CDS
CompactHashtableWriter ch_table("symbol", the_table()->number_of_entries(),
CompactHashtableWriter ch_table(CompactHashtable<Symbol*, char>::_symbol_table,
the_table()->number_of_entries(),
&MetaspaceShared::stats()->symbol);
if (*top + ch_table.get_required_bytes() > end) {
// not enough space left
@ -556,7 +557,6 @@ bool SymbolTable::copy_compact_table(char** top, char*end) {
}
}

char* old_top = *top;
ch_table.dump(top, end);

*top = (char*)align_pointer_up(*top, sizeof(void*));
@ -565,7 +565,8 @@ bool SymbolTable::copy_compact_table(char** top, char*end) {
}

const char* SymbolTable::init_shared_table(const char* buffer) {
const char* end = _shared_table.init(buffer);
const char* end = _shared_table.init(
CompactHashtable<Symbol*, char>::_symbol_table, buffer);
return (const char*)align_pointer_up(end, sizeof(void*));
}

@ -190,7 +190,12 @@ class CodeCache : AllStatic {
static void set_needs_cache_clean(bool v) { _needs_cache_clean = v; }
static void clear_inline_caches(); // clear all inline caches

// Returns the CodeBlobType for nmethods of the given compilation level
// Returns the CodeBlobType for the given nmethod
static int get_code_blob_type(nmethod* nm) {
return get_code_heap(nm)->code_blob_type();
}

// Returns the CodeBlobType for the given compilation level
static int get_code_blob_type(int comp_level) {
if (comp_level == CompLevel_none ||
comp_level == CompLevel_simple ||
@ -287,7 +292,7 @@ private:
// Iterate over all CodeBlobs
_code_blob_type = CodeBlobType::All;
} else if (nm != NULL) {
_code_blob_type = CodeCache::get_code_blob_type(nm->comp_level());
_code_blob_type = CodeCache::get_code_blob_type(nm);
} else {
// Only iterate over method code heaps, starting with non-profiled
_code_blob_type = CodeBlobType::MethodNonProfiled;

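The new overload asks the code heap an nmethod actually resides in for its type, rather than deriving the type from the compilation level, which presumably matters whenever an nmethod was not placed in the heap its level predicts. A comment-only sketch of the two call shapes, with a hypothetical nm:

// int t = CodeCache::get_code_blob_type(nm);               // by actual heap
// int t = CodeCache::get_code_blob_type(nm->comp_level()); // by level only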
@ -1421,7 +1421,7 @@ void nmethod::flush() {
Events::log(JavaThread::current(), "flushing nmethod " INTPTR_FORMAT, this);
if (PrintMethodFlushing) {
tty->print_cr("*flushing nmethod %3d/" INTPTR_FORMAT ". Live blobs:" UINT32_FORMAT "/Free CodeCache:" SIZE_FORMAT "Kb",
_compile_id, this, CodeCache::nof_blobs(), CodeCache::unallocated_capacity(CodeCache::get_code_blob_type(_comp_level))/1024);
_compile_id, this, CodeCache::nof_blobs(), CodeCache::unallocated_capacity(CodeCache::get_code_blob_type(this))/1024);
}

// We need to deallocate any ExceptionCache data.

@ -190,10 +190,10 @@ class CMSParGCThreadState: public CHeapObj<mtGC> {
};

ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
ReservedSpace rs, size_t initial_byte_size, int level,
ReservedSpace rs, size_t initial_byte_size,
CardTableRS* ct, bool use_adaptive_freelists,
FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
CardGeneration(rs, initial_byte_size, level, ct),
CardGeneration(rs, initial_byte_size, ct),
_dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
_did_compact(false)
{
@ -285,9 +285,9 @@ void CMSCollector::ref_processor_init() {
_ref_processor =
new ReferenceProcessor(_span, // span
(ParallelGCThreads > 1) && ParallelRefProcEnabled, // mt processing
(int) ParallelGCThreads, // mt processing degree
ParallelGCThreads, // mt processing degree
_cmsGen->refs_discovery_is_mt(), // mt discovery
(int) MAX2(ConcGCThreads, ParallelGCThreads), // mt discovery degree
MAX2(ConcGCThreads, ParallelGCThreads), // mt discovery degree
_cmsGen->refs_discovery_is_atomic(), // discovery is not atomic
&_is_alive_closure); // closure for liveness info
// Initialize the _ref_processor field of CMSGen
@ -562,7 +562,7 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
// are not shared with parallel scavenge (ParNew).
{
uint i;
uint num_queues = (uint) MAX2(ParallelGCThreads, ConcGCThreads);
uint num_queues = MAX2(ParallelGCThreads, ConcGCThreads);

if ((CMSParallelRemarkEnabled || CMSConcurrentMTEnabled
|| ParallelRefProcEnabled)
@ -682,12 +682,17 @@ void ConcurrentMarkSweepGeneration::print_statistics() {
void ConcurrentMarkSweepGeneration::printOccupancy(const char *s) {
GenCollectedHeap* gch = GenCollectedHeap::heap();
if (PrintGCDetails) {
// I didn't want to change the logging when removing the level concept,
// but I guess this logging could say "old" or something instead of "1".
assert(gch->is_old_gen(this),
"The CMS generation should be the old generation");
uint level = 1;
if (Verbose) {
gclog_or_tty->print("[%d %s-%s: "SIZE_FORMAT"("SIZE_FORMAT")]",
level(), short_name(), s, used(), capacity());
gclog_or_tty->print("[%u %s-%s: "SIZE_FORMAT"("SIZE_FORMAT")]",
level, short_name(), s, used(), capacity());
} else {
gclog_or_tty->print("[%d %s-%s: "SIZE_FORMAT"K("SIZE_FORMAT"K)]",
level(), short_name(), s, used() / K, capacity() / K);
gclog_or_tty->print("[%u %s-%s: "SIZE_FORMAT"K("SIZE_FORMAT"K)]",
level, short_name(), s, used() / K, capacity() / K);
}
}
if (Verbose) {
@ -797,27 +802,22 @@ void ConcurrentMarkSweepGeneration::compute_new_size_free_list() {
gclog_or_tty->print_cr("\nFrom compute_new_size: ");
gclog_or_tty->print_cr(" Free fraction %f", free_percentage);
gclog_or_tty->print_cr(" Desired free fraction %f",
desired_free_percentage);
desired_free_percentage);
gclog_or_tty->print_cr(" Maximum free fraction %f",
maximum_free_percentage);
maximum_free_percentage);
gclog_or_tty->print_cr(" Capacity "SIZE_FORMAT, capacity()/1000);
gclog_or_tty->print_cr(" Desired capacity "SIZE_FORMAT,
desired_capacity/1000);
int prev_level = level() - 1;
if (prev_level >= 0) {
size_t prev_size = 0;
GenCollectedHeap* gch = GenCollectedHeap::heap();
Generation* prev_gen = gch->young_gen();
prev_size = prev_gen->capacity();
gclog_or_tty->print_cr(" Younger gen size "SIZE_FORMAT,
prev_size/1000);
}
desired_capacity/1000);
GenCollectedHeap* gch = GenCollectedHeap::heap();
assert(gch->is_old_gen(this), "The CMS generation should always be the old generation");
size_t young_size = gch->young_gen()->capacity();
gclog_or_tty->print_cr(" Young gen size " SIZE_FORMAT, young_size / 1000);
gclog_or_tty->print_cr(" unsafe_max_alloc_nogc "SIZE_FORMAT,
unsafe_max_alloc_nogc()/1000);
unsafe_max_alloc_nogc()/1000);
gclog_or_tty->print_cr(" contiguous available "SIZE_FORMAT,
contiguous_available()/1000);
contiguous_available()/1000);
gclog_or_tty->print_cr(" Expand by "SIZE_FORMAT" (bytes)",
expand_bytes);
expand_bytes);
}
// safe if expansion fails
expand_for_gc_cause(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
@ -1650,8 +1650,7 @@ void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
_intra_sweep_estimate.padded_average());
}

GenMarkSweep::invoke_at_safepoint(_cmsGen->level(),
ref_processor(), clear_all_soft_refs);
GenMarkSweep::invoke_at_safepoint(ref_processor(), clear_all_soft_refs);
#ifdef ASSERT
CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
size_t free_size = cms_space->free();
@ -2432,7 +2431,7 @@ void CMSCollector::verify_after_remark_work_1() {
StrongRootsScope srs(1);

gch->gen_process_roots(&srs,
_cmsGen->level(),
GenCollectedHeap::OldGen,
true, // younger gens are roots
GenCollectedHeap::ScanningOption(roots_scanning_options()),
should_unload_classes(),
@ -2504,7 +2503,7 @@ void CMSCollector::verify_after_remark_work_2() {
StrongRootsScope srs(1);

gch->gen_process_roots(&srs,
_cmsGen->level(),
GenCollectedHeap::OldGen,
true, // younger gens are roots
GenCollectedHeap::ScanningOption(roots_scanning_options()),
should_unload_classes(),
@ -3031,7 +3030,7 @@ void CMSCollector::checkpointRootsInitialWork() {
StrongRootsScope srs(1);

gch->gen_process_roots(&srs,
_cmsGen->level(),
GenCollectedHeap::OldGen,
true, // younger gens are roots
GenCollectedHeap::ScanningOption(roots_scanning_options()),
should_unload_classes(),
@ -4282,15 +4281,12 @@ void CMSCollector::checkpointRootsFinal() {
FlagSetting fl(gch->_is_gc_active, false);
NOT_PRODUCT(GCTraceTime t("Scavenge-Before-Remark",
PrintGCDetails && Verbose, true, _gc_timer_cm, _gc_tracer_cm->gc_id());)
int level = _cmsGen->level() - 1;
if (level >= 0) {
gch->do_collection(true, // full (i.e. force, see below)
false, // !clear_all_soft_refs
0, // size
false, // is_tlab
level // max_level
);
}
gch->do_collection(true, // full (i.e. force, see below)
false, // !clear_all_soft_refs
0, // size
false, // is_tlab
GenCollectedHeap::YoungGen // type
);
}
FreelistLocker x(this);
MutexLockerEx y(bitMapLock(),
@ -4464,7 +4460,7 @@ void CMSParInitialMarkTask::work(uint worker_id) {
CLDToOopClosure cld_closure(&par_mri_cl, true);

gch->gen_process_roots(_strong_roots_scope,
_collector->_cmsGen->level(),
GenCollectedHeap::OldGen,
false, // yg was scanned above
GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
_collector->should_unload_classes(),
@ -4603,7 +4599,7 @@ void CMSParRemarkTask::work(uint worker_id) {
_timer.reset();
_timer.start();
gch->gen_process_roots(_strong_roots_scope,
_collector->_cmsGen->level(),
GenCollectedHeap::OldGen,
false, // yg was scanned above
GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
_collector->should_unload_classes(),
@ -5184,7 +5180,7 @@ void CMSCollector::do_remark_non_parallel() {
StrongRootsScope srs(1);

gch->gen_process_roots(&srs,
_cmsGen->level(),
GenCollectedHeap::OldGen,
true, // younger gens as roots
GenCollectedHeap::ScanningOption(roots_scanning_options()),
should_unload_classes(),
@ -5322,8 +5318,8 @@ CMSParKeepAliveClosure::CMSParKeepAliveClosure(CMSCollector* collector,
_bit_map(bit_map),
_work_queue(work_queue),
_mark_and_push(collector, span, bit_map, work_queue),
_low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
(uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads)))
_low_water_mark(MIN2((work_queue->max_elems()/4),
((uint)CMSWorkQueueDrainThreshold * ParallelGCThreads)))
{ }

// . see if we can share work_queues with ParNew? XXX
@ -5648,11 +5644,12 @@ FreeChunk* ConcurrentMarkSweepGeneration::find_chunk_at_end() {
return _cmsSpace->find_chunk_at_end();
}

void ConcurrentMarkSweepGeneration::update_gc_stats(int current_level,
void ConcurrentMarkSweepGeneration::update_gc_stats(Generation* current_generation,
bool full) {
// The next lower level has been collected. Gather any statistics
// If the young generation has been collected, gather any statistics
// that are of interest at this point.
if (!full && (current_level + 1) == level()) {
bool current_is_young = GenCollectedHeap::heap()->is_young_gen(current_generation);
if (!full && current_is_young) {
// Gather statistics on the young generation collection.
collector()->stats().record_gc0_end(used());
}
@ -6251,8 +6248,8 @@ Par_MarkRefsIntoAndScanClosure::Par_MarkRefsIntoAndScanClosure(
_span(span),
_bit_map(bit_map),
_work_queue(work_queue),
_low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
(uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads))),
_low_water_mark(MIN2((work_queue->max_elems()/4),
((uint)CMSWorkQueueDrainThreshold * ParallelGCThreads))),
_par_pushAndMarkClosure(collector, span, rp, bit_map, work_queue)
{
_ref_processor = rp;

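A pattern repeated through this file: call sites that used to pass a numeric generation level now pass the GenCollectedHeap::YoungGen/OldGen enum or the Generation* itself, and level arithmetic becomes a heap query. Sketch of the update_gc_stats() condition above, before and after:

// before: "the generation below me was just collected" as arithmetic
//   if (!full && (current_level + 1) == level()) { ... }
// after: ask the heap directly
bool current_is_young =
    GenCollectedHeap::heap()->is_young_gen(current_generation);
if (!full && current_is_young) {
  collector()->stats().record_gc0_end(used());
}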
@ -1063,7 +1063,7 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
void shrink_free_list_by(size_t bytes);

// Update statistics for GC
virtual void update_gc_stats(int level, bool full);
virtual void update_gc_stats(Generation* current_generation, bool full);

// Maximum available space in the generation (including uncommitted)
// space.
@ -1079,7 +1079,7 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {

public:
ConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
int level, CardTableRS* ct,
CardTableRS* ct,
bool use_adaptive_freelists,
FreeBlockDictionary<FreeChunk>::DictionaryChoice);

@ -42,7 +42,7 @@ void CardTableModRefBS::non_clean_card_iterate_parallel_work(Space* sp, MemRegio
uint n_threads) {
assert(n_threads > 0, "expected n_threads > 0");
assert(n_threads <= ParallelGCThreads,
err_msg("n_threads: %u > ParallelGCThreads: " UINTX_FORMAT, n_threads, ParallelGCThreads));
err_msg("n_threads: %u > ParallelGCThreads: %u", n_threads, ParallelGCThreads));

// Make sure the LNC array is valid for the space.
jbyte** lowest_non_clean;

@ -62,25 +62,25 @@
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif
ParScanThreadState::ParScanThreadState(Space* to_space_,
ParNewGeneration* gen_,
ParNewGeneration* young_gen_,
Generation* old_gen_,
int thread_num_,
ObjToScanQueueSet* work_queue_set_,
Stack<oop, mtGC>* overflow_stacks_,
size_t desired_plab_sz_,
ParallelTaskTerminator& term_) :
_to_space(to_space_), _old_gen(old_gen_), _young_gen(gen_), _thread_num(thread_num_),
_to_space(to_space_), _old_gen(old_gen_), _young_gen(young_gen_), _thread_num(thread_num_),
_work_queue(work_queue_set_->queue(thread_num_)), _to_space_full(false),
_overflow_stack(overflow_stacks_ ? overflow_stacks_ + thread_num_ : NULL),
_ageTable(false), // false ==> not the global age table, no perf data.
_to_space_alloc_buffer(desired_plab_sz_),
_to_space_closure(gen_, this), _old_gen_closure(gen_, this),
_to_space_root_closure(gen_, this), _old_gen_root_closure(gen_, this),
_older_gen_closure(gen_, this),
_to_space_closure(young_gen_, this), _old_gen_closure(young_gen_, this),
_to_space_root_closure(young_gen_, this), _old_gen_root_closure(young_gen_, this),
_older_gen_closure(young_gen_, this),
_evacuate_followers(this, &_to_space_closure, &_old_gen_closure,
&_to_space_root_closure, gen_, &_old_gen_root_closure,
&_to_space_root_closure, young_gen_, &_old_gen_root_closure,
work_queue_set_, &term_),
_is_alive_closure(gen_), _scan_weak_ref_closure(gen_, this),
_is_alive_closure(young_gen_), _scan_weak_ref_closure(young_gen_, this),
_keep_alive_closure(&_scan_weak_ref_closure),
_strong_roots_time(0.0), _term_time(0.0)
{
@ -481,7 +481,6 @@ ParScanClosure::ParScanClosure(ParNewGeneration* g,
ParScanThreadState* par_scan_state) :
OopsInKlassOrGenClosure(g), _par_scan_state(par_scan_state), _g(g)
{
assert(_g->level() == 0, "Optimized for youngest generation");
_boundary = _g->reserved().end();
}

@ -566,11 +565,11 @@ void ParEvacuateFollowersClosure::do_void() {
par_scan_state()->end_term_time();
}

ParNewGenTask::ParNewGenTask(ParNewGeneration* gen, Generation* old_gen,
ParNewGenTask::ParNewGenTask(ParNewGeneration* young_gen, Generation* old_gen,
HeapWord* young_old_boundary, ParScanThreadStateSet* state_set,
StrongRootsScope* strong_roots_scope) :
AbstractGangTask("ParNewGeneration collection"),
_gen(gen), _old_gen(old_gen),
_young_gen(young_gen), _old_gen(old_gen),
_young_old_boundary(young_old_boundary),
_state_set(state_set),
_strong_roots_scope(strong_roots_scope)
@ -596,7 +595,7 @@ void ParNewGenTask::work(uint worker_id) {

par_scan_state.start_strong_roots();
gch->gen_process_roots(_strong_roots_scope,
_gen->level(),
GenCollectedHeap::YoungGen,
true, // Process younger gens, if any,
// as strong roots.
GenCollectedHeap::SO_ScavengeCodeCache,
@ -616,8 +615,8 @@ void ParNewGenTask::work(uint worker_id) {
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif
ParNewGeneration::
ParNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level)
: DefNewGeneration(rs, initial_byte_size, level, "PCopy"),
ParNewGeneration(ReservedSpace rs, size_t initial_byte_size)
: DefNewGeneration(rs, initial_byte_size, "PCopy"),
_overflow_list(NULL),
_is_alive_closure(this),
_plab_stats(YoungPLABSize, PLABWeight)
@ -752,7 +751,7 @@ public:
private:
virtual void work(uint worker_id);
private:
ParNewGeneration& _gen;
ParNewGeneration& _young_gen;
ProcessTask& _task;
Generation& _old_gen;
HeapWord* _young_old_boundary;
@ -760,12 +759,12 @@ private:
};

ParNewRefProcTaskProxy::ParNewRefProcTaskProxy(ProcessTask& task,
ParNewGeneration& gen,
ParNewGeneration& young_gen,
Generation& old_gen,
HeapWord* young_old_boundary,
ParScanThreadStateSet& state_set)
: AbstractGangTask("ParNewGeneration parallel reference processing"),
_gen(gen),
_young_gen(young_gen),
_task(task),
_old_gen(old_gen),
_young_old_boundary(young_old_boundary),
@ -806,12 +805,12 @@ void ParNewRefProcTaskExecutor::execute(ProcessTask& task)
GenCollectedHeap* gch = GenCollectedHeap::heap();
FlexibleWorkGang* workers = gch->workers();
assert(workers != NULL, "Need parallel worker threads.");
_state_set.reset(workers->active_workers(), _generation.promotion_failed());
ParNewRefProcTaskProxy rp_task(task, _generation, *_generation.next_gen(),
_generation.reserved().end(), _state_set);
_state_set.reset(workers->active_workers(), _young_gen.promotion_failed());
ParNewRefProcTaskProxy rp_task(task, _young_gen, _old_gen,
_young_gen.reserved().end(), _state_set);
workers->run_task(&rp_task);
_state_set.reset(0 /* bad value in debug if not reset */,
_generation.promotion_failed());
_young_gen.promotion_failed());
}

void ParNewRefProcTaskExecutor::execute(EnqueueTask& task)
@ -835,10 +834,10 @@ ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier) :
ScanClosure(g, gc_barrier) {}

EvacuateFollowersClosureGeneral::
EvacuateFollowersClosureGeneral(GenCollectedHeap* gch, int level,
EvacuateFollowersClosureGeneral(GenCollectedHeap* gch,
OopsInGenClosure* cur,
OopsInGenClosure* older) :
_gch(gch), _level(level),
_gch(gch),
_scan_cur_or_nonheap(cur), _scan_older(older)
{}

@ -846,10 +845,10 @@ void EvacuateFollowersClosureGeneral::do_void() {
do {
// Beware: this call will lead to closure applications via virtual
// calls.
_gch->oop_since_save_marks_iterate(_level,
_gch->oop_since_save_marks_iterate(GenCollectedHeap::YoungGen,
_scan_cur_or_nonheap,
_scan_older);
} while (!_gch->no_allocs_since_save_marks(_level));
} while (!_gch->no_allocs_since_save_marks(true /* include_young */));
}


@ -972,14 +971,14 @@ void ParNewGeneration::collect(bool full,
ScanClosure scan_without_gc_barrier(this, false);
ScanClosureWithParBarrier scan_with_gc_barrier(this, true);
set_promo_failure_scan_stack_closure(&scan_without_gc_barrier);
EvacuateFollowersClosureGeneral evacuate_followers(gch, _level,
EvacuateFollowersClosureGeneral evacuate_followers(gch,
&scan_without_gc_barrier, &scan_with_gc_barrier);
rp->setup_policy(clear_all_soft_refs);
// Can the mt_degree be set later (at run_task() time would be best)?
rp->set_active_mt_degree(active_workers);
ReferenceProcessorStats stats;
if (rp->processing_is_mt()) {
ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
ParNewRefProcTaskExecutor task_executor(*this, *_old_gen, thread_state_set);
stats = rp->process_discovered_references(&is_alive, &keep_alive,
&evacuate_followers, &task_executor,
_gc_timer, _gc_tracer.gc_id());
@ -1045,7 +1044,7 @@ void ParNewGeneration::collect(bool full,

rp->set_enqueuing_is_done(true);
if (rp->processing_is_mt()) {
ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
ParNewRefProcTaskExecutor task_executor(*this, *_old_gen, thread_state_set);
rp->enqueue_discovered_references(&task_executor);
} else {
rp->enqueue_discovered_references(NULL);
@ -1349,7 +1348,7 @@ bool ParNewGeneration::take_from_overflow_list_work(ParScanThreadState* par_scan
oop prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
// Trim off a prefix of at most objsFromOverflow items
Thread* tid = Thread::current();
size_t spin_count = (size_t)ParallelGCThreads;
size_t spin_count = ParallelGCThreads;
size_t sleep_time_millis = MAX2((size_t)1, objsFromOverflow/100);
for (size_t spin = 0; prefix == BUSY && spin < spin_count; spin++) {
// someone grabbed it before we did ...
@ -1466,9 +1465,9 @@ void ParNewGeneration::ref_processor_init() {
_ref_processor =
new ReferenceProcessor(_reserved, // span
ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
(uint) ParallelGCThreads, // mt processing degree
ParallelGCThreads, // mt processing degree
refs_discovery_is_mt(), // mt discovery
(uint) ParallelGCThreads, // mt discovery degree
ParallelGCThreads, // mt discovery degree
refs_discovery_is_atomic(), // atomic_discovery
NULL); // is_alive_non_header
}

@ -234,14 +234,14 @@ class ParScanThreadState {

class ParNewGenTask: public AbstractGangTask {
private:
ParNewGeneration* _gen;
ParNewGeneration* _young_gen;
Generation* _old_gen;
HeapWord* _young_old_boundary;
class ParScanThreadStateSet* _state_set;
StrongRootsScope* _strong_roots_scope;

public:
ParNewGenTask(ParNewGeneration* gen,
ParNewGenTask(ParNewGeneration* young_gen,
Generation* old_gen,
HeapWord* young_old_boundary,
ParScanThreadStateSet* state_set,
@ -264,11 +264,10 @@ class KeepAliveClosure: public DefNewGeneration::KeepAliveClosure {
class EvacuateFollowersClosureGeneral: public VoidClosure {
private:
GenCollectedHeap* _gch;
int _level;
OopsInGenClosure* _scan_cur_or_nonheap;
OopsInGenClosure* _scan_older;
public:
EvacuateFollowersClosureGeneral(GenCollectedHeap* gch, int level,
EvacuateFollowersClosureGeneral(GenCollectedHeap* gch,
OopsInGenClosure* cur,
OopsInGenClosure* older);
virtual void do_void();
@ -288,12 +287,14 @@ class ScanClosureWithParBarrier: public ScanClosure {
// Implements AbstractRefProcTaskExecutor for ParNew.
class ParNewRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
private:
ParNewGeneration& _generation;
ParNewGeneration& _young_gen;
Generation& _old_gen;
ParScanThreadStateSet& _state_set;
public:
ParNewRefProcTaskExecutor(ParNewGeneration& generation,
ParNewRefProcTaskExecutor(ParNewGeneration& young_gen,
Generation& old_gen,
ParScanThreadStateSet& state_set)
: _generation(generation), _state_set(state_set)
: _young_gen(young_gen), _old_gen(old_gen), _state_set(state_set)
{ }

// Executes a task using worker threads.
@ -353,7 +354,7 @@ class ParNewGeneration: public DefNewGeneration {
void set_survivor_overflow(bool v) { _survivor_overflow = v; }

public:
ParNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level);
ParNewGeneration(ReservedSpace rs, size_t initial_byte_size);

~ParNewGeneration() {
for (uint i = 0; i < ParallelGCThreads; i++)

@ -72,7 +72,7 @@ inline void ParScanClosure::do_oop_work(T* p,
bool root_scan) {
assert((!GenCollectedHeap::heap()->is_in_reserved(p) ||
generation()->is_in_reserved(p))
&& (generation()->level() == 0 || gc_barrier),
&& (GenCollectedHeap::heap()->is_young_gen(generation()) || gc_barrier),
"The gen must be right, and we must be doing the barrier "
"in older generations.");
T heap_oop = oopDesc::load_heap_oop(p);

@ -198,8 +198,7 @@ void VM_GenCollectFullConcurrent::doit() {
assert(SafepointSynchronize::is_at_safepoint(),
"We can only be executing this arm of if at a safepoint");
GCCauseSetter gccs(gch, _gc_cause);
gch->do_full_collection(gch->must_clear_all_soft_refs(),
0 /* collect only youngest gen */);
gch->do_full_collection(gch->must_clear_all_soft_refs(), GenCollectedHeap::YoungGen);
} // Else no need for a foreground young gc
assert((_gc_count_before < gch->total_collections()) ||
(GC_locker::is_active() /* gc may have been skipped */

@ -107,7 +107,8 @@ void CollectionSetChooser::verify() {
HeapRegion *curr = regions_at(index++);
guarantee(curr != NULL, "Regions in _regions array cannot be NULL");
guarantee(!curr->is_young(), "should not be young!");
guarantee(!curr->is_humongous(), "should not be humongous!");
guarantee(!curr->is_pinned(),
err_msg("Pinned region should not be in collection set (index %u)", curr->hrm_index()));
if (prev != NULL) {
guarantee(order_regions(prev, curr) != 1,
err_msg("GC eff prev: %1.4f GC eff curr: %1.4f",
@ -149,8 +150,8 @@ void CollectionSetChooser::sort_regions() {


void CollectionSetChooser::add_region(HeapRegion* hr) {
assert(!hr->is_humongous(),
"Humongous regions shouldn't be added to the collection set");
assert(!hr->is_pinned(),
err_msg("Pinned region shouldn't be added to the collection set (index %u)", hr->hrm_index()));
assert(!hr->is_young(), "should not be young!");
_regions.append(hr);
_length++;

@ -103,13 +103,12 @@ public:
void sort_regions();

// Determine whether to add the given region to the CSet chooser or
// not. Currently, we skip humongous regions (we never add them to
// the CSet, we only reclaim them during cleanup) and regions whose
// live bytes are over the threshold.
// not. Currently, we skip pinned regions and regions whose live
// bytes are over the threshold. Humongous regions may be reclaimed during cleanup.
bool should_add(HeapRegion* hr) {
assert(hr->is_marked(), "pre-condition");
assert(!hr->is_young(), "should never consider young regions");
return !hr->is_humongous() &&
return !hr->is_pinned() &&
hr->live_bytes() < _region_live_threshold_bytes;
}

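Why the humongous checks above could be replaced wholesale: in this series is_pinned() is assumed to cover both humongous regions and the new archive regions that hold shared strings, so !is_pinned() keeps all of them out of the collection set. Comment-only sketch of the assumed relationship, which is not shown in this diff:

// hr->is_pinned()  ~  hr->is_humongous() || hr->is_archive()
// => !hr->is_pinned() excludes humongous regions (reclaimed at cleanup)
//    and archive regions (never reclaimed)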
@ -30,6 +30,7 @@
 #include "gc/g1/concurrentMarkThread.inline.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1CollectorPolicy.hpp"
 #include "gc/g1/g1CollectorState.hpp"
 #include "gc/g1/g1ErgoVerbose.hpp"
 #include "gc/g1/g1Log.hpp"
 #include "gc/g1/g1OopClosures.inline.hpp"
@ -177,7 +178,7 @@ class ClearBitmapHRClosure : public HeapRegionClosure {
      // will have them as guarantees at the beginning / end of the bitmap
      // clearing to get some checking in the product.
      assert(!_may_yield || _cm->cmThread()->during_cycle(), "invariant");
      assert(!_may_yield || !G1CollectedHeap::heap()->mark_in_progress(), "invariant");
      assert(!_may_yield || !G1CollectedHeap::heap()->collector_state()->mark_in_progress(), "invariant");
    }

    return false;
@ -518,7 +519,7 @@ ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev
  _markStack(this),
  // _finger set in set_non_marking_state

  _max_worker_id((uint)ParallelGCThreads),
  _max_worker_id(ParallelGCThreads),
  // _active_tasks set in set_non_marking_state
  // _tasks set inside the constructor
  _task_queues(new CMTaskQueueSet((int) _max_worker_id)),
@ -579,8 +580,8 @@ ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev
  _root_regions.init(_g1h, this);

  if (ConcGCThreads > ParallelGCThreads) {
    warning("Can't have more ConcGCThreads (" UINTX_FORMAT ") "
            "than ParallelGCThreads (" UINTX_FORMAT ").",
    warning("Can't have more ConcGCThreads (%u) "
            "than ParallelGCThreads (%u).",
            ConcGCThreads, ParallelGCThreads);
    return;
  }
@ -604,20 +605,20 @@ ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev
    double sleep_factor =
                       (1.0 - marking_task_overhead) / marking_task_overhead;

    FLAG_SET_ERGO(uintx, ConcGCThreads, (uint) marking_thread_num);
    FLAG_SET_ERGO(uint, ConcGCThreads, (uint) marking_thread_num);
    _sleep_factor = sleep_factor;
    _marking_task_overhead = marking_task_overhead;
  } else {
    // Calculate the number of parallel marking threads by scaling
    // the number of parallel GC threads.
    uint marking_thread_num = scale_parallel_threads((uint) ParallelGCThreads);
    FLAG_SET_ERGO(uintx, ConcGCThreads, marking_thread_num);
    uint marking_thread_num = scale_parallel_threads(ParallelGCThreads);
    FLAG_SET_ERGO(uint, ConcGCThreads, marking_thread_num);
    _sleep_factor = 0.0;
    _marking_task_overhead = 1.0;
  }

  assert(ConcGCThreads > 0, "Should have been set");
  _parallel_marking_threads = (uint) ConcGCThreads;
  _parallel_marking_threads = ConcGCThreads;
  _max_parallel_marking_threads = _parallel_marking_threads;

  if (parallel_marking_threads() > 1) {
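The constructor hunks above retype the thread-count ergonomics from uintx to uint. As a rough sketch of what scale_parallel_threads() computes (about one concurrent marking thread per four parallel GC threads; treat the exact ratio as an assumption, not a quote of the source):

#include <algorithm>

static unsigned scale_parallel_threads(unsigned parallel_gc_threads) {
  // Assumed scaling: (n + 2) / 4, clamped to at least one marking thread.
  return std::max(1u, (parallel_gc_threads + 2) / 4);
}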
@ -830,7 +831,7 @@ void ConcurrentMark::clearNextBitmap() {
  // marking bitmap and getting it ready for the next cycle. During
  // this time no other cycle can start. So, let's make sure that this
  // is the case.
  guarantee(!g1h->mark_in_progress(), "invariant");
  guarantee(!g1h->collector_state()->mark_in_progress(), "invariant");

  ClearBitmapHRClosure cl(this, _nextMarkBitMap, true /* may_yield */);
  ParClearNextMarkBitmapTask task(&cl, parallel_marking_threads(), true);
@ -844,7 +845,7 @@ void ConcurrentMark::clearNextBitmap() {

  // Repeat the asserts from above.
  guarantee(cmThread()->during_cycle(), "invariant");
  guarantee(!g1h->mark_in_progress(), "invariant");
  guarantee(!g1h->collector_state()->mark_in_progress(), "invariant");
}

class CheckBitmapClearHRClosure : public HeapRegionClosure {
@ -1178,6 +1179,8 @@ public:
};

void ConcurrentMark::scanRootRegions() {
  double scan_start = os::elapsedTime();

  // Start of concurrent marking.
  ClassLoaderDataGraph::clear_claimed_marks();

@ -1185,6 +1188,11 @@ void ConcurrentMark::scanRootRegions() {
  // at least one root region to scan. So, if it's false, we
  // should not attempt to do any further work.
  if (root_regions()->scan_in_progress()) {
    if (G1Log::fine()) {
      gclog_or_tty->gclog_stamp(concurrent_gc_id());
      gclog_or_tty->print_cr("[GC concurrent-root-region-scan-start]");
    }

    _parallel_marking_threads = calc_parallel_marking_threads();
    assert(parallel_marking_threads() <= max_parallel_marking_threads(),
           "Maximum number of marking threads exceeded");
@ -1194,6 +1202,11 @@ void ConcurrentMark::scanRootRegions() {
    _parallel_workers->set_active_workers(active_workers);
    _parallel_workers->run_task(&task);

    if (G1Log::fine()) {
      gclog_or_tty->gclog_stamp(concurrent_gc_id());
      gclog_or_tty->print_cr("[GC concurrent-root-region-scan-end, %1.7lf secs]", os::elapsedTime() - scan_start);
    }

  // It's possible that has_aborted() is true here without actually
  // aborting the survivor scan earlier. This is OK as it's
  // mainly used for sanity checking.
@ -1254,7 +1267,7 @@ void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    g1h->set_marking_complete(); // So bitmap clearing isn't confused
    g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused
    return;
  }

@ -1783,7 +1796,7 @@ public:
  const HeapRegionSetCount& humongous_regions_removed() { return _humongous_regions_removed; }

  bool doHeapRegion(HeapRegion *hr) {
    if (hr->is_continues_humongous()) {
    if (hr->is_continues_humongous() || hr->is_archive()) {
      return false;
    }
    // We use a claim value of zero here because all regions
@ -1888,7 +1901,7 @@ void ConcurrentMark::cleanup() {

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    g1h->set_marking_complete(); // So bitmap clearing isn't confused
    g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused
    return;
  }

@ -1934,7 +1947,7 @@ void ConcurrentMark::cleanup() {
  }

  size_t start_used_bytes = g1h->used();
  g1h->set_marking_complete();
  g1h->collector_state()->set_mark_in_progress(false);

  double count_end = os::elapsedTime();
  double this_final_counting_time = (count_end - start);
@ -2756,7 +2769,7 @@ public:

void ConcurrentMark::verify_no_cset_oops() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
  if (!G1CollectedHeap::heap()->mark_in_progress()) {
  if (!G1CollectedHeap::heap()->collector_state()->mark_in_progress()) {
    return;
  }

@ -2992,6 +3005,11 @@ void ConcurrentMark::print_stats() {

// abandon current marking iteration due to a Full GC
void ConcurrentMark::abort() {
  if (!cmThread()->during_cycle() || _has_aborted) {
    // We haven't started a concurrent cycle or we have already aborted it. No need to do anything.
    return;
  }

  // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next
  // concurrent bitmap clearing.
  _nextMarkBitMap->clearAll();
@ -3009,12 +3027,8 @@ void ConcurrentMark::abort() {
  }
  _first_overflow_barrier_sync.abort();
  _second_overflow_barrier_sync.abort();
  const GCId& gc_id = _g1h->gc_tracer_cm()->gc_id();
  if (!gc_id.is_undefined()) {
    // We can do multiple full GCs before ConcurrentMarkThread::run() gets a chance
    // to detect that it was aborted. Only keep track of the first GC id that we aborted.
    _aborted_gc_id = gc_id;
  }
  _aborted_gc_id = _g1h->gc_tracer_cm()->gc_id();
  assert(!_aborted_gc_id.is_undefined(), "ConcurrentMark::abort() executed more than once?");
  _has_aborted = true;

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();

@ -78,7 +78,19 @@ public:
  }
};


// We want to avoid that the logging from the concurrent thread is mixed
// with the logging from a STW GC. So, if necessary join the STS to ensure
// that the logging is done either before or after the STW logging.
void ConcurrentMarkThread::cm_log(bool doit, bool join_sts, const char* fmt, ...) {
  if (doit) {
    SuspendibleThreadSetJoiner sts_joiner(join_sts);
    va_list args;
    va_start(args, fmt);
    gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
    gclog_or_tty->vprint_cr(fmt, args);
    va_end(args);
  }
}

void ConcurrentMarkThread::run() {
  initialize_in_thread();
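A self-contained sketch of the cm_log() pattern introduced above: one helper checks the verbosity flag and forwards printf-style varargs, so each call site shrinks to a single line. Plain stderr stands in for gclog_or_tty, and the GCC-style format attribute stands in for ATTRIBUTE_PRINTF:

#include <cstdarg>
#include <cstdio>

__attribute__((format(printf, 2, 3)))
static void cm_log_sketch(bool doit, const char* fmt, ...) {
  if (!doit) {
    return;  // this verbosity level is disabled
  }
  va_list args;
  va_start(args, fmt);
  vfprintf(stderr, fmt, args);  // stands in for gclog_or_tty->vprint_cr()
  fputc('\n', stderr);
  va_end(args);
}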
@ -110,28 +122,12 @@ void ConcurrentMarkThread::run() {
      // without the root regions have been scanned which would be a
      // correctness issue.

      double scan_start = os::elapsedTime();
      if (!cm()->has_aborted()) {
        if (G1Log::fine()) {
          gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
          gclog_or_tty->print_cr("[GC concurrent-root-region-scan-start]");
        }

        _cm->scanRootRegions();

        double scan_end = os::elapsedTime();
        if (G1Log::fine()) {
          gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
          gclog_or_tty->print_cr("[GC concurrent-root-region-scan-end, %1.7lf secs]",
                                 scan_end - scan_start);
        }
      }

      double mark_start_sec = os::elapsedTime();
      if (G1Log::fine()) {
        gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
        gclog_or_tty->print_cr("[GC concurrent-mark-start]");
      }
      cm_log(G1Log::fine(), true, "[GC concurrent-mark-start]");

      int iter = 0;
      do {
@ -151,25 +147,15 @@ void ConcurrentMarkThread::run() {
          os::sleep(current_thread, sleep_time_ms, false);
        }

        if (G1Log::fine()) {
          gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
          gclog_or_tty->print_cr("[GC concurrent-mark-end, %1.7lf secs]",
                                 mark_end_sec - mark_start_sec);
        }
        cm_log(G1Log::fine(), true, "[GC concurrent-mark-end, %1.7lf secs]", mark_end_sec - mark_start_sec);

        CMCheckpointRootsFinalClosure final_cl(_cm);
        VM_CGC_Operation op(&final_cl, "GC remark", true /* needs_pll */);
        VMThread::execute(&op);
      }
      if (cm()->restart_for_overflow()) {
        if (G1TraceMarkStackOverflow) {
          gclog_or_tty->print_cr("Restarting conc marking because of MS overflow "
                                 "in remark (restart #%d).", iter);
        }
        if (G1Log::fine()) {
          gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
          gclog_or_tty->print_cr("[GC concurrent-mark-restart-for-overflow]");
        }
        cm_log(G1TraceMarkStackOverflow, true, "Restarting conc marking because of MS overflow in remark (restart #%d).", iter);
        cm_log(G1Log::fine(), true, "[GC concurrent-mark-restart-for-overflow]");
      }
    } while (cm()->restart_for_overflow());

@ -194,7 +180,7 @@ void ConcurrentMarkThread::run() {
        // We don't want to update the marking status if a GC pause
        // is already underway.
        SuspendibleThreadSetJoiner sts_join;
        g1h->set_marking_complete();
        g1h->collector_state()->set_mark_in_progress(false);
      }

      // Check if cleanup set the free_regions_coming flag. If it
@ -209,10 +195,7 @@ void ConcurrentMarkThread::run() {
      // reclaimed by cleanup.

      double cleanup_start_sec = os::elapsedTime();
      if (G1Log::fine()) {
        gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
        gclog_or_tty->print_cr("[GC concurrent-cleanup-start]");
      }
      cm_log(G1Log::fine(), true, "[GC concurrent-cleanup-start]");

      // Now do the concurrent cleanup operation.
      _cm->completeCleanup();
@ -229,11 +212,7 @@ void ConcurrentMarkThread::run() {
      g1h->reset_free_regions_coming();

      double cleanup_end_sec = os::elapsedTime();
      if (G1Log::fine()) {
        gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
        gclog_or_tty->print_cr("[GC concurrent-cleanup-end, %1.7lf secs]",
                               cleanup_end_sec - cleanup_start_sec);
      }
      cm_log(G1Log::fine(), true, "[GC concurrent-cleanup-end, %1.7lf secs]", cleanup_end_sec - cleanup_start_sec);
    }
    guarantee(cm()->cleanup_list_is_empty(),
              "at this point there should be no regions on the cleanup list");
@ -266,13 +245,8 @@ void ConcurrentMarkThread::run() {
      SuspendibleThreadSetJoiner sts_join;
      if (!cm()->has_aborted()) {
        g1_policy->record_concurrent_mark_cleanup_completed();
      }
    }

    if (cm()->has_aborted()) {
      if (G1Log::fine()) {
        gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
        gclog_or_tty->print_cr("[GC concurrent-mark-abort]");
      }
      cm_log(G1Log::fine(), false, "[GC concurrent-mark-abort]");
    }


@ -40,6 +40,7 @@ class ConcurrentMarkThread: public ConcurrentGCThread {
  double _vtime_accum; // Accumulated virtual time.

  double _vtime_mark_accum;
  void cm_log(bool doit, bool join_sts, const char* fmt, ...) ATTRIBUTE_PRINTF(4, 5);

 public:
  virtual void run();

@ -26,6 +26,7 @@
 #include "gc/g1/g1Allocator.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1CollectorPolicy.hpp"
 #include "gc/g1/g1MarkSweep.hpp"
 #include "gc/g1/heapRegion.inline.hpp"
 #include "gc/g1/heapRegionSet.inline.hpp"

@ -44,6 +45,8 @@ void G1Allocator::reuse_retained_old_region(EvacuationInfo& evacuation_info,
                                            HeapRegion** retained_old) {
  HeapRegion* retained_region = *retained_old;
  *retained_old = NULL;
  assert(retained_region == NULL || !retained_region->is_archive(),
         err_msg("Archive region should not be alloc region (index %u)", retained_region->hrm_index()));

  // We will discard the current GC alloc region if:
  // a) it's in the collection set (it can happen!),
@ -65,7 +68,7 @@ void G1Allocator::reuse_retained_old_region(EvacuationInfo& evacuation_info,
    // we allocate to in the region sets. We'll re-add it later, when
    // it's retired again.
    _g1h->_old_set.remove(retained_region);
    bool during_im = _g1h->g1_policy()->during_initial_mark_pause();
    bool during_im = _g1h->collector_state()->during_initial_mark_pause();
    retained_region->note_start_of_copying(during_im);
    old->set(retained_region);
    _g1h->_hr_printer.reuse(retained_region);
@ -168,3 +171,153 @@ void G1DefaultParGCAllocator::waste(size_t& wasted, size_t& undo_wasted) {
    }
  }
}

G1ArchiveAllocator* G1ArchiveAllocator::create_allocator(G1CollectedHeap* g1h) {
  // Create the archive allocator, and also enable archive object checking
  // in mark-sweep, since we will be creating archive regions.
  G1ArchiveAllocator* result = new G1ArchiveAllocator(g1h);
  G1MarkSweep::enable_archive_object_check();
  return result;
}

bool G1ArchiveAllocator::alloc_new_region() {
  // Allocate the highest free region in the reserved heap,
  // and add it to our list of allocated regions. It is marked
  // archive and added to the old set.
  HeapRegion* hr = _g1h->alloc_highest_free_region();
  if (hr == NULL) {
    return false;
  }
  assert(hr->is_empty(), err_msg("expected empty region (index %u)", hr->hrm_index()));
  hr->set_archive();
  _g1h->_old_set.add(hr);
  _g1h->_hr_printer.alloc(hr, G1HRPrinter::Archive);
  _allocated_regions.append(hr);
  _allocation_region = hr;

  // Set up _bottom and _max to begin allocating in the lowest
  // min_region_size'd chunk of the allocated G1 region.
  _bottom = hr->bottom();
  _max = _bottom + HeapRegion::min_region_size_in_words();

  // Tell mark-sweep that objects in this region are not to be marked.
  G1MarkSweep::mark_range_archive(MemRegion(_bottom, HeapRegion::GrainWords));

  // Since we've modified the old set, call update_sizes.
  _g1h->g1mm()->update_sizes();
  return true;
}

HeapWord* G1ArchiveAllocator::archive_mem_allocate(size_t word_size) {
  assert(word_size != 0, "size must not be zero");
  if (_allocation_region == NULL) {
    if (!alloc_new_region()) {
      return NULL;
    }
  }
  HeapWord* old_top = _allocation_region->top();
  assert(_bottom >= _allocation_region->bottom(),
         err_msg("inconsistent allocation state: " PTR_FORMAT " < " PTR_FORMAT,
                 p2i(_bottom), p2i(_allocation_region->bottom())));
  assert(_max <= _allocation_region->end(),
         err_msg("inconsistent allocation state: " PTR_FORMAT " > " PTR_FORMAT,
                 p2i(_max), p2i(_allocation_region->end())));
  assert(_bottom <= old_top && old_top <= _max,
         err_msg("inconsistent allocation state: expected "
                 PTR_FORMAT " <= " PTR_FORMAT " <= " PTR_FORMAT,
                 p2i(_bottom), p2i(old_top), p2i(_max)));

  // Allocate the next word_size words in the current allocation chunk.
  // If allocation would cross the _max boundary, insert a filler and begin
  // at the base of the next min_region_size'd chunk. Also advance to the next
  // chunk if we don't yet cross the boundary, but the remainder would be too
  // small to fill.
  HeapWord* new_top = old_top + word_size;
  size_t remainder = pointer_delta(_max, new_top);
  if ((new_top > _max) ||
      ((new_top < _max) && (remainder < CollectedHeap::min_fill_size()))) {
    if (old_top != _max) {
      size_t fill_size = pointer_delta(_max, old_top);
      CollectedHeap::fill_with_object(old_top, fill_size);
      _summary_bytes_used += fill_size * HeapWordSize;
    }
    _allocation_region->set_top(_max);
    old_top = _bottom = _max;

    // Check if we've just used up the last min_region_size'd chunk
    // in the current region, and if so, allocate a new one.
    if (_bottom != _allocation_region->end()) {
      _max = _bottom + HeapRegion::min_region_size_in_words();
    } else {
      if (!alloc_new_region()) {
        return NULL;
      }
      old_top = _allocation_region->bottom();
    }
  }
  _allocation_region->set_top(old_top + word_size);
  _summary_bytes_used += word_size * HeapWordSize;

  return old_top;
}

void G1ArchiveAllocator::complete_archive(GrowableArray<MemRegion>* ranges,
                                          size_t end_alignment_in_bytes) {
  assert((end_alignment_in_bytes >> LogHeapWordSize) < HeapRegion::min_region_size_in_words(),
         err_msg("alignment " SIZE_FORMAT " too large", end_alignment_in_bytes));
  assert(is_size_aligned(end_alignment_in_bytes, HeapWordSize),
         err_msg("alignment " SIZE_FORMAT " is not HeapWord (%u) aligned", end_alignment_in_bytes, HeapWordSize));

  // If we've allocated nothing, simply return.
  if (_allocation_region == NULL) {
    return;
  }

  // If an end alignment was requested, insert filler objects.
  if (end_alignment_in_bytes != 0) {
    HeapWord* currtop = _allocation_region->top();
    HeapWord* newtop = (HeapWord*)align_pointer_up(currtop, end_alignment_in_bytes);
    size_t fill_size = pointer_delta(newtop, currtop);
    if (fill_size != 0) {
      if (fill_size < CollectedHeap::min_fill_size()) {
        // If the required fill is smaller than we can represent,
        // bump up to the next aligned address. We know we won't exceed the current
        // region boundary because the max supported alignment is smaller than the min
        // region size, and because the allocation code never leaves space smaller than
        // the min_fill_size at the top of the current allocation region.
        newtop = (HeapWord*)align_pointer_up(currtop + CollectedHeap::min_fill_size(),
                                             end_alignment_in_bytes);
        fill_size = pointer_delta(newtop, currtop);
      }
      HeapWord* fill = archive_mem_allocate(fill_size);
      CollectedHeap::fill_with_objects(fill, fill_size);
    }
  }

  // Loop through the allocated regions, and create MemRegions summarizing
  // the allocated address range, combining contiguous ranges. Add the
  // MemRegions to the GrowableArray provided by the caller.
  int index = _allocated_regions.length() - 1;
  assert(_allocated_regions.at(index) == _allocation_region,
         err_msg("expected region %u at end of array, found %u",
                 _allocation_region->hrm_index(), _allocated_regions.at(index)->hrm_index()));
  HeapWord* base_address = _allocation_region->bottom();
  HeapWord* top = base_address;

  while (index >= 0) {
    HeapRegion* next = _allocated_regions.at(index);
    HeapWord* new_base = next->bottom();
    HeapWord* new_top = next->top();
    if (new_base != top) {
      ranges->append(MemRegion(base_address, pointer_delta(top, base_address)));
      base_address = new_base;
    }
    top = new_top;
    index = index - 1;
  }

  assert(top != base_address, err_msg("zero-sized range, address " PTR_FORMAT, p2i(base_address)));
  ranges->append(MemRegion(base_address, pointer_delta(top, base_address)));
  _allocated_regions.clear();
  _allocation_region = NULL;
};

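A worked standalone sketch of the chunked bump-pointer scheme in archive_mem_allocate() above: allocate within [bottom, max); when a request would cross max, or would leave a sliver too small to fill, pad out the chunk and continue at the next one. Word indices and the constants are simplified assumptions, and each request is assumed to fit in one chunk:

#include <cstddef>

struct ChunkAllocator {
  size_t top, bottom, max;  // word indices into the current region
  size_t chunk_words;       // stand-in for min_region_size_in_words()
  size_t min_fill;          // stand-in for CollectedHeap::min_fill_size()

  // Returns the start of the allocation, or (size_t)-1 when the region is
  // exhausted (the real code then grabs a fresh region instead).
  size_t allocate(size_t word_size, size_t region_end) {
    size_t new_top = top + word_size;
    if (new_top > max || (new_top < max && max - new_top < min_fill)) {
      top = bottom = max;  // the skipped remainder becomes filler
      if (bottom == region_end) {
        return (size_t)-1;
      }
      max = bottom + chunk_words;
      new_top = top + word_size;
    }
    size_t old_top = top;
    top = new_top;
    return old_top;
  }
};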
@ -227,7 +227,7 @@ public:
                                    size_t word_sz,
                                    AllocationContext_t context) {
    G1PLAB* buffer = alloc_buffer(dest, context);
    if (_survivor_alignment_bytes == 0) {
    if (_survivor_alignment_bytes == 0 || !dest.is_young()) {
      return buffer->allocate(word_sz);
    } else {
      return buffer->allocate_aligned(word_sz, _survivor_alignment_bytes);
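The one-line change above takes the aligned path only for survivor-bound copies. A sketch of the decision, with plain stand-ins for the destination state and the PLAB:

enum class Dest { Young, Old };

// Matches the new condition: alignment disabled, or an old-gen destination,
// means the plain bump-pointer allocate() is sufficient.
static bool needs_aligned_allocation(Dest dest, unsigned survivor_alignment_bytes) {
  return survivor_alignment_bytes != 0 && dest == Dest::Young;
}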
@ -269,4 +269,72 @@ public:
  virtual void waste(size_t& wasted, size_t& undo_wasted);
};

// G1ArchiveAllocator is used to allocate memory in archive
// regions. Such regions are not modifiable by GC, being neither
// scavenged nor compacted, or even marked in the object header.
// They can contain no pointers to non-archive heap regions,
class G1ArchiveAllocator : public CHeapObj<mtGC> {

protected:
  G1CollectedHeap* _g1h;

  // The current allocation region
  HeapRegion* _allocation_region;

  // Regions allocated for the current archive range.
  GrowableArray<HeapRegion*> _allocated_regions;

  // The number of bytes used in the current range.
  size_t _summary_bytes_used;

  // Current allocation window within the current region.
  HeapWord* _bottom;
  HeapWord* _top;
  HeapWord* _max;

  // Allocate a new region for this archive allocator.
  // Allocation is from the top of the reserved heap downward.
  bool alloc_new_region();

public:
  G1ArchiveAllocator(G1CollectedHeap* g1h) :
    _g1h(g1h),
    _allocation_region(NULL),
    _allocated_regions((ResourceObj::set_allocation_type((address) &_allocated_regions,
                                                         ResourceObj::C_HEAP),
                        2), true /* C_Heap */),
    _summary_bytes_used(0),
    _bottom(NULL),
    _top(NULL),
    _max(NULL) { }

  virtual ~G1ArchiveAllocator() {
    assert(_allocation_region == NULL, "_allocation_region not NULL");
  }

  static G1ArchiveAllocator* create_allocator(G1CollectedHeap* g1h);

  // Allocate memory for an individual object.
  HeapWord* archive_mem_allocate(size_t word_size);

  // Return the memory ranges used in the current archive, after
  // aligning to the requested alignment.
  void complete_archive(GrowableArray<MemRegion>* ranges,
                        size_t end_alignment_in_bytes);

  // The number of bytes allocated by this allocator.
  size_t used() {
    return _summary_bytes_used;
  }

  // Clear the count of bytes allocated in prior G1 regions. This
  // must be done when recalculate_use is used to reset the counter
  // for the generic allocator, since it counts bytes in all G1
  // regions, including those still associated with this allocator.
  void clear_used() {
    _summary_bytes_used = 0;
  }

};

#endif // SHARE_VM_GC_G1_G1ALLOCATOR_HPP

@ -26,6 +26,7 @@
 #define SHARE_VM_GC_G1_G1BIASEDARRAY_HPP

 #include "memory/allocation.hpp"
 #include "memory/memRegion.hpp"
 #include "utilities/debug.hpp"

 // Implements the common base functionality for arrays that contain provisions
@ -128,6 +129,14 @@ public:
    return biased_base()[biased_index];
  }

  // Return the index of the element of the given array that covers the given
  // word in the heap.
  idx_t get_index_by_address(HeapWord* value) const {
    idx_t biased_index = ((uintptr_t)value) >> this->shift_by();
    this->verify_biased_index(biased_index);
    return biased_index - _bias;
  }

  // Set the value of the array entry that corresponds to the given array.
  void set_by_address(HeapWord * address, T value) {
    idx_t biased_index = ((uintptr_t)address) >> this->shift_by();
@ -135,6 +144,18 @@ public:
    biased_base()[biased_index] = value;
  }

  // Set the value of all array entries that correspond to addresses
  // in the specified MemRegion.
  void set_by_address(MemRegion range, T value) {
    idx_t biased_start = ((uintptr_t)range.start()) >> this->shift_by();
    idx_t biased_last = ((uintptr_t)range.last()) >> this->shift_by();
    this->verify_biased_index(biased_start);
    this->verify_biased_index(biased_last);
    for (idx_t i = biased_start; i <= biased_last; i++) {
      biased_base()[i] = value;
    }
  }

protected:
  // Returns the address of the element the given address maps to
  T* address_mapped_to(HeapWord* address) {
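A standalone sketch of the biased-array addressing behind the new get_index_by_address() and the MemRegion overload of set_by_address(): an address maps to (addr >> shift) - bias, and an inclusive [start, last] range maps to the corresponding biased-index interval. A plain array stands in for the G1BiasedMappedArray machinery:

#include <cstdint>

template <typename T>
static void set_by_address_range(T* base, uintptr_t bias, unsigned shift,
                                 uintptr_t start, uintptr_t last, T value) {
  uintptr_t first_idx = (start >> shift) - bias;
  uintptr_t last_idx  = (last  >> shift) - bias;  // inclusive, like MemRegion::last()
  for (uintptr_t i = first_idx; i <= last_idx; i++) {
    base[i] = value;
  }
}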
@ -34,6 +34,7 @@
 #include "gc/g1/g1AllocRegion.inline.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1CollectorPolicy.hpp"
 #include "gc/g1/g1CollectorState.hpp"
 #include "gc/g1/g1ErgoVerbose.hpp"
 #include "gc/g1/g1EvacFailure.hpp"
 #include "gc/g1/g1GCPhaseTimes.hpp"
@ -404,7 +405,7 @@ HeapRegion* G1CollectedHeap::pop_dirty_cards_region()
// can move in an incremental collection.
bool G1CollectedHeap::is_scavengable(const void* p) {
  HeapRegion* hr = heap_region_containing(p);
  return !hr->is_humongous();
  return !hr->is_pinned();
}

// Private methods.
@ -907,6 +908,207 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
  return NULL;
}

void G1CollectedHeap::begin_archive_alloc_range() {
  assert_at_safepoint(true /* should_be_vm_thread */);
  if (_archive_allocator == NULL) {
    _archive_allocator = G1ArchiveAllocator::create_allocator(this);
  }
}

bool G1CollectedHeap::is_archive_alloc_too_large(size_t word_size) {
  // Allocations in archive regions cannot be of a size that would be considered
  // humongous even for a minimum-sized region, because G1 region sizes/boundaries
  // may be different at archive-restore time.
  return word_size >= humongous_threshold_for(HeapRegion::min_region_size_in_words());
}

HeapWord* G1CollectedHeap::archive_mem_allocate(size_t word_size) {
  assert_at_safepoint(true /* should_be_vm_thread */);
  assert(_archive_allocator != NULL, "_archive_allocator not initialized");
  if (is_archive_alloc_too_large(word_size)) {
    return NULL;
  }
  return _archive_allocator->archive_mem_allocate(word_size);
}

void G1CollectedHeap::end_archive_alloc_range(GrowableArray<MemRegion>* ranges,
                                              size_t end_alignment_in_bytes) {
  assert_at_safepoint(true /* should_be_vm_thread */);
  assert(_archive_allocator != NULL, "_archive_allocator not initialized");

  // Call complete_archive to do the real work, filling in the MemRegion
  // array with the archive regions.
  _archive_allocator->complete_archive(ranges, end_alignment_in_bytes);
  delete _archive_allocator;
  _archive_allocator = NULL;
}

bool G1CollectedHeap::check_archive_addresses(MemRegion* ranges, size_t count) {
  assert(ranges != NULL, "MemRegion array NULL");
  assert(count != 0, "No MemRegions provided");
  MemRegion reserved = _hrm.reserved();
  for (size_t i = 0; i < count; i++) {
    if (!reserved.contains(ranges[i].start()) || !reserved.contains(ranges[i].last())) {
      return false;
    }
  }
  return true;
}

bool G1CollectedHeap::alloc_archive_regions(MemRegion* ranges, size_t count) {
  assert(ranges != NULL, "MemRegion array NULL");
  assert(count != 0, "No MemRegions provided");
  MutexLockerEx x(Heap_lock);

  MemRegion reserved = _hrm.reserved();
  HeapWord* prev_last_addr = NULL;
  HeapRegion* prev_last_region = NULL;

  // Temporarily disable pretouching of heap pages. This interface is used
  // when mmap'ing archived heap data in, so pre-touching is wasted.
  FlagSetting fs(AlwaysPreTouch, false);

  // Enable archive object checking in G1MarkSweep. We have to let it know
  // about each archive range, so that objects in those ranges aren't marked.
  G1MarkSweep::enable_archive_object_check();

  // For each specified MemRegion range, allocate the corresponding G1
  // regions and mark them as archive regions. We expect the ranges in
  // ascending starting address order, without overlap.
  for (size_t i = 0; i < count; i++) {
    MemRegion curr_range = ranges[i];
    HeapWord* start_address = curr_range.start();
    size_t word_size = curr_range.word_size();
    HeapWord* last_address = curr_range.last();
    size_t commits = 0;

    guarantee(reserved.contains(start_address) && reserved.contains(last_address),
              err_msg("MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
                      p2i(start_address), p2i(last_address)));
    guarantee(start_address > prev_last_addr,
              err_msg("Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
                      p2i(start_address), p2i(prev_last_addr)));
    prev_last_addr = last_address;

    // Check for ranges that start in the same G1 region in which the previous
    // range ended, and adjust the start address so we don't try to allocate
    // the same region again. If the current range is entirely within that
    // region, skip it, just adjusting the recorded top.
    HeapRegion* start_region = _hrm.addr_to_region(start_address);
    if ((prev_last_region != NULL) && (start_region == prev_last_region)) {
      start_address = start_region->end();
      if (start_address > last_address) {
        _allocator->increase_used(word_size * HeapWordSize);
        start_region->set_top(last_address + 1);
        continue;
      }
      start_region->set_top(start_address);
      curr_range = MemRegion(start_address, last_address + 1);
      start_region = _hrm.addr_to_region(start_address);
    }

    // Perform the actual region allocation, exiting if it fails.
    // Then note how much new space we have allocated.
    if (!_hrm.allocate_containing_regions(curr_range, &commits)) {
      return false;
    }
    _allocator->increase_used(word_size * HeapWordSize);
    if (commits != 0) {
      ergo_verbose1(ErgoHeapSizing,
                    "attempt heap expansion",
                    ergo_format_reason("allocate archive regions")
                    ergo_format_byte("total size"),
                    HeapRegion::GrainWords * HeapWordSize * commits);
    }

    // Mark each G1 region touched by the range as archive, add it to the old set,
    // and set the allocation context and top.
    HeapRegion* curr_region = _hrm.addr_to_region(start_address);
    HeapRegion* last_region = _hrm.addr_to_region(last_address);
    prev_last_region = last_region;

    while (curr_region != NULL) {
      assert(curr_region->is_empty() && !curr_region->is_pinned(),
             err_msg("Region already in use (index %u)", curr_region->hrm_index()));
      _hr_printer.alloc(curr_region, G1HRPrinter::Archive);
      curr_region->set_allocation_context(AllocationContext::system());
      curr_region->set_archive();
      _old_set.add(curr_region);
      if (curr_region != last_region) {
        curr_region->set_top(curr_region->end());
        curr_region = _hrm.next_region_in_heap(curr_region);
      } else {
        curr_region->set_top(last_address + 1);
        curr_region = NULL;
      }
    }

    // Notify mark-sweep of the archive range.
    G1MarkSweep::mark_range_archive(curr_range);
  }
  return true;
}

void G1CollectedHeap::fill_archive_regions(MemRegion* ranges, size_t count) {
  assert(ranges != NULL, "MemRegion array NULL");
  assert(count != 0, "No MemRegions provided");
  MemRegion reserved = _hrm.reserved();
  HeapWord *prev_last_addr = NULL;
  HeapRegion* prev_last_region = NULL;

  // For each MemRegion, create filler objects, if needed, in the G1 regions
  // that contain the address range. The address range actually within the
  // MemRegion will not be modified. That is assumed to have been initialized
  // elsewhere, probably via an mmap of archived heap data.
  MutexLockerEx x(Heap_lock);
  for (size_t i = 0; i < count; i++) {
    HeapWord* start_address = ranges[i].start();
    HeapWord* last_address = ranges[i].last();

    assert(reserved.contains(start_address) && reserved.contains(last_address),
           err_msg("MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
                   p2i(start_address), p2i(last_address)));
    assert(start_address > prev_last_addr,
           err_msg("Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
                   p2i(start_address), p2i(prev_last_addr)));

    HeapRegion* start_region = _hrm.addr_to_region(start_address);
    HeapRegion* last_region = _hrm.addr_to_region(last_address);
    HeapWord* bottom_address = start_region->bottom();

    // Check for a range beginning in the same region in which the
    // previous one ended.
    if (start_region == prev_last_region) {
      bottom_address = prev_last_addr + 1;
    }

    // Verify that the regions were all marked as archive regions by
    // alloc_archive_regions.
    HeapRegion* curr_region = start_region;
    while (curr_region != NULL) {
      guarantee(curr_region->is_archive(),
                err_msg("Expected archive region at index %u", curr_region->hrm_index()));
      if (curr_region != last_region) {
        curr_region = _hrm.next_region_in_heap(curr_region);
      } else {
        curr_region = NULL;
      }
    }

    prev_last_addr = last_address;
    prev_last_region = last_region;

    // Fill the memory below the allocated range with dummy object(s),
    // if the region bottom does not match the range start, or if the previous
    // range ended within the same G1 region, and there is a gap.
    if (start_address != bottom_address) {
      size_t fill_size = pointer_delta(start_address, bottom_address);
      G1CollectedHeap::fill_with_objects(bottom_address, fill_size);
      _allocator->increase_used(fill_size * HeapWordSize);
    }
  }
}

HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
                                                        uint* gc_count_before_ret,
                                                        uint* gclocker_retry_count_ret) {
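A standalone sketch of the range-merging step complete_archive() performs for end_archive_alloc_range() above: walk the allocated regions from the most recently allocated (lowest address, since regions are handed out from the top of the heap downward) and coalesce address-contiguous [bottom, top) spans into summary ranges. Integer pairs stand in for HeapRegion bounds and MemRegion:

#include <cstddef>
#include <utility>
#include <vector>

typedef std::pair<size_t, size_t> Span;  // [base, top), word-addressed

static std::vector<Span> merge_contiguous(const std::vector<Span>& regions) {
  std::vector<Span> out;
  if (regions.empty()) {
    return out;
  }
  int index = (int)regions.size() - 1;  // newest (lowest-addressed) region first
  size_t base = regions[index].first;
  size_t top = base;
  while (index >= 0) {
    if (regions[index].first != top) {  // gap: flush the current summary range
      out.push_back(Span(base, top));
      base = regions[index].first;
    }
    top = regions[index].second;
    index--;
  }
  out.push_back(Span(base, top));
  return out;
}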
@ -1039,7 +1241,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
  } else {
    HeapWord* result = humongous_obj_allocate(word_size, context);
    if (result != NULL && g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {
      g1_policy()->set_initiate_conc_mark_if_possible();
      collector_state()->set_initiate_conc_mark_if_possible(true);
    }
    return result;
  }
@ -1131,6 +1333,8 @@ public:
      }
    } else if (hr->is_continues_humongous()) {
      _hr_printer->post_compaction(hr, G1HRPrinter::ContinuesHumongous);
    } else if (hr->is_archive()) {
      _hr_printer->post_compaction(hr, G1HRPrinter::Archive);
    } else if (hr->is_old()) {
      _hr_printer->post_compaction(hr, G1HRPrinter::Old);
    } else {
@ -1250,7 +1454,7 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
    g1_policy()->stop_incremental_cset_building();

    tear_down_region_sets(false /* free_list_only */);
    g1_policy()->set_gcs_are_young(true);
    collector_state()->set_gcs_are_young(true);

    // See the comments in g1CollectedHeap.hpp and
    // G1CollectedHeap::ref_processing_init() about
@ -1714,16 +1918,15 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
  _ref_processor_stw(NULL),
  _bot_shared(NULL),
  _evac_failure_scan_stack(NULL),
  _mark_in_progress(false),
  _cg1r(NULL),
  _g1mm(NULL),
  _refine_cte_cl(NULL),
  _full_collection(false),
  _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
  _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
  _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
  _humongous_reclaim_candidates(),
  _has_humongous_reclaim_candidates(false),
  _archive_allocator(NULL),
  _free_regions_coming(false),
  _young_list(new YoungList(this)),
  _gc_time_stamp(0),
@ -1733,7 +1936,6 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
  _surviving_young_words(NULL),
  _old_marking_cycles_started(0),
  _old_marking_cycles_completed(0),
  _concurrent_cycle_started(false),
  _heap_summary_sent(false),
  _in_cset_fast_test(),
  _dirty_cards_region_list(NULL),
@ -1750,9 +1952,13 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
  _workers->initialize_workers();

  _allocator = G1Allocator::create_allocator(this);
  _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2;
  _humongous_object_threshold_in_words = humongous_threshold_for(HeapRegion::GrainWords);

  int n_queues = (int)ParallelGCThreads;
  // Override the default _filler_array_max_size so that no humongous filler
  // objects are created.
  _filler_array_max_size = _humongous_object_threshold_in_words;

  uint n_queues = ParallelGCThreads;
  _task_queues = new RefToScanQueueSet(n_queues);

  uint n_rem_sets = HeapRegionRemSet::num_par_rem_sets();
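The constructor hunk above replaces the open-coded GrainWords / 2 with humongous_threshold_for(), the same helper the new is_archive_alloc_too_large() applies to the minimum region size. The arithmetic it centralizes:

#include <cstddef>

static size_t humongous_threshold_for(size_t region_size_in_words) {
  return region_size_in_words / 2;  // objects of half a region or more are humongous
}

static bool is_humongous(size_t word_size, size_t region_size_in_words) {
  return word_size >= humongous_threshold_for(region_size_in_words);
}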
@ -1762,7 +1968,7 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
  _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(uint, n_queues, mtGC);
  _evacuation_failed_info_array = NEW_C_HEAP_ARRAY(EvacuationFailedInfo, n_queues, mtGC);

  for (int i = 0; i < n_queues; i++) {
  for (uint i = 0; i < n_queues; i++) {
    RefToScanQueue* q = new RefToScanQueue();
    q->initialize();
    _task_queues->register_queue(i, q);
@ -2064,11 +2270,11 @@ void G1CollectedHeap::ref_processing_init() {
    new ReferenceProcessor(mr,    // span
                           ParallelRefProcEnabled && (ParallelGCThreads > 1),
                           // mt processing
                           (uint) ParallelGCThreads,
                           ParallelGCThreads,
                           // degree of mt processing
                           (ParallelGCThreads > 1) || (ConcGCThreads > 1),
                           // mt discovery
                           (uint) MAX2(ParallelGCThreads, ConcGCThreads),
                           MAX2(ParallelGCThreads, ConcGCThreads),
                           // degree of mt discovery
                           false,
                           // Reference discovery is not atomic
@ -2081,11 +2287,11 @@ void G1CollectedHeap::ref_processing_init() {
    new ReferenceProcessor(mr,    // span
                           ParallelRefProcEnabled && (ParallelGCThreads > 1),
                           // mt processing
                           (uint) ParallelGCThreads,
                           ParallelGCThreads,
                           // degree of mt processing
                           (ParallelGCThreads > 1),
                           // mt discovery
                           (uint) ParallelGCThreads,
                           ParallelGCThreads,
                           // degree of mt discovery
                           true,
                           // Reference discovery is atomic
@ -2165,7 +2371,11 @@ void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl,

// Computes the sum of the storage used by the various regions.
size_t G1CollectedHeap::used() const {
  return _allocator->used();
  size_t result = _allocator->used();
  if (_archive_allocator != NULL) {
    result += _archive_allocator->used();
  }
  return result;
}

size_t G1CollectedHeap::used_unlocked() const {
@ -2288,7 +2498,7 @@ void G1CollectedHeap::increment_old_marking_cycles_completed(bool concurrent) {
}

void G1CollectedHeap::register_concurrent_cycle_start(const Ticks& start_time) {
  _concurrent_cycle_started = true;
  collector_state()->set_concurrent_cycle_started(true);
  _gc_timer_cm->register_gc_start(start_time);

  _gc_tracer_cm->report_gc_start(gc_cause(), _gc_timer_cm->gc_start());
@ -2296,7 +2506,7 @@ void G1CollectedHeap::register_concurrent_cycle_start(const Ticks& start_time) {
}

void G1CollectedHeap::register_concurrent_cycle_end() {
  if (_concurrent_cycle_started) {
  if (collector_state()->concurrent_cycle_started()) {
    if (_cm->has_aborted()) {
      _gc_tracer_cm->report_concurrent_mode_failure();
    }
@ -2305,13 +2515,13 @@ void G1CollectedHeap::register_concurrent_cycle_end() {
    _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());

    // Clear state variables to prepare for the next concurrent cycle.
    _concurrent_cycle_started = false;
    collector_state()->set_concurrent_cycle_started(false);
    _heap_summary_sent = false;
  }
}

void G1CollectedHeap::trace_heap_after_concurrent_cycle() {
  if (_concurrent_cycle_started) {
  if (collector_state()->concurrent_cycle_started()) {
    // This function can be called when:
    //  the cleanup pause is run
    //  the concurrent cycle is aborted before the cleanup pause.
@ -2325,22 +2535,6 @@ void G1CollectedHeap::trace_heap_after_concurrent_cycle() {
  }
}

G1YCType G1CollectedHeap::yc_type() {
  bool is_young = g1_policy()->gcs_are_young();
  bool is_initial_mark = g1_policy()->during_initial_mark_pause();
  bool is_during_mark = mark_in_progress();

  if (is_initial_mark) {
    return InitialMark;
  } else if (is_during_mark) {
    return DuringMark;
  } else if (is_young) {
    return Normal;
  } else {
    return Mixed;
  }
}

void G1CollectedHeap::collect(GCCause::Cause cause) {
  assert_heap_not_locked();

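The yc_type() logic deleted above lives on in G1CollectorState; a compact standalone restatement of the classification, with plain bools standing in for the collector-state queries:

enum G1YCTypeSketch { InitialMark, DuringMark, Normal, Mixed };

static G1YCTypeSketch yc_type_sketch(bool gcs_are_young,
                                     bool during_initial_mark_pause,
                                     bool mark_in_progress) {
  if (during_initial_mark_pause) return InitialMark;
  if (mark_in_progress)          return DuringMark;
  return gcs_are_young ? Normal : Mixed;
}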
@ -2594,7 +2788,7 @@ void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r,

HeapRegion* G1CollectedHeap::next_compaction_region(const HeapRegion* from) const {
  HeapRegion* result = _hrm.next_region_in_heap(from);
  while (result != NULL && result->is_humongous()) {
  while (result != NULL && result->is_pinned()) {
    result = _hrm.next_region_in_heap(result);
  }
  return result;
@ -2902,6 +3096,31 @@ public:
  size_t live_bytes() { return _live_bytes; }
};

class VerifyArchiveOopClosure: public OopClosure {
public:
  VerifyArchiveOopClosure(HeapRegion *hr) { }
  void do_oop(narrowOop *p) { do_oop_work(p); }
  void do_oop(      oop *p) { do_oop_work(p); }

  template <class T> void do_oop_work(T *p) {
    oop obj = oopDesc::load_decode_heap_oop(p);
    guarantee(obj == NULL || G1MarkSweep::in_archive_range(obj),
              err_msg("Archive object at " PTR_FORMAT " references a non-archive object at " PTR_FORMAT,
                      p2i(p), p2i(obj)));
  }
};

class VerifyArchiveRegionClosure: public ObjectClosure {
public:
  VerifyArchiveRegionClosure(HeapRegion *hr) { }
  // Verify that all object pointers are to archive regions.
  void do_object(oop o) {
    VerifyArchiveOopClosure checkOop(NULL);
    assert(o != NULL, "Should not be here for NULL oops");
    o->oop_iterate_no_header(&checkOop);
  }
};

class VerifyRegionClosure: public HeapRegionClosure {
private:
  bool _par;
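A standalone sketch of the invariant VerifyArchiveOopClosure enforces above: every reference stored in an archive object must be null or point back into an archive range. Integer addresses and a single range stand in for oops and G1MarkSweep::in_archive_range():

#include <cstdint>

struct ArchiveRange { uintptr_t start, end; };  // half-open [start, end)

static bool archive_ref_ok(uintptr_t ref, const ArchiveRange& r) {
  return ref == 0 || (ref >= r.start && ref < r.end);
}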
@ -2921,6 +3140,13 @@ public:
  }

  bool doHeapRegion(HeapRegion* r) {
    // For archive regions, verify there are no heap pointers to
    // non-pinned regions. For all others, verify liveness info.
    if (r->is_archive()) {
      VerifyArchiveRegionClosure verify_oop_pointers(r);
      r->object_iterate(&verify_oop_pointers);
      return true;
    }
    if (!r->is_continues_humongous()) {
      bool failures = false;
      r->verify(_vo, &failures);
@ -3105,7 +3331,7 @@ bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
  switch (vo) {
  case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);
  case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);
  case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked();
  case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked() && !hr->is_archive();
  default:                            ShouldNotReachHere();
  }
  return false; // keep some compilers happy
@ -3116,7 +3342,10 @@ bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
  switch (vo) {
  case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj);
  case VerifyOption_G1UseNextMarking: return is_obj_ill(obj);
  case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked();
  case VerifyOption_G1UseMarkWord: {
    HeapRegion* hr = _hrm.addr_to_region((HeapWord*)obj);
    return !obj->is_gc_marked() && !hr->is_archive();
  }
  default:                            ShouldNotReachHere();
  }
  return false; // keep some compilers happy
@ -3149,7 +3378,7 @@ void G1CollectedHeap::print_extended_on(outputStream* st) const {
  st->cr();
  st->print_cr("Heap Regions: (Y=young(eden), SU=young(survivor), "
               "HS=humongous(starts), HC=humongous(continues), "
               "CS=collection set, F=free, TS=gc time stamp, "
               "CS=collection set, F=free, A=archive, TS=gc time stamp, "
               "PTAMS=previous top-at-mark-start, "
               "NTAMS=next top-at-mark-start)");
  PrintRegionClosure blk(st);
@ -3251,6 +3480,28 @@ void G1CollectedHeap::print_all_rsets() {
}
#endif // PRODUCT

G1HeapSummary G1CollectedHeap::create_g1_heap_summary() {
  YoungList* young_list = heap()->young_list();

  size_t eden_used_bytes = young_list->eden_used_bytes();
  size_t survivor_used_bytes = young_list->survivor_used_bytes();

  size_t eden_capacity_bytes =
    (g1_policy()->young_list_target_length() * HeapRegion::GrainBytes) - survivor_used_bytes;

  VirtualSpaceSummary heap_summary = create_heap_space_summary();
  return G1HeapSummary(heap_summary, used(), eden_used_bytes, eden_capacity_bytes, survivor_used_bytes);
}

void G1CollectedHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
  const G1HeapSummary& heap_summary = create_g1_heap_summary();
  gc_tracer->report_gc_heap_summary(when, heap_summary);

  const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
  gc_tracer->report_metaspace_summary(when, metaspace_summary);
}


G1CollectedHeap* G1CollectedHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to G1CollectedHeap::heap()");
@ -3587,8 +3838,8 @@ void G1CollectedHeap::log_gc_header() {
  gclog_or_tty->gclog_stamp(_gc_tracer_stw->gc_id());

  GCCauseString gc_cause_str = GCCauseString("GC pause", gc_cause())
    .append(g1_policy()->gcs_are_young() ? "(young)" : "(mixed)")
    .append(g1_policy()->during_initial_mark_pause() ? " (initial-mark)" : "");
    .append(collector_state()->gcs_are_young() ? "(young)" : "(mixed)")
    .append(collector_state()->during_initial_mark_pause() ? " (initial-mark)" : "");

  gclog_or_tty->print("[%s", (const char*)gc_cause_str);
}
@ -3616,6 +3867,21 @@ void G1CollectedHeap::log_gc_footer(double pause_time_sec) {
  gclog_or_tty->flush();
}

void G1CollectedHeap::wait_for_root_region_scanning() {
  double scan_wait_start = os::elapsedTime();
  // We have to wait until the CM threads finish scanning the
  // root regions as it's the only way to ensure that all the
  // objects on them have been correctly scanned before we start
  // moving them during the GC.
  bool waited = _cm->root_regions()->wait_until_scan_finished();
  double wait_time_ms = 0.0;
  if (waited) {
    double scan_wait_end = os::elapsedTime();
    wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
  }
  g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
}

bool
G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
  assert_at_safepoint(true /* should_be_vm_thread */);
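A sketch of the timing pattern inside the new wait_for_root_region_scanning(): wait time is charged to the phase only when a wait actually occurred. std::chrono and callables stand in for os::elapsedTime() and the phase-times recorder:

#include <chrono>

template <typename WaitFn, typename RecordFn>
static void timed_wait(WaitFn wait, RecordFn record_ms) {
  auto start = std::chrono::steady_clock::now();
  bool waited = wait();  // e.g. block until the root-region scan completes
  double ms = 0.0;
  if (waited) {
    auto end = std::chrono::steady_clock::now();
    ms = std::chrono::duration<double, std::milli>(end - start).count();
  }
  record_ms(ms);
}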
@ -3632,6 +3898,8 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
|
||||
SvcGCMarker sgcm(SvcGCMarker::MINOR);
|
||||
ResourceMark rm;
|
||||
|
||||
wait_for_root_region_scanning();
|
||||
|
||||
G1Log::update_level();
|
||||
print_heap_before_gc();
|
||||
trace_heap_before_gc(_gc_tracer_stw);
|
||||
@ -3645,29 +3913,29 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
|
||||
g1_policy()->decide_on_conc_mark_initiation();
|
||||
|
||||
// We do not allow initial-mark to be piggy-backed on a mixed GC.
|
||||
assert(!g1_policy()->during_initial_mark_pause() ||
|
||||
g1_policy()->gcs_are_young(), "sanity");
|
||||
assert(!collector_state()->during_initial_mark_pause() ||
|
||||
collector_state()->gcs_are_young(), "sanity");
|
||||
|
||||
// We also do not allow mixed GCs during marking.
|
||||
assert(!mark_in_progress() || g1_policy()->gcs_are_young(), "sanity");
|
||||
assert(!collector_state()->mark_in_progress() || collector_state()->gcs_are_young(), "sanity");
|
||||
|
||||
// Record whether this pause is an initial mark. When the current
|
||||
// thread has completed its logging output and it's safe to signal
|
||||
// the CM thread, the flag's value in the policy has been reset.
|
||||
bool should_start_conc_mark = g1_policy()->during_initial_mark_pause();
|
||||
bool should_start_conc_mark = collector_state()->during_initial_mark_pause();
|
||||
|
||||
// Inner scope for scope based logging, timers, and stats collection
|
||||
{
|
||||
EvacuationInfo evacuation_info;
|
||||
|
||||
if (g1_policy()->during_initial_mark_pause()) {
|
||||
if (collector_state()->during_initial_mark_pause()) {
|
||||
// We are about to start a marking cycle, so we increment the
|
||||
// full collection counter.
|
||||
increment_old_marking_cycles_started();
|
||||
register_concurrent_cycle_start(_gc_timer_stw->gc_start());
|
||||
}
|
||||
|
||||
_gc_tracer_stw->report_yc_type(yc_type());
|
||||
_gc_tracer_stw->report_yc_type(collector_state()->yc_type());
|
||||
|
||||
TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
|
||||
|
||||
@ -3677,7 +3945,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
|
||||
workers()->set_active_workers(active_workers);
|
||||
|
||||
double pause_start_sec = os::elapsedTime();
|
||||
g1_policy()->phase_times()->note_gc_start(active_workers, mark_in_progress());
|
||||
g1_policy()->phase_times()->note_gc_start(active_workers, collector_state()->mark_in_progress());
|
||||
log_gc_header();
|
||||
|
||||
TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
|
||||
@ -3753,25 +4021,12 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
|
||||
|
||||
g1_policy()->record_collection_pause_start(sample_start_time_sec);
|
||||
|
||||
double scan_wait_start = os::elapsedTime();
|
||||
// We have to wait until the CM threads finish scanning the
|
||||
// root regions as it's the only way to ensure that all the
|
||||
// objects on them have been correctly scanned before we start
|
||||
// moving them during the GC.
|
||||
bool waited = _cm->root_regions()->wait_until_scan_finished();
|
||||
double wait_time_ms = 0.0;
|
||||
if (waited) {
|
||||
double scan_wait_end = os::elapsedTime();
|
||||
wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
|
||||
}
|
||||
g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
|
||||
|
||||
#if YOUNG_LIST_VERBOSE
|
||||
gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:");
|
||||
_young_list->print();
|
||||
#endif // YOUNG_LIST_VERBOSE
|
||||
|
||||
if (g1_policy()->during_initial_mark_pause()) {
|
||||
if (collector_state()->during_initial_mark_pause()) {
|
||||
concurrent_mark()->checkpointRootsInitialPre();
|
||||
}
|
||||
|
||||
@ -3848,6 +4103,9 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {

if (evacuation_failed()) {
_allocator->set_used(recalculate_used());
if (_archive_allocator != NULL) {
_archive_allocator->clear_used();
}
for (uint i = 0; i < ParallelGCThreads; i++) {
if (_evacuation_failed_info_array[i].has_failed()) {
_gc_tracer_stw->report_evacuation_failed(_evacuation_failed_info_array[i]);
@ -3859,12 +4117,12 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
_allocator->increase_used(g1_policy()->bytes_copied_during_gc());
}

if (g1_policy()->during_initial_mark_pause()) {
if (collector_state()->during_initial_mark_pause()) {
// We have to do this before we notify the CM threads that
// they can start working to make sure that all the
// appropriate initialization is done on the CM object.
concurrent_mark()->checkpointRootsInitialPost();
set_marking_started();
collector_state()->set_mark_in_progress(true);
// Note that we don't actually trigger the CM thread at
// this point. We do that later when we're sure that
// the current thread has completed its logging output.
@ -4343,7 +4601,7 @@ public:

pss.set_evac_failure_closure(&evac_failure_cl);

bool only_young = _g1h->g1_policy()->gcs_are_young();
bool only_young = _g1h->collector_state()->gcs_are_young();

// Non-IM young GC.
G1ParCopyClosure<G1BarrierNone, G1MarkNone> scan_only_root_cl(_g1h, &pss, rp);
@ -4369,7 +4627,7 @@ public:

bool trace_metadata = false;

if (_g1h->g1_policy()->during_initial_mark_pause()) {
if (_g1h->collector_state()->during_initial_mark_pause()) {
// We also need to mark copied objects.
strong_root_cl = &scan_mark_root_cl;
strong_cld_cl = &scan_mark_cld_cl;
@ -5021,7 +5279,7 @@ public:

OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl;

if (_g1h->g1_policy()->during_initial_mark_pause()) {
if (_g1h->collector_state()->during_initial_mark_pause()) {
// We also need to mark copied objects.
copy_non_heap_cl = &copy_mark_non_heap_cl;
}
@ -5122,7 +5380,7 @@ public:

OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl;

if (_g1h->g1_policy()->during_initial_mark_pause()) {
if (_g1h->collector_state()->during_initial_mark_pause()) {
// We also need to mark copied objects.
copy_non_heap_cl = &copy_mark_non_heap_cl;
}
@ -5234,7 +5492,7 @@ void G1CollectedHeap::process_discovered_references() {

OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl;

if (g1_policy()->during_initial_mark_pause()) {
if (collector_state()->during_initial_mark_pause()) {
// We also need to mark copied objects.
copy_non_heap_cl = &copy_mark_non_heap_cl;
}
@ -5342,7 +5600,7 @@ void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info) {
G1RootProcessor root_processor(this, n_workers);
G1ParTask g1_par_task(this, _task_queues, &root_processor, n_workers);
// InitialMark needs claim bits to keep track of the marked-through CLDs.
if (g1_policy()->during_initial_mark_pause()) {
if (collector_state()->during_initial_mark_pause()) {
ClassLoaderDataGraph::clear_claimed_marks();
}

@ -5598,7 +5856,7 @@ bool G1CollectedHeap::verify_bitmaps(const char* caller, HeapRegion* hr) {
// We reset mark_in_progress() before we reset _cmThread->in_progress() and in this window
// we do the clearing of the next bitmap concurrently. Thus, we can not verify the bitmap
// if we happen to be in that state.
if (mark_in_progress() || !_cmThread->in_progress()) {
if (collector_state()->mark_in_progress() || !_cmThread->in_progress()) {
res_n = verify_no_bits_over_tams("next", next_bitmap, ntams, end);
}
if (!res_p || !res_n) {
@ -6169,13 +6427,18 @@ public:
assert(!r->is_young(), "we should not come across young regions");

if (r->is_humongous()) {
// We ignore humongous regions, we left the humongous set unchanged
// We ignore humongous regions. We left the humongous set unchanged.
} else {
// Objects that were compacted would have ended up on regions
// that were previously old or free.
// that were previously old or free. Archive regions (which are
// old) will not have been touched.
assert(r->is_free() || r->is_old(), "invariant");
// We now consider them old, so register as such.
r->set_old();
// We now consider them old, so register as such. Leave
// archive regions set that way, however, while still adding
// them to the old set.
if (!r->is_archive()) {
r->set_old();
}
_old_set->add(r);
}
_total_used += r->used();
@ -6201,6 +6464,9 @@ void G1CollectedHeap::rebuild_region_sets(bool free_list_only) {

if (!free_list_only) {
_allocator->set_used(cl.total_used());
if (_archive_allocator != NULL) {
_archive_allocator->clear_used();
}
}
assert(_allocator->used_unlocked() == recalculate_used(),
err_msg("inconsistent _allocator->used_unlocked(), "
@ -6279,7 +6545,7 @@ HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
_hr_printer.alloc(new_alloc_region, G1HRPrinter::Old);
check_bitmaps("Old Region Allocation", new_alloc_region);
}
bool during_im = g1_policy()->during_initial_mark_pause();
bool during_im = collector_state()->during_initial_mark_pause();
new_alloc_region->note_start_of_copying(during_im);
return new_alloc_region;
}
@ -6290,7 +6556,7 @@ HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
size_t allocated_bytes,
InCSetState dest) {
bool during_im = g1_policy()->during_initial_mark_pause();
bool during_im = collector_state()->during_initial_mark_pause();
alloc_region->note_end_of_copying(during_im);
g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
if (dest.is_young()) {
@ -6301,6 +6567,25 @@ void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
_hr_printer.retire(alloc_region);
}

HeapRegion* G1CollectedHeap::alloc_highest_free_region() {
bool expanded = false;
uint index = _hrm.find_highest_free(&expanded);

if (index != G1_NO_HRM_INDEX) {
if (expanded) {
ergo_verbose1(ErgoHeapSizing,
"attempt heap expansion",
ergo_format_reason("requested address range outside heap bounds")
ergo_format_byte("region size"),
HeapRegion::GrainWords * HeapWordSize);
}
_hrm.allocate_free_regions_starting_at(index, 1);
return region_at(index);
}
return NULL;
}


// Heap region set verification

class VerifyRegionListsClosure : public HeapRegionClosure {
@ -6337,6 +6622,9 @@ public:
assert(hr->containing_set() == _old_set, err_msg("Heap region %u is old but not in the old set.", hr->hrm_index()));
_old_count.increment(1u, hr->capacity());
} else {
// There are no other valid region types. Check for one invalid
// one we can identify: pinned without old or humongous set.
assert(!hr->is_pinned(), err_msg("Heap region %u is pinned but not old (archive) or humongous.", hr->hrm_index()));
ShouldNotReachHere();
}
return false;

@ -31,6 +31,7 @@
#include "gc/g1/g1AllocationContext.hpp"
#include "gc/g1/g1Allocator.hpp"
#include "gc/g1/g1BiasedArray.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1HRPrinter.hpp"
#include "gc/g1/g1InCSetState.hpp"
#include "gc/g1/g1MonitoringSupport.hpp"
@ -187,6 +188,7 @@ class G1CollectedHeap : public CollectedHeap {
friend class SurvivorGCAllocRegion;
friend class OldGCAllocRegion;
friend class G1Allocator;
friend class G1ArchiveAllocator;

// Closures used in implementation.
friend class G1ParScanThreadState;
@ -249,6 +251,9 @@ private:
// Class that handles the different kinds of allocations.
G1Allocator* _allocator;

// Class that handles archive allocation ranges.
G1ArchiveAllocator* _archive_allocator;

// Statistics for each allocation context
AllocationContextStats _allocation_context_stats;

@ -328,6 +333,9 @@ private:
// (d) cause == _g1_humongous_allocation
bool should_do_concurrent_full_gc(GCCause::Cause cause);

// indicates whether we are in young or mixed GC mode
G1CollectorState _collector_state;

// Keeps track of how many "old marking cycles" (i.e., Full GCs or
// concurrent cycles) we have started.
volatile uint _old_marking_cycles_started;
@ -336,7 +344,6 @@ private:
// concurrent cycles) we have completed.
volatile uint _old_marking_cycles_completed;

bool _concurrent_cycle_started;
bool _heap_summary_sent;

// This is a non-product method that is helpful for testing. It is
@ -367,6 +374,8 @@ private:
void log_gc_header();
void log_gc_footer(double pause_time_sec);

void trace_heap(GCWhen::Type when, const GCTracer* tracer);

// These are macros so that, if the assert fires, we get the correct
// line number, file, etc.

@ -571,6 +580,10 @@ protected:
void retire_gc_alloc_region(HeapRegion* alloc_region,
size_t allocated_bytes, InCSetState dest);

// Allocate the highest free region in the reserved heap. This will commit
// regions as necessary.
HeapRegion* alloc_highest_free_region();

// - if explicit_gc is true, the GC is for a System.gc() or a heap
// inspection request and should collect the entire heap
// - if clear_all_soft_refs is true, all soft references should be
@ -701,8 +714,6 @@ public:
void register_concurrent_cycle_end();
void trace_heap_after_concurrent_cycle();

G1YCType yc_type();

G1HRPrinter* hr_printer() { return &_hr_printer; }

// Frees a non-humongous region by initializing its contents and
@ -728,6 +739,44 @@ public:
void free_humongous_region(HeapRegion* hr,
FreeRegionList* free_list,
bool par);

// Facility for allocating in 'archive' regions in high heap memory and
// recording the allocated ranges. These should all be called from the
// VM thread at safepoints, without the heap lock held. They can be used
// to create and archive a set of heap regions which can be mapped at the
// same fixed addresses in a subsequent JVM invocation.
void begin_archive_alloc_range();

// Check if the requested size would be too large for an archive allocation.
bool is_archive_alloc_too_large(size_t word_size);

// Allocate memory of the requested size from the archive region. This will
// return NULL if the size is too large or if no memory is available. It
// does not trigger a garbage collection.
HeapWord* archive_mem_allocate(size_t word_size);

// Optionally aligns the end address and returns the allocated ranges in
// an array of MemRegions in order of ascending addresses.
void end_archive_alloc_range(GrowableArray<MemRegion>* ranges,
size_t end_alignment_in_bytes = 0);

// Facility for allocating a fixed range within the heap and marking
// the containing regions as 'archive'. For use at JVM init time, when the
// caller may mmap archived heap data at the specified range(s).
// Verify that the MemRegions specified in the argument array are within the
// reserved heap.
bool check_archive_addresses(MemRegion* range, size_t count);

// Commit the appropriate G1 regions containing the specified MemRegions
// and mark them as 'archive' regions. The regions in the array must be
// non-overlapping and in order of ascending address.
bool alloc_archive_regions(MemRegion* range, size_t count);

// Insert any required filler objects in the G1 regions around the specified
// ranges to make the regions parseable. This must be called after
// alloc_archive_regions, and after class loading has occurred.
void fill_archive_regions(MemRegion* range, size_t count);
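
The archive-allocation entry points above pair into a dump-time sequence and a restore-time sequence. A minimal sketch of the intended call order (illustration only, not part of this commit; the two wrapper functions and the 128-word size are hypothetical, while the G1CollectedHeap calls are exactly the declarations above):

// Illustration only -- not part of this commit.
// Dump time: carve out archive ranges at a safepoint on the VM thread.
void dump_time_sketch(G1CollectedHeap* g1h, GrowableArray<MemRegion>* ranges) {
  g1h->begin_archive_alloc_range();
  size_t word_size = 128; // hypothetical size of one archived object
  if (!g1h->is_archive_alloc_too_large(word_size)) {
    HeapWord* p = g1h->archive_mem_allocate(word_size);
    if (p != NULL) {
      // ... copy the archived object to 'p' and record its address ...
    }
  }
  g1h->end_archive_alloc_range(ranges, 0 /* end_alignment_in_bytes */);
}

// Restore time: re-create 'archive' regions over the recorded ranges.
void restore_time_sketch(G1CollectedHeap* g1h, MemRegion* range, size_t count) {
  if (g1h->check_archive_addresses(range, count) &&
      g1h->alloc_archive_regions(range, count)) {
    // ... mmap the archived heap data into the range(s) ...
    g1h->fill_archive_regions(range, count); // after class loading
  }
}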

protected:

// Shrink the garbage-first heap by at most the given size (in bytes!).
@ -756,6 +805,8 @@ protected:
bool* succeeded,
GCCause::Cause gc_cause);

void wait_for_root_region_scanning();

// The guts of the incremental collection pause, executed by the vm
// thread. It returns false if it is unable to do the collection due
// to the GC locker being active, true otherwise
@ -791,7 +842,6 @@ protected:
// The concurrent marker (and the thread it runs in.)
ConcurrentMark* _cm;
ConcurrentMarkThread* _cmThread;
bool _mark_in_progress;

// The concurrent refiner.
ConcurrentG1Refine* _cg1r;
@ -1019,6 +1069,8 @@ public:
return CollectedHeap::G1CollectedHeap;
}

G1CollectorState* collector_state() { return &_collector_state; }

// The current policy object for the collector.
G1CollectorPolicy* g1_policy() const { return _g1_policy; }

@ -1391,6 +1443,11 @@ public:
return word_size > _humongous_object_threshold_in_words;
}

// Returns the humongous threshold for a specific region size
static size_t humongous_threshold_for(size_t region_size) {
return (region_size / 2);
}
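
A worked example of the new helper (illustration only, not part of this commit): the argument is a region size in words, matching the word-based comparison in is_humongous() above.

// Illustration only. Assuming 1 MB regions on a 64-bit VM (8-byte
// HeapWords), a region is 131072 words, so
// G1CollectedHeap::humongous_threshold_for(131072) returns 65536 words
// (512 KB). Because is_humongous() uses a strict '>', an allocation
// larger than half a region is treated as humongous.
size_t threshold_in_words = G1CollectedHeap::humongous_threshold_for(131072);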

// Update mod union table with the set of dirty cards.
void updateModUnion();

@ -1399,17 +1456,6 @@ public:
// bits.
void markModUnionRange(MemRegion mr);

// Records the fact that a marking phase is no longer in progress.
void set_marking_complete() {
_mark_in_progress = false;
}
void set_marking_started() {
_mark_in_progress = true;
}
bool mark_in_progress() {
return _mark_in_progress;
}

// Print the maximum heap capacity.
virtual size_t max_capacity() const;

@ -1448,21 +1494,23 @@ public:

// Determine if an object is dead, given the object and also
// the region to which the object belongs. An object is dead
// iff a) it was not allocated since the last mark and b) it
// is not marked.
// iff a) it was not allocated since the last mark, b) it
// is not marked, and c) it is not in an archive region.
bool is_obj_dead(const oop obj, const HeapRegion* hr) const {
return
!hr->obj_allocated_since_prev_marking(obj) &&
!isMarkedPrev(obj);
!isMarkedPrev(obj) &&
!hr->is_archive();
}

// This function returns true when an object has been
// around since the previous marking and hasn't yet
// been marked during this marking.
// been marked during this marking, and is not in an archive region.
bool is_obj_ill(const oop obj, const HeapRegion* hr) const {
return
!hr->obj_allocated_since_next_marking(obj) &&
!isMarkedNext(obj);
!isMarkedNext(obj) &&
!hr->is_archive();
}

// Determine if an object is dead, given only the object itself.
@ -1522,14 +1570,6 @@ public:
void redirty_logged_cards();
// Verification

// The following is just to alert the verification code
// that a full collection has occurred and that the
// remembered sets are no longer up to date.
bool _full_collection;
void set_full_collection() { _full_collection = true;}
void clear_full_collection() {_full_collection = false;}
bool full_collection() {return _full_collection;}

// Perform any cleanup actions necessary before allowing a verification.
virtual void prepare_for_verify();

@ -1565,6 +1605,8 @@ public:
bool is_obj_dead_cond(const oop obj,
const VerifyOption vo) const;

G1HeapSummary create_g1_heap_summary();

// Printing

virtual void print_on(outputStream* st) const;

@ -29,6 +29,7 @@
#include "gc/g1/g1AllocRegion.inline.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
@ -288,9 +289,9 @@ G1CollectedHeap::set_evacuation_failure_alot_for_current_gc() {
_evacuation_failure_alot_for_current_gc = (elapsed_gcs >= G1EvacuationFailureALotInterval);

// Now check if G1EvacuationFailureALot is enabled for the current GC type.
const bool gcs_are_young = g1_policy()->gcs_are_young();
const bool during_im = g1_policy()->during_initial_mark_pause();
const bool during_marking = mark_in_progress();
const bool gcs_are_young = collector_state()->gcs_are_young();
const bool during_im = collector_state()->during_initial_mark_pause();
const bool during_marking = collector_state()->mark_in_progress();

_evacuation_failure_alot_for_current_gc &=
evacuation_failure_alot_for_gc_type(gcs_are_young,

@ -107,22 +107,11 @@ G1CollectorPolicy::G1CollectorPolicy() :

_pause_time_target_ms((double) MaxGCPauseMillis),

_gcs_are_young(true),

_during_marking(false),
_in_marking_window(false),
_in_marking_window_im(false),

_recent_prev_end_times_for_all_gcs_sec(
new TruncatedSeq(NumPrevPausesForHeuristics)),

_recent_avg_pause_time_ratio(0.0),

_initiate_conc_mark_if_possible(false),
_during_initial_mark_pause(false),
_last_young_gc(false),
_last_gc_was_young(false),

_eden_used_bytes_before_gc(0),
_survivor_used_bytes_before_gc(0),
_heap_used_bytes_before_gc(0),
@ -334,6 +323,8 @@ void G1CollectorPolicy::post_heap_initialize() {
}
}

G1CollectorState* G1CollectorPolicy::collector_state() { return _g1->collector_state(); }

G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults), _adaptive_size(true),
_min_desired_young_length(0), _max_desired_young_length(0) {
if (FLAG_IS_CMDLINE(NewRatio)) {
@ -552,7 +543,7 @@ void G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) {

uint young_list_target_length = 0;
if (adaptive_young_list_length()) {
if (gcs_are_young()) {
if (collector_state()->gcs_are_young()) {
young_list_target_length =
calculate_young_list_target_length(rs_lengths,
base_min_length,
@ -594,7 +585,7 @@ G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths,
uint desired_min_length,
uint desired_max_length) {
assert(adaptive_young_list_length(), "pre-condition");
assert(gcs_are_young(), "only call this for young GCs");
assert(collector_state()->gcs_are_young(), "only call this for young GCs");

// In case some edge-condition makes the desired max length too small...
if (desired_max_length <= desired_min_length) {
@ -697,7 +688,7 @@ double G1CollectorPolicy::predict_survivor_regions_evac_time() {
for (HeapRegion * r = _recorded_survivor_head;
r != NULL && r != _recorded_survivor_tail->get_next_young_region();
r = r->get_next_young_region()) {
survivor_regions_evac_time += predict_region_elapsed_time_ms(r, gcs_are_young());
survivor_regions_evac_time += predict_region_elapsed_time_ms(r, collector_state()->gcs_are_young());
}
return survivor_regions_evac_time;
}
@ -782,7 +773,7 @@ void G1CollectorPolicy::record_full_collection_start() {
_full_collection_start_sec = os::elapsedTime();
record_heap_size_info_at_start(true /* full */);
// Release the future to-space so that it is available for compaction into.
_g1->set_full_collection();
collector_state()->set_full_collection(true);
}

void G1CollectorPolicy::record_full_collection_end() {
@ -796,16 +787,16 @@ void G1CollectorPolicy::record_full_collection_end() {

update_recent_gc_times(end_sec, full_gc_time_ms);

_g1->clear_full_collection();
collector_state()->set_full_collection(false);

// "Nuke" the heuristics that control the young/mixed GC
// transitions and make sure we start with young GCs after the Full GC.
set_gcs_are_young(true);
_last_young_gc = false;
clear_initiate_conc_mark_if_possible();
clear_during_initial_mark_pause();
_in_marking_window = false;
_in_marking_window_im = false;
collector_state()->set_gcs_are_young(true);
collector_state()->set_last_young_gc(false);
collector_state()->set_initiate_conc_mark_if_possible(false);
collector_state()->set_during_initial_mark_pause(false);
collector_state()->set_in_marking_window(false);
collector_state()->set_in_marking_window_im(false);

_short_lived_surv_rate_group->start_adding_regions();
// also call this on any additional surv rate groups
@ -845,7 +836,7 @@ void G1CollectorPolicy::record_collection_pause_start(double start_time_sec) {
_collection_set_bytes_used_before = 0;
_bytes_copied_during_gc = 0;

_last_gc_was_young = false;
collector_state()->set_last_gc_was_young(false);

// do that for any other surv rate groups
_short_lived_surv_rate_group->stop_adding_regions();
@ -856,15 +847,15 @@

void G1CollectorPolicy::record_concurrent_mark_init_end(double
mark_init_elapsed_time_ms) {
_during_marking = true;
assert(!initiate_conc_mark_if_possible(), "we should have cleared it by now");
clear_during_initial_mark_pause();
collector_state()->set_during_marking(true);
assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now");
collector_state()->set_during_initial_mark_pause(false);
_cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms;
}

void G1CollectorPolicy::record_concurrent_mark_remark_start() {
_mark_remark_start_sec = os::elapsedTime();
_during_marking = false;
collector_state()->set_during_marking(false);
}

void G1CollectorPolicy::record_concurrent_mark_remark_end() {
@ -882,8 +873,8 @@ void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
}

void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
_last_young_gc = true;
_in_marking_window = false;
collector_state()->set_last_young_gc(true);
collector_state()->set_in_marking_window(false);
}

void G1CollectorPolicy::record_concurrent_pause() {
@ -904,7 +895,7 @@ bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc
size_t alloc_byte_size = alloc_word_size * HeapWordSize;

if ((cur_used_bytes + alloc_byte_size) > marking_initiating_used_threshold) {
if (gcs_are_young() && !_last_young_gc) {
if (collector_state()->gcs_are_young() && !collector_state()->last_young_gc()) {
ergo_verbose5(ErgoConcCycles,
"request concurrent cycle initiation",
ergo_format_reason("occupancy higher than threshold")
@ -959,14 +950,14 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, Evacua
}
#endif // PRODUCT

last_pause_included_initial_mark = during_initial_mark_pause();
last_pause_included_initial_mark = collector_state()->during_initial_mark_pause();
if (last_pause_included_initial_mark) {
record_concurrent_mark_init_end(0.0);
} else if (need_to_start_conc_mark("end of GC")) {
// Note: this might have already been set, if during the last
// pause we decided to start a cycle but at the beginning of
// this pause we decided to postpone it. That's OK.
set_initiate_conc_mark_if_possible();
collector_state()->set_initiate_conc_mark_if_possible(true);
}

_mmu_tracker->add_pause(end_time_sec - pause_time_ms/1000.0,
@ -1028,37 +1019,37 @@
}
}

bool new_in_marking_window = _in_marking_window;
bool new_in_marking_window = collector_state()->in_marking_window();
bool new_in_marking_window_im = false;
if (last_pause_included_initial_mark) {
new_in_marking_window = true;
new_in_marking_window_im = true;
}

if (_last_young_gc) {
if (collector_state()->last_young_gc()) {
// This is supposed to be the "last young GC" before we start
// doing mixed GCs. Here we decide whether to start mixed GCs or not.

if (!last_pause_included_initial_mark) {
if (next_gc_should_be_mixed("start mixed GCs",
"do not start mixed GCs")) {
set_gcs_are_young(false);
collector_state()->set_gcs_are_young(false);
}
} else {
ergo_verbose0(ErgoMixedGCs,
"do not start mixed GCs",
ergo_format_reason("concurrent cycle is about to start"));
}
_last_young_gc = false;
collector_state()->set_last_young_gc(false);
}

if (!_last_gc_was_young) {
if (!collector_state()->last_gc_was_young()) {
// This is a mixed GC. Here we decide whether to continue doing
// mixed GCs or not.

if (!next_gc_should_be_mixed("continue mixed GCs",
"do not continue mixed GCs")) {
set_gcs_are_young(true);
collector_state()->set_gcs_are_young(true);
}
}

@ -1077,7 +1068,7 @@
double cost_per_entry_ms = 0.0;
if (cards_scanned > 10) {
cost_per_entry_ms = phase_times()->average_time_ms(G1GCPhaseTimes::ScanRS) / (double) cards_scanned;
if (_last_gc_was_young) {
if (collector_state()->last_gc_was_young()) {
_cost_per_entry_ms_seq->add(cost_per_entry_ms);
} else {
_mixed_cost_per_entry_ms_seq->add(cost_per_entry_ms);
@ -1087,7 +1078,7 @@
if (_max_rs_lengths > 0) {
double cards_per_entry_ratio =
(double) cards_scanned / (double) _max_rs_lengths;
if (_last_gc_was_young) {
if (collector_state()->last_gc_was_young()) {
_young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
} else {
_mixed_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
@ -1119,7 +1110,7 @@

if (copied_bytes > 0) {
cost_per_byte_ms = phase_times()->average_time_ms(G1GCPhaseTimes::ObjCopy) / (double) copied_bytes;
if (_in_marking_window) {
if (collector_state()->in_marking_window()) {
_cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
} else {
_cost_per_byte_ms_seq->add(cost_per_byte_ms);
@ -1162,8 +1153,8 @@
_rs_lengths_seq->add((double) _max_rs_lengths);
}

_in_marking_window = new_in_marking_window;
_in_marking_window_im = new_in_marking_window_im;
collector_state()->set_in_marking_window(new_in_marking_window);
collector_state()->set_in_marking_window_im(new_in_marking_window_im);
_free_regions_at_end_of_collection = _g1->num_free_regions();
update_young_list_target_length();

@ -1301,7 +1292,7 @@ double
G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) {
size_t rs_length = predict_rs_length_diff();
size_t card_num;
if (gcs_are_young()) {
if (collector_state()->gcs_are_young()) {
card_num = predict_young_card_num(rs_length);
} else {
card_num = predict_non_young_card_num(rs_length);
@ -1467,7 +1458,7 @@ bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(
ergo_format_reason("requested by GC cause")
ergo_format_str("GC cause"),
GCCause::to_string(gc_cause));
set_initiate_conc_mark_if_possible();
collector_state()->set_initiate_conc_mark_if_possible(true);
return true;
} else {
ergo_verbose1(ErgoConcCycles,
@ -1484,13 +1475,13 @@ G1CollectorPolicy::decide_on_conc_mark_initiation() {
// We are about to decide on whether this pause will be an
// initial-mark pause.

// First, during_initial_mark_pause() should not be already set. We
// First, collector_state()->during_initial_mark_pause() should not be already set. We
// will set it here if we have to. However, it should be cleared by
// the end of the pause (it's only set for the duration of an
// initial-mark pause).
assert(!during_initial_mark_pause(), "pre-condition");
assert(!collector_state()->during_initial_mark_pause(), "pre-condition");

if (initiate_conc_mark_if_possible()) {
if (collector_state()->initiate_conc_mark_if_possible()) {
// We had noticed on a previous pause that the heap occupancy has
// gone over the initiating threshold and we should start a
// concurrent marking cycle. So we might initiate one.
@ -1501,10 +1492,10 @@
// it has completed the last one. So we can go ahead and
// initiate a new cycle.

set_during_initial_mark_pause();
collector_state()->set_during_initial_mark_pause(true);
// We do not allow mixed GCs during marking.
if (!gcs_are_young()) {
set_gcs_are_young(true);
if (!collector_state()->gcs_are_young()) {
collector_state()->set_gcs_are_young(true);
ergo_verbose0(ErgoMixedGCs,
"end mixed GCs",
ergo_format_reason("concurrent cycle is about to start"));
@ -1512,7 +1503,7 @@

// And we can now clear initiate_conc_mark_if_possible() as
// we've already acted on it.
clear_initiate_conc_mark_if_possible();
collector_state()->set_initiate_conc_mark_if_possible(false);

ergo_verbose0(ErgoConcCycles,
"initiate concurrent cycle",
@ -1686,7 +1677,7 @@ void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_l
// retiring the current allocation region) or a concurrent
// refine thread (RSet sampling).

double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young());
double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, collector_state()->gcs_are_young());
size_t used_bytes = hr->used();
_inc_cset_recorded_rs_lengths += rs_length;
_inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms;
@ -1721,7 +1712,7 @@ void G1CollectorPolicy::update_incremental_cset_info(HeapRegion* hr,
_inc_cset_recorded_rs_lengths_diffs += rs_lengths_diff;

double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();
double new_region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young());
double new_region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, collector_state()->gcs_are_young());
double elapsed_ms_diff = new_region_elapsed_time_ms - old_elapsed_time_ms;
_inc_cset_predicted_elapsed_time_ms_diffs += elapsed_ms_diff;

@ -1914,9 +1905,9 @@ void G1CollectorPolicy::finalize_cset(double target_pause_time_ms, EvacuationInf
ergo_format_ms("target pause time"),
_pending_cards, base_time_ms, time_remaining_ms, target_pause_time_ms);

_last_gc_was_young = gcs_are_young() ? true : false;
collector_state()->set_last_gc_was_young(collector_state()->gcs_are_young());

if (_last_gc_was_young) {
if (collector_state()->last_gc_was_young()) {
_trace_young_gen_time_data.increment_young_collection_count();
} else {
_trace_young_gen_time_data.increment_mixed_collection_count();
@ -1967,7 +1958,7 @@
// Set the start of the non-young choice time.
double non_young_start_time_sec = young_end_time_sec;

if (!gcs_are_young()) {
if (!collector_state()->gcs_are_young()) {
CollectionSetChooser* cset_chooser = _collectionSetChooser;
cset_chooser->verify();
const uint min_old_cset_length = calc_min_old_cset_length();
@ -2013,7 +2004,7 @@
break;
}

double predicted_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young());
double predicted_time_ms = predict_region_elapsed_time_ms(hr, collector_state()->gcs_are_young());
if (check_time_remaining) {
if (predicted_time_ms > time_remaining_ms) {
// Too expensive for the current CSet.

@ -27,6 +27,7 @@

#include "gc/g1/collectionSetChooser.hpp"
#include "gc/g1/g1Allocator.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1MMUTracker.hpp"
#include "gc/shared/collectorPolicy.hpp"

@ -193,9 +194,6 @@ private:

double _stop_world_start;

// indicates whether we are in young or mixed GC mode
bool _gcs_are_young;

uint _young_list_target_length;
uint _young_list_fixed_length;

@ -203,12 +201,6 @@ private:
// locker is active. This should be >= _young_list_target_length;
uint _young_list_max_length;

bool _last_gc_was_young;

bool _during_marking;
bool _in_marking_window;
bool _in_marking_window_im;

SurvRateGroup* _short_lived_surv_rate_group;
SurvRateGroup* _survivor_surv_rate_group;
// add here any more surv rate groups
@ -218,10 +210,6 @@ private:
double _reserve_factor;
uint _reserve_regions;

bool during_marking() {
return _during_marking;
}

enum PredictionConstants {
TruncatedSeqLength = 10
};
@ -363,7 +351,7 @@ public:
}

double predict_rs_scan_time_ms(size_t card_num) {
if (gcs_are_young()) {
if (collector_state()->gcs_are_young()) {
return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq);
} else {
return predict_mixed_rs_scan_time_ms(card_num);
@ -390,7 +378,7 @@ public:
}

double predict_object_copy_time_ms(size_t bytes_to_copy) {
if (_in_marking_window && !_in_marking_window_im) {
if (collector_state()->during_concurrent_mark()) {
return predict_object_copy_time_ms_during_cm(bytes_to_copy);
} else {
return (double) bytes_to_copy *
@ -428,7 +416,7 @@ public:
double predict_survivor_regions_evac_time();

void cset_regions_freed() {
bool propagate = _last_gc_was_young && !_in_marking_window;
bool propagate = collector_state()->should_propagate();
_short_lived_surv_rate_group->all_surviving_words_recorded(propagate);
_survivor_surv_rate_group->all_surviving_words_recorded(propagate);
// also call it on any more surv rate groups
@ -552,33 +540,6 @@ private:
return _recent_avg_pause_time_ratio;
}

// At the end of a pause we check the heap occupancy and we decide
// whether we will start a marking cycle during the next pause. If
// we decide that we want to do that, we will set this parameter to
// true. So, this parameter will stay true between the end of a
// pause and the beginning of a subsequent pause (not necessarily
// the next one, see the comments on the next field) when we decide
// that we will indeed start a marking cycle and do the initial-mark
// work.
volatile bool _initiate_conc_mark_if_possible;

// If initiate_conc_mark_if_possible() is set at the beginning of a
// pause, it is a suggestion that the pause should start a marking
// cycle by doing the initial-mark work. However, it is possible
// that the concurrent marking thread is still finishing up the
// previous marking cycle (e.g., clearing the next marking
// bitmap). If that is the case we cannot start a new cycle and
// we'll have to wait for the concurrent marking thread to finish
// what it is doing. In this case we will postpone the marking cycle
// initiation decision for the next pause. When we eventually decide
// to start a cycle, we will set _during_initial_mark_pause which
// will stay true until the end of the initial-mark pause and it's
// the condition that indicates that a pause is doing the
// initial-mark work.
volatile bool _during_initial_mark_pause;

bool _last_young_gc;

// This set of variables tracks the collector efficiency, in order to
// determine whether we should initiate a new marking.
double _cur_mark_stop_world_time_ms;
@ -647,6 +608,8 @@ public:
return CollectorPolicy::G1CollectorPolicyKind;
}

G1CollectorState* collector_state();

G1GCPhaseTimes* phase_times() const { return _phase_times; }

// Check the current value of the young list RSet lengths and
@ -786,14 +749,6 @@ public:
void print_collection_set(HeapRegion* list_head, outputStream* st);
#endif // !PRODUCT

bool initiate_conc_mark_if_possible() { return _initiate_conc_mark_if_possible; }
void set_initiate_conc_mark_if_possible() { _initiate_conc_mark_if_possible = true; }
void clear_initiate_conc_mark_if_possible() { _initiate_conc_mark_if_possible = false; }

bool during_initial_mark_pause() { return _during_initial_mark_pause; }
void set_during_initial_mark_pause() { _during_initial_mark_pause = true; }
void clear_during_initial_mark_pause(){ _during_initial_mark_pause = false; }

// This sets the initiate_conc_mark_if_possible() flag to start a
// new cycle, as long as we are not already in one. It's best if it
// is called during a safepoint when the test whether a cycle is in
@ -837,13 +792,6 @@ public:
return _young_list_max_length;
}

bool gcs_are_young() {
return _gcs_are_young;
}
void set_gcs_are_young(bool gcs_are_young) {
_gcs_are_young = gcs_are_young;
}

bool adaptive_young_list_length() {
return _young_gen_sizer->adaptive_young_list_length();
}

hotspot/src/share/vm/gc/g1/g1CollectorState.hpp (new file)
@ -0,0 +1,141 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/

#ifndef SHARE_VM_GC_G1_G1COLLECTORSTATE_HPP
#define SHARE_VM_GC_G1_G1COLLECTORSTATE_HPP

#include "utilities/globalDefinitions.hpp"
#include "gc/g1/g1YCTypes.hpp"

// Various state variables that indicate
// the phase of the G1 collection.
class G1CollectorState VALUE_OBJ_CLASS_SPEC {
// Indicates whether we are in "full young" or "mixed" GC mode.
bool _gcs_are_young;
// Was the last GC "young"?
bool _last_gc_was_young;
// Is this the "last young GC" before we start doing mixed GCs?
// Set after a concurrent mark has completed.
bool _last_young_gc;

// If initiate_conc_mark_if_possible() is set at the beginning of a
// pause, it is a suggestion that the pause should start a marking
// cycle by doing the initial-mark work. However, it is possible
// that the concurrent marking thread is still finishing up the
// previous marking cycle (e.g., clearing the next marking
// bitmap). If that is the case we cannot start a new cycle and
// we'll have to wait for the concurrent marking thread to finish
// what it is doing. In this case we will postpone the marking cycle
// initiation decision for the next pause. When we eventually decide
// to start a cycle, we will set _during_initial_mark_pause which
// will stay true until the end of the initial-mark pause and it's
// the condition that indicates that a pause is doing the
// initial-mark work.
volatile bool _during_initial_mark_pause;

// At the end of a pause we check the heap occupancy and we decide
// whether we will start a marking cycle during the next pause. If
// we decide that we want to do that, we will set this parameter to
// true. So, this parameter will stay true between the end of a
// pause and the beginning of a subsequent pause (not necessarily
// the next one, see the comments on the next field) when we decide
// that we will indeed start a marking cycle and do the initial-mark
// work.
volatile bool _initiate_conc_mark_if_possible;

// NOTE: if some of these are synonyms for others,
// the redundant fields should be eliminated. XXX
bool _during_marking;
bool _mark_in_progress;
bool _in_marking_window;
bool _in_marking_window_im;

bool _concurrent_cycle_started;
bool _full_collection;

public:
G1CollectorState() :
_gcs_are_young(true),
_last_gc_was_young(false),
_last_young_gc(false),

_during_initial_mark_pause(false),
_initiate_conc_mark_if_possible(false),

_during_marking(false),
_mark_in_progress(false),
_in_marking_window(false),
_in_marking_window_im(false),
_concurrent_cycle_started(false),
_full_collection(false) {}

// Setters
void set_gcs_are_young(bool v) { _gcs_are_young = v; }
void set_last_gc_was_young(bool v) { _last_gc_was_young = v; }
void set_last_young_gc(bool v) { _last_young_gc = v; }
void set_during_initial_mark_pause(bool v) { _during_initial_mark_pause = v; }
void set_initiate_conc_mark_if_possible(bool v) { _initiate_conc_mark_if_possible = v; }
void set_during_marking(bool v) { _during_marking = v; }
void set_mark_in_progress(bool v) { _mark_in_progress = v; }
void set_in_marking_window(bool v) { _in_marking_window = v; }
void set_in_marking_window_im(bool v) { _in_marking_window_im = v; }
void set_concurrent_cycle_started(bool v) { _concurrent_cycle_started = v; }
void set_full_collection(bool v) { _full_collection = v; }

// Getters
bool gcs_are_young() { return _gcs_are_young; }
bool last_gc_was_young() { return _last_gc_was_young; }
bool last_young_gc() { return _last_young_gc; }
bool during_initial_mark_pause() { return _during_initial_mark_pause; }
bool initiate_conc_mark_if_possible() { return _initiate_conc_mark_if_possible; }
bool during_marking() { return _during_marking; }
bool mark_in_progress() { return _mark_in_progress; }
bool in_marking_window() { return _in_marking_window; }
bool in_marking_window_im() { return _in_marking_window_im; }
bool concurrent_cycle_started() { return _concurrent_cycle_started; }
bool full_collection() { return _full_collection; }

// Composite booleans (clients worry about flickering)
bool during_concurrent_mark() {
return (_in_marking_window && !_in_marking_window_im);
}

bool should_propagate() { // XXX should have a more suitable state name or abstraction for this
return (_last_young_gc && !_in_marking_window);
}

G1YCType yc_type() {
if (during_initial_mark_pause()) {
return InitialMark;
} else if (mark_in_progress()) {
return DuringMark;
} else if (gcs_are_young()) {
return Normal;
} else {
return Mixed;
}
}
};

#endif /* SHARE_VM_GC_G1_G1COLLECTORSTATE_HPP */
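
Seen against the hunks above, the refactoring pattern of this commit is mechanical: every read or write of the old per-class flags now goes through collector_state(). A before/after sketch (an illustrative summary drawn from the hunks in this commit, not additional code):

// Before (flags spread across G1CollectorPolicy / G1CollectedHeap):
//   if (g1_policy()->during_initial_mark_pause()) { ... }
//   set_gcs_are_young(true);
//   _in_marking_window = false;
//
// After (all phase state behind one value object):
//   if (collector_state()->during_initial_mark_pause()) { ... }
//   collector_state()->set_gcs_are_young(true);
//   collector_state()->set_in_marking_window(false);
//
// Composite predicates replace raw conjunctions, e.g.
// "_in_marking_window && !_in_marking_window_im" becomes
// collector_state()->during_concurrent_mark(), and yc_type()
// centralizes the mapping from flags to the GC type reported to tracing.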
Some files were not shown because too many files have changed in this diff.