Merge

commit 9430535189
@@ -210,3 +210,5 @@ d7ad0dfaa41151bd3a9ae46725b0aec3730a9cd0 jdk8-b84
da9a4c9312816451884aa6db6f18be51a07bff13 jdk8-b86
5ebf6c63714de2c9dcf831074086d31daec819df jdk8-b87
e517701a4d0e25ae9c7945bca6e1762a8c5d8aa6 jdk8-b88
4dec41b3c5e3bb616f0c6f15830d940905aa5d16 jdk8-b89
f09ab0c416185e3cba371e81bcb6a16060c90f44 jdk8-b90
@@ -210,3 +210,5 @@ fd1a5574cf68af24bfd52decc37ac6361afb278a jdk8-b78
df9b5240f0a76c91cfe1a5b39da4d08df56e05be jdk8-b86
b9415faa7066a4d3b16d466556d5428446918d95 jdk8-b87
e1a929afcfc492470d50be0b6b0e8dc77d3760b9 jdk8-b88
892a0196d10c67f3a12f0eefb0bb536e423d8868 jdk8-b89
69b773a221b956a3386933ecdbfeccee0edeac47 jdk8-b90
@@ -73,7 +73,7 @@ else
grep ^.PHONY: | head -n 1 | cut -d " " -f 2-)))

$(all_phony_targets):
@$(foreach spec,$(SPEC),($(MAKE) -f NewMakefile.gmk SPEC=$(spec) $(VERBOSE) VERBOSE=$(VERBOSE) $@) &&) true
@$(foreach spec,$(SPEC),($(MAKE) -f NewMakefile.gmk SPEC=$(spec) $(VERBOSE) VERBOSE=$(VERBOSE) LOG_LEVEL=$(LOG_LEVEL) $@) &&) true

endif
endif
@@ -54,9 +54,9 @@ MAKE_ARGS="SPEC=$(SPEC)"

MAKE:=@MAKE@

# Pass along the verbosity setting.
# Pass along the verbosity and log level settings.
ifeq (,$(findstring VERBOSE=,$(MAKE)))
MAKE:=$(MAKE) $(VERBOSE) VERBOSE="$(VERBOSE)"
MAKE:=$(MAKE) $(VERBOSE) VERBOSE="$(VERBOSE)" LOG_LEVEL="$(LOG_LEVEL)"
endif

# No implicit variables or rules!
@@ -240,10 +240,10 @@ clean-docs:
clean-test:
$(call CleanComponent,testoutput)

.PHONY: langtools corba jaxp jaxws hotspot jdk nashorn images overlay-images install
.PHONY: langtools-only corba-only jaxp-only jaxws-only hotspot-only jdk-only nashorn-only images-only overlay-images-only install-only
.PHONY: all test clean dist-clean bootcycle-images start-make
.PHONY: clean-langtools clean-corba clean-jaxp clean-jaxws clean-hotspot clean-jdk clean-nashorn clean-images clean-overlay-images clean-bootcycle-build
.PHONY: langtools corba jaxp jaxws hotspot jdk nashorn images overlay-images install test docs
.PHONY: langtools-only corba-only jaxp-only jaxws-only hotspot-only jdk-only nashorn-only images-only overlay-images-only install-only test-only docs-only
.PHONY: all clean dist-clean bootcycle-images start-make
.PHONY: clean-langtools clean-corba clean-jaxp clean-jaxws clean-hotspot clean-jdk clean-nashorn clean-images clean-docs clean-test clean-overlay-images clean-bootcycle-build
.PHONY: profiles profiles-only profiles-oscheck

FRC: # Force target
@@ -321,11 +321,17 @@ define SetupNativeCompilation

ifneq (,$$($1_DEBUG_SYMBOLS))
ifeq ($(ENABLE_DEBUG_SYMBOLS), true)
# Programs don't get the debug symbols added in the old build. It's not clear if
# this is intentional.
ifeq ($$($1_PROGRAM),)
ifdef OPENJDK
# Always add debug symbols
$1_EXTRA_CFLAGS+=$(CFLAGS_DEBUG_SYMBOLS)
$1_EXTRA_CXXFLAGS+=$(CXXFLAGS_DEBUG_SYMBOLS)
else
# Programs don't get the debug symbols added in the old build. It's not clear if
# this is intentional.
ifeq ($$($1_PROGRAM),)
$1_EXTRA_CFLAGS+=$(CFLAGS_DEBUG_SYMBOLS)
$1_EXTRA_CXXFLAGS+=$(CXXFLAGS_DEBUG_SYMBOLS)
endif
endif
endif
endif
@@ -210,3 +210,5 @@ a45bb25a67c7517b45f00c9682e317f46fecbba9 jdk8-b83
44a8ce4a759f2668ff434661a93ff462ea472478 jdk8-b86
f1709874d55a06bc3d5dfa02dbcdfbc59f4cba34 jdk8-b87
4e3a881ebb1ee96ce0872508b0066d74f310dbfa jdk8-b88
fe4150590ee597f4e125fea950aa3b352622cc2d jdk8-b89
c8286839d0df04aba819ec4bef12b86babccf30e jdk8-b90
@@ -339,3 +339,7 @@ d4c2667846607042370760e23f64c3ab9350e60d jdk8-b87
c4af77d2045476c56fbf3f914b336bb1b7cd18af hs25-b30
8482058e74bc8c1a890e6f3be3eff192dba6ce67 jdk8-b88
4ec91349972255650f97bedfd07e6423e02428cf hs25-b31
9c1fe0b419b40a9ecdd1653cc9af1b6d67a12c46 jdk8-b89
69494caf57908ba2c8efa9eaaa472b4d1875588a hs25-b32
1ae0472ff3a0117b5b019d380ad59fface2fde14 jdk8-b90
b19517cecc2e91636d7c16ba2f35e3d3dc628099 hs25-b33
@@ -199,10 +199,10 @@ static map_info* core_lookup(struct ps_prochandle *ph, uintptr_t addr)
//---------------------------------------------------------------
// Part of the class sharing workaround:
//
// With class sharing, pages are mapped from classes[_g].jsa file.
// With class sharing, pages are mapped from classes.jsa file.
// The read-only class sharing pages are mapped as MAP_SHARED,
// PROT_READ pages. These pages are not dumped into core dump.
// With this workaround, these pages are read from classes[_g].jsa.
// With this workaround, these pages are read from classes.jsa.

// FIXME: !HACK ALERT!
// The format of sharing achive file header is needed to read shared heap
@@ -298,14 +298,12 @@ static bool init_classsharing_workaround(struct ps_prochandle* ph) {
lib_info* lib = ph->libs;
while (lib != NULL) {
// we are iterating over shared objects from the core dump. look for
// libjvm[_g].so.
// libjvm.so.
const char *jvm_name = 0;
#ifdef __APPLE__
if ((jvm_name = strstr(lib->name, "/libjvm.dylib")) != 0 ||
(jvm_name = strstr(lib->name, "/libjvm_g.dylib")) != 0)
if ((jvm_name = strstr(lib->name, "/libjvm.dylib")) != 0)
#else
if ((jvm_name = strstr(lib->name, "/libjvm.so")) != 0 ||
(jvm_name = strstr(lib->name, "/libjvm_g.so")) != 0)
if ((jvm_name = strstr(lib->name, "/libjvm.so")) != 0)
#endif // __APPLE__
{
char classes_jsa[PATH_MAX];
@@ -389,7 +387,7 @@ static bool init_classsharing_workaround(struct ps_prochandle* ph) {
}

ph->core->classes_jsa_fd = fd;
// add read-only maps from classes[_g].jsa to the list of maps
// add read-only maps from classes.jsa to the list of maps
for (m = 0; m < NUM_SHARED_MAPS; m++) {
if (header._space[m]._read_only) {
base = (uintptr_t) header._space[m]._base;
@@ -195,10 +195,10 @@ static map_info* core_lookup(struct ps_prochandle *ph, uintptr_t addr)
//---------------------------------------------------------------
// Part of the class sharing workaround:
//
// With class sharing, pages are mapped from classes[_g].jsa file.
// With class sharing, pages are mapped from classes.jsa file.
// The read-only class sharing pages are mapped as MAP_SHARED,
// PROT_READ pages. These pages are not dumped into core dump.
// With this workaround, these pages are read from classes[_g].jsa.
// With this workaround, these pages are read from classes.jsa.

// FIXME: !HACK ALERT!
// The format of sharing achive file header is needed to read shared heap
@@ -284,10 +284,9 @@ static bool init_classsharing_workaround(struct ps_prochandle* ph) {
lib_info* lib = ph->libs;
while (lib != NULL) {
// we are iterating over shared objects from the core dump. look for
// libjvm[_g].so.
// libjvm.so.
const char *jvm_name = 0;
if ((jvm_name = strstr(lib->name, "/libjvm.so")) != 0 ||
(jvm_name = strstr(lib->name, "/libjvm_g.so")) != 0) {
if ((jvm_name = strstr(lib->name, "/libjvm.so")) != 0) {
char classes_jsa[PATH_MAX];
struct FileMapHeader header;
size_t n = 0;
@@ -371,7 +370,7 @@ static bool init_classsharing_workaround(struct ps_prochandle* ph) {
}

ph->core->classes_jsa_fd = fd;
// add read-only maps from classes[_g].jsa to the list of maps
// add read-only maps from classes.jsa to the list of maps
for (m = 0; m < NUM_SHARED_MAPS; m++) {
if (header._space[m]._read_only) {
base = (uintptr_t) header._space[m]._base;
@@ -589,8 +589,7 @@ init_classsharing_workaround(void *cd, const prmap_t* pmap, const char* obj_name
JNIEnv* env = dbg->env;
jobject this_obj = dbg->this_obj;
const char* jvm_name = 0;
if ((jvm_name = strstr(obj_name, "libjvm.so")) != NULL ||
(jvm_name = strstr(obj_name, "libjvm_g.so")) != NULL) {
if ((jvm_name = strstr(obj_name, "libjvm.so")) != NULL) {
jvm_name = obj_name;
} else {
return 0;

@@ -598,7 +597,7 @@ init_classsharing_workaround(void *cd, const prmap_t* pmap, const char* obj_name

struct ps_prochandle* ph = (struct ps_prochandle*) env->GetLongField(this_obj, p_ps_prochandle_ID);

// initialize classes[_g].jsa file descriptor field.
// initialize classes.jsa file descriptor field.
dbg->env->SetIntField(this_obj, classes_jsa_fd_ID, -1);

// check whether class sharing is on by reading variable "UseSharedSpaces"

@@ -641,7 +640,7 @@ init_classsharing_workaround(void *cd, const prmap_t* pmap, const char* obj_name

print_debug("looking for %s\n", classes_jsa);

// open the classes[_g].jsa
// open the classes.jsa
int fd = libsaproc_open(classes_jsa, O_RDONLY);
if (fd < 0) {
char errMsg[ERR_MSG_SIZE];

@@ -651,7 +650,7 @@ init_classsharing_workaround(void *cd, const prmap_t* pmap, const char* obj_name
print_debug("opened shared archive file %s\n", classes_jsa);
}

// parse classes[_g].jsa
// parse classes.jsa
struct FileMapHeader* pheader = (struct FileMapHeader*) malloc(sizeof(struct FileMapHeader));
if (pheader == NULL) {
close(fd);

@@ -798,8 +797,8 @@ static void attach_internal(JNIEnv* env, jobject this_obj, jstring cmdLine, jboo
if (! isProcess) {
/*
* With class sharing, shared perm. gen heap is allocated in with MAP_SHARED|PROT_READ.
* These pages are mapped from the file "classes[_g].jsa". MAP_SHARED pages are not dumped
* in Solaris core.To read shared heap pages, we have to read classes[_g].jsa file.
* These pages are mapped from the file "classes.jsa". MAP_SHARED pages are not dumped
* in Solaris core.To read shared heap pages, we have to read classes.jsa file.
*/
Pobject_iter(ph, init_classsharing_workaround, &dbg);
exception = env->ExceptionOccurred();
@ -24,20 +24,29 @@
|
||||
|
||||
package sun.jvm.hotspot;
|
||||
|
||||
import java.io.PrintStream;
|
||||
import java.net.*;
|
||||
import java.rmi.*;
|
||||
import sun.jvm.hotspot.debugger.*;
|
||||
import sun.jvm.hotspot.debugger.bsd.*;
|
||||
import sun.jvm.hotspot.debugger.proc.*;
|
||||
import sun.jvm.hotspot.debugger.remote.*;
|
||||
import sun.jvm.hotspot.debugger.windbg.*;
|
||||
import sun.jvm.hotspot.debugger.linux.*;
|
||||
import sun.jvm.hotspot.memory.*;
|
||||
import sun.jvm.hotspot.oops.*;
|
||||
import sun.jvm.hotspot.runtime.*;
|
||||
import sun.jvm.hotspot.types.*;
|
||||
import sun.jvm.hotspot.utilities.*;
|
||||
import java.rmi.RemoteException;
|
||||
|
||||
import sun.jvm.hotspot.debugger.Debugger;
|
||||
import sun.jvm.hotspot.debugger.DebuggerException;
|
||||
import sun.jvm.hotspot.debugger.JVMDebugger;
|
||||
import sun.jvm.hotspot.debugger.MachineDescription;
|
||||
import sun.jvm.hotspot.debugger.MachineDescriptionAMD64;
|
||||
import sun.jvm.hotspot.debugger.MachineDescriptionIA64;
|
||||
import sun.jvm.hotspot.debugger.MachineDescriptionIntelX86;
|
||||
import sun.jvm.hotspot.debugger.MachineDescriptionSPARC32Bit;
|
||||
import sun.jvm.hotspot.debugger.MachineDescriptionSPARC64Bit;
|
||||
import sun.jvm.hotspot.debugger.NoSuchSymbolException;
|
||||
import sun.jvm.hotspot.debugger.bsd.BsdDebuggerLocal;
|
||||
import sun.jvm.hotspot.debugger.linux.LinuxDebuggerLocal;
|
||||
import sun.jvm.hotspot.debugger.proc.ProcDebuggerLocal;
|
||||
import sun.jvm.hotspot.debugger.remote.RemoteDebugger;
|
||||
import sun.jvm.hotspot.debugger.remote.RemoteDebuggerClient;
|
||||
import sun.jvm.hotspot.debugger.remote.RemoteDebuggerServer;
|
||||
import sun.jvm.hotspot.debugger.windbg.WindbgDebuggerLocal;
|
||||
import sun.jvm.hotspot.runtime.VM;
|
||||
import sun.jvm.hotspot.types.TypeDataBase;
|
||||
import sun.jvm.hotspot.utilities.PlatformInfo;
|
||||
import sun.jvm.hotspot.utilities.UnsupportedPlatformException;
|
||||
|
||||
/** <P> This class wraps much of the basic functionality and is the
|
||||
* highest-level factory for VM data structures. It makes it simple
|
||||
@@ -475,7 +484,7 @@ public class HotSpotAgent {
}

private void setupJVMLibNamesSolaris() {
jvmLibNames = new String[] { "libjvm.so", "libjvm_g.so", "gamma_g" };
jvmLibNames = new String[] { "libjvm.so" };
}

//

@@ -507,7 +516,7 @@ public class HotSpotAgent {
}

private void setupJVMLibNamesWin32() {
jvmLibNames = new String[] { "jvm.dll", "jvm_g.dll" };
jvmLibNames = new String[] { "jvm.dll" };
}

//

@@ -547,7 +556,7 @@ public class HotSpotAgent {
}

private void setupJVMLibNamesLinux() {
jvmLibNames = new String[] { "libjvm.so", "libjvm_g.so" };
jvmLibNames = new String[] { "libjvm.so" };
}

//

@@ -572,7 +581,7 @@ public class HotSpotAgent {
}

private void setupJVMLibNamesBsd() {
jvmLibNames = new String[] { "libjvm.so", "libjvm_g.so" };
jvmLibNames = new String[] { "libjvm.so" };
}

//

@@ -595,7 +604,7 @@ public class HotSpotAgent {
}

private void setupJVMLibNamesDarwin() {
jvmLibNames = new String[] { "libjvm.dylib", "libjvm_g.dylib" };
jvmLibNames = new String[] { "libjvm.dylib" };
}

/** Convenience routine which should be called by per-platform
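Taken together, these hunks leave the agent probing exactly one candidate library per platform: the debug-build "_g" variants (libjvm_g.so, jvm_g.dll, libjvm_g.dylib) and the gamma_g launcher are no longer searched. A minimal, self-contained Java sketch of the simplified lookup; the osName keys are an assumption used only for illustration:

    import java.util.Map;

    // Hypothetical helper mirroring the slimmed-down setupJVMLibNames* methods:
    // one library name per platform, no "_g" debug variants.
    public class JvmLibNames {
        private static final Map<String, String[]> NAMES = Map.of(
                "solaris", new String[] { "libjvm.so" },
                "linux",   new String[] { "libjvm.so" },
                "bsd",     new String[] { "libjvm.so" },
                "darwin",  new String[] { "libjvm.dylib" },
                "win32",   new String[] { "jvm.dll" });

        public static String[] forPlatform(String osName) {
            String[] names = NAMES.get(osName);
            if (names == null) {
                throw new IllegalArgumentException("unsupported platform: " + osName);
            }
            return names;
        }

        public static void main(String[] args) {
            System.out.println(String.join(", ", forPlatform("linux"))); // prints: libjvm.so
        }
    }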
@@ -24,9 +24,9 @@

package sun.jvm.hotspot;

import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.types.*;
import sun.jvm.hotspot.types.basic.*;
import sun.jvm.hotspot.debugger.SymbolLookup;
import sun.jvm.hotspot.types.Type;
import sun.jvm.hotspot.types.basic.BasicVtblAccess;

public class LinuxVtblAccess extends BasicVtblAccess {
private String vt;

@@ -35,8 +35,7 @@ public class LinuxVtblAccess extends BasicVtblAccess {
String[] dllNames) {
super(symbolLookup, dllNames);

if (symbolLookup.lookup("libjvm.so", "__vt_10JavaThread") != null ||
symbolLookup.lookup("libjvm_g.so", "__vt_10JavaThread") != null) {
if (symbolLookup.lookup("libjvm.so", "__vt_10JavaThread") != null) {
// old C++ ABI
vt = "__vt_";
} else {
@@ -97,8 +97,8 @@ public class ciMethod extends ciMetadata {
holder.getName().asString() + " " +
OopUtilities.escapeString(method.getName().asString()) + " " +
method.getSignature().asString() + " " +
method.getInvocationCounter() + " " +
method.getBackedgeCounter() + " " +
method.getInvocationCount() + " " +
method.getBackedgeCount() + " " +
interpreterInvocationCount() + " " +
interpreterThrowoutCount() + " " +
instructionsSize());
@ -24,17 +24,28 @@
|
||||
|
||||
package sun.jvm.hotspot.debugger.bsd;
|
||||
|
||||
import java.io.*;
|
||||
import java.net.*;
|
||||
import java.util.*;
|
||||
import sun.jvm.hotspot.debugger.*;
|
||||
import sun.jvm.hotspot.debugger.x86.*;
|
||||
import sun.jvm.hotspot.debugger.cdbg.*;
|
||||
import sun.jvm.hotspot.utilities.*;
|
||||
import sun.jvm.hotspot.runtime.VM;
|
||||
import sun.jvm.hotspot.runtime.Threads;
|
||||
import java.io.File;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
|
||||
import sun.jvm.hotspot.debugger.Address;
|
||||
import sun.jvm.hotspot.debugger.DebuggerBase;
|
||||
import sun.jvm.hotspot.debugger.DebuggerException;
|
||||
import sun.jvm.hotspot.debugger.DebuggerUtilities;
|
||||
import sun.jvm.hotspot.debugger.MachineDescription;
|
||||
import sun.jvm.hotspot.debugger.NotInHeapException;
|
||||
import sun.jvm.hotspot.debugger.OopHandle;
|
||||
import sun.jvm.hotspot.debugger.ReadResult;
|
||||
import sun.jvm.hotspot.debugger.ThreadProxy;
|
||||
import sun.jvm.hotspot.debugger.UnalignedAddressException;
|
||||
import sun.jvm.hotspot.debugger.UnmappedAddressException;
|
||||
import sun.jvm.hotspot.debugger.cdbg.CDebugger;
|
||||
import sun.jvm.hotspot.debugger.cdbg.ClosestSymbol;
|
||||
import sun.jvm.hotspot.debugger.cdbg.LoadObject;
|
||||
import sun.jvm.hotspot.runtime.JavaThread;
|
||||
import java.lang.reflect.*;
|
||||
import sun.jvm.hotspot.runtime.Threads;
|
||||
import sun.jvm.hotspot.runtime.VM;
|
||||
import sun.jvm.hotspot.utilities.PlatformInfo;
|
||||
|
||||
/** <P> An implementation of the JVMDebugger interface. The basic debug
|
||||
facilities are implemented through ptrace interface in the JNI code
|
||||
@@ -246,10 +257,8 @@ public class BsdDebuggerLocal extends DebuggerBase implements BsdDebugger {
/* called from attach methods */
private void findABIVersion() throws DebuggerException {
String libjvmName = isDarwin ? "libjvm.dylib" : "libjvm.so";
String libjvm_gName = isDarwin? "libjvm_g.dylib" : "libjvm_g.so";
String javaThreadVt = isDarwin ? "_vt_10JavaThread" : "__vt_10JavaThread";
if (lookupByName0(libjvmName, javaThreadVt) != 0 ||
lookupByName0(libjvm_gName, javaThreadVt) != 0) {
if (lookupByName0(libjvmName, javaThreadVt) != 0) {
// old C++ ABI
useGCC32ABI = false;
} else {
@ -24,14 +24,25 @@
|
||||
|
||||
package sun.jvm.hotspot.debugger.linux;
|
||||
|
||||
import java.io.*;
|
||||
import java.net.*;
|
||||
import java.util.*;
|
||||
import sun.jvm.hotspot.debugger.*;
|
||||
import sun.jvm.hotspot.debugger.x86.*;
|
||||
import sun.jvm.hotspot.debugger.cdbg.*;
|
||||
import sun.jvm.hotspot.utilities.*;
|
||||
import java.lang.reflect.*;
|
||||
import java.io.File;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
|
||||
import sun.jvm.hotspot.debugger.Address;
|
||||
import sun.jvm.hotspot.debugger.DebuggerBase;
|
||||
import sun.jvm.hotspot.debugger.DebuggerException;
|
||||
import sun.jvm.hotspot.debugger.DebuggerUtilities;
|
||||
import sun.jvm.hotspot.debugger.MachineDescription;
|
||||
import sun.jvm.hotspot.debugger.NotInHeapException;
|
||||
import sun.jvm.hotspot.debugger.OopHandle;
|
||||
import sun.jvm.hotspot.debugger.ReadResult;
|
||||
import sun.jvm.hotspot.debugger.ThreadProxy;
|
||||
import sun.jvm.hotspot.debugger.UnalignedAddressException;
|
||||
import sun.jvm.hotspot.debugger.UnmappedAddressException;
|
||||
import sun.jvm.hotspot.debugger.cdbg.CDebugger;
|
||||
import sun.jvm.hotspot.debugger.cdbg.ClosestSymbol;
|
||||
import sun.jvm.hotspot.debugger.cdbg.LoadObject;
|
||||
import sun.jvm.hotspot.utilities.PlatformInfo;
|
||||
|
||||
/** <P> An implementation of the JVMDebugger interface. The basic debug
|
||||
facilities are implemented through ptrace interface in the JNI code
|
||||
@@ -238,8 +249,7 @@ public class LinuxDebuggerLocal extends DebuggerBase implements LinuxDebugger {

/* called from attach methods */
private void findABIVersion() throws DebuggerException {
if (lookupByName0("libjvm.so", "__vt_10JavaThread") != 0 ||
lookupByName0("libjvm_g.so", "__vt_10JavaThread") != 0) {
if (lookupByName0("libjvm.so", "__vt_10JavaThread") != 0) {
// old C++ ABI
useGCC32ABI = false;
} else {
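Both debugger front ends now make the old-versus-new C++ ABI decision by probing a single library for the __vt_10JavaThread vtable symbol; the libjvm_g fallback probe is gone. A hedged sketch of that decision, with a hypothetical SymbolLookup functional interface standing in for the native lookupByName0 call:

    // Stand-in for the JNI lookup: returns a non-zero address when the symbol
    // exists in the named library, 0 otherwise. This interface is illustrative,
    // not part of the SA code base.
    interface SymbolLookup {
        long lookup(String library, String symbol);
    }

    class AbiProbe {
        /** The old GCC 2.x ABI mangles vtables as "__vt_..."; the newer ABI does not. */
        static boolean usesOldCxxAbi(SymbolLookup lookup, boolean darwin) {
            String libjvm = darwin ? "libjvm.dylib" : "libjvm.so";
            String javaThreadVt = darwin ? "_vt_10JavaThread" : "__vt_10JavaThread";
            // Only the product library is probed now; the libjvm_g fallback is gone.
            return lookup.lookup(libjvm, javaThreadVt) != 0;
        }

        public static void main(String[] args) {
            SymbolLookup absent = (lib, sym) -> 0L;           // pretend the symbol is missing
            System.out.println(usesOldCxxAbi(absent, false)); // false: assume the new ABI
        }
    }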
@ -24,15 +24,21 @@
|
||||
|
||||
package sun.jvm.hotspot.oops;
|
||||
|
||||
import java.io.*;
|
||||
import java.util.*;
|
||||
import sun.jvm.hotspot.code.*;
|
||||
import sun.jvm.hotspot.debugger.*;
|
||||
import sun.jvm.hotspot.interpreter.*;
|
||||
import sun.jvm.hotspot.memory.*;
|
||||
import sun.jvm.hotspot.runtime.*;
|
||||
import sun.jvm.hotspot.types.*;
|
||||
import sun.jvm.hotspot.utilities.*;
|
||||
import java.io.PrintStream;
|
||||
import java.util.Observable;
|
||||
import java.util.Observer;
|
||||
|
||||
import sun.jvm.hotspot.code.NMethod;
|
||||
import sun.jvm.hotspot.debugger.Address;
|
||||
import sun.jvm.hotspot.interpreter.OopMapCacheEntry;
|
||||
import sun.jvm.hotspot.runtime.SignatureConverter;
|
||||
import sun.jvm.hotspot.runtime.VM;
|
||||
import sun.jvm.hotspot.runtime.VMObjectFactory;
|
||||
import sun.jvm.hotspot.types.AddressField;
|
||||
import sun.jvm.hotspot.types.Type;
|
||||
import sun.jvm.hotspot.types.TypeDataBase;
|
||||
import sun.jvm.hotspot.types.WrongTypeException;
|
||||
import sun.jvm.hotspot.utilities.Assert;
|
||||
|
||||
// A Method represents a Java method
|
||||
|
||||
@@ -132,11 +138,13 @@ public class Method extends Metadata {
public long getAccessFlags() { return accessFlags.getValue(this); }
public long getCodeSize() { return getConstMethod().getCodeSize(); }
public long getVtableIndex() { return vtableIndex.getValue(this); }
public long getInvocationCounter() {
return getMethodCounters().getInvocationCounter();
public long getInvocationCount() {
MethodCounters mc = getMethodCounters();
return mc == null ? 0 : mc.getInvocationCounter();
}
public long getBackedgeCounter() {
return getMethodCounters().getBackedgeCounter();
public long getBackedgeCount() {
MethodCounters mc = getMethodCounters();
return mc == null ? 0 : mc.getBackedgeCounter();
}

// get associated compiled native method, if available, else return null.

@@ -349,8 +357,8 @@ public class Method extends Metadata {
holder.getName().asString() + " " +
OopUtilities.escapeString(getName().asString()) + " " +
getSignature().asString() + " " +
getInvocationCounter() + " " +
getBackedgeCounter() + " " +
getInvocationCount() + " " +
getBackedgeCount() + " " +
interpreterInvocationCount() + " " +
interpreterThrowoutCount() + " " +
code_size);
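The rename is not cosmetic: the MethodCounters object is allocated lazily by the VM, so the old accessors could dereference a null getMethodCounters() result. The new getInvocationCount()/getBackedgeCount() report zero in that case. A self-contained sketch of the pattern, using a hypothetical miniature of the two classes:

    // Hypothetical miniature of the SA classes, showing the null-safe pattern.
    class MethodCounters {
        private final long invocationCounter;
        private final long backedgeCounter;
        MethodCounters(long inv, long back) { invocationCounter = inv; backedgeCounter = back; }
        long getInvocationCounter() { return invocationCounter; }
        long getBackedgeCounter()   { return backedgeCounter; }
    }

    class MethodSketch {
        private final MethodCounters counters;  // may be null: allocated lazily by the VM

        MethodSketch(MethodCounters counters) { this.counters = counters; }

        long getInvocationCount() {
            MethodCounters mc = counters;
            return mc == null ? 0 : mc.getInvocationCounter();
        }

        long getBackedgeCount() {
            MethodCounters mc = counters;
            return mc == null ? 0 : mc.getBackedgeCounter();
        }

        public static void main(String[] args) {
            System.out.println(new MethodSketch(null).getInvocationCount());                    // 0, no NPE
            System.out.println(new MethodSketch(new MethodCounters(42, 7)).getBackedgeCount()); // 7
        }
    }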
@@ -316,8 +316,8 @@ public class MethodData extends Metadata {
int iic = method.interpreterInvocationCount();
if (mileage < iic) mileage = iic;

long ic = method.getInvocationCounter();
long bc = method.getBackedgeCounter();
long ic = method.getInvocationCount();
long bc = method.getBackedgeCount();

long icval = ic >> 3;
if ((ic & 4) != 0) icval += CompileThreshold;
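The mileage estimate itself is unchanged; it simply reads the raw counters through the new null-safe accessors. The decoding step shown above works because HotSpot keeps state and an overflow (carry) flag in the low bits of each counter. A worked example of that arithmetic, with CompileThreshold = 10000 assumed purely for illustration:

    // Decode a raw HotSpot invocation/backedge counter the way the hunk above does:
    // the count is the value shifted right by 3, and a set bit 2 adds CompileThreshold.
    class CounterDecodeSketch {
        static final long COMPILE_THRESHOLD = 10_000;  // assumed value, for illustration only

        static long decode(long rawCounter) {
            long val = rawCounter >> 3;
            if ((rawCounter & 4) != 0) {
                val += COMPILE_THRESHOLD;
            }
            return val;
        }

        public static void main(String[] args) {
            System.out.println(decode(80));  // 10     (bit 2 clear)
            System.out.println(decode(84));  // 10010  (bit 2 set: 10 + 10000)
        }
    }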
@ -151,32 +151,43 @@ else
|
||||
$(MAKE_ARGS) BUILD_FLAVOR=product docs
|
||||
endif
|
||||
|
||||
# Output directories
|
||||
C1_DIR =$(OUTPUTDIR)/$(VM_PLATFORM)_compiler1
|
||||
C2_DIR =$(OUTPUTDIR)/$(VM_PLATFORM)_compiler2
|
||||
MINIMAL1_DIR=$(OUTPUTDIR)/$(VM_PLATFORM)_minimal1
|
||||
ZERO_DIR =$(OUTPUTDIR)/$(VM_PLATFORM)_zero
|
||||
SHARK_DIR =$(OUTPUTDIR)/$(VM_PLATFORM)_shark
|
||||
|
||||
# Build variation of hotspot
|
||||
$(C1_VM_TARGETS):
|
||||
$(CD) $(GAMMADIR)/make; \
|
||||
$(MAKE) BUILD_FLAVOR=$(@:%1=%) VM_TARGET=$@ generic_build1 $(ALT_OUT)
|
||||
$(MAKE) BUILD_DIR=$(C1_DIR) BUILD_FLAVOR=$(@:%1=%) VM_TARGET=$@ generic_build1 $(ALT_OUT)
|
||||
|
||||
$(C2_VM_TARGETS):
|
||||
$(CD) $(GAMMADIR)/make; \
|
||||
$(MAKE) BUILD_FLAVOR=$@ VM_TARGET=$@ generic_build2 $(ALT_OUT)
|
||||
$(MAKE) BUILD_DIR=$(C2_DIR) BUILD_FLAVOR=$@ VM_TARGET=$@ generic_build2 $(ALT_OUT)
|
||||
|
||||
$(ZERO_VM_TARGETS):
|
||||
$(CD) $(GAMMADIR)/make; \
|
||||
$(MAKE) BUILD_FLAVOR=$(@:%zero=%) VM_TARGET=$@ \
|
||||
generic_buildzero $(ALT_OUT)
|
||||
$(MAKE) BUILD_DIR=$(ZERO_DIR) BUILD_FLAVOR=$(@:%zero=%) VM_TARGET=$@ generic_buildzero $(ALT_OUT)
|
||||
|
||||
$(SHARK_VM_TARGETS):
|
||||
$(CD) $(GAMMADIR)/make; \
|
||||
$(MAKE) BUILD_FLAVOR=$(@:%shark=%) VM_TARGET=$@ \
|
||||
generic_buildshark $(ALT_OUT)
|
||||
$(MAKE) BUILD_DIR=$(SHARK_DIR) BUILD_FLAVOR=$(@:%shark=%) VM_TARGET=$@ generic_buildshark $(ALT_OUT)
|
||||
|
||||
$(MINIMAL1_VM_TARGETS):
|
||||
$(CD) $(GAMMADIR)/make; \
|
||||
$(MAKE) BUILD_FLAVOR=$(@:%minimal1=%) VM_TARGET=$@ \
|
||||
generic_buildminimal1 $(ALT_OUT)
|
||||
$(MAKE) BUILD_DIR=$(MINIMAL1_DIR) BUILD_FLAVOR=$(@:%minimal1=%) VM_TARGET=$@ generic_buildminimal1 $(ALT_OUT)
|
||||
|
||||
# Install hotspot script in build directory
|
||||
HOTSPOT_SCRIPT=$(BUILD_DIR)/$(BUILD_FLAVOR)/hotspot
|
||||
$(HOTSPOT_SCRIPT): $(GAMMADIR)/make/hotspot.script
|
||||
$(QUIETLY) $(MKDIR) -p $(BUILD_DIR)/$(BUILD_FLAVOR)
|
||||
$(QUIETLY) cat $< | sed -e 's|@@LIBARCH@@|$(LIBARCH)|g' | sed -e 's|@@JDK_IMPORT_PATH@@|$(JDK_IMPORT_PATH)|g' > $@
|
||||
$(QUIETLY) chmod +x $@
|
||||
|
||||
# Build compiler1 (client) rule, different for platforms
|
||||
generic_build1:
|
||||
generic_build1: $(HOTSPOT_SCRIPT)
|
||||
$(MKDIR) -p $(OUTPUTDIR)
|
||||
ifeq ($(OSNAME),windows)
|
||||
ifeq ($(ARCH_DATA_MODEL), 32)
|
||||
@ -201,7 +212,7 @@ else
|
||||
endif
|
||||
|
||||
# Build compiler2 (server) rule, different for platforms
|
||||
generic_build2:
|
||||
generic_build2: $(HOTSPOT_SCRIPT)
|
||||
$(MKDIR) -p $(OUTPUTDIR)
|
||||
ifeq ($(OSNAME),windows)
|
||||
$(CD) $(OUTPUTDIR); \
|
||||
@ -217,19 +228,19 @@ else
|
||||
$(MAKE_ARGS) $(VM_TARGET)
|
||||
endif
|
||||
|
||||
generic_buildzero:
|
||||
generic_buildzero: $(HOTSPOT_SCRIPT)
|
||||
$(MKDIR) -p $(OUTPUTDIR)
|
||||
$(CD) $(OUTPUTDIR); \
|
||||
$(MAKE) -f $(ABS_OS_MAKEFILE) \
|
||||
$(MAKE_ARGS) $(VM_TARGET)
|
||||
|
||||
generic_buildshark:
|
||||
generic_buildshark: $(HOTSPOT_SCRIPT)
|
||||
$(MKDIR) -p $(OUTPUTDIR)
|
||||
$(CD) $(OUTPUTDIR); \
|
||||
$(MAKE) -f $(ABS_OS_MAKEFILE) \
|
||||
$(MAKE_ARGS) $(VM_TARGET)
|
||||
|
||||
generic_buildminimal1:
|
||||
generic_buildminimal1: $(HOTSPOT_SCRIPT)
|
||||
ifeq ($(JVM_VARIANT_MINIMAL1),true)
|
||||
$(MKDIR) -p $(OUTPUTDIR)
|
||||
ifeq ($(ARCH_DATA_MODEL), 32)
|
||||
@ -252,224 +263,210 @@ endif
|
||||
|
||||
# Export file rule
|
||||
generic_export: $(EXPORT_LIST)
|
||||
|
||||
export_product:
|
||||
$(MAKE) BUILD_FLAVOR=$(@:export_%=%) VM_SUBDIR=$(@:export_%=%) \
|
||||
generic_export
|
||||
$(MAKE) BUILD_FLAVOR=$(@:export_%=%) generic_export
|
||||
export_fastdebug:
|
||||
$(MAKE) BUILD_FLAVOR=$(@:export_%=%) VM_SUBDIR=$(@:export_%=%) \
|
||||
EXPORT_SUBDIR=/$(@:export_%=%) \
|
||||
generic_export
|
||||
$(MAKE) BUILD_FLAVOR=$(@:export_%=%) EXPORT_SUBDIR=/$(@:export_%=%) generic_export
|
||||
export_debug:
|
||||
$(MAKE) BUILD_FLAVOR=$(@:export_%=%) VM_SUBDIR=$(@:export_%=%) \
|
||||
EXPORT_SUBDIR=/$(@:export_%=%) \
|
||||
generic_export
|
||||
$(MAKE) BUILD_FLAVOR=$(@:export_%=%) EXPORT_SUBDIR=/$(@:export_%=%) generic_export
|
||||
export_optimized:
|
||||
$(MAKE) BUILD_FLAVOR=$(@:export_%=%) VM_SUBDIR=$(@:export_%=%) \
|
||||
EXPORT_SUBDIR=/$(@:export_%=%) \
|
||||
generic_export
|
||||
$(MAKE) BUILD_FLAVOR=$(@:export_%=%) EXPORT_SUBDIR=/$(@:export_%=%) generic_export
|
||||
|
||||
export_product_jdk::
|
||||
$(MAKE) BUILD_FLAVOR=$(@:export_%_jdk=%) \
|
||||
VM_SUBDIR=$(@:export_%_jdk=%) ALT_EXPORT_PATH=$(JDK_IMAGE_DIR) \
|
||||
generic_export
|
||||
$(MAKE) BUILD_FLAVOR=$(@:export_%_jdk=%) ALT_EXPORT_PATH=$(JDK_IMAGE_DIR) generic_export
|
||||
export_optimized_jdk::
|
||||
$(MAKE) BUILD_FLAVOR=$(@:export_%_jdk=%) \
|
||||
VM_SUBDIR=$(@:export_%_jdk=%) ALT_EXPORT_PATH=$(JDK_IMAGE_DIR) \
|
||||
generic_export
|
||||
$(MAKE) BUILD_FLAVOR=$(@:export_%_jdk=%) ALT_EXPORT_PATH=$(JDK_IMAGE_DIR) generic_export
|
||||
export_fastdebug_jdk::
|
||||
$(MAKE) BUILD_FLAVOR=$(@:export_%_jdk=%) \
|
||||
VM_SUBDIR=$(@:export_%_jdk=%) \
|
||||
ALT_EXPORT_PATH=$(JDK_IMAGE_DIR)/$(@:export_%_jdk=%) \
|
||||
generic_export
|
||||
$(MAKE) BUILD_FLAVOR=$(@:export_%_jdk=%) ALT_EXPORT_PATH=$(JDK_IMAGE_DIR)/$(@:export_%_jdk=%) generic_export
|
||||
export_debug_jdk::
|
||||
$(MAKE) BUILD_FLAVOR=$(@:export_%_jdk=%) VM_SUBDIR=$(@:export_%_jdk=%) \
|
||||
ALT_EXPORT_PATH=$(JDK_IMAGE_DIR)/$(@:export_%_jdk=%) \
|
||||
generic_export
|
||||
$(MAKE) BUILD_FLAVOR=$(@:export_%_jdk=%) ALT_EXPORT_PATH=$(JDK_IMAGE_DIR)/$(@:export_%_jdk=%) generic_export
|
||||
|
||||
# Export file copy rules
|
||||
XUSAGE=$(HS_SRC_DIR)/share/vm/Xusage.txt
|
||||
DOCS_DIR =$(OUTPUTDIR)/$(VM_PLATFORM)_docs
|
||||
C1_DIR =$(OUTPUTDIR)/$(VM_PLATFORM)_compiler1/$(VM_SUBDIR)
|
||||
C2_DIR =$(OUTPUTDIR)/$(VM_PLATFORM)_compiler2/$(VM_SUBDIR)
|
||||
MINIMAL1_DIR=$(OUTPUTDIR)/$(VM_PLATFORM)_minimal1/$(VM_SUBDIR)
|
||||
ZERO_DIR =$(OUTPUTDIR)/$(VM_PLATFORM)_zero/$(VM_SUBDIR)
|
||||
SHARK_DIR =$(OUTPUTDIR)/$(VM_PLATFORM)_shark/$(VM_SUBDIR)
|
||||
DOCS_DIR=$(OUTPUTDIR)/$(VM_PLATFORM)_docs
|
||||
C1_BUILD_DIR =$(C1_DIR)/$(BUILD_FLAVOR)
|
||||
C2_BUILD_DIR =$(C2_DIR)/$(BUILD_FLAVOR)
|
||||
MINIMAL1_BUILD_DIR=$(MINIMAL1_DIR)/$(BUILD_FLAVOR)
|
||||
ZERO_BUILD_DIR =$(ZERO_DIR)/$(BUILD_FLAVOR)
|
||||
SHARK_BUILD_DIR =$(SHARK_DIR)/$(BUILD_FLAVOR)
|
||||
|
||||
# Server (C2)
|
||||
ifeq ($(JVM_VARIANT_SERVER), true)
|
||||
# Common
|
||||
$(EXPORT_SERVER_DIR)/%.diz: $(C2_DIR)/%.diz
|
||||
$(EXPORT_SERVER_DIR)/%.diz: $(C2_BUILD_DIR)/%.diz
|
||||
$(install-file)
|
||||
$(EXPORT_LIB_DIR)/%.jar: $(C2_DIR)/../generated/%.jar
|
||||
$(EXPORT_LIB_DIR)/%.jar: $(C2_BUILD_DIR)/../generated/%.jar
|
||||
$(install-file)
|
||||
$(EXPORT_INCLUDE_DIR)/%: $(C2_DIR)/../generated/jvmtifiles/%
|
||||
$(EXPORT_INCLUDE_DIR)/%: $(C2_BUILD_DIR)/../generated/jvmtifiles/%
|
||||
$(install-file)
|
||||
# Windows
|
||||
$(EXPORT_SERVER_DIR)/%.dll: $(C2_DIR)/%.dll
|
||||
$(EXPORT_SERVER_DIR)/%.dll: $(C2_BUILD_DIR)/%.dll
|
||||
$(install-file)
|
||||
$(EXPORT_SERVER_DIR)/%.pdb: $(C2_DIR)/%.pdb
|
||||
$(EXPORT_SERVER_DIR)/%.pdb: $(C2_BUILD_DIR)/%.pdb
|
||||
$(install-file)
|
||||
$(EXPORT_SERVER_DIR)/%.map: $(C2_DIR)/%.map
|
||||
$(EXPORT_SERVER_DIR)/%.map: $(C2_BUILD_DIR)/%.map
|
||||
$(install-file)
|
||||
$(EXPORT_LIB_DIR)/%.lib: $(C2_DIR)/%.lib
|
||||
$(EXPORT_LIB_DIR)/%.lib: $(C2_BUILD_DIR)/%.lib
|
||||
$(install-file)
|
||||
$(EXPORT_JRE_BIN_DIR)/%.diz: $(C2_DIR)/%.diz
|
||||
$(EXPORT_JRE_BIN_DIR)/%.diz: $(C2_BUILD_DIR)/%.diz
|
||||
$(install-file)
|
||||
$(EXPORT_JRE_BIN_DIR)/%.dll: $(C2_DIR)/%.dll
|
||||
$(EXPORT_JRE_BIN_DIR)/%.dll: $(C2_BUILD_DIR)/%.dll
|
||||
$(install-file)
|
||||
$(EXPORT_JRE_BIN_DIR)/%.pdb: $(C2_DIR)/%.pdb
|
||||
$(EXPORT_JRE_BIN_DIR)/%.pdb: $(C2_BUILD_DIR)/%.pdb
|
||||
$(install-file)
|
||||
$(EXPORT_JRE_BIN_DIR)/%.map: $(C2_DIR)/%.map
|
||||
$(EXPORT_JRE_BIN_DIR)/%.map: $(C2_BUILD_DIR)/%.map
|
||||
$(install-file)
|
||||
# Unix
|
||||
$(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(C2_DIR)/%.$(LIBRARY_SUFFIX)
|
||||
$(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(C2_BUILD_DIR)/%.$(LIBRARY_SUFFIX)
|
||||
$(install-file)
|
||||
$(EXPORT_SERVER_DIR)/%.$(LIBRARY_SUFFIX): $(C2_DIR)/%.$(LIBRARY_SUFFIX)
|
||||
$(EXPORT_SERVER_DIR)/%.$(LIBRARY_SUFFIX): $(C2_BUILD_DIR)/%.$(LIBRARY_SUFFIX)
|
||||
$(install-file)
|
||||
$(EXPORT_SERVER_DIR)/64/%.$(LIBRARY_SUFFIX): $(C2_DIR)/%.$(LIBRARY_SUFFIX)
|
||||
$(EXPORT_SERVER_DIR)/64/%.$(LIBRARY_SUFFIX): $(C2_BUILD_DIR)/%.$(LIBRARY_SUFFIX)
|
||||
$(install-file)
|
||||
$(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo: $(C2_DIR)/%.debuginfo
|
||||
$(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo: $(C2_BUILD_DIR)/%.debuginfo
|
||||
$(install-file)
|
||||
$(EXPORT_SERVER_DIR)/%.debuginfo: $(C2_DIR)/%.debuginfo
|
||||
$(EXPORT_SERVER_DIR)/%.debuginfo: $(C2_BUILD_DIR)/%.debuginfo
|
||||
$(install-file)
|
||||
$(EXPORT_SERVER_DIR)/64/%.debuginfo: $(C2_DIR)/%.debuginfo
|
||||
$(EXPORT_SERVER_DIR)/64/%.debuginfo: $(C2_BUILD_DIR)/%.debuginfo
|
||||
$(install-file)
|
||||
$(EXPORT_JRE_LIB_ARCH_DIR)/%.diz: $(C2_DIR)/%.diz
|
||||
$(EXPORT_JRE_LIB_ARCH_DIR)/%.diz: $(C2_BUILD_DIR)/%.diz
|
||||
$(install-file)
|
||||
$(EXPORT_SERVER_DIR)/64/%.diz: $(C2_DIR)/%.diz
|
||||
$(EXPORT_SERVER_DIR)/64/%.diz: $(C2_BUILD_DIR)/%.diz
|
||||
$(install-file)
|
||||
endif
|
||||
|
||||
# Client (C1)
|
||||
ifeq ($(JVM_VARIANT_CLIENT), true)
|
||||
# Common
|
||||
$(EXPORT_CLIENT_DIR)/%.diz: $(C1_DIR)/%.diz
|
||||
$(EXPORT_CLIENT_DIR)/%.diz: $(C1_BUILD_DIR)/%.diz
|
||||
$(install-file)
|
||||
$(EXPORT_LIB_DIR)/%.jar: $(C1_DIR)/../generated/%.jar
|
||||
$(EXPORT_LIB_DIR)/%.jar: $(C1_BUILD_DIR)/../generated/%.jar
|
||||
$(install-file)
|
||||
$(EXPORT_INCLUDE_DIR)/%: $(C1_DIR)/../generated/jvmtifiles/%
|
||||
$(EXPORT_INCLUDE_DIR)/%: $(C1_BUILD_DIR)/../generated/jvmtifiles/%
|
||||
$(install-file)
|
||||
# Windows
|
||||
$(EXPORT_CLIENT_DIR)/%.dll: $(C1_DIR)/%.dll
|
||||
$(EXPORT_CLIENT_DIR)/%.dll: $(C1_BUILD_DIR)/%.dll
|
||||
$(install-file)
|
||||
$(EXPORT_CLIENT_DIR)/%.pdb: $(C1_DIR)/%.pdb
|
||||
$(EXPORT_CLIENT_DIR)/%.pdb: $(C1_BUILD_DIR)/%.pdb
|
||||
$(install-file)
|
||||
$(EXPORT_CLIENT_DIR)/%.map: $(C1_DIR)/%.map
|
||||
$(EXPORT_CLIENT_DIR)/%.map: $(C1_BUILD_DIR)/%.map
|
||||
$(install-file)
|
||||
$(EXPORT_LIB_DIR)/%.lib: $(C1_DIR)/%.lib
|
||||
$(EXPORT_LIB_DIR)/%.lib: $(C1_BUILD_DIR)/%.lib
|
||||
$(install-file)
|
||||
$(EXPORT_JRE_BIN_DIR)/%.diz: $(C1_DIR)/%.diz
|
||||
$(EXPORT_JRE_BIN_DIR)/%.diz: $(C1_BUILD_DIR)/%.diz
|
||||
$(install-file)
|
||||
$(EXPORT_JRE_BIN_DIR)/%.dll: $(C1_DIR)/%.dll
|
||||
$(EXPORT_JRE_BIN_DIR)/%.dll: $(C1_BUILD_DIR)/%.dll
|
||||
$(install-file)
|
||||
$(EXPORT_JRE_BIN_DIR)/%.pdb: $(C1_DIR)/%.pdb
|
||||
$(EXPORT_JRE_BIN_DIR)/%.pdb: $(C1_BUILD_DIR)/%.pdb
|
||||
$(install-file)
|
||||
$(EXPORT_JRE_BIN_DIR)/%.map: $(C1_DIR)/%.map
|
||||
$(EXPORT_JRE_BIN_DIR)/%.map: $(C1_BUILD_DIR)/%.map
|
||||
$(install-file)
|
||||
# Unix
|
||||
$(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(C1_DIR)/%.$(LIBRARY_SUFFIX)
|
||||
$(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(C1_BUILD_DIR)/%.$(LIBRARY_SUFFIX)
|
||||
$(install-file)
|
||||
$(EXPORT_CLIENT_DIR)/%.$(LIBRARY_SUFFIX): $(C1_DIR)/%.$(LIBRARY_SUFFIX)
|
||||
$(EXPORT_CLIENT_DIR)/%.$(LIBRARY_SUFFIX): $(C1_BUILD_DIR)/%.$(LIBRARY_SUFFIX)
|
||||
$(install-file)
|
||||
$(EXPORT_CLIENT_DIR)/64/%.$(LIBRARY_SUFFIX): $(C1_DIR)/%.$(LIBRARY_SUFFIX)
|
||||
$(EXPORT_CLIENT_DIR)/64/%.$(LIBRARY_SUFFIX): $(C1_BUILD_DIR)/%.$(LIBRARY_SUFFIX)
|
||||
$(install-file)
|
||||
$(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo: $(C1_DIR)/%.debuginfo
|
||||
$(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo: $(C1_BUILD_DIR)/%.debuginfo
|
||||
$(install-file)
|
||||
$(EXPORT_CLIENT_DIR)/%.debuginfo: $(C1_DIR)/%.debuginfo
|
||||
$(EXPORT_CLIENT_DIR)/%.debuginfo: $(C1_BUILD_DIR)/%.debuginfo
|
||||
$(install-file)
|
||||
$(EXPORT_CLIENT_DIR)/64/%.debuginfo: $(C1_DIR)/%.debuginfo
|
||||
$(EXPORT_CLIENT_DIR)/64/%.debuginfo: $(C1_BUILD_DIR)/%.debuginfo
|
||||
$(install-file)
|
||||
$(EXPORT_JRE_LIB_ARCH_DIR)/%.diz: $(C1_DIR)/%.diz
|
||||
$(EXPORT_JRE_LIB_ARCH_DIR)/%.diz: $(C1_BUILD_DIR)/%.diz
|
||||
$(install-file)
|
||||
$(EXPORT_CLIENT_DIR)/64/%.diz: $(C1_DIR)/%.diz
|
||||
$(EXPORT_CLIENT_DIR)/64/%.diz: $(C1_BUILD_DIR)/%.diz
|
||||
$(install-file)
|
||||
endif
|
||||
|
||||
# Minimal1
|
||||
ifeq ($(JVM_VARIANT_MINIMAL1), true)
|
||||
# Common
|
||||
$(EXPORT_MINIMAL_DIR)/%.diz: $(MINIMAL1_DIR)/%.diz
|
||||
$(EXPORT_MINIMAL_DIR)/%.diz: $(MINIMAL1_BUILD_DIR)/%.diz
|
||||
$(install-file)
|
||||
$(EXPORT_LIB_DIR)/%.jar: $(MINIMAL1_DIR)/../generated/%.jar
|
||||
$(EXPORT_LIB_DIR)/%.jar: $(MINIMAL1_BUILD_DIR)/../generated/%.jar
|
||||
$(install-file)
|
||||
$(EXPORT_INCLUDE_DIR)/%: $(MINIMAL1_DIR)/../generated/jvmtifiles/%
|
||||
$(EXPORT_INCLUDE_DIR)/%: $(MINIMAL1_BUILD_DIR)/../generated/jvmtifiles/%
|
||||
$(install-file)
|
||||
# Windows
|
||||
$(EXPORT_MINIMAL_DIR)/%.dll: $(MINIMAL1_DIR)/%.dll
|
||||
$(EXPORT_MINIMAL_DIR)/%.dll: $(MINIMAL1_BUILD_DIR)/%.dll
|
||||
$(install-file)
|
||||
$(EXPORT_MINIMAL_DIR)/%.pdb: $(MINIMAL1_DIR)/%.pdb
|
||||
$(EXPORT_MINIMAL_DIR)/%.pdb: $(MINIMAL1_BUILD_DIR)/%.pdb
|
||||
$(install-file)
|
||||
$(EXPORT_MINIMAL_DIR)/%.map: $(MINIMAL1_DIR)/%.map
|
||||
$(EXPORT_MINIMAL_DIR)/%.map: $(MINIMAL1_BUILD_DIR)/%.map
|
||||
$(install-file)
|
||||
$(EXPORT_LIB_DIR)/%.lib: $(MINIMAL1_DIR)/%.lib
|
||||
$(EXPORT_LIB_DIR)/%.lib: $(MINIMAL1_BUILD_DIR)/%.lib
|
||||
$(install-file)
|
||||
$(EXPORT_JRE_BIN_DIR)/%.diz: $(MINIMAL1_DIR)/%.diz
|
||||
$(EXPORT_JRE_BIN_DIR)/%.diz: $(MINIMAL1_BUILD_DIR)/%.diz
|
||||
$(install-file)
|
||||
$(EXPORT_JRE_BIN_DIR)/%.dll: $(MINIMAL1_DIR)/%.dll
|
||||
$(EXPORT_JRE_BIN_DIR)/%.dll: $(MINIMAL1_BUILD_DIR)/%.dll
|
||||
$(install-file)
|
||||
$(EXPORT_JRE_BIN_DIR)/%.pdb: $(MINIMAL1_DIR)/%.pdb
|
||||
$(EXPORT_JRE_BIN_DIR)/%.pdb: $(MINIMAL1_BUILD_DIR)/%.pdb
|
||||
$(install-file)
|
||||
$(EXPORT_JRE_BIN_DIR)/%.map: $(MINIMAL1_DIR)/%.map
|
||||
$(EXPORT_JRE_BIN_DIR)/%.map: $(MINIMAL1_BUILD_DIR)/%.map
|
||||
$(install-file)
|
||||
# Unix
|
||||
$(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(MINIMAL1_DIR)/%.$(LIBRARY_SUFFIX)
|
||||
$(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(MINIMAL1_BUILD_DIR)/%.$(LIBRARY_SUFFIX)
|
||||
$(install-file)
|
||||
$(EXPORT_MINIMAL_DIR)/%.$(LIBRARY_SUFFIX): $(MINIMAL1_DIR)/%.$(LIBRARY_SUFFIX)
|
||||
$(EXPORT_MINIMAL_DIR)/%.$(LIBRARY_SUFFIX): $(MINIMAL1_BUILD_DIR)/%.$(LIBRARY_SUFFIX)
|
||||
$(install-file)
|
||||
$(EXPORT_MINIMAL_DIR)/64/%.$(LIBRARY_SUFFIX): $(MINIMAL1_DIR)/%.$(LIBRARY_SUFFIX)
|
||||
$(EXPORT_MINIMAL_DIR)/64/%.$(LIBRARY_SUFFIX): $(MINIMAL1_BUILD_DIR)/%.$(LIBRARY_SUFFIX)
|
||||
$(install-file)
|
||||
$(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo: $(MINIMAL1_DIR)/%.debuginfo
|
||||
$(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo: $(MINIMAL1_BUILD_DIR)/%.debuginfo
|
||||
$(install-file)
|
||||
$(EXPORT_MINIMAL_DIR)/%.debuginfo: $(MINIMAL1_DIR)/%.debuginfo
|
||||
$(EXPORT_MINIMAL_DIR)/%.debuginfo: $(MINIMAL1_BUILD_DIR)/%.debuginfo
|
||||
$(install-file)
|
||||
$(EXPORT_MINIMAL_DIR)/64/%.debuginfo: $(MINIMAL1_DIR)/%.debuginfo
|
||||
$(EXPORT_MINIMAL_DIR)/64/%.debuginfo: $(MINIMAL1_BUILD_DIR)/%.debuginfo
|
||||
$(install-file)
|
||||
$(EXPORT_JRE_LIB_ARCH_DIR)/%.diz: $(MINIMAL1_DIR)/%.diz
|
||||
$(EXPORT_JRE_LIB_ARCH_DIR)/%.diz: $(MINIMAL1_BUILD_DIR)/%.diz
|
||||
$(install-file)
|
||||
$(EXPORT_MINIMAL_DIR)/64/%.diz: $(MINIMAL1_DIR)/%.diz
|
||||
$(EXPORT_MINIMAL_DIR)/64/%.diz: $(MINIMAL1_BUILD_DIR)/%.diz
|
||||
$(install-file)
|
||||
endif
|
||||
|
||||
# Zero
|
||||
ifeq ($(JVM_VARIANT_ZERO), true)
|
||||
# Common
|
||||
$(EXPORT_LIB_DIR)/%.jar: $(ZERO_DIR)/../generated/%.jar
|
||||
$(EXPORT_LIB_DIR)/%.jar: $(ZERO_BUILD_DIR)/../generated/%.jar
|
||||
$(install-file)
|
||||
$(EXPORT_INCLUDE_DIR)/%: $(ZERO_DIR)/../generated/jvmtifiles/%
|
||||
$(EXPORT_INCLUDE_DIR)/%: $(ZERO_BUILD_DIR)/../generated/jvmtifiles/%
|
||||
$(install-file)
|
||||
# Unix
|
||||
$(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(ZERO_DIR)/%.$(LIBRARY_SUFFIX)
|
||||
$(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(ZERO_BUILD_DIR)/%.$(LIBRARY_SUFFIX)
|
||||
$(install-file)
|
||||
$(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo: $(ZERO_DIR)/%.debuginfo
|
||||
$(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo: $(ZERO_BUILD_DIR)/%.debuginfo
|
||||
$(install-file)
|
||||
$(EXPORT_JRE_LIB_ARCH_DIR)/%.diz: $(ZERO_DIR)/%.diz
|
||||
$(EXPORT_JRE_LIB_ARCH_DIR)/%.diz: $(ZERO_BUILD_DIR)/%.diz
|
||||
$(install-file)
|
||||
$(EXPORT_SERVER_DIR)/%.$(LIBRARY_SUFFIX): $(ZERO_DIR)/%.$(LIBRARY_SUFFIX)
|
||||
$(EXPORT_SERVER_DIR)/%.$(LIBRARY_SUFFIX): $(ZERO_BUILD_DIR)/%.$(LIBRARY_SUFFIX)
|
||||
$(install-file)
|
||||
$(EXPORT_SERVER_DIR)/%.debuginfo: $(ZERO_DIR)/%.debuginfo
|
||||
$(EXPORT_SERVER_DIR)/%.debuginfo: $(ZERO_BUILD_DIR)/%.debuginfo
|
||||
$(install-file)
|
||||
$(EXPORT_SERVER_DIR)/%.diz: $(ZERO_DIR)/%.diz
|
||||
$(EXPORT_SERVER_DIR)/%.diz: $(ZERO_BUILD_DIR)/%.diz
|
||||
$(install-file)
|
||||
endif
|
||||
|
||||
# Shark
|
||||
ifeq ($(JVM_VARIANT_ZEROSHARK), true)
|
||||
# Common
|
||||
$(EXPORT_LIB_DIR)/%.jar: $(SHARK_DIR)/../generated/%.jar
|
||||
$(EXPORT_LIB_DIR)/%.jar: $(SHARK_BUILD_DIR)/../generated/%.jar
|
||||
$(install-file)
|
||||
$(EXPORT_INCLUDE_DIR)/%: $(SHARK_DIR)/../generated/jvmtifiles/%
|
||||
$(EXPORT_INCLUDE_DIR)/%: $(SHARK_BUILD_DIR)/../generated/jvmtifiles/%
|
||||
$(install-file)
|
||||
# Unix
|
||||
$(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(SHARK_DIR)/%.$(LIBRARY_SUFFIX)
|
||||
$(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(SHARK_BUILD_DIR)/%.$(LIBRARY_SUFFIX)
|
||||
$(install-file)
|
||||
$(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo): $(SHARK_DIR)/%.debuginfo
|
||||
$(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo): $(SHARK_BUILD_DIR)/%.debuginfo
|
||||
$(install-file)
|
||||
$(EXPORT_JRE_LIB_ARCH_DIR)/%.diz: $(SHARK_DIR)/%.diz
|
||||
$(EXPORT_JRE_LIB_ARCH_DIR)/%.diz: $(SHARK_BUILD_DIR)/%.diz
|
||||
$(install-file)
|
||||
$(EXPORT_SERVER_DIR)/%.$(LIBRARY_SUFFIX): $(SHARK_DIR)/%.$(LIBRARY_SUFFIX)
|
||||
$(EXPORT_SERVER_DIR)/%.$(LIBRARY_SUFFIX): $(SHARK_BUILD_DIR)/%.$(LIBRARY_SUFFIX)
|
||||
$(install-file)
|
||||
$(EXPORT_SERVER_DIR)/%.debuginfo: $(SHARK_DIR)/%.debuginfo
|
||||
$(EXPORT_SERVER_DIR)/%.debuginfo: $(SHARK_BUILD_DIR)/%.debuginfo
|
||||
$(install-file)
|
||||
$(EXPORT_SERVER_DIR)/%.diz: $(SHARK_DIR)/%.diz
|
||||
$(EXPORT_SERVER_DIR)/%.diz: $(SHARK_BUILD_DIR)/%.diz
|
||||
$(install-file)
|
||||
endif
|
||||
|
||||
|
@ -49,7 +49,6 @@
|
||||
# adlc.make -
|
||||
# jvmti.make - generate JVMTI bindings from the spec (JSR-163)
|
||||
# sa.make - generate SA jar file and natives
|
||||
# env.[ck]sh - environment settings
|
||||
#
|
||||
# The makefiles are split this way so that "make foo" will run faster by not
|
||||
# having to read the dependency files for the vm.
|
||||
@ -129,9 +128,7 @@ SUBMAKE_DIRS = $(addprefix $(PLATFORM_DIR)/,$(TARGETS))
|
||||
BUILDTREE_MAKE = $(GAMMADIR)/make/$(OS_FAMILY)/makefiles/buildtree.make
|
||||
|
||||
# dtrace.make is used on BSD versions that implement Dtrace (like MacOS X)
|
||||
BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make \
|
||||
jvmti.make sa.make dtrace.make \
|
||||
env.sh env.csh jdkpath.sh
|
||||
BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make sa.make dtrace.make
|
||||
|
||||
BUILDTREE_VARS = GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OS_FAMILY) \
|
||||
SRCARCH=$(SRCARCH) BUILDARCH=$(BUILDARCH) LIBARCH=$(LIBARCH) VARIANT=$(VARIANT)
|
||||
@ -354,33 +351,6 @@ dtrace.make: $(BUILDTREE_MAKE)
|
||||
echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \
|
||||
) > $@
|
||||
|
||||
env.sh: $(BUILDTREE_MAKE)
|
||||
@echo Creating $@ ...
|
||||
$(QUIETLY) ( \
|
||||
$(BUILDTREE_COMMENT); \
|
||||
{ echo "JAVA_HOME=$(JDK_IMPORT_PATH)"; }; \
|
||||
{ \
|
||||
echo "CLASSPATH=$${CLASSPATH:+$$CLASSPATH:}.:\$${JAVA_HOME}/jre/lib/rt.jar:\$${JAVA_HOME}/jre/lib/i18n.jar"; \
|
||||
} | sed s:$${JAVA_HOME:--------}:\$${JAVA_HOME}:g; \
|
||||
echo "HOTSPOT_BUILD_USER=\"$${LOGNAME:-$$USER} in `basename $(GAMMADIR)`\""; \
|
||||
echo "export JAVA_HOME CLASSPATH HOTSPOT_BUILD_USER"; \
|
||||
) > $@
|
||||
|
||||
env.csh: env.sh
|
||||
@echo Creating $@ ...
|
||||
$(QUIETLY) ( \
|
||||
$(BUILDTREE_COMMENT); \
|
||||
{ echo "setenv JAVA_HOME \"$(JDK_IMPORT_PATH)\""; }; \
|
||||
sed -n 's/^\([A-Za-z_][A-Za-z0-9_]*\)=/setenv \1 /p' $?; \
|
||||
) > $@
|
||||
|
||||
jdkpath.sh: $(BUILDTREE_MAKE)
|
||||
@echo Creating $@ ...
|
||||
$(QUIETLY) ( \
|
||||
$(BUILDTREE_COMMENT); \
|
||||
echo "JDK=${JAVA_HOME}"; \
|
||||
) > $@
|
||||
|
||||
FORCE:
|
||||
|
||||
.PHONY: all FORCE
|
||||
|
@@ -58,6 +58,6 @@ CFLAGS$(HOTSPARC_GENERIC) += $(OPT_CFLAGS/BYFILE)
# Linker mapfile
MAPFILE = $(GAMMADIR)/make/bsd/makefiles/mapfile-vers-debug

VERSION = optimized
VERSION = fastdebug
SYSDEFS += -DASSERT
PICFLAGS = DEFAULT
@ -1,115 +0,0 @@
|
||||
#
|
||||
# Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
#
|
||||
# This code is free software; you can redistribute it and/or modify it
|
||||
# under the terms of the GNU General Public License version 2 only, as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
# version 2 for more details (a copy is included in the LICENSE file that
|
||||
# accompanied this code).
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License version
|
||||
# 2 along with this work; if not, write to the Free Software Foundation,
|
||||
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
#
|
||||
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
# or visit www.oracle.com if you need additional information or have any
|
||||
# questions.
|
||||
#
|
||||
#
|
||||
|
||||
# Rules to build gamma launcher, used by vm.make
|
||||
|
||||
|
||||
LAUNCHER_SCRIPT = hotspot
|
||||
LAUNCHER = gamma
|
||||
|
||||
LAUNCHERDIR := $(GAMMADIR)/src/os/posix/launcher
|
||||
LAUNCHERDIR_SHARE := $(GAMMADIR)/src/share/tools/launcher
|
||||
LAUNCHERFLAGS := $(ARCHFLAG) \
|
||||
-I$(LAUNCHERDIR) -I$(GAMMADIR)/src/share/vm/prims \
|
||||
-I$(LAUNCHERDIR_SHARE) \
|
||||
-DFULL_VERSION=\"$(HOTSPOT_RELEASE_VERSION)\" \
|
||||
-DJDK_MAJOR_VERSION=\"$(JDK_MAJOR_VERSION)\" \
|
||||
-DJDK_MINOR_VERSION=\"$(JDK_MINOR_VERSION)\" \
|
||||
-DARCH=\"$(LIBARCH)\" \
|
||||
-DGAMMA \
|
||||
-DLAUNCHER_TYPE=\"gamma\" \
|
||||
-DLINK_INTO_$(LINK_INTO) \
|
||||
$(TARGET_DEFINES)
|
||||
# Give the launcher task_for_pid() privileges so that it can be used to run JStack, JInfo, et al.
|
||||
LFLAGS_LAUNCHER += -sectcreate __TEXT __info_plist $(GAMMADIR)/src/os/bsd/launcher/Info-privileged.plist
|
||||
|
||||
ifeq ($(LINK_INTO),AOUT)
|
||||
LAUNCHER.o = launcher.o $(JVM_OBJ_FILES)
|
||||
LAUNCHER_MAPFILE = mapfile_reorder
|
||||
LFLAGS_LAUNCHER$(LDNOMAP) += $(MAPFLAG:FILENAME=$(LAUNCHER_MAPFILE))
|
||||
LFLAGS_LAUNCHER += $(SONAMEFLAG:SONAME=$(LIBJVM)) $(STATIC_LIBGCC)
|
||||
LIBS_LAUNCHER += $(STATIC_STDCXX) $(LIBS)
|
||||
else
|
||||
LAUNCHER.o = launcher.o
|
||||
LFLAGS_LAUNCHER += -L`pwd`
|
||||
|
||||
# The gamma launcher runs the JDK from $JAVA_HOME, overriding the JVM with a
|
||||
# freshly built JVM at ./libjvm.{so|dylib}. This is accomplished by setting
|
||||
# the library searchpath using ({DY}LD_LIBRARY_PATH) to find the local JVM
|
||||
# first. Gamma dlopen()s libjava from $JAVA_HOME/jre/lib{/$arch}, which is
|
||||
# statically linked with CoreFoundation framework libs. Unfortunately, gamma's
|
||||
# unique searchpath results in some unresolved symbols in the framework
|
||||
# libraries, because JDK libraries are inadvertently discovered first on the
|
||||
# searchpath, e.g. libjpeg. On Mac OS X, filenames are case *insensitive*.
|
||||
# So, the actual filename collision is libjpeg.dylib and libJPEG.dylib.
|
||||
# To resolve this, gamma needs to also statically link with the CoreFoundation
|
||||
# framework libraries.
|
||||
|
||||
ifeq ($(OS_VENDOR),Darwin)
|
||||
LFLAGS_LAUNCHER += -framework CoreFoundation
|
||||
endif
|
||||
|
||||
LIBS_LAUNCHER += -l$(JVM) $(LIBS)
|
||||
endif
|
||||
|
||||
LINK_LAUNCHER = $(LINK.CC)
|
||||
|
||||
LINK_LAUNCHER/PRE_HOOK = $(LINK_LIB.CXX/PRE_HOOK)
|
||||
LINK_LAUNCHER/POST_HOOK = $(LINK_LIB.CXX/POST_HOOK)
|
||||
|
||||
LAUNCHER_OUT = launcher
|
||||
|
||||
SUFFIXES += .d
|
||||
|
||||
SOURCES := $(shell find $(LAUNCHERDIR) -name "*.c")
|
||||
SOURCES_SHARE := $(shell find $(LAUNCHERDIR_SHARE) -name "*.c")
|
||||
|
||||
OBJS := $(patsubst $(LAUNCHERDIR)/%.c,$(LAUNCHER_OUT)/%.o,$(SOURCES)) $(patsubst $(LAUNCHERDIR_SHARE)/%.c,$(LAUNCHER_OUT)/%.o,$(SOURCES_SHARE))
|
||||
|
||||
DEPFILES := $(patsubst %.o,%.d,$(OBJS))
|
||||
-include $(DEPFILES)
|
||||
|
||||
$(LAUNCHER_OUT)/%.o: $(LAUNCHERDIR_SHARE)/%.c
|
||||
$(QUIETLY) [ -d $(LAUNCHER_OUT) ] || { mkdir -p $(LAUNCHER_OUT); }
|
||||
$(QUIETLY) $(CC) -g -o $@ -c $< -MMD $(LAUNCHERFLAGS) $(CXXFLAGS)
|
||||
|
||||
$(LAUNCHER_OUT)/%.o: $(LAUNCHERDIR)/%.c
|
||||
$(QUIETLY) [ -d $(LAUNCHER_OUT) ] || { mkdir -p $(LAUNCHER_OUT); }
|
||||
$(QUIETLY) $(CC) -g -o $@ -c $< -MMD $(LAUNCHERFLAGS) $(CXXFLAGS)
|
||||
|
||||
$(LAUNCHER): $(OBJS) $(LIBJVM) $(LAUNCHER_MAPFILE)
|
||||
$(QUIETLY) echo Linking launcher...
|
||||
$(QUIETLY) $(LINK_LAUNCHER/PRE_HOOK)
|
||||
$(QUIETLY) $(LINK_LAUNCHER) $(LFLAGS_LAUNCHER) -o $@ $(sort $(OBJS)) $(LIBS_LAUNCHER)
|
||||
$(QUIETLY) $(LINK_LAUNCHER/POST_HOOK)
|
||||
# Sign the launcher with the development certificate (if present) so that it can be used
|
||||
# to run JStack, JInfo, et al.
|
||||
$(QUIETLY) -codesign -s openjdk_codesign $@
|
||||
|
||||
$(LAUNCHER): $(LAUNCHER_SCRIPT)
|
||||
|
||||
$(LAUNCHER_SCRIPT): $(LAUNCHERDIR)/launcher.script
|
||||
$(QUIETLY) sed -e 's/@@LIBARCH@@/$(LIBARCH)/g' $< > $@
|
||||
$(QUIETLY) chmod +x $@
|
||||
|
@@ -144,6 +144,9 @@ JVM = jvm
ifeq ($(OS_VENDOR), Darwin)
LIBJVM = lib$(JVM).dylib
CFLAGS += -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE
ifeq (${VERSION}, $(filter ${VERSION}, debug fastdebug))
CFLAGS += -DALLOW_OPERATOR_NEW_USAGE
endif
else
LIBJVM = lib$(JVM).so
endif
|
||||
@ -328,9 +331,6 @@ install_jvm: $(LIBJVM)
|
||||
#----------------------------------------------------------------------
|
||||
# Other files
|
||||
|
||||
# Gamma launcher
|
||||
include $(MAKEFILES_DIR)/launcher.make
|
||||
|
||||
# Signal interposition library
|
||||
include $(MAKEFILES_DIR)/jsig.make
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
#
|
||||
# Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
|
||||
# Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
#
|
||||
# This code is free software; you can redistribute it and/or modify it
|
||||
@ -81,22 +81,25 @@ ifeq ($(INCLUDE_ALL_GCS), false)
|
||||
cmsAdaptiveSizePolicy.cpp cmsCollectorPolicy.cpp \
|
||||
cmsGCAdaptivePolicyCounters.cpp cmsLockVerifier.cpp compactibleFreeListSpace.cpp \
|
||||
concurrentMarkSweepGeneration.cpp concurrentMarkSweepThread.cpp \
|
||||
freeChunk.cpp adaptiveFreeList.cpp promotionInfo.cpp vmCMSOperations.cpp collectionSetChooser.cpp \
|
||||
concurrentG1Refine.cpp concurrentG1RefineThread.cpp concurrentMark.cpp concurrentMarkThread.cpp \
|
||||
dirtyCardQueue.cpp g1AllocRegion.cpp g1BlockOffsetTable.cpp g1CollectedHeap.cpp g1GCPhaseTimes.cpp \
|
||||
g1CollectorPolicy.cpp g1ErgoVerbose.cpp g1_globals.cpp g1HRPrinter.cpp g1MarkSweep.cpp \
|
||||
g1MMUTracker.cpp g1MonitoringSupport.cpp g1RemSet.cpp g1SATBCardTableModRefBS.cpp heapRegion.cpp \
|
||||
heapRegionRemSet.cpp heapRegionSeq.cpp heapRegionSet.cpp heapRegionSets.cpp ptrQueue.cpp \
|
||||
satbQueue.cpp sparsePRT.cpp survRateGroup.cpp vm_operations_g1.cpp adjoiningGenerations.cpp \
|
||||
adjoiningVirtualSpaces.cpp asPSOldGen.cpp asPSYoungGen.cpp cardTableExtension.cpp \
|
||||
gcTaskManager.cpp gcTaskThread.cpp objectStartArray.cpp parallelScavengeHeap.cpp parMarkBitMap.cpp \
|
||||
pcTasks.cpp psAdaptiveSizePolicy.cpp psCompactionManager.cpp psGCAdaptivePolicyCounters.cpp \
|
||||
psGenerationCounters.cpp psMarkSweep.cpp psMarkSweepDecorator.cpp psOldGen.cpp psParallelCompact.cpp \
|
||||
psPromotionLAB.cpp psPromotionManager.cpp psScavenge.cpp psTasks.cpp psVirtualspace.cpp \
|
||||
psYoungGen.cpp vmPSOperations.cpp asParNewGeneration.cpp parCardTableModRefBS.cpp \
|
||||
parGCAllocBuffer.cpp parNewGeneration.cpp mutableSpace.cpp gSpaceCounters.cpp allocationStats.cpp \
|
||||
spaceCounters.cpp gcAdaptivePolicyCounters.cpp mutableNUMASpace.cpp immutableSpace.cpp \
|
||||
immutableSpace.cpp g1MemoryPool.cpp psMemoryPool.cpp yieldingWorkGroup.cpp g1Log.cpp
|
||||
freeChunk.cpp adaptiveFreeList.cpp promotionInfo.cpp vmCMSOperations.cpp \
|
||||
collectionSetChooser.cpp concurrentG1Refine.cpp concurrentG1RefineThread.cpp \
|
||||
concurrentMark.cpp concurrentMarkThread.cpp dirtyCardQueue.cpp g1AllocRegion.cpp \
|
||||
g1BlockOffsetTable.cpp g1CardCounts.cpp g1CollectedHeap.cpp g1CollectorPolicy.cpp \
|
||||
g1ErgoVerbose.cpp g1GCPhaseTimes.cpp g1HRPrinter.cpp g1HotCardCache.cpp g1Log.cpp \
|
||||
g1MMUTracker.cpp g1MarkSweep.cpp g1MemoryPool.cpp g1MonitoringSupport.cpp \
|
||||
g1RemSet.cpp g1SATBCardTableModRefBS.cpp g1_globals.cpp heapRegion.cpp \
|
||||
heapRegionRemSet.cpp heapRegionSeq.cpp heapRegionSet.cpp heapRegionSets.cpp \
|
||||
ptrQueue.cpp satbQueue.cpp sparsePRT.cpp survRateGroup.cpp vm_operations_g1.cpp \
|
||||
adjoiningGenerations.cpp adjoiningVirtualSpaces.cpp asPSOldGen.cpp asPSYoungGen.cpp \
|
||||
cardTableExtension.cpp gcTaskManager.cpp gcTaskThread.cpp objectStartArray.cpp \
|
||||
parallelScavengeHeap.cpp parMarkBitMap.cpp pcTasks.cpp psAdaptiveSizePolicy.cpp \
|
||||
psCompactionManager.cpp psGCAdaptivePolicyCounters.cpp psGenerationCounters.cpp \
|
||||
psMarkSweep.cpp psMarkSweepDecorator.cpp psMemoryPool.cpp psOldGen.cpp \
|
||||
psParallelCompact.cpp psPromotionLAB.cpp psPromotionManager.cpp psScavenge.cpp \
|
||||
psTasks.cpp psVirtualspace.cpp psYoungGen.cpp vmPSOperations.cpp asParNewGeneration.cpp \
|
||||
parCardTableModRefBS.cpp parGCAllocBuffer.cpp parNewGeneration.cpp mutableSpace.cpp \
|
||||
gSpaceCounters.cpp allocationStats.cpp spaceCounters.cpp gcAdaptivePolicyCounters.cpp \
|
||||
mutableNUMASpace.cpp immutableSpace.cpp yieldingWorkGroup.cpp
|
||||
endif
|
||||
|
||||
ifeq ($(INCLUDE_NMT), false)
|
||||
|
@@ -72,6 +72,7 @@ EMACS=emacs
REL_MYDIR=`dirname $0`
MYDIR=`cd $REL_MYDIR && pwd`

#
# Look whether the user wants to run inside gdb
case "$1" in
-gdb)

@@ -95,16 +96,14 @@ case "$1" in
;;
esac

JDK=
if [ "${ALT_JAVA_HOME}" = "" ]; then
. ${MYDIR}/jdkpath.sh
if [ "${ALT_JAVA_HOME}" != "" ]; then
JDK=${ALT_JAVA_HOME%%/jre}
else
JDK=${ALT_JAVA_HOME%%/jre};
JDK=@@JDK_IMPORT_PATH@@
fi

if [ "${JDK}" = "" ]; then
echo Failed to find JDK. ALT_JAVA_HOME is not set or ./jdkpath.sh is empty or not found.
exit 1
echo "Failed to find JDK. Either ALT_JAVA_HOME is not set or JDK_IMPORT_PATH is empty."
fi

# We will set the LD_LIBRARY_PATH as follows:

@@ -142,12 +141,12 @@ else
export LD_LIBRARY_PATH
fi

JPARMS="$@ $JAVA_ARGS";
JPARMS="-Dsun.java.launcher=gamma -XXaltjvm=$MYDIR $@ $JAVA_ARGS";

# Locate the gamma development launcher
LAUNCHER=${MYDIR}/gamma
# Locate the java launcher
LAUNCHER=$JDK/bin/java
if [ ! -x $LAUNCHER ] ; then
echo Error: Cannot find the gamma development launcher \"$LAUNCHER\"
echo Error: Cannot find the java launcher \"$LAUNCHER\"
exit 1
fi

@@ -166,9 +165,10 @@ set args $JPARMS
file $LAUNCHER
directory $GDBSRCDIR
# Get us to a point where we can set breakpoints in libjvm.so
break InitializeJVM
set breakpoint pending on
break JNI_CreateJavaVM
run
# Stop in InitializeJVM
# Stop in JNI_CreateJavaVM
delete 1
# We can now set breakpoints wherever we like
EOF

@@ -199,7 +199,7 @@ case "$MODE" in
rm -f $GDBSCR
;;
dbx)
$DBX -s $HOME/.dbxrc $LAUNCHER $JPARMS
$DBX -s $HOME/.dbxrc -c "loadobject -load libjvm.so; stop in JNI_CreateJavaVM; run $JPARMS; delete all" $LAUNCHER
;;
valgrind)
echo Warning: Defaulting to 16Mb heap to make Valgrind run faster, use -Xmx for larger heap
|
@@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2013

HS_MAJOR_VER=25
HS_MINOR_VER=0
HS_BUILD_NUMBER=31
HS_BUILD_NUMBER=33

JDK_MAJOR_VER=1
JDK_MINOR_VER=8
@@ -134,14 +134,14 @@ jprt.my.windows.x64=${jprt.my.windows.x64.${jprt.tools.default.release}}

jprt.build.targets.standard= \
${jprt.my.solaris.sparc}-{product|fastdebug}, \
${jprt.my.solaris.sparcv9}-{product|fastdebug}, \
${jprt.my.solaris.sparcv9}-{product|fastdebug|optimized}, \
${jprt.my.solaris.i586}-{product|fastdebug}, \
${jprt.my.solaris.x64}-{product|fastdebug}, \
${jprt.my.linux.i586}-{product|fastdebug}, \
${jprt.my.linux.x64}-{product|fastdebug}, \
${jprt.my.linux.x64}-{product|fastdebug|optimized}, \
${jprt.my.macosx.x64}-{product|fastdebug}, \
${jprt.my.windows.i586}-{product|fastdebug}, \
${jprt.my.windows.x64}-{product|fastdebug}, \
${jprt.my.windows.x64}-{product|fastdebug|optimized}, \
${jprt.my.linux.armvh}-{product|fastdebug}

jprt.build.targets.open= \
@ -49,7 +49,6 @@
|
||||
# adlc.make -
|
||||
# jvmti.make - generate JVMTI bindings from the spec (JSR-163)
|
||||
# sa.make - generate SA jar file and natives
|
||||
# env.[ck]sh - environment settings
|
||||
#
|
||||
# The makefiles are split this way so that "make foo" will run faster by not
|
||||
# having to read the dependency files for the vm.
|
||||
@ -123,8 +122,7 @@ SUBMAKE_DIRS = $(addprefix $(PLATFORM_DIR)/,$(TARGETS))
|
||||
# For dependencies and recursive makes.
|
||||
BUILDTREE_MAKE = $(GAMMADIR)/make/$(OS_FAMILY)/makefiles/buildtree.make
|
||||
|
||||
BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make sa.make \
|
||||
env.sh env.csh jdkpath.sh
|
||||
BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make sa.make
|
||||
|
||||
BUILDTREE_VARS = GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OS_FAMILY) \
|
||||
SRCARCH=$(SRCARCH) BUILDARCH=$(BUILDARCH) LIBARCH=$(LIBARCH) VARIANT=$(VARIANT)
|
||||
@ -349,33 +347,6 @@ sa.make: $(BUILDTREE_MAKE)
|
||||
echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \
|
||||
) > $@
|
||||
|
||||
env.sh: $(BUILDTREE_MAKE)
|
||||
@echo Creating $@ ...
|
||||
$(QUIETLY) ( \
|
||||
$(BUILDTREE_COMMENT); \
|
||||
{ echo "JAVA_HOME=$(JDK_IMPORT_PATH)"; }; \
|
||||
{ \
|
||||
echo "CLASSPATH=$${CLASSPATH:+$$CLASSPATH:}.:\$${JAVA_HOME}/jre/lib/rt.jar:\$${JAVA_HOME}/jre/lib/i18n.jar"; \
|
||||
} | sed s:$${JAVA_HOME:--------}:\$${JAVA_HOME}:g; \
|
||||
echo "HOTSPOT_BUILD_USER=\"$${LOGNAME:-$$USER} in `basename $(GAMMADIR)`\""; \
|
||||
echo "export JAVA_HOME CLASSPATH HOTSPOT_BUILD_USER"; \
|
||||
) > $@
|
||||
|
||||
env.csh: env.sh
|
||||
@echo Creating $@ ...
|
||||
$(QUIETLY) ( \
|
||||
$(BUILDTREE_COMMENT); \
|
||||
{ echo "setenv JAVA_HOME \"$(JDK_IMPORT_PATH)\""; }; \
|
||||
sed -n 's/^\([A-Za-z_][A-Za-z0-9_]*\)=/setenv \1 /p' $?; \
|
||||
) > $@
|
||||
|
||||
jdkpath.sh: $(BUILDTREE_MAKE)
|
||||
@echo Creating $@ ...
|
||||
$(QUIETLY) ( \
|
||||
$(BUILDTREE_COMMENT); \
|
||||
echo "JDK=${JAVA_HOME}"; \
|
||||
) > $@
|
||||
|
||||
FORCE:
|
||||
|
||||
.PHONY: all FORCE
|
||||
|
@ -1,93 +0,0 @@
|
||||
#
|
||||
# Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
#
|
||||
# This code is free software; you can redistribute it and/or modify it
|
||||
# under the terms of the GNU General Public License version 2 only, as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
# version 2 for more details (a copy is included in the LICENSE file that
|
||||
# accompanied this code).
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License version
|
||||
# 2 along with this work; if not, write to the Free Software Foundation,
|
||||
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
#
|
||||
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
# or visit www.oracle.com if you need additional information or have any
|
||||
# questions.
|
||||
#
|
||||
#
|
||||
|
||||
# Rules to build gamma launcher, used by vm.make
|
||||
|
||||
|
||||
LAUNCHER_SCRIPT = hotspot
|
||||
LAUNCHER = gamma
|
||||
|
||||
LAUNCHERDIR := $(GAMMADIR)/src/os/posix/launcher
|
||||
LAUNCHERDIR_SHARE := $(GAMMADIR)/src/share/tools/launcher
|
||||
LAUNCHERFLAGS := $(ARCHFLAG) \
|
||||
-I$(LAUNCHERDIR) -I$(GAMMADIR)/src/share/vm/prims \
|
||||
-I$(LAUNCHERDIR_SHARE) \
|
||||
-DFULL_VERSION=\"$(HOTSPOT_RELEASE_VERSION)\" \
|
||||
-DJDK_MAJOR_VERSION=\"$(JDK_MAJOR_VERSION)\" \
|
||||
-DJDK_MINOR_VERSION=\"$(JDK_MINOR_VERSION)\" \
|
||||
-DARCH=\"$(LIBARCH)\" \
|
||||
-DGAMMA \
|
||||
-DLAUNCHER_TYPE=\"gamma\" \
|
||||
-DLINK_INTO_$(LINK_INTO) \
|
||||
$(TARGET_DEFINES)
|
||||
|
||||
ifeq ($(LINK_INTO),AOUT)
|
||||
LAUNCHER.o = launcher.o $(JVM_OBJ_FILES)
|
||||
LAUNCHER_MAPFILE = mapfile_reorder
|
||||
LFLAGS_LAUNCHER$(LDNOMAP) += $(MAPFLAG:FILENAME=$(LAUNCHER_MAPFILE))
|
||||
LFLAGS_LAUNCHER += $(SONAMEFLAG:SONAME=$(LIBJVM)) $(STATIC_LIBGCC)
|
||||
LIBS_LAUNCHER += $(STATIC_STDCXX) $(LIBS)
|
||||
else
|
||||
LAUNCHER.o = launcher.o
|
||||
LFLAGS_LAUNCHER += -L `pwd`
|
||||
LIBS_LAUNCHER += -l$(JVM) $(LIBS)
|
||||
endif
|
||||
|
||||
LINK_LAUNCHER = $(LINK.CC)
|
||||
|
||||
LINK_LAUNCHER/PRE_HOOK = $(LINK_LIB.CXX/PRE_HOOK)
|
||||
LINK_LAUNCHER/POST_HOOK = $(LINK_LIB.CXX/POST_HOOK)
|
||||
|
||||
LAUNCHER_OUT = launcher
|
||||
|
||||
SUFFIXES += .d
|
||||
|
||||
SOURCES := $(shell find $(LAUNCHERDIR) -name "*.c")
|
||||
SOURCES_SHARE := $(shell find $(LAUNCHERDIR_SHARE) -name "*.c")
|
||||
|
||||
OBJS := $(patsubst $(LAUNCHERDIR)/%.c,$(LAUNCHER_OUT)/%.o,$(SOURCES)) $(patsubst $(LAUNCHERDIR_SHARE)/%.c,$(LAUNCHER_OUT)/%.o,$(SOURCES_SHARE))
|
||||
|
||||
DEPFILES := $(patsubst %.o,%.d,$(OBJS))
|
||||
-include $(DEPFILES)
|
||||
|
||||
$(LAUNCHER_OUT)/%.o: $(LAUNCHERDIR_SHARE)/%.c
|
||||
$(QUIETLY) [ -d $(LAUNCHER_OUT) ] || { mkdir -p $(LAUNCHER_OUT); }
|
||||
$(QUIETLY) $(CC) -g -o $@ -c $< -MMD $(LAUNCHERFLAGS) $(CXXFLAGS)
|
||||
|
||||
$(LAUNCHER_OUT)/%.o: $(LAUNCHERDIR)/%.c
|
||||
$(QUIETLY) [ -d $(LAUNCHER_OUT) ] || { mkdir -p $(LAUNCHER_OUT); }
|
||||
$(QUIETLY) $(CC) -g -o $@ -c $< -MMD $(LAUNCHERFLAGS) $(CXXFLAGS)
|
||||
|
||||
$(LAUNCHER): $(OBJS) $(LIBJVM) $(LAUNCHER_MAPFILE)
|
||||
$(QUIETLY) echo Linking launcher...
|
||||
$(QUIETLY) $(LINK_LAUNCHER/PRE_HOOK)
|
||||
$(QUIETLY) $(LINK_LAUNCHER) $(LFLAGS_LAUNCHER) -o $@ $(sort $(OBJS)) $(LIBS_LAUNCHER)
|
||||
$(QUIETLY) $(LINK_LAUNCHER/POST_HOOK)
|
||||
|
||||
$(LAUNCHER): $(LAUNCHER_SCRIPT)
|
||||
|
||||
$(LAUNCHER_SCRIPT): $(LAUNCHERDIR)/launcher.script
|
||||
$(QUIETLY) sed -e 's/@@LIBARCH@@/$(LIBARCH)/g' $< > $@
|
||||
$(QUIETLY) chmod +x $@
|
||||
|
@ -372,9 +372,6 @@ install_jvm: $(LIBJVM)
|
||||
#----------------------------------------------------------------------
|
||||
# Other files
|
||||
|
||||
# Gamma launcher
|
||||
include $(MAKEFILES_DIR)/launcher.make
|
||||
|
||||
# Signal interposition library
|
||||
include $(MAKEFILES_DIR)/jsig.make
|
||||
|
||||
|
@ -49,7 +49,6 @@
|
||||
# adlc.make -
|
||||
# jvmti.make - generate JVMTI bindings from the spec (JSR-163)
|
||||
# sa.make - generate SA jar file and natives
|
||||
# env.[ck]sh - environment settings
|
||||
#
|
||||
# The makefiles are split this way so that "make foo" will run faster by not
|
||||
# having to read the dependency files for the vm.
|
||||
@ -116,8 +115,7 @@ SUBMAKE_DIRS = $(addprefix $(PLATFORM_DIR)/,$(TARGETS))
|
||||
# For dependencies and recursive makes.
|
||||
BUILDTREE_MAKE = $(GAMMADIR)/make/$(OS_FAMILY)/makefiles/buildtree.make
|
||||
|
||||
BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make sa.make \
|
||||
env.sh env.csh jdkpath.sh
|
||||
BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make sa.make
|
||||
|
||||
BUILDTREE_VARS = GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OS_FAMILY) \
|
||||
ARCH=$(ARCH) BUILDARCH=$(BUILDARCH) LIBARCH=$(LIBARCH) VARIANT=$(VARIANT)
|
||||
@ -339,33 +337,6 @@ sa.make: $(BUILDTREE_MAKE)
|
||||
echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \
|
||||
) > $@
|
||||
|
||||
env.sh: $(BUILDTREE_MAKE)
|
||||
@echo Creating $@ ...
|
||||
$(QUIETLY) ( \
|
||||
$(BUILDTREE_COMMENT); \
|
||||
{ echo "JAVA_HOME=$(JDK_IMPORT_PATH)"; }; \
|
||||
{ \
|
||||
echo "CLASSPATH=$${CLASSPATH:+$$CLASSPATH:}.:\$${JAVA_HOME}/jre/lib/rt.jar:\$${JAVA_HOME}/jre/lib/i18n.jar"; \
|
||||
} | sed s:$${JAVA_HOME:--------}:\$${JAVA_HOME}:g; \
|
||||
echo "HOTSPOT_BUILD_USER=\"$${LOGNAME:-$$USER} in `basename $(GAMMADIR)`\""; \
|
||||
echo "export JAVA_HOME LD_LIBRARY_PATH CLASSPATH HOTSPOT_BUILD_USER"; \
|
||||
) > $@
|
||||
|
||||
env.csh: env.sh
|
||||
@echo Creating $@ ...
|
||||
$(QUIETLY) ( \
|
||||
$(BUILDTREE_COMMENT); \
|
||||
{ echo "setenv JAVA_HOME \"$(JDK_IMPORT_PATH)\""; }; \
|
||||
sed -n 's/^\([A-Za-z_][A-Za-z0-9_]*\)=/setenv \1 /p' $?; \
|
||||
) > $@
|
||||
|
||||
jdkpath.sh: $(BUILDTREE_MAKE)
|
||||
@echo Creating $@ ...
|
||||
$(QUIETLY) ( \
|
||||
$(BUILDTREE_COMMENT); \
|
||||
echo "JDK=${JAVA_HOME}"; \
|
||||
) > $@
|
||||
|
||||
FORCE:
|
||||
|
||||
.PHONY: all FORCE
|
||||
|
@ -1,108 +0,0 @@
|
||||
#
|
||||
# Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
#
|
||||
# This code is free software; you can redistribute it and/or modify it
|
||||
# under the terms of the GNU General Public License version 2 only, as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
# version 2 for more details (a copy is included in the LICENSE file that
|
||||
# accompanied this code).
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License version
|
||||
# 2 along with this work; if not, write to the Free Software Foundation,
|
||||
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
#
|
||||
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
# or visit www.oracle.com if you need additional information or have any
|
||||
# questions.
|
||||
#
|
||||
#
|
||||
|
||||
# Rules to build gamma launcher, used by vm.make
|
||||
|
||||
LAUNCHER_SCRIPT = hotspot
|
||||
LAUNCHER = gamma
|
||||
|
||||
LAUNCHERDIR = $(GAMMADIR)/src/os/posix/launcher
|
||||
LAUNCHERDIR_SHARE := $(GAMMADIR)/src/share/tools/launcher
|
||||
LAUNCHERFLAGS = $(ARCHFLAG) \
|
||||
-I$(LAUNCHERDIR) -I$(GAMMADIR)/src/share/vm/prims \
|
||||
-I$(LAUNCHERDIR_SHARE) \
|
||||
-DFULL_VERSION=\"$(HOTSPOT_RELEASE_VERSION)\" \
|
||||
-DJDK_MAJOR_VERSION=\"$(JDK_MAJOR_VERSION)\" \
|
||||
-DJDK_MINOR_VERSION=\"$(JDK_MINOR_VERSION)\" \
|
||||
-DARCH=\"$(LIBARCH)\" \
|
||||
-DGAMMA \
|
||||
-DLAUNCHER_TYPE=\"gamma\" \
|
||||
-DLINK_INTO_$(LINK_INTO) \
|
||||
$(TARGET_DEFINES)
|
||||
|
||||
ifeq ($(LINK_INTO),AOUT)
|
||||
LAUNCHER.o = launcher.o $(JVM_OBJ_FILES)
|
||||
LAUNCHER_MAPFILE = mapfile_extended
|
||||
LFLAGS_LAUNCHER$(LDNOMAP) += $(MAPFLAG:FILENAME=$(LAUNCHER_MAPFILE))
|
||||
LIBS_LAUNCHER += $(LIBS)
|
||||
else
|
||||
LAUNCHER.o = launcher.o
|
||||
LFLAGS_LAUNCHER += -L `pwd`
|
||||
LIBS_LAUNCHER += -l$(JVM) $(LIBS)
|
||||
endif
|
||||
|
||||
LINK_LAUNCHER = $(LINK.CXX)
|
||||
|
||||
LINK_LAUNCHER/PRE_HOOK = $(LINK_LIB.CXX/PRE_HOOK)
|
||||
LINK_LAUNCHER/POST_HOOK = $(LINK_LIB.CXX/POST_HOOK)
|
||||
|
||||
ifeq ("${Platform_compiler}", "sparcWorks")
|
||||
# Enable the following LAUNCHERFLAGS addition if you need to compare the
|
||||
# built ELF objects.
|
||||
#
|
||||
# The -g option makes static data global and the "-W0,-noglobal"
|
||||
# option tells the compiler to not globalize static data using a unique
|
||||
# globalization prefix. Instead force the use of a static globalization
|
||||
# prefix based on the source filepath so the objects from two identical
|
||||
# compilations are the same.
|
||||
#
|
||||
# Note: The blog says to use "-W0,-xglobalstatic", but that doesn't
|
||||
# seem to work. I got "-W0,-noglobal" from Kelly and that works.
|
||||
#LAUNCHERFLAGS += -W0,-noglobal
|
||||
endif # Platform_compiler == sparcWorks
|
||||
|
||||
LAUNCHER_OUT = launcher
|
||||
|
||||
SUFFIXES += .d
|
||||
|
||||
SOURCES := $(shell find $(LAUNCHERDIR) -name "*.c")
|
||||
SOURCES_SHARE := $(shell find $(LAUNCHERDIR_SHARE) -name "*.c")
|
||||
|
||||
OBJS := $(patsubst $(LAUNCHERDIR)/%.c,$(LAUNCHER_OUT)/%.o,$(SOURCES)) $(patsubst $(LAUNCHERDIR_SHARE)/%.c,$(LAUNCHER_OUT)/%.o,$(SOURCES_SHARE))
|
||||
|
||||
DEPFILES := $(patsubst %.o,%.d,$(OBJS))
|
||||
-include $(DEPFILES)
|
||||
|
||||
$(LAUNCHER_OUT)/%.o: $(LAUNCHERDIR_SHARE)/%.c
|
||||
$(QUIETLY) [ -d $(LAUNCHER_OUT) ] || { mkdir -p $(LAUNCHER_OUT); }
|
||||
$(QUIETLY) $(CC) -g -o $@ -c $< -MMD $(LAUNCHERFLAGS) $(CXXFLAGS)
|
||||
|
||||
$(LAUNCHER_OUT)/%.o: $(LAUNCHERDIR)/%.c
|
||||
$(QUIETLY) [ -d $(LAUNCHER_OUT) ] || { mkdir -p $(LAUNCHER_OUT); }
|
||||
$(QUIETLY) $(CC) -g -o $@ -c $< -MMD $(LAUNCHERFLAGS) $(CXXFLAGS)
|
||||
|
||||
$(LAUNCHER): $(OBJS) $(LIBJVM) $(LAUNCHER_MAPFILE)
|
||||
ifeq ($(filter -sbfast -xsbfast, $(CFLAGS_BROWSE)),)
|
||||
$(QUIETLY) echo Linking launcher...
|
||||
$(QUIETLY) $(LINK_LAUNCHER/PRE_HOOK)
|
||||
$(QUIETLY) $(LINK_LAUNCHER) $(LFLAGS_LAUNCHER) -o $@ $(sort $(OBJS)) $(LIBS_LAUNCHER)
|
||||
$(QUIETLY) $(LINK_LAUNCHER/POST_HOOK)
|
||||
endif # filter -sbfast -xsbfast
|
||||
|
||||
$(LAUNCHER): $(LAUNCHER_SCRIPT)
|
||||
|
||||
$(LAUNCHER_SCRIPT): $(LAUNCHERDIR)/launcher.script
|
||||
$(QUIETLY) sed -e 's/@@LIBARCH@@/$(LIBARCH)/g' $< > $@
|
||||
$(QUIETLY) chmod +x $@
|
||||
|
@ -338,9 +338,6 @@ install_jvm: $(LIBJVM)
|
||||
#----------------------------------------------------------------------
|
||||
# Other files
|
||||
|
||||
# Gamma launcher
|
||||
include $(MAKEFILES_DIR)/launcher.make
|
||||
|
||||
# Signal interposition library
|
||||
include $(MAKEFILES_DIR)/jsig.make
|
||||
|
||||
|
@ -52,7 +52,7 @@ CXX=cl.exe
|
||||
# improving the quality of crash log stack traces involving jvm.dll.
|
||||
|
||||
# These are always used in all compiles
|
||||
CXX_FLAGS=/nologo /W3 /WX
|
||||
CXX_FLAGS=$(EXTRA_CFLAGS) /nologo /W3 /WX
|
||||
|
||||
# Let's add debug information when Full Debug Symbols is enabled
|
||||
!if "$(ENABLE_FULL_DEBUG_SYMBOLS)" == "1"
|
||||
|
@ -33,7 +33,7 @@ GENERATED=../generated
|
||||
BUILD_PCH_FILE=_build_pch_file.obj
|
||||
!endif
|
||||
|
||||
default:: $(BUILD_PCH_FILE) $(AOUT) launcher checkAndBuildSA
|
||||
default:: $(BUILD_PCH_FILE) $(AOUT) checkAndBuildSA
|
||||
|
||||
!include ../local.make
|
||||
!include compile.make
|
||||
@ -71,4 +71,3 @@ $(AOUT): $(Res_Files) $(Obj_Files) vm.def
|
||||
|
||||
!include $(WorkSpace)/make/windows/makefiles/shared.make
|
||||
!include $(WorkSpace)/make/windows/makefiles/sa.make
|
||||
!include $(WorkSpace)/make/windows/makefiles/launcher.make
|
||||
|
@ -193,7 +193,7 @@ ifdef COOKED_BUILD_NUMBER
|
||||
MAKE_ARGS += JDK_BUILD_NUMBER=$(COOKED_BUILD_NUMBER)
|
||||
endif
|
||||
|
||||
NMAKE= MAKEFLAGS= MFLAGS= nmake -NOLOGO
|
||||
NMAKE= MAKEFLAGS= MFLAGS= EXTRA_CFLAGS="$(EXTRA_CFLAGS)" nmake -NOLOGO
|
||||
ifndef SYSTEM_UNAME
|
||||
SYSTEM_UNAME := $(shell uname)
|
||||
export SYSTEM_UNAME
|
||||
|
@ -33,7 +33,7 @@ GENERATED=../generated
|
||||
BUILD_PCH_FILE=_build_pch_file.obj
|
||||
!endif
|
||||
|
||||
default:: $(BUILD_PCH_FILE) $(AOUT) launcher checkAndBuildSA
|
||||
default:: $(BUILD_PCH_FILE) $(AOUT) checkAndBuildSA
|
||||
|
||||
!include ../local.make
|
||||
!include compile.make
|
||||
@ -70,4 +70,3 @@ $(AOUT): $(Res_Files) $(Obj_Files) vm.def
|
||||
|
||||
!include $(WorkSpace)/make/windows/makefiles/shared.make
|
||||
!include $(WorkSpace)/make/windows/makefiles/sa.make
|
||||
!include $(WorkSpace)/make/windows/makefiles/launcher.make
|
||||
|
@ -1,73 +0,0 @@
|
||||
#
|
||||
# Copyright (c) 2010, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
#
|
||||
# This code is free software; you can redistribute it and/or modify it
|
||||
# under the terms of the GNU General Public License version 2 only, as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
# version 2 for more details (a copy is included in the LICENSE file that
|
||||
# accompanied this code).
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License version
|
||||
# 2 along with this work; if not, write to the Free Software Foundation,
|
||||
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
#
|
||||
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
# or visit www.oracle.com if you need additional information or have any
|
||||
# questions.
|
||||
#
|
||||
#
|
||||
|
||||
|
||||
LAUNCHER_FLAGS=$(CXX_FLAGS) $(ARCHFLAG) \
|
||||
/D FULL_VERSION=\"$(HOTSPOT_RELEASE_VERSION)\" \
|
||||
/D JDK_MAJOR_VERSION=\"$(JDK_MAJOR_VERSION)\" \
|
||||
/D JDK_MINOR_VERSION=\"$(JDK_MINOR_VERSION)\" \
|
||||
/D GAMMA \
|
||||
/D LAUNCHER_TYPE=\"gamma\" \
|
||||
/D _CRT_SECURE_NO_WARNINGS \
|
||||
/D _CRT_SECURE_NO_DEPRECATE \
|
||||
/D LINK_INTO_LIBJVM \
|
||||
/I $(WorkSpace)\src\os\windows\launcher \
|
||||
/I $(WorkSpace)\src\share\tools\launcher \
|
||||
/I $(WorkSpace)\src\share\vm\prims \
|
||||
/I $(WorkSpace)\src\share\vm \
|
||||
/I $(WorkSpace)\src\cpu\$(Platform_arch)\vm \
|
||||
/I $(WorkSpace)\src\os\windows\vm
|
||||
|
||||
LD_FLAGS=/manifest $(HS_INTERNAL_NAME).lib kernel32.lib user32.lib /nologo /machine:$(MACHINE) /map /debug /subsystem:console
|
||||
|
||||
!if "$(COMPILER_NAME)" == "VS2005"
|
||||
# This VS2005 compiler has /GS as a default and requires bufferoverflowU.lib
|
||||
# on the link command line, otherwise we get missing __security_check_cookie
|
||||
# externals at link time. Even with /GS-, you need bufferoverflowU.lib.
|
||||
BUFFEROVERFLOWLIB = bufferoverflowU.lib
|
||||
LD_FLAGS = $(LD_FLAGS) $(BUFFEROVERFLOWLIB)
|
||||
!endif
|
||||
|
||||
!if "$(COMPILER_NAME)" == "VS2010" && "$(BUILDARCH)" == "i486"
|
||||
LD_FLAGS = /SAFESEH $(LD_FLAGS)
|
||||
!endif
|
||||
|
||||
LAUNCHERDIR = $(WorkSpace)/src/os/windows/launcher
|
||||
LAUNCHERDIR_SHARE = $(WorkSpace)/src/share/tools/launcher
|
||||
|
||||
OUTDIR = launcher
|
||||
|
||||
{$(LAUNCHERDIR)}.c{$(OUTDIR)}.obj:
|
||||
-mkdir $(OUTDIR) 2>NUL >NUL
|
||||
$(CXX) $(LAUNCHER_FLAGS) /c /Fo$@ $<
|
||||
|
||||
{$(LAUNCHERDIR_SHARE)}.c{$(OUTDIR)}.obj:
|
||||
-mkdir $(OUTDIR) 2>NUL >NUL
|
||||
$(CXX) $(LAUNCHER_FLAGS) /c /Fo$@ $<
|
||||
|
||||
$(OUTDIR)\*.obj: $(LAUNCHERDIR)\*.c $(LAUNCHERDIR)\*.h $(LAUNCHERDIR_SHARE)\*.c $(LAUNCHERDIR_SHARE)\*.h
|
||||
|
||||
launcher: $(OUTDIR)\java.obj $(OUTDIR)\java_md.obj $(OUTDIR)\jli_util.obj
|
||||
echo $(JAVA_HOME) > jdkpath.txt
|
||||
$(LD) $(LD_FLAGS) /out:hotspot.exe $**
|
@ -32,7 +32,7 @@ GENERATED=../generated
|
||||
BUILD_PCH_FILE=_build_pch_file.obj
|
||||
!endif
|
||||
|
||||
default:: $(BUILD_PCH_FILE) $(AOUT) launcher checkAndBuildSA
|
||||
default:: $(BUILD_PCH_FILE) $(AOUT) checkAndBuildSA
|
||||
|
||||
!include ../local.make
|
||||
!include compile.make
|
||||
@ -73,4 +73,3 @@ $(AOUT): $(Res_Files) $(Obj_Files) vm.def
|
||||
|
||||
!include $(WorkSpace)/make/windows/makefiles/shared.make
|
||||
!include $(WorkSpace)/make/windows/makefiles/sa.make
|
||||
!include $(WorkSpace)/make/windows/makefiles/launcher.make
|
||||
|
@ -59,7 +59,6 @@ ProjectCreatorIncludesPRIVATE=\
|
||||
-relativeSrcInclude src \
|
||||
-absoluteSrcInclude $(HOTSPOTBUILDSPACE) \
|
||||
-ignorePath $(HOTSPOTBUILDSPACE) \
|
||||
-ignorePath launcher \
|
||||
-ignorePath share\vm\adlc \
|
||||
-ignorePath share\vm\shark \
|
||||
-ignorePath share\tools \
|
||||
@ -105,7 +104,6 @@ ProjectCreatorIDEOptions=\
|
||||
-define ALIGN_STACK_FRAMES \
|
||||
-define VM_LITTLE_ENDIAN \
|
||||
-prelink "" "Generating vm.def..." "cd $(HOTSPOTBUILDSPACE)\%f\%b set HOTSPOTMKSHOME=$(HOTSPOTMKSHOME) set JAVA_HOME=$(HOTSPOTJDKDIST) $(HOTSPOTMKSHOME)\sh $(HOTSPOTWORKSPACE)\make\windows\build_vm_def.sh $(LD_VER)" \
|
||||
-postbuild "" "Building hotspot.exe..." "cd $(HOTSPOTBUILDSPACE)\%f\%b set HOTSPOTMKSHOME=$(HOTSPOTMKSHOME) nmake -f $(HOTSPOTWORKSPACE)\make\windows\projectfiles\common\Makefile LOCAL_MAKE=$(HOTSPOTBUILDSPACE)\%f\local.make JAVA_HOME=$(HOTSPOTJDKDIST) launcher" \
|
||||
-ignoreFile jsig.c \
|
||||
-ignoreFile jvmtiEnvRecommended.cpp \
|
||||
-ignoreFile jvmtiEnvStub.cpp \
|
||||
|
@ -65,7 +65,6 @@ JvmtiOutDir=$(HOTSPOTBUILDSPACE)\$(Variant)\generated\jvmtifiles
|
||||
!endif
|
||||
|
||||
HS_INTERNAL_NAME=jvm
|
||||
!include $(HOTSPOTWORKSPACE)/make/windows/makefiles/launcher.make
|
||||
|
||||
default:: $(AdditionalTargets) $(JvmtiGeneratedFiles)
|
||||
|
||||
|
@ -30,4 +30,6 @@ const int BytesPerInstWord = 4;
|
||||
|
||||
const int StackAlignmentInBytes = (2*wordSize);

#define SUPPORTS_NATIVE_CX8

#endif // CPU_SPARC_VM_GLOBALDEFINITIONS_SPARC_HPP
|
@ -74,7 +74,7 @@ define_pd_global(bool, RewriteFrequentPairs, true);
|
||||
define_pd_global(bool, UseMembar, false);

// GC Ergo Flags
define_pd_global(intx, CMSYoungGenPerWorker, 16*M); // default max size of CMS young gen, per GC worker thread
define_pd_global(uintx, CMSYoungGenPerWorker, 16*M); // default max size of CMS young gen, per GC worker thread

#define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct) \
\
|
||||
|
@ -23,7 +23,12 @@
|
||||
* questions.
|
||||
*/
|
||||
|
||||
#if defined(__GNUC__) && (__GNUC__ >= 4)
|
||||
// Note: please do not change these without also changing jni_md.h in the JDK
|
||||
// repository
|
||||
#ifndef __has_attribute
|
||||
#define __has_attribute(x) 0
|
||||
#endif
|
||||
#if (defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4) && (__GNUC_MINOR__ > 2))) || __has_attribute(visibility)
|
||||
#define JNIEXPORT __attribute__((visibility("default")))
|
||||
#define JNIIMPORT __attribute__((visibility("default")))
|
||||
#else
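The hunk above introduces a __has_attribute fallback so the visibility check still preprocesses on toolchains that lack the builtin. A minimal standalone sketch of the same guard-then-export pattern; the DEMO_EXPORT macro and demo function are illustrative stand-ins, not JDK code:

#include <cstdio>

// Harmless fallback so the feature test below still preprocesses on compilers
// that do not provide __has_attribute.
#ifndef __has_attribute
#define __has_attribute(x) 0
#endif

// Use default visibility when the compiler is new enough or advertises the attribute.
#if (defined(__GNUC__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ > 2)))) || __has_attribute(visibility)
#define DEMO_EXPORT __attribute__((visibility("default")))
#else
#define DEMO_EXPORT
#endif

// Remains visible even when the translation unit is built with -fvisibility=hidden.
DEMO_EXPORT int demo_answer(void) { return 42; }

int main(void) { std::printf("%d\n", demo_answer()); return 0; }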
|
||||
|
@ -27,4 +27,6 @@
|
||||
|
||||
const int StackAlignmentInBytes = 16;
|
||||
|
||||
#define SUPPORTS_NATIVE_CX8
|
||||
|
||||
#endif // CPU_X86_VM_GLOBALDEFINITIONS_X86_HPP
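SUPPORTS_NATIVE_CX8, added above, advertises that the CPU can perform an atomic 8-byte compare-and-exchange (cmpxchg8b on x86), so 64-bit updates need no lock. A hedged standard-C++ sketch of the operation this capability enables; illustrative only, not HotSpot code:

#include <atomic>
#include <cstdint>
#include <iostream>

int main() {
    std::atomic<std::int64_t> counter{41};

    // On hardware with a native 64-bit CAS this atomic is lock-free, which is
    // roughly what the SUPPORTS_NATIVE_CX8 define advertises to the rest of the VM.
    std::cout << "lock-free: " << counter.is_lock_free() << '\n';

    std::int64_t expected = 41;
    counter.compare_exchange_strong(expected, 42);  // atomic 64-bit update
    std::cout << counter.load() << '\n';            // 42 if the exchange succeeded
    return 0;
}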
|
||||
|
@ -77,7 +77,7 @@ define_pd_global(bool, UseMembar, false);
|
||||
#endif
|
||||
|
||||
// GC Ergo Flags
|
||||
define_pd_global(intx, CMSYoungGenPerWorker, 64*M); // default max size of CMS young gen, per GC worker thread
|
||||
define_pd_global(uintx, CMSYoungGenPerWorker, 64*M); // default max size of CMS young gen, per GC worker thread
|
||||
|
||||
#define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct) \
|
||||
\
|
||||
|
@ -28,7 +28,13 @@
|
||||
|
||||
#if defined(SOLARIS) || defined(LINUX) || defined(_ALLBSD_SOURCE)
|
||||
|
||||
#if defined(__GNUC__) && (__GNUC__ > 4) || (__GNUC__ == 4) && (__GNUC_MINOR__ > 2)
|
||||
|
||||
// Note: please do not change these without also changing jni_md.h in the JDK
|
||||
// repository
|
||||
#ifndef __has_attribute
|
||||
#define __has_attribute(x) 0
|
||||
#endif
|
||||
#if (defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4) && (__GNUC_MINOR__ > 2))) || __has_attribute(visibility)
|
||||
#define JNIEXPORT __attribute__((visibility("default")))
|
||||
#define JNIIMPORT __attribute__((visibility("default")))
|
||||
#else
|
||||
|
@ -212,7 +212,13 @@ int CppInterpreter::native_entry(Method* method, intptr_t UNUSED, TRAPS) {
|
||||
|
||||
// Update the invocation counter
|
||||
if ((UseCompiler || CountCompiledCalls) && !method->is_synchronized()) {
|
||||
InvocationCounter *counter = method->invocation_counter();
|
||||
MethodCounters* mcs = method->method_counters();
|
||||
if (mcs == NULL) {
|
||||
CALL_VM_NOCHECK(mcs = InterpreterRuntime::build_method_counters(thread, method));
|
||||
if (HAS_PENDING_EXCEPTION)
|
||||
goto unwind_and_return;
|
||||
}
|
||||
InvocationCounter *counter = mcs->invocation_counter();
|
||||
counter->increment();
|
||||
if (counter->reached_InvocationLimit()) {
|
||||
CALL_VM_NOCHECK(
|
||||
|
@ -55,7 +55,7 @@ define_pd_global(bool, RewriteFrequentPairs, true);
|
||||
define_pd_global(bool, UseMembar, true);
|
||||
|
||||
// GC Ergo Flags
|
||||
define_pd_global(intx, CMSYoungGenPerWorker, 16*M); // default max size of CMS young gen, per GC worker thread
|
||||
define_pd_global(uintx, CMSYoungGenPerWorker, 16*M); // default max size of CMS young gen, per GC worker thread
|
||||
|
||||
#define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct)
|
||||
|
||||
|
@ -25,7 +25,13 @@
|
||||
*/
|
||||
|
||||
|
||||
#if defined(__GNUC__) && (__GNUC__ >= 4)
|
||||
|
||||
// Note: please do not change these without also changing jni_md.h in the JDK
|
||||
// repository
|
||||
#ifndef __has_attribute
|
||||
#define __has_attribute(x) 0
|
||||
#endif
|
||||
#if (defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4) && (__GNUC_MINOR__ > 2))) || __has_attribute(visibility)
|
||||
#define JNIEXPORT __attribute__((visibility("default")))
|
||||
#define JNIIMPORT __attribute__((visibility("default")))
|
||||
#else
|
||||
|
File diff suppressed because it is too large
@ -1,82 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef JAVA_MD_H
|
||||
#define JAVA_MD_H
|
||||
|
||||
#include <limits.h>
|
||||
#include <unistd.h>
|
||||
#include <sys/param.h>
|
||||
#ifndef GAMMA
|
||||
#include "manifest_info.h"
|
||||
#endif
|
||||
#include "jli_util.h"
|
||||
|
||||
#define PATH_SEPARATOR ':'
|
||||
#define FILESEP "/"
|
||||
#define FILE_SEPARATOR '/'
|
||||
#define IS_FILE_SEPARATOR(c) ((c) == '/')
|
||||
#ifndef MAXNAMELEN
|
||||
#define MAXNAMELEN PATH_MAX
|
||||
#endif
|
||||
|
||||
#ifdef JAVA_ARGS
|
||||
/*
|
||||
* ApplicationHome is prepended to each of these entries; the resulting
|
||||
* strings are concatenated (separated by PATH_SEPARATOR) and used as the
|
||||
* value of -cp option to the launcher.
|
||||
*/
|
||||
#ifndef APP_CLASSPATH
|
||||
#define APP_CLASSPATH { "/lib/tools.jar", "/classes" }
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#ifdef HAVE_GETHRTIME
|
||||
/*
|
||||
* Support for doing cheap, accurate interval timing.
|
||||
*/
|
||||
#include <sys/time.h>
|
||||
#define CounterGet() (gethrtime()/1000)
|
||||
#define Counter2Micros(counts) (counts)
|
||||
#else
|
||||
#define CounterGet() (0)
|
||||
#define Counter2Micros(counts) (1)
|
||||
#endif /* HAVE_GETHRTIME */
|
||||
|
||||
#ifdef _LP64
|
||||
#define JLONG_FORMAT "%ld"
|
||||
#else
|
||||
#define JLONG_FORMAT "%lld"
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Function prototypes.
|
||||
*/
|
||||
#ifndef GAMMA
|
||||
char *LocateJRE(manifest_info *info);
|
||||
void ExecJRE(char *jre, char **argv);
|
||||
#endif
|
||||
int UnsetEnv(char *name);
|
||||
|
||||
#endif
|
File diff suppressed because it is too large
@ -1,83 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef JAVA_MD_H
|
||||
#define JAVA_MD_H
|
||||
|
||||
#include <jni.h>
|
||||
#include <windows.h>
|
||||
#include <io.h>
|
||||
#ifndef GAMMA
|
||||
#include "manifest_info.h"
|
||||
#endif
|
||||
#include "jli_util.h"
|
||||
|
||||
#ifdef GAMMA
|
||||
#define stricmp _stricmp
|
||||
#define strnicmp _strnicmp
|
||||
#define snprintf _snprintf
|
||||
#define strdup _strdup
|
||||
#endif
|
||||
|
||||
#define PATH_SEPARATOR ';'
|
||||
#define FILESEP "\\"
|
||||
#define FILE_SEPARATOR '\\'
|
||||
#define IS_FILE_SEPARATOR(c) ((c) == '\\' || (c) == '/')
|
||||
#define MAXPATHLEN MAX_PATH
|
||||
#define MAXNAMELEN MAX_PATH
|
||||
|
||||
#ifdef JAVA_ARGS
|
||||
/*
|
||||
* ApplicationHome is prepended to each of these entries; the resulting
|
||||
* strings are concatenated (separated by PATH_SEPARATOR) and used as the
|
||||
* value of -cp option to the launcher.
|
||||
*/
|
||||
#ifndef APP_CLASSPATH
|
||||
#define APP_CLASSPATH { "\\lib\\tools.jar", "\\classes" }
|
||||
#endif
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Support for doing cheap, accurate interval timing.
|
||||
*/
|
||||
extern jlong CounterGet(void);
|
||||
extern jlong Counter2Micros(jlong counts);
|
||||
|
||||
#ifdef JAVAW
|
||||
#define main _main
|
||||
extern int _main(int argc, char **argv);
|
||||
#endif
|
||||
|
||||
#define JLONG_FORMAT "%I64d"
|
||||
|
||||
/*
|
||||
* Function prototypes.
|
||||
*/
|
||||
#ifndef GAMMA
|
||||
char *LocateJRE(manifest_info *info);
|
||||
void ExecJRE(char *jre, char **argv);
|
||||
#endif
|
||||
int UnsetEnv(char *name);
|
||||
|
||||
#endif
|
@ -3307,7 +3307,7 @@ void os::pd_start_thread(Thread* thread) {
|
||||
assert(ret != SYS_THREAD_ERROR, "StartThread failed"); // should propagate back
|
||||
}
|
||||
|
||||
class HighResolutionInterval {
|
||||
class HighResolutionInterval : public CHeapObj<mtThread> {
|
||||
// The default timer resolution seems to be 10 milliseconds.
|
||||
// (Where is this written down?)
|
||||
// If someone wants to sleep for only a fraction of the default,
|
||||
|
@ -65,6 +65,7 @@ class BuildConfig {
|
||||
String sourceBase = getFieldString(null, "SourceBase");
|
||||
String buildSpace = getFieldString(null, "BuildSpace");
|
||||
String outDir = buildBase;
|
||||
String jdkTargetRoot = getFieldString(null, "JdkTargetRoot");
|
||||
|
||||
put("Id", flavourBuild);
|
||||
put("OutputDir", outDir);
|
||||
@ -72,6 +73,7 @@ class BuildConfig {
|
||||
put("BuildBase", buildBase);
|
||||
put("BuildSpace", buildSpace);
|
||||
put("OutputDll", outDir + Util.sep + outDll);
|
||||
put("JdkTargetRoot", jdkTargetRoot);
|
||||
|
||||
context = new String [] {flavourBuild, flavour, build, null};
|
||||
}
|
||||
|
@ -98,11 +98,6 @@ public class WinGammaPlatformVC10 extends WinGammaPlatformVC7 {
|
||||
tagV(cfg.getV("LinkerFlags"));
|
||||
endTag();
|
||||
|
||||
startTag("PostBuildEvent");
|
||||
tagData("Message", BuildConfig.getFieldString(null, "PostbuildDescription"));
|
||||
tagData("Command", cfg.expandFormat(BuildConfig.getFieldString(null, "PostbuildCommand").replace("\t", "\r\n")));
|
||||
endTag();
|
||||
|
||||
startTag("PreLinkEvent");
|
||||
tagData("Message", BuildConfig.getFieldString(null, "PrelinkDescription"));
|
||||
tagData("Command", cfg.expandFormat(BuildConfig.getFieldString(null, "PrelinkCommand").replace("\t", "\r\n")));
|
||||
@ -141,7 +136,9 @@ public class WinGammaPlatformVC10 extends WinGammaPlatformVC7 {
|
||||
|
||||
for (BuildConfig cfg : allConfigs) {
|
||||
startTag(cfg, "PropertyGroup");
|
||||
tagData("LocalDebuggerCommand", "$(TargetDir)/hotspot.exe");
|
||||
tagData("LocalDebuggerCommand", cfg.get("JdkTargetRoot") + "\\bin\\java.exe");
|
||||
tagData("LocalDebuggerCommandArguments", "-XXaltjvm=$(TargetDir) -Dsun.java.launcher=gamma");
|
||||
tagData("LocalDebuggerEnvironment", "JAVA_HOME=" + cfg.get("JdkTargetRoot"));
|
||||
endTag();
|
||||
}
|
||||
|
||||
|
File diff suppressed because it is too large
@ -1,110 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
|
||||
#ifndef _JAVA_H_
|
||||
#define _JAVA_H_
|
||||
|
||||
/*
|
||||
* Get system specific defines.
|
||||
*/
|
||||
#include "jni.h"
|
||||
#include "java_md.h"
|
||||
#include "jli_util.h"
|
||||
|
||||
/*
|
||||
* Pointers to the needed JNI invocation API, initialized by LoadJavaVM.
|
||||
*/
|
||||
typedef jint (JNICALL *CreateJavaVM_t)(JavaVM **pvm, void **env, void *args);
|
||||
typedef jint (JNICALL *GetDefaultJavaVMInitArgs_t)(void *args);
|
||||
|
||||
typedef struct {
|
||||
CreateJavaVM_t CreateJavaVM;
|
||||
GetDefaultJavaVMInitArgs_t GetDefaultJavaVMInitArgs;
|
||||
} InvocationFunctions;
|
||||
|
||||
/*
|
||||
* Prototypes for launcher functions in the system specific java_md.c.
|
||||
*/
|
||||
|
||||
jboolean
|
||||
LoadJavaVM(const char *jvmpath, InvocationFunctions *ifn);
|
||||
|
||||
void
|
||||
GetXUsagePath(char *buf, jint bufsize);
|
||||
|
||||
jboolean
|
||||
GetApplicationHome(char *buf, jint bufsize);
|
||||
|
||||
const char *
|
||||
GetArch();
|
||||
|
||||
void CreateExecutionEnvironment(int *_argc,
|
||||
char ***_argv,
|
||||
char jrepath[],
|
||||
jint so_jrepath,
|
||||
char jvmpath[],
|
||||
jint so_jvmpath,
|
||||
char **original_argv);
|
||||
|
||||
/*
|
||||
* Report an error message to stderr or a window as appropriate. The
|
||||
* flag always is set to JNI_TRUE if message is to be reported to both
|
||||
* strerr and windows and set to JNI_FALSE if the message should only
|
||||
* be sent to a window.
|
||||
*/
|
||||
void ReportErrorMessage(char * message, jboolean always);
|
||||
void ReportErrorMessage2(char * format, char * string, jboolean always);
|
||||
|
||||
/*
|
||||
* Report an exception which terminates the vm to stderr or a window
|
||||
* as appropriate.
|
||||
*/
|
||||
void ReportExceptionDescription(JNIEnv * env);
|
||||
|
||||
jboolean RemovableMachineDependentOption(char * option);
|
||||
void PrintMachineDependentOptions();
|
||||
|
||||
/*
|
||||
* Block current thread and continue execution in new thread
|
||||
*/
|
||||
int ContinueInNewThread(int (JNICALL *continuation)(void *),
|
||||
jlong stack_size, void * args);
|
||||
|
||||
/* sun.java.launcher.* platform properties. */
|
||||
void SetJavaLauncherPlatformProps(void);
|
||||
|
||||
/*
|
||||
* Functions defined in java.c and used in java_md.c.
|
||||
*/
|
||||
jint ReadKnownVMs(const char *jrepath, char * arch, jboolean speculative);
|
||||
char *CheckJvmType(int *argc, char ***argv, jboolean speculative);
|
||||
void AddOption(char *str, void *info);
|
||||
|
||||
/*
|
||||
* Make launcher spit debug output.
|
||||
*/
|
||||
extern jboolean _launcher_debug;
|
||||
|
||||
#endif /* _JAVA_H_ */
|
@ -1,89 +0,0 @@
|
||||
|
||||
/*
|
||||
* Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <stdio.h>
|
||||
#include <string.h>
|
||||
#include "jli_util.h"
|
||||
|
||||
#ifdef GAMMA
|
||||
#ifdef TARGET_OS_FAMILY_windows
|
||||
#define strdup _strdup
|
||||
#endif
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Returns a pointer to a block of at least 'size' bytes of memory.
|
||||
* Prints error message and exits if the memory could not be allocated.
|
||||
*/
|
||||
void *
|
||||
JLI_MemAlloc(size_t size)
|
||||
{
|
||||
void *p = malloc(size);
|
||||
if (p == 0) {
|
||||
perror("malloc");
|
||||
exit(1);
|
||||
}
|
||||
return p;
|
||||
}
|
||||
|
||||
/*
|
||||
* Equivalent to realloc(size).
|
||||
* Prints error message and exits if the memory could not be reallocated.
|
||||
*/
|
||||
void *
|
||||
JLI_MemRealloc(void *ptr, size_t size)
|
||||
{
|
||||
void *p = realloc(ptr, size);
|
||||
if (p == 0) {
|
||||
perror("realloc");
|
||||
exit(1);
|
||||
}
|
||||
return p;
|
||||
}
|
||||
|
||||
/*
|
||||
* Wrapper over strdup(3C) which prints an error message and exits if memory
|
||||
* could not be allocated.
|
||||
*/
|
||||
char *
|
||||
JLI_StringDup(const char *s1)
|
||||
{
|
||||
char *s = strdup(s1);
|
||||
if (s == NULL) {
|
||||
perror("strdup");
|
||||
exit(1);
|
||||
}
|
||||
return s;
|
||||
}
|
||||
|
||||
/*
|
||||
* Very equivalent to free(ptr).
|
||||
* Here to maintain pairing with the above routines.
|
||||
*/
|
||||
void
|
||||
JLI_MemFree(void *ptr)
|
||||
{
|
||||
free(ptr);
|
||||
}
|
@ -1,496 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
/*
|
||||
* Class-Path Wildcards
|
||||
*
|
||||
* The syntax for wildcards is a single asterisk. The class path
|
||||
* foo/"*", e.g., loads all jar files in the directory named foo.
|
||||
* (This requires careful quotation when used in shell scripts.)
|
||||
*
|
||||
* Only files whose names end in .jar or .JAR are matched.
|
||||
* Files whose names end in .zip, or which have a particular
|
||||
* magic number, regardless of filename extension, are not
|
||||
* matched.
|
||||
*
|
||||
* Files are considered regardless of whether or not they are
|
||||
* "hidden" in the UNIX sense, i.e., have names beginning with '.'.
|
||||
*
|
||||
* A wildcard only matches jar files, not class files in the same
|
||||
* directory. If you want to load both class files and jar files from
|
||||
* a single directory foo then you can say foo:foo/"*", or foo/"*":foo
|
||||
* if you want the jar files to take precedence.
|
||||
*
|
||||
* Subdirectories are not searched recursively, i.e., foo/"*" only
|
||||
* looks for jar files in foo, not in foo/bar, foo/baz, etc.
|
||||
*
|
||||
* Expansion of wildcards is done early, prior to the invocation of a
|
||||
* program's main method, rather than late, during the class-loading
|
||||
* process itself. Each element of the input class path containing a
|
||||
* wildcard is replaced by the (possibly empty) sequence of elements
|
||||
* generated by enumerating the jar files in the named directory. If
|
||||
* the directory foo contains a.jar, b.jar, and c.jar,
|
||||
* e.g., then the class path foo/"*" is expanded into
|
||||
* foo/a.jar:foo/b.jar:foo/c.jar, and that string would be the value
|
||||
* of the system property java.class.path.
|
||||
*
|
||||
* The order in which the jar files in a directory are enumerated in
|
||||
* the expanded class path is not specified and may vary from platform
|
||||
* to platform and even from moment to moment on the same machine. A
|
||||
* well-constructed application should not depend upon any particular
|
||||
* order. If a specific order is required then the jar files can be
|
||||
* enumerated explicitly in the class path.
|
||||
*
|
||||
* The CLASSPATH environment variable is not treated any differently
|
||||
* from the -classpath (equiv. -cp) command-line option,
|
||||
* i.e. wildcards are honored in all these cases.
|
||||
*
|
||||
* Class-path wildcards are not honored in the Class-Path jar-manifest
|
||||
* header.
|
||||
*
|
||||
* Class-path wildcards are honored not only by the Java launcher but
|
||||
* also by most other command-line tools that accept class paths, and
|
||||
* in particular by javac and javadoc.
|
||||
*
|
||||
* Class-path wildcards are not honored in any other kind of path, and
|
||||
* especially not in the bootstrap class path, which is a mere
|
||||
* artifact of our implementation and not something that developers
|
||||
* should use.
|
||||
*
|
||||
* Classpath wildcards are only expanded in the Java launcher code,
|
||||
* supporting the use of wildcards on the command line and in the
|
||||
* CLASSPATH environment variable. We do not support the use of
|
||||
* wildcards by applications that embed the JVM.
|
||||
*/
|
||||
|
||||
#include <stddef.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <sys/types.h>
|
||||
#include "java.h" /* Strictly for PATH_SEPARATOR/FILE_SEPARATOR */
|
||||
#include "jli_util.h"
|
||||
|
||||
#ifdef _WIN32
|
||||
#include <windows.h>
|
||||
#else /* Unix */
|
||||
#include <unistd.h>
|
||||
#include <dirent.h>
|
||||
#endif /* Unix */
|
||||
|
||||
static int
|
||||
exists(const char* filename)
|
||||
{
|
||||
#ifdef _WIN32
|
||||
return _access(filename, 0) == 0;
|
||||
#else
|
||||
return access(filename, F_OK) == 0;
|
||||
#endif
|
||||
}
|
||||
|
||||
#define NEW_(TYPE) ((TYPE) JLI_MemAlloc(sizeof(struct TYPE##_)))
|
||||
|
||||
/*
|
||||
* Wildcard directory iteration.
|
||||
* WildcardIterator_for(wildcard) returns an iterator.
|
||||
* Each call to that iterator's next() method returns the basename
|
||||
* of an entry in the wildcard's directory. The basename's memory
|
||||
* belongs to the iterator. The caller is responsible for prepending
|
||||
* the directory name and file separator, if necessary.
|
||||
* When done with the iterator, call the close method to clean up.
|
||||
*/
|
||||
typedef struct WildcardIterator_* WildcardIterator;
|
||||
|
||||
#ifdef _WIN32
|
||||
struct WildcardIterator_
|
||||
{
|
||||
HANDLE handle;
|
||||
char *firstFile; /* Stupid FindFirstFile...FindNextFile */
|
||||
};
|
||||
|
||||
static WildcardIterator
|
||||
WildcardIterator_for(const char *wildcard)
|
||||
{
|
||||
WIN32_FIND_DATA find_data;
|
||||
WildcardIterator it = NEW_(WildcardIterator);
|
||||
HANDLE handle = FindFirstFile(wildcard, &find_data);
|
||||
if (handle == INVALID_HANDLE_VALUE)
|
||||
return NULL;
|
||||
it->handle = handle;
|
||||
it->firstFile = find_data.cFileName;
|
||||
return it;
|
||||
}
|
||||
|
||||
static char *
|
||||
WildcardIterator_next(WildcardIterator it)
|
||||
{
|
||||
WIN32_FIND_DATA find_data;
|
||||
if (it->firstFile != NULL) {
|
||||
char *firstFile = it->firstFile;
|
||||
it->firstFile = NULL;
|
||||
return firstFile;
|
||||
}
|
||||
return FindNextFile(it->handle, &find_data)
|
||||
? find_data.cFileName : NULL;
|
||||
}
|
||||
|
||||
static void
|
||||
WildcardIterator_close(WildcardIterator it)
|
||||
{
|
||||
if (it) {
|
||||
FindClose(it->handle);
|
||||
JLI_MemFree(it->firstFile);
|
||||
JLI_MemFree(it);
|
||||
}
|
||||
}
|
||||
|
||||
#else /* Unix */
|
||||
struct WildcardIterator_
|
||||
{
|
||||
DIR *dir;
|
||||
};
|
||||
|
||||
static WildcardIterator
|
||||
WildcardIterator_for(const char *wildcard)
|
||||
{
|
||||
DIR *dir;
|
||||
int wildlen = strlen(wildcard);
|
||||
if (wildlen < 2) {
|
||||
dir = opendir(".");
|
||||
} else {
|
||||
char *dirname = JLI_StringDup(wildcard);
|
||||
dirname[wildlen - 1] = '\0';
|
||||
dir = opendir(dirname);
|
||||
JLI_MemFree(dirname);
|
||||
}
|
||||
if (dir == NULL)
|
||||
return NULL;
|
||||
else {
|
||||
WildcardIterator it = NEW_(WildcardIterator);
|
||||
it->dir = dir;
|
||||
return it;
|
||||
}
|
||||
}
|
||||
|
||||
static char *
|
||||
WildcardIterator_next(WildcardIterator it)
|
||||
{
|
||||
struct dirent* dirp = readdir(it->dir);
|
||||
return dirp ? dirp->d_name : NULL;
|
||||
}
|
||||
|
||||
static void
|
||||
WildcardIterator_close(WildcardIterator it)
|
||||
{
|
||||
if (it) {
|
||||
closedir(it->dir);
|
||||
JLI_MemFree(it);
|
||||
}
|
||||
}
|
||||
#endif /* Unix */
|
||||
|
||||
static int
|
||||
equal(const char *s1, const char *s2)
|
||||
{
|
||||
return strcmp(s1, s2) == 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* FileList ADT - a dynamic list of C filenames
|
||||
*/
|
||||
struct FileList_
|
||||
{
|
||||
char **files;
|
||||
int size;
|
||||
int capacity;
|
||||
};
|
||||
typedef struct FileList_ *FileList;
|
||||
|
||||
static FileList
|
||||
FileList_new(int capacity)
|
||||
{
|
||||
FileList fl = NEW_(FileList);
|
||||
fl->capacity = capacity;
|
||||
fl->files = (char **) JLI_MemAlloc(capacity * sizeof(fl->files[0]));
|
||||
fl->size = 0;
|
||||
return fl;
|
||||
}
|
||||
|
||||
#ifdef DEBUG_WILDCARD
|
||||
static void
|
||||
FileList_print(FileList fl)
|
||||
{
|
||||
int i;
|
||||
putchar('[');
|
||||
for (i = 0; i < fl->size; i++) {
|
||||
if (i > 0) printf(", ");
|
||||
printf("\"%s\"",fl->files[i]);
|
||||
}
|
||||
putchar(']');
|
||||
}
|
||||
#endif
|
||||
|
||||
static void
|
||||
FileList_free(FileList fl)
|
||||
{
|
||||
if (fl) {
|
||||
if (fl->files) {
|
||||
int i;
|
||||
for (i = 0; i < fl->size; i++)
|
||||
JLI_MemFree(fl->files[i]);
|
||||
JLI_MemFree(fl->files);
|
||||
}
|
||||
JLI_MemFree(fl);
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
FileList_ensureCapacity(FileList fl, int capacity)
|
||||
{
|
||||
if (fl->capacity < capacity) {
|
||||
while (fl->capacity < capacity)
|
||||
fl->capacity *= 2;
|
||||
fl->files = JLI_MemRealloc(fl->files,
|
||||
fl->capacity * sizeof(fl->files[0]));
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
FileList_add(FileList fl, char *file)
|
||||
{
|
||||
FileList_ensureCapacity(fl, fl->size+1);
|
||||
fl->files[fl->size++] = file;
|
||||
}
|
||||
|
||||
static void
|
||||
FileList_addSubstring(FileList fl, const char *beg, int len)
|
||||
{
|
||||
char *filename = (char *) JLI_MemAlloc(len+1);
|
||||
memcpy(filename, beg, len);
|
||||
filename[len] = '\0';
|
||||
FileList_ensureCapacity(fl, fl->size+1);
|
||||
fl->files[fl->size++] = filename;
|
||||
}
|
||||
|
||||
static char *
|
||||
FileList_join(FileList fl, char sep)
|
||||
{
|
||||
int i;
|
||||
int size;
|
||||
char *path;
|
||||
char *p;
|
||||
for (i = 0, size = 1; i < fl->size; i++)
|
||||
size += strlen(fl->files[i]) + 1;
|
||||
|
||||
path = JLI_MemAlloc(size);
|
||||
|
||||
for (i = 0, p = path; i < fl->size; i++) {
|
||||
int len = strlen(fl->files[i]);
|
||||
if (i > 0) *p++ = sep;
|
||||
memcpy(p, fl->files[i], len);
|
||||
p += len;
|
||||
}
|
||||
*p = '\0';
|
||||
|
||||
return path;
|
||||
}
|
||||
|
||||
static FileList
|
||||
FileList_split(const char *path, char sep)
|
||||
{
|
||||
const char *p, *q;
|
||||
int len = strlen(path);
|
||||
int count;
|
||||
FileList fl;
|
||||
for (count = 1, p = path; p < path + len; p++)
|
||||
count += (*p == sep);
|
||||
fl = FileList_new(count);
|
||||
for (p = path;;) {
|
||||
for (q = p; q <= path + len; q++) {
|
||||
if (*q == sep || *q == '\0') {
|
||||
FileList_addSubstring(fl, p, q - p);
|
||||
if (*q == '\0')
|
||||
return fl;
|
||||
p = q + 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static int
|
||||
isJarFileName(const char *filename)
|
||||
{
|
||||
int len = strlen(filename);
|
||||
return (len >= 4) &&
|
||||
(filename[len - 4] == '.') &&
|
||||
(equal(filename + len - 3, "jar") ||
|
||||
equal(filename + len - 3, "JAR")) &&
|
||||
/* Paranoia: Maybe filename is "DIR:foo.jar" */
|
||||
(strchr(filename, PATH_SEPARATOR) == NULL);
|
||||
}
|
||||
|
||||
static char *
|
||||
wildcardConcat(const char *wildcard, const char *basename)
|
||||
{
|
||||
int wildlen = strlen(wildcard);
|
||||
int baselen = strlen(basename);
|
||||
char *filename = (char *) JLI_MemAlloc(wildlen + baselen);
|
||||
/* Replace the trailing '*' with basename */
|
||||
memcpy(filename, wildcard, wildlen-1);
|
||||
memcpy(filename+wildlen-1, basename, baselen+1);
|
||||
return filename;
|
||||
}
|
||||
|
||||
static FileList
|
||||
wildcardFileList(const char *wildcard)
|
||||
{
|
||||
const char *basename;
|
||||
FileList fl = FileList_new(16);
|
||||
WildcardIterator it = WildcardIterator_for(wildcard);
|
||||
if (it == NULL) {
|
||||
FileList_free(fl);
|
||||
return NULL;
|
||||
}
|
||||
while ((basename = WildcardIterator_next(it)) != NULL)
|
||||
if (isJarFileName(basename))
|
||||
FileList_add(fl, wildcardConcat(wildcard, basename));
|
||||
WildcardIterator_close(it);
|
||||
return fl;
|
||||
}
|
||||
|
||||
static int
|
||||
isWildcard(const char *filename)
|
||||
{
|
||||
int len = strlen(filename);
|
||||
return (len > 0) &&
|
||||
(filename[len - 1] == '*') &&
|
||||
(len == 1 || IS_FILE_SEPARATOR(filename[len - 2])) &&
|
||||
(! exists(filename));
|
||||
}
|
||||
|
||||
static void
|
||||
FileList_expandWildcards(FileList fl)
|
||||
{
|
||||
int i, j;
|
||||
for (i = 0; i < fl->size; i++) {
|
||||
if (isWildcard(fl->files[i])) {
|
||||
FileList expanded = wildcardFileList(fl->files[i]);
|
||||
if (expanded != NULL && expanded->size > 0) {
|
||||
JLI_MemFree(fl->files[i]);
|
||||
FileList_ensureCapacity(fl, fl->size + expanded->size);
|
||||
for (j = fl->size - 1; j >= i+1; j--)
|
||||
fl->files[j+expanded->size-1] = fl->files[j];
|
||||
for (j = 0; j < expanded->size; j++)
|
||||
fl->files[i+j] = expanded->files[j];
|
||||
i += expanded->size - 1;
|
||||
fl->size += expanded->size - 1;
|
||||
/* fl expropriates expanded's elements. */
|
||||
expanded->size = 0;
|
||||
}
|
||||
FileList_free(expanded);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const char *
|
||||
JLI_WildcardExpandClasspath(const char *classpath)
|
||||
{
|
||||
char *expanded;
|
||||
FileList fl;
|
||||
|
||||
if (strchr(classpath, '*') == NULL)
|
||||
return classpath;
|
||||
fl = FileList_split(classpath, PATH_SEPARATOR);
|
||||
FileList_expandWildcards(fl);
|
||||
expanded = FileList_join(fl, PATH_SEPARATOR);
|
||||
FileList_free(fl);
|
||||
if (getenv("_JAVA_LAUNCHER_DEBUG") != 0)
|
||||
printf("Expanded wildcards:\n"
|
||||
" before: \"%s\"\n"
|
||||
" after : \"%s\"\n",
|
||||
classpath, expanded);
|
||||
return expanded;
|
||||
}
|
||||
|
||||
#ifdef DEBUG_WILDCARD
|
||||
static void
|
||||
wildcardExpandArgv(const char ***argv)
|
||||
{
|
||||
int i;
|
||||
for (i = 0; (*argv)[i]; i++) {
|
||||
if (equal((*argv)[i], "-cp") ||
|
||||
equal((*argv)[i], "-classpath")) {
|
||||
i++;
|
||||
(*argv)[i] = wildcardExpandClasspath((*argv)[i]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
debugPrintArgv(char *argv[])
|
||||
{
|
||||
int i;
|
||||
putchar('[');
|
||||
for (i = 0; argv[i]; i++) {
|
||||
if (i > 0) printf(", ");
|
||||
printf("\"%s\"", argv[i]);
|
||||
}
|
||||
printf("]\n");
|
||||
}
|
||||
|
||||
int
|
||||
main(int argc, char *argv[])
|
||||
{
|
||||
argv[0] = "java";
|
||||
wildcardExpandArgv((const char***)&argv);
|
||||
debugPrintArgv(argv);
|
||||
/* execvp("java", argv); */
|
||||
return 0;
|
||||
}
|
||||
#endif /* DEBUG_WILDCARD */
|
||||
|
||||
/* Cute little perl prototype implementation....
|
||||
|
||||
my $sep = ($^O =~ /^(Windows|cygwin)/) ? ";" : ":";
|
||||
|
||||
sub expand($) {
|
||||
opendir DIR, $_[0] or return $_[0];
|
||||
join $sep, map {"$_[0]/$_"} grep {/\.(jar|JAR)$/} readdir DIR;
|
||||
}
|
||||
|
||||
sub munge($) {
|
||||
join $sep,
|
||||
map {(! -r $_ and s/[\/\\]+\*$//) ? expand $_ : $_} split $sep, $_[0];
|
||||
}
|
||||
|
||||
for (my $i = 0; $i < @ARGV - 1; $i++) {
|
||||
$ARGV[$i+1] = munge $ARGV[$i+1] if $ARGV[$i] =~ /^-c(p|lasspath)$/;
|
||||
}
|
||||
|
||||
$ENV{CLASSPATH} = munge $ENV{CLASSPATH} if exists $ENV{CLASSPATH};
|
||||
@ARGV = ("java", @ARGV);
|
||||
print "@ARGV\n";
|
||||
exec @ARGV;
|
||||
|
||||
*/
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -1261,7 +1261,7 @@ JRT_LEAF(int, Runtime1::arraycopy(oopDesc* src, int src_pos, oopDesc* dst, int d
|
||||
|
||||
if (length == 0) return ac_ok;
|
||||
if (src->is_typeArray()) {
|
||||
Klass* const klass_oop = src->klass();
|
||||
Klass* klass_oop = src->klass();
|
||||
if (klass_oop != dst->klass()) return ac_failed;
|
||||
TypeArrayKlass* klass = TypeArrayKlass::cast(klass_oop);
|
||||
const int l2es = klass->log2_element_size();
|
||||
|
@ -211,12 +211,41 @@ bool ciInstanceKlass::is_java_lang_Object() const {

// ------------------------------------------------------------------
// ciInstanceKlass::uses_default_loader
bool ciInstanceKlass::uses_default_loader() {
bool ciInstanceKlass::uses_default_loader() const {
  // Note: We do not need to resolve the handle or enter the VM
  // in order to test null-ness.
  return _loader == NULL;
}

// ------------------------------------------------------------------

/**
 * Return basic type of boxed value for box klass or T_OBJECT if not.
 */
BasicType ciInstanceKlass::box_klass_type() const {
  if (uses_default_loader() && is_loaded()) {
    return SystemDictionary::box_klass_type(get_Klass());
  } else {
    return T_OBJECT;
  }
}

/**
 * Is this boxing klass?
 */
bool ciInstanceKlass::is_box_klass() const {
  return is_java_primitive(box_klass_type());
}

/**
 * Is this boxed value offset?
 */
bool ciInstanceKlass::is_boxed_value_offset(int offset) const {
  BasicType bt = box_klass_type();
  return is_java_primitive(bt) &&
         (offset == java_lang_boxing_object::value_offset_in_bytes(bt));
}

// ------------------------------------------------------------------
// ciInstanceKlass::is_in_package
//
@ -217,10 +217,14 @@ public:
  ciInstanceKlass* implementor();

  // Is the defining class loader of this class the default loader?
  bool uses_default_loader();
  bool uses_default_loader() const;

  bool is_java_lang_Object() const;

  BasicType box_klass_type() const;
  bool is_box_klass() const;
  bool is_boxed_value_offset(int offset) const;

  // Is this klass in the given package?
  bool is_in_package(const char* packagename) {
    return is_in_package(packagename, (int) strlen(packagename));
@ -1179,6 +1179,44 @@ bool ciMethod::has_jsrs () const { FETCH_FLAG_FROM_VM(has_jsrs);
bool ciMethod::is_accessor () const { FETCH_FLAG_FROM_VM(is_accessor); }
bool ciMethod::is_initializer () const { FETCH_FLAG_FROM_VM(is_initializer); }

bool ciMethod::is_boxing_method() const {
  if (holder()->is_box_klass()) {
    switch (intrinsic_id()) {
      case vmIntrinsics::_Boolean_valueOf:
      case vmIntrinsics::_Byte_valueOf:
      case vmIntrinsics::_Character_valueOf:
      case vmIntrinsics::_Short_valueOf:
      case vmIntrinsics::_Integer_valueOf:
      case vmIntrinsics::_Long_valueOf:
      case vmIntrinsics::_Float_valueOf:
      case vmIntrinsics::_Double_valueOf:
        return true;
      default:
        return false;
    }
  }
  return false;
}

bool ciMethod::is_unboxing_method() const {
  if (holder()->is_box_klass()) {
    switch (intrinsic_id()) {
      case vmIntrinsics::_booleanValue:
      case vmIntrinsics::_byteValue:
      case vmIntrinsics::_charValue:
      case vmIntrinsics::_shortValue:
      case vmIntrinsics::_intValue:
      case vmIntrinsics::_longValue:
      case vmIntrinsics::_floatValue:
      case vmIntrinsics::_doubleValue:
        return true;
      default:
        return false;
    }
  }
  return false;
}

BCEscapeAnalyzer *ciMethod::get_bcea() {
#ifdef COMPILER2
  if (_bcea == NULL) {
@ -298,6 +298,8 @@ class ciMethod : public ciMetadata {
  bool is_initializer () const;
  bool can_be_statically_bound() const { return _can_be_statically_bound; }
  void dump_replay_data(outputStream* st);
  bool is_boxing_method() const;
  bool is_unboxing_method() const;

  // Print the bytecodes of this method.
  void print_codes_on(outputStream* st);
@ -492,7 +492,9 @@ class CompileReplay : public StackObj {
      }
      Klass* k = parse_klass(CHECK);
      rec->oops_offsets[i] = offset;
      rec->oops_handles[i] = (jobject)(new KlassHandle(THREAD, k));
      KlassHandle *kh = NEW_C_HEAP_OBJ(KlassHandle, mtCompiler);
      ::new ((void*)kh) KlassHandle(THREAD, k);
      rec->oops_handles[i] = (jobject)kh;
    }
  }

@ -1,5 +1,5 @@
/*
 * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -242,8 +242,8 @@ static const juint MURMUR3_32_X86_CHECK_VALUE = 0xB0F57EE3;
void AltHashing::testMurmur3_32_ByteArray() {
  // printf("testMurmur3_32_ByteArray\n");

  jbyte* vector = new jbyte[256];
  jbyte* hashes = new jbyte[4 * 256];
  jbyte vector[256];
  jbyte hashes[4 * 256];

  for (int i = 0; i < 256; i++) {
    vector[i] = (jbyte) i;
@ -75,8 +75,8 @@ ConstantPool* BytecodeConstantPool::create_constant_pool(TRAPS) const {
    int idx = i + _orig->length();
    switch (entry._tag) {
      case BytecodeCPEntry::UTF8:
        cp->symbol_at_put(idx, entry._u.utf8);
        entry._u.utf8->increment_refcount();
        cp->symbol_at_put(idx, entry._u.utf8);
        break;
      case BytecodeCPEntry::KLASS:
        cp->unresolved_klass_at_put(
@ -3028,7 +3028,7 @@ AnnotationArray* ClassFileParser::assemble_annotations(u1* runtime_visible_annot
}


#ifndef PRODUCT
#ifdef ASSERT
static void parseAndPrintGenericSignatures(
    instanceKlassHandle this_klass, TRAPS) {
  assert(ParseAllGenericSignatures == true, "Shouldn't call otherwise");
@ -3053,7 +3053,7 @@ static void parseAndPrintGenericSignatures(
    }
  }
}
#endif // ndef PRODUCT
#endif // def ASSERT


instanceKlassHandle ClassFileParser::parse_super_class(int super_class_index,
@ -3114,9 +3114,6 @@ void ClassFileParser::layout_fields(Handle class_loader,

  // Field size and offset computation
  int nonstatic_field_size = _super_klass() == NULL ? 0 : _super_klass()->nonstatic_field_size();
#ifndef PRODUCT
  int orig_nonstatic_field_size = 0;
#endif
  int next_static_oop_offset;
  int next_static_double_offset;
  int next_static_word_offset;
@ -3201,25 +3198,6 @@ void ClassFileParser::layout_fields(Handle class_loader,

  first_nonstatic_oop_offset = 0; // will be set for first oop field

#ifndef PRODUCT
  if( PrintCompactFieldsSavings ) {
    next_nonstatic_double_offset = next_nonstatic_field_offset +
                                   (nonstatic_oop_count * heapOopSize);
    if ( nonstatic_double_count > 0 ) {
      next_nonstatic_double_offset = align_size_up(next_nonstatic_double_offset, BytesPerLong);
    }
    next_nonstatic_word_offset = next_nonstatic_double_offset +
                                 (nonstatic_double_count * BytesPerLong);
    next_nonstatic_short_offset = next_nonstatic_word_offset +
                                  (nonstatic_word_count * BytesPerInt);
    next_nonstatic_byte_offset = next_nonstatic_short_offset +
                                 (nonstatic_short_count * BytesPerShort);
    next_nonstatic_type_offset = align_size_up((next_nonstatic_byte_offset +
                                  nonstatic_byte_count ), heapOopSize );
    orig_nonstatic_field_size = nonstatic_field_size +
      ((next_nonstatic_type_offset - first_nonstatic_field_offset)/heapOopSize);
  }
#endif
  bool compact_fields = CompactFields;
  int allocation_style = FieldsAllocationStyle;
  if( allocation_style < 0 || allocation_style > 2 ) { // Out of range?
@ -3593,21 +3571,6 @@ void ClassFileParser::layout_fields(Handle class_loader,
                   first_nonstatic_oop_offset);

#ifndef PRODUCT
  if( PrintCompactFieldsSavings ) {
    ResourceMark rm;
    if( nonstatic_field_size < orig_nonstatic_field_size ) {
      tty->print("[Saved %d of %d bytes in %s]\n",
                 (orig_nonstatic_field_size - nonstatic_field_size)*heapOopSize,
                 orig_nonstatic_field_size*heapOopSize,
                 _class_name);
    } else if( nonstatic_field_size > orig_nonstatic_field_size ) {
      tty->print("[Wasted %d over %d bytes in %s]\n",
                 (nonstatic_field_size - orig_nonstatic_field_size)*heapOopSize,
                 orig_nonstatic_field_size*heapOopSize,
                 _class_name);
    }
  }

  if (PrintFieldLayout) {
    print_field_layout(_class_name,
                       _fields,
@ -253,22 +253,6 @@ void Dictionary::classes_do(void f(Klass*, TRAPS), TRAPS) {
  }
}


// All classes, and their class loaders
// (added for helpers that use HandleMarks and ResourceMarks)
// Don't iterate over placeholders
void Dictionary::classes_do(void f(Klass*, ClassLoaderData*, TRAPS), TRAPS) {
  for (int index = 0; index < table_size(); index++) {
    for (DictionaryEntry* probe = bucket(index);
                          probe != NULL;
                          probe = probe->next()) {
      Klass* k = probe->klass();
      f(k, probe->loader_data(), CHECK);
    }
  }
}


// All classes, and their class loaders
// Don't iterate over placeholders
void Dictionary::classes_do(void f(Klass*, ClassLoaderData*)) {

@ -90,7 +90,6 @@ public:
  void classes_do(void f(Klass*));
  void classes_do(void f(Klass*, TRAPS), TRAPS);
  void classes_do(void f(Klass*, ClassLoaderData*));
  void classes_do(void f(Klass*, ClassLoaderData*, TRAPS), TRAPS);

  void methods_do(void f(Method*));

@ -830,7 +830,7 @@ Klass* SystemDictionary::resolve_instance_class_or_null(Symbol* name, Handle cla
      Klass *kk;
      {
        MutexLocker mu(SystemDictionary_lock, THREAD);
        kk = find_class(name, ik->class_loader_data());
        kk = find_class(d_index, d_hash, name, ik->class_loader_data());
      }
      if (kk != NULL) {
        // No clean up is needed if the shared class has been entered
@ -1747,13 +1747,6 @@ void SystemDictionary::classes_do(void f(Klass*, ClassLoaderData*)) {
  dictionary()->classes_do(f);
}

// All classes, and their class loaders
// (added for helpers that use HandleMarks and ResourceMarks)
// Don't iterate over placeholders
void SystemDictionary::classes_do(void f(Klass*, ClassLoaderData*, TRAPS), TRAPS) {
  dictionary()->classes_do(f, CHECK);
}

void SystemDictionary::placeholders_do(void f(Symbol*)) {
  placeholders()->entries_do(f);
}

@ -313,10 +313,7 @@ public:
  static void classes_do(void f(Klass*, TRAPS), TRAPS);
  // All classes, and their class loaders
  static void classes_do(void f(Klass*, ClassLoaderData*));
  // All classes, and their class loaders
  // (added for helpers that use HandleMarks and ResourceMarks)
  static void classes_do(void f(Klass*, ClassLoaderData*, TRAPS), TRAPS);
  // All entries in the placeholder table and their class loaders

  static void placeholders_do(void f(Symbol*));

  // Iterate over all methods in all klasses in dictionary

@ -362,7 +362,7 @@ void TypeOrigin::print_on(outputStream* str) const {
}
#endif

void ErrorContext::details(outputStream* ss, Method* method) const {
void ErrorContext::details(outputStream* ss, const Method* method) const {
  if (is_valid()) {
    ss->print_cr("");
    ss->print_cr("Exception Details:");
@ -435,7 +435,7 @@ void ErrorContext::reason_details(outputStream* ss) const {
  ss->print_cr("");
}

void ErrorContext::location_details(outputStream* ss, Method* method) const {
void ErrorContext::location_details(outputStream* ss, const Method* method) const {
  if (_bci != -1 && method != NULL) {
    streamIndentor si(ss);
    const char* bytecode_name = "<invalid>";
@ -470,7 +470,7 @@ void ErrorContext::frame_details(outputStream* ss) const {
  }
}

void ErrorContext::bytecode_details(outputStream* ss, Method* method) const {
void ErrorContext::bytecode_details(outputStream* ss, const Method* method) const {
  if (method != NULL) {
    streamIndentor si(ss);
    ss->indent().print_cr("Bytecode:");
@ -479,7 +479,7 @@ void ErrorContext::bytecode_details(outputStream* ss, Method* method) const {
  }
}

void ErrorContext::handler_details(outputStream* ss, Method* method) const {
void ErrorContext::handler_details(outputStream* ss, const Method* method) const {
  if (method != NULL) {
    streamIndentor si(ss);
    ExceptionTable table(method);
@ -494,7 +494,7 @@ void ErrorContext::handler_details(outputStream* ss, Method* method) const {
  }
}

void ErrorContext::stackmap_details(outputStream* ss, Method* method) const {
void ErrorContext::stackmap_details(outputStream* ss, const Method* method) const {
  if (method != NULL && method->has_stackmap_table()) {
    streamIndentor si(ss);
    ss->indent().print_cr("Stackmap Table:");

@ -1,5 +1,5 @@
/*
 * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -224,7 +224,7 @@ class ErrorContext VALUE_OBJ_CLASS_SPEC {
    _expected.reset_frame();
  }

  void details(outputStream* ss, Method* method) const;
  void details(outputStream* ss, const Method* method) const;

#ifdef ASSERT
  void print_on(outputStream* str) const {
@ -237,12 +237,12 @@ class ErrorContext VALUE_OBJ_CLASS_SPEC {
#endif

 private:
  void location_details(outputStream* ss, Method* method) const;
  void location_details(outputStream* ss, const Method* method) const;
  void reason_details(outputStream* ss) const;
  void frame_details(outputStream* ss) const;
  void bytecode_details(outputStream* ss, Method* method) const;
  void handler_details(outputStream* ss, Method* method) const;
  void stackmap_details(outputStream* ss, Method* method) const;
  void bytecode_details(outputStream* ss, const Method* method) const;
  void handler_details(outputStream* ss, const Method* method) const;
  void stackmap_details(outputStream* ss, const Method* method) const;
};

// A new instance of this class is created for each class being verified

@ -49,7 +49,7 @@ extern "C" {
  }
}

#ifndef PRODUCT
#ifdef ASSERT
#define VM_SYMBOL_ENUM_NAME_BODY(name, string) #name "\0"
static const char* vm_symbol_enum_names =
  VM_SYMBOLS_DO(VM_SYMBOL_ENUM_NAME_BODY, VM_ALIAS_IGNORE)
@ -64,7 +64,7 @@ static const char* vm_symbol_enum_name(vmSymbols::SID sid) {
  }
  return string;
}
#endif //PRODUCT
#endif //ASSERT

// Put all the VM symbol strings in one place.
// Makes for a more compact libjvm.

@ -68,7 +68,7 @@
  template(java_lang_Float,                           "java/lang/Float")                          \
  template(java_lang_Double,                          "java/lang/Double")                         \
  template(java_lang_Byte,                            "java/lang/Byte")                           \
  template(java_lang_Byte_Cache,                      "java/lang/Byte$ByteCache")                 \
  template(java_lang_Byte_ByteCache,                  "java/lang/Byte$ByteCache")                 \
  template(java_lang_Short,                           "java/lang/Short")                          \
  template(java_lang_Short_ShortCache,                "java/lang/Short$ShortCache")               \
  template(java_lang_Integer,                         "java/lang/Integer")                        \
@ -517,13 +517,18 @@
  template(sun_management_ManagementFactory,          "sun/management/ManagementFactory")         \
  template(sun_management_Sensor,                     "sun/management/Sensor")                    \
  template(sun_management_Agent,                      "sun/management/Agent")                     \
  template(sun_management_DiagnosticCommandImpl,      "sun/management/DiagnosticCommandImpl")     \
  template(sun_management_GarbageCollectorImpl,       "sun/management/GarbageCollectorImpl")      \
  template(sun_management_ManagementFactoryHelper,    "sun/management/ManagementFactoryHelper")   \
  template(getDiagnosticCommandMBean_name,            "getDiagnosticCommandMBean")                \
  template(getDiagnosticCommandMBean_signature,       "()Lcom/sun/management/DiagnosticCommandMBean;") \
  template(getGcInfoBuilder_name,                     "getGcInfoBuilder")                         \
  template(getGcInfoBuilder_signature,                "()Lsun/management/GcInfoBuilder;")         \
  template(com_sun_management_GcInfo,                 "com/sun/management/GcInfo")                \
  template(com_sun_management_GcInfo_constructor_signature, "(Lsun/management/GcInfoBuilder;JJJ[Ljava/lang/management/MemoryUsage;[Ljava/lang/management/MemoryUsage;[Ljava/lang/Object;)V") \
  template(createGCNotification_name,                 "createGCNotification")                     \
  template(createGCNotification_signature,            "(JLjava/lang/String;Ljava/lang/String;Ljava/lang/String;Lcom/sun/management/GcInfo;)V") \
  template(createDiagnosticFrameworkNotification_name, "createDiagnosticFrameworkNotification")   \
  template(createMemoryPoolMBean_name,                "createMemoryPoolMBean")                    \
  template(createMemoryManagerMBean_name,             "createMemoryManagerMBean")                 \
  template(createGarbageCollectorMBean_name,          "createGarbageCollectorMBean")              \

@ -1794,6 +1794,19 @@ void nmethod::metadata_do(void f(Metadata*)) {
          Metadata* md = r->metadata_value();
          f(md);
        }
      } else if (iter.type() == relocInfo::virtual_call_type) {
        // Check compiledIC holders associated with this nmethod
        CompiledIC *ic = CompiledIC_at(iter.reloc());
        if (ic->is_icholder_call()) {
          CompiledICHolder* cichk = ic->cached_icholder();
          f(cichk->holder_method());
          f(cichk->holder_klass());
        } else {
          Metadata* ic_oop = ic->cached_metadata();
          if (ic_oop != NULL) {
            f(ic_oop);
          }
        }
      }
    }
  }
@ -1804,6 +1817,7 @@ void nmethod::metadata_do(void f(Metadata*)) {
    Metadata* md = *p;
    f(md);
  }

  // Call function Method*, not embedded in these other places.
  if (_method != NULL) f(_method);
}

@ -1854,8 +1854,10 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) {
    tty->print("%7d ", (int) tty->time_stamp().milliseconds());  // print timestamp
    tty->print("%4d ", compile_id);    // print compilation number
    tty->print("%s ", (is_osr ? "%" : " "));
    int code_size = (task->code() == NULL) ? 0 : task->code()->total_size();
    tty->print_cr("size: %d time: %d inlined: %d bytes", code_size, (int)time.milliseconds(), task->num_inlined_bytecodes());
    if (task->code() != NULL) {
      tty->print("size: %d(%d) ", task->code()->total_size(), task->code()->insts_size());
    }
    tty->print_cr("time: %d inlined: %d bytes", (int)time.milliseconds(), task->num_inlined_bytecodes());
  }

  if (PrintCodeCacheOnCompilation)

@ -1,5 +1,5 @@
/*
 * Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -52,7 +52,7 @@ ConcurrentMarkSweepPolicy::ConcurrentMarkSweepPolicy() {
}

void ConcurrentMarkSweepPolicy::initialize_generations() {
  _generations = new GenerationSpecPtr[number_of_generations()];
  _generations = NEW_C_HEAP_ARRAY3(GenerationSpecPtr, number_of_generations(), mtGC, 0, AllocFailStrategy::RETURN_NULL);
  if (_generations == NULL)
    vm_exit_during_initialization("Unable to allocate gen spec");


@ -193,7 +193,8 @@ ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
     FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
  CardGeneration(rs, initial_byte_size, level, ct),
  _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
  _debug_collection_type(Concurrent_collection_type)
  _debug_collection_type(Concurrent_collection_type),
  _did_compact(false)
{
  HeapWord* bottom = (HeapWord*) _virtual_space.low();
  HeapWord* end    = (HeapWord*) _virtual_space.high();
@ -691,8 +692,7 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
  _cmsGen ->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);

  // Clip CMSBootstrapOccupancy between 0 and 100.
  _bootstrap_occupancy = ((double)MIN2((uintx)100, MAX2((uintx)0, CMSBootstrapOccupancy)))
                         /(double)100;
  _bootstrap_occupancy = ((double)CMSBootstrapOccupancy)/(double)100;

  _full_gcs_since_conc_gc = 0;

@ -917,18 +917,15 @@ void ConcurrentMarkSweepGeneration::compute_new_size() {
    return;
  }

  // Compute some numbers about the state of the heap.
  const size_t used_after_gc = used();
  const size_t capacity_after_gc = capacity();
  // The heap has been compacted but not reset yet.
  // Any metric such as free() or used() will be incorrect.

  CardGeneration::compute_new_size();

  // Reset again after a possible resizing
  cmsSpace()->reset_after_compaction();

  assert(used() == used_after_gc && used_after_gc <= capacity(),
         err_msg("used: " SIZE_FORMAT " used_after_gc: " SIZE_FORMAT
         " capacity: " SIZE_FORMAT, used(), used_after_gc, capacity()));
  if (did_compact()) {
    cmsSpace()->reset_after_compaction();
  }
}

void ConcurrentMarkSweepGeneration::compute_new_size_free_list() {
@ -1578,6 +1575,8 @@ bool CMSCollector::shouldConcurrentCollect() {
  return false;
}

void CMSCollector::set_did_compact(bool v) { _cmsGen->set_did_compact(v); }

// Clear _expansion_cause fields of constituent generations
void CMSCollector::clear_expansion_cause() {
  _cmsGen->clear_expansion_cause();
@ -1675,7 +1674,6 @@ void CMSCollector::collect(bool full,
  }
  acquire_control_and_collect(full, clear_all_soft_refs);
  _full_gcs_since_conc_gc++;

}

void CMSCollector::request_full_gc(unsigned int full_gc_count) {
@ -1857,6 +1855,7 @@ NOT_PRODUCT(
    }
  }

  set_did_compact(should_compact);
  if (should_compact) {
    // If the collection is being acquired from the background
    // collector, there may be references on the discovered
@ -2718,6 +2717,7 @@ void CMSCollector::gc_epilogue(bool full) {
    Chunk::clean_chunk_pool();
  }

  set_did_compact(false);
  _between_prologue_and_epilogue = false;  // ready for next cycle
}


@ -604,6 +604,8 @@ class CMSCollector: public CHeapObj<mtGC> {
  ConcurrentMarkSweepPolicy* _collector_policy;
  ConcurrentMarkSweepPolicy* collector_policy() { return _collector_policy; }

  void set_did_compact(bool v);

  // XXX Move these to CMSStats ??? FIX ME !!!
  elapsedTimer _inter_sweep_timer;   // time between sweeps
  elapsedTimer _intra_sweep_timer;   // time _in_ sweeps
@ -1081,6 +1083,10 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {

  CollectionTypes _debug_collection_type;

  // True if a compactiing collection was done.
  bool _did_compact;
  bool did_compact() { return _did_compact; }

  // Fraction of current occupancy at which to start a CMS collection which
  // will collect this generation (at least).
  double _initiating_occupancy;
@ -1121,6 +1127,8 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
  // Adaptive size policy
  CMSAdaptiveSizePolicy* size_policy();

  void set_did_compact(bool v) { _did_compact = v; }

  bool refs_discovery_is_atomic() const { return false; }
  bool refs_discovery_is_mt() const {
    // Note: CMS does MT-discovery during the parallel-remark

@ -1,5 +1,5 @@
/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -26,40 +26,12 @@
|
||||
#include "gc_implementation/g1/concurrentG1Refine.hpp"
|
||||
#include "gc_implementation/g1/concurrentG1RefineThread.hpp"
|
||||
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
|
||||
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
|
||||
#include "gc_implementation/g1/g1GCPhaseTimes.hpp"
|
||||
#include "gc_implementation/g1/g1RemSet.hpp"
|
||||
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
|
||||
#include "memory/space.inline.hpp"
|
||||
#include "runtime/atomic.hpp"
|
||||
#include "runtime/java.hpp"
|
||||
#include "utilities/copy.hpp"
|
||||
#include "gc_implementation/g1/g1HotCardCache.hpp"
|
||||
|
||||
// Possible sizes for the card counts cache: odd primes that roughly double in size.
|
||||
// (See jvmtiTagMap.cpp).
|
||||
|
||||
#define MAX_SIZE ((size_t) -1)
|
||||
|
||||
size_t ConcurrentG1Refine::_cc_cache_sizes[] = {
|
||||
16381, 32771, 76831, 150001, 307261,
|
||||
614563, 1228891, 2457733, 4915219, 9830479,
|
||||
19660831, 39321619, 78643219, 157286461, MAX_SIZE
|
||||
};
|
||||
|
||||
ConcurrentG1Refine::ConcurrentG1Refine() :
|
||||
_card_counts(NULL), _card_epochs(NULL),
|
||||
_n_card_counts(0), _max_cards(0), _max_n_card_counts(0),
|
||||
_cache_size_index(0), _expand_card_counts(false),
|
||||
_hot_cache(NULL),
|
||||
_def_use_cache(false), _use_cache(false),
|
||||
// We initialize the epochs of the array to 0. By initializing
|
||||
// _n_periods to 1 and not 0 we automatically invalidate all the
|
||||
// entries on the array. Otherwise we might accidentally think that
|
||||
// we claimed a card that was in fact never set (see CR7033292).
|
||||
_n_periods(1),
|
||||
_threads(NULL), _n_threads(0)
|
||||
ConcurrentG1Refine::ConcurrentG1Refine(G1CollectedHeap* g1h) :
|
||||
_threads(NULL), _n_threads(0),
|
||||
_hot_card_cache(g1h)
|
||||
{
|
||||
|
||||
// Ergomonically select initial concurrent refinement parameters
|
||||
if (FLAG_IS_DEFAULT(G1ConcRefinementGreenZone)) {
|
||||
FLAG_SET_DEFAULT(G1ConcRefinementGreenZone, MAX2<int>(ParallelGCThreads, 1));
|
||||
@ -75,13 +47,17 @@ ConcurrentG1Refine::ConcurrentG1Refine() :
|
||||
FLAG_SET_DEFAULT(G1ConcRefinementRedZone, yellow_zone() * 2);
|
||||
}
|
||||
set_red_zone(MAX2<int>(G1ConcRefinementRedZone, yellow_zone()));
|
||||
|
||||
_n_worker_threads = thread_num();
|
||||
// We need one extra thread to do the young gen rset size sampling.
|
||||
_n_threads = _n_worker_threads + 1;
|
||||
|
||||
reset_threshold_step();
|
||||
|
||||
_threads = NEW_C_HEAP_ARRAY(ConcurrentG1RefineThread*, _n_threads, mtGC);
|
||||
|
||||
int worker_id_offset = (int)DirtyCardQueueSet::num_par_ids();
|
||||
|
||||
ConcurrentG1RefineThread *next = NULL;
|
||||
for (int i = _n_threads - 1; i >= 0; i--) {
|
||||
ConcurrentG1RefineThread* t = new ConcurrentG1RefineThread(this, next, worker_id_offset, i);
|
||||
@ -100,74 +76,8 @@ void ConcurrentG1Refine::reset_threshold_step() {
|
||||
}
|
||||
}
|
||||
|
||||
int ConcurrentG1Refine::thread_num() {
|
||||
return MAX2<int>((G1ConcRefinementThreads > 0) ? G1ConcRefinementThreads : ParallelGCThreads, 1);
|
||||
}
|
||||
|
||||
void ConcurrentG1Refine::init() {
|
||||
if (G1ConcRSLogCacheSize > 0) {
|
||||
_g1h = G1CollectedHeap::heap();
|
||||
|
||||
_max_cards = _g1h->max_capacity() >> CardTableModRefBS::card_shift;
|
||||
_max_n_card_counts = _max_cards * G1MaxHotCardCountSizePercent / 100;
|
||||
|
||||
size_t max_card_num = ((size_t)1 << (sizeof(unsigned)*BitsPerByte-1)) - 1;
|
||||
guarantee(_max_cards < max_card_num, "card_num representation");
|
||||
|
||||
// We need _n_card_counts to be less than _max_n_card_counts here
|
||||
// so that the expansion call (below) actually allocates the
|
||||
// _counts and _epochs arrays.
|
||||
assert(_n_card_counts == 0, "pre-condition");
|
||||
assert(_max_n_card_counts > 0, "pre-condition");
|
||||
|
||||
// Find the index into cache size array that is of a size that's
|
||||
// large enough to hold desired_sz.
|
||||
size_t desired_sz = _max_cards / InitialCacheFraction;
|
||||
int desired_sz_index = 0;
|
||||
while (_cc_cache_sizes[desired_sz_index] < desired_sz) {
|
||||
desired_sz_index += 1;
|
||||
assert(desired_sz_index < MAX_CC_CACHE_INDEX, "invariant");
|
||||
}
|
||||
assert(desired_sz_index < MAX_CC_CACHE_INDEX, "invariant");
|
||||
|
||||
// If the desired_sz value is between two sizes then
|
||||
// _cc_cache_sizes[desired_sz_index-1] < desired_sz <= _cc_cache_sizes[desired_sz_index]
|
||||
// we will start with the lower size in the optimistic expectation that
|
||||
// we will not need to expand up. Note desired_sz_index could also be 0.
|
||||
if (desired_sz_index > 0 &&
|
||||
_cc_cache_sizes[desired_sz_index] > desired_sz) {
|
||||
desired_sz_index -= 1;
|
||||
}
|
||||
|
||||
if (!expand_card_count_cache(desired_sz_index)) {
|
||||
// Allocation was unsuccessful - exit
|
||||
vm_exit_during_initialization("Could not reserve enough space for card count cache");
|
||||
}
|
||||
assert(_n_card_counts > 0, "post-condition");
|
||||
assert(_cache_size_index == desired_sz_index, "post-condition");
|
||||
|
||||
Copy::fill_to_bytes(&_card_counts[0],
|
||||
_n_card_counts * sizeof(CardCountCacheEntry));
|
||||
Copy::fill_to_bytes(&_card_epochs[0], _n_card_counts * sizeof(CardEpochCacheEntry));
|
||||
|
||||
ModRefBarrierSet* bs = _g1h->mr_bs();
|
||||
guarantee(bs->is_a(BarrierSet::CardTableModRef), "Precondition");
|
||||
_ct_bs = (CardTableModRefBS*)bs;
|
||||
_ct_bot = _ct_bs->byte_for_const(_g1h->reserved_region().start());
|
||||
|
||||
_def_use_cache = true;
|
||||
_use_cache = true;
|
||||
_hot_cache_size = (1 << G1ConcRSLogCacheSize);
|
||||
_hot_cache = NEW_C_HEAP_ARRAY(jbyte*, _hot_cache_size, mtGC);
|
||||
_n_hot = 0;
|
||||
_hot_cache_idx = 0;
|
||||
|
||||
// For refining the cards in the hot cache in parallel
|
||||
int n_workers = (ParallelGCThreads > 0 ?
|
||||
_g1h->workers()->total_workers() : 1);
|
||||
_hot_cache_par_chunk_size = MAX2(1, _hot_cache_size / n_workers);
|
||||
_hot_cache_par_claimed_idx = 0;
|
||||
}
|
||||
_hot_card_cache.initialize();
|
||||
}
|
||||
|
||||
void ConcurrentG1Refine::stop() {
|
||||
@ -188,17 +98,6 @@ void ConcurrentG1Refine::reinitialize_threads() {
|
||||
}
|
||||
|
||||
ConcurrentG1Refine::~ConcurrentG1Refine() {
|
||||
if (G1ConcRSLogCacheSize > 0) {
|
||||
// Please see the comment in allocate_card_count_cache
|
||||
// for why we call os::malloc() and os::free() directly.
|
||||
assert(_card_counts != NULL, "Logic");
|
||||
os::free(_card_counts, mtGC);
|
||||
assert(_card_epochs != NULL, "Logic");
|
||||
os::free(_card_epochs, mtGC);
|
||||
|
||||
assert(_hot_cache != NULL, "Logic");
|
||||
FREE_C_HEAP_ARRAY(jbyte*, _hot_cache, mtGC);
|
||||
}
|
||||
if (_threads != NULL) {
|
||||
for (int i = 0; i < _n_threads; i++) {
|
||||
delete _threads[i];
|
||||
@ -215,317 +114,10 @@ void ConcurrentG1Refine::threads_do(ThreadClosure *tc) {
|
||||
}
|
||||
}
|
||||
|
||||
bool ConcurrentG1Refine::is_young_card(jbyte* card_ptr) {
|
||||
HeapWord* start = _ct_bs->addr_for(card_ptr);
|
||||
HeapRegion* r = _g1h->heap_region_containing(start);
|
||||
if (r != NULL && r->is_young()) {
|
||||
return true;
|
||||
}
|
||||
// This card is not associated with a heap region
|
||||
// so can't be young.
|
||||
return false;
|
||||
}
|
||||
|
||||
jbyte* ConcurrentG1Refine::add_card_count(jbyte* card_ptr, int* count, bool* defer) {
|
||||
unsigned new_card_num = ptr_2_card_num(card_ptr);
|
||||
unsigned bucket = hash(new_card_num);
|
||||
assert(0 <= bucket && bucket < _n_card_counts, "Bounds");
|
||||
|
||||
CardCountCacheEntry* count_ptr = &_card_counts[bucket];
|
||||
CardEpochCacheEntry* epoch_ptr = &_card_epochs[bucket];
|
||||
|
||||
// We have to construct a new entry if we haven't updated the counts
|
||||
// during the current period, or if the count was updated for a
|
||||
// different card number.
|
||||
unsigned int new_epoch = (unsigned int) _n_periods;
|
||||
julong new_epoch_entry = make_epoch_entry(new_card_num, new_epoch);
|
||||
|
||||
while (true) {
|
||||
// Fetch the previous epoch value
|
||||
julong prev_epoch_entry = epoch_ptr->_value;
|
||||
julong cas_res;
|
||||
|
||||
if (extract_epoch(prev_epoch_entry) != new_epoch) {
|
||||
// This entry has not yet been updated during this period.
|
||||
// Note: we update the epoch value atomically to ensure
|
||||
// that there is only one winner that updates the cached
|
||||
// card_ptr value even though all the refine threads share
|
||||
// the same epoch value.
|
||||
|
||||
cas_res = (julong) Atomic::cmpxchg((jlong) new_epoch_entry,
|
||||
(volatile jlong*)&epoch_ptr->_value,
|
||||
(jlong) prev_epoch_entry);
|
||||
|
||||
if (cas_res == prev_epoch_entry) {
|
||||
// We have successfully won the race to update the
|
||||
// epoch and card_num value. Make it look like the
|
||||
// count and eviction count were previously cleared.
|
||||
count_ptr->_count = 1;
|
||||
count_ptr->_evict_count = 0;
|
||||
*count = 0;
|
||||
// We can defer the processing of card_ptr
|
||||
*defer = true;
|
||||
return card_ptr;
|
||||
}
|
||||
// We did not win the race to update the epoch field, so some other
|
||||
// thread must have done it. The value that gets returned by CAS
|
||||
// should be the new epoch value.
|
||||
assert(extract_epoch(cas_res) == new_epoch, "unexpected epoch");
|
||||
// We could 'continue' here or just re-read the previous epoch value
|
||||
prev_epoch_entry = epoch_ptr->_value;
|
||||
}
|
||||
|
||||
// The epoch entry for card_ptr has been updated during this period.
|
||||
unsigned old_card_num = extract_card_num(prev_epoch_entry);
|
||||
|
||||
// The card count that will be returned to caller
|
||||
*count = count_ptr->_count;
|
||||
|
||||
// Are we updating the count for the same card?
|
||||
if (new_card_num == old_card_num) {
|
||||
// Same card - just update the count. We could have more than one
|
||||
// thread racing to update count for the current card. It should be
|
||||
// OK not to use a CAS as the only penalty should be some missed
|
||||
// increments of the count which delays identifying the card as "hot".
|
||||
|
||||
if (*count < max_jubyte) count_ptr->_count++;
|
||||
// We can defer the processing of card_ptr
|
||||
*defer = true;
|
||||
return card_ptr;
|
||||
}
|
||||
|
||||
// Different card - evict old card info
|
||||
if (count_ptr->_evict_count < max_jubyte) count_ptr->_evict_count++;
|
||||
if (count_ptr->_evict_count > G1CardCountCacheExpandThreshold) {
|
||||
// Trigger a resize the next time we clear
|
||||
_expand_card_counts = true;
|
||||
}
|
||||
|
||||
cas_res = (julong) Atomic::cmpxchg((jlong) new_epoch_entry,
|
||||
(volatile jlong*)&epoch_ptr->_value,
|
||||
(jlong) prev_epoch_entry);
|
||||
|
||||
if (cas_res == prev_epoch_entry) {
|
||||
// We successfully updated the card num value in the epoch entry
|
||||
count_ptr->_count = 0; // initialize counter for new card num
|
||||
jbyte* old_card_ptr = card_num_2_ptr(old_card_num);
|
||||
|
||||
// Even though the region containg the card at old_card_num was not
|
||||
// in the young list when old_card_num was recorded in the epoch
|
||||
// cache it could have been added to the free list and subsequently
|
||||
// added to the young list in the intervening time. See CR 6817995.
|
||||
// We do not deal with this case here - it will be handled in
|
||||
// HeapRegion::oops_on_card_seq_iterate_careful after it has been
|
||||
// determined that the region containing the card has been allocated
|
||||
// to, and it's safe to check the young type of the region.
|
||||
|
||||
// We do not want to defer processing of card_ptr in this case
|
||||
// (we need to refine old_card_ptr and card_ptr)
|
||||
*defer = false;
|
||||
return old_card_ptr;
|
||||
}
|
||||
// Someone else beat us - try again.
|
||||
}
|
||||
}
|
||||
|
||||
jbyte* ConcurrentG1Refine::cache_insert(jbyte* card_ptr, bool* defer) {
|
||||
int count;
|
||||
jbyte* cached_ptr = add_card_count(card_ptr, &count, defer);
|
||||
assert(cached_ptr != NULL, "bad cached card ptr");
|
||||
|
||||
// We've just inserted a card pointer into the card count cache
|
||||
// and got back the card that we just inserted or (evicted) the
|
||||
// previous contents of that count slot.
|
||||
|
||||
// The card we got back could be in a young region. When the
|
||||
// returned card (if evicted) was originally inserted, we had
|
||||
// determined that its containing region was not young. However
|
||||
// it is possible for the region to be freed during a cleanup
|
||||
// pause, then reallocated and tagged as young which will result
|
||||
// in the returned card residing in a young region.
|
||||
//
|
||||
// We do not deal with this case here - the change from non-young
|
||||
// to young could be observed at any time - it will be handled in
|
||||
// HeapRegion::oops_on_card_seq_iterate_careful after it has been
|
||||
// determined that the region containing the card has been allocated
|
||||
// to.
|
||||
|
||||
// The card pointer we obtained from card count cache is not hot
|
||||
// so do not store it in the cache; return it for immediate
|
||||
// refining.
|
||||
if (count < G1ConcRSHotCardLimit) {
|
||||
return cached_ptr;
|
||||
}
|
||||
|
||||
// Otherwise, the pointer we got from the _card_counts cache is hot.
|
||||
jbyte* res = NULL;
|
||||
MutexLockerEx x(HotCardCache_lock, Mutex::_no_safepoint_check_flag);
|
||||
if (_n_hot == _hot_cache_size) {
|
||||
res = _hot_cache[_hot_cache_idx];
|
||||
_n_hot--;
|
||||
}
|
||||
// Now _n_hot < _hot_cache_size, and we can insert at _hot_cache_idx.
|
||||
_hot_cache[_hot_cache_idx] = cached_ptr;
|
||||
_hot_cache_idx++;
|
||||
if (_hot_cache_idx == _hot_cache_size) _hot_cache_idx = 0;
|
||||
_n_hot++;
|
||||
|
||||
// The card obtained from the hot card cache could be in a young
|
||||
// region. See above on how this can happen.
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
void ConcurrentG1Refine::clean_up_cache(int worker_i,
|
||||
G1RemSet* g1rs,
|
||||
DirtyCardQueue* into_cset_dcq) {
|
||||
assert(!use_cache(), "cache should be disabled");
|
||||
int start_idx;
|
||||
|
||||
while ((start_idx = _hot_cache_par_claimed_idx) < _n_hot) { // read once
|
||||
int end_idx = start_idx + _hot_cache_par_chunk_size;
|
||||
|
||||
if (start_idx ==
|
||||
Atomic::cmpxchg(end_idx, &_hot_cache_par_claimed_idx, start_idx)) {
|
||||
// The current worker has successfully claimed the chunk [start_idx..end_idx)
|
||||
end_idx = MIN2(end_idx, _n_hot);
|
||||
for (int i = start_idx; i < end_idx; i++) {
|
||||
jbyte* entry = _hot_cache[i];
|
||||
if (entry != NULL) {
|
||||
if (g1rs->concurrentRefineOneCard(entry, worker_i, true)) {
|
||||
// 'entry' contains references that point into the current
|
||||
// collection set. We need to record 'entry' in the DCQS
|
||||
// that's used for that purpose.
|
||||
//
|
||||
// The only time we care about recording cards that contain
|
||||
// references that point into the collection set is during
|
||||
// RSet updating while within an evacuation pause.
|
||||
// In this case worker_i should be the id of a GC worker thread
|
||||
assert(SafepointSynchronize::is_at_safepoint(), "not during an evacuation pause");
|
||||
assert(worker_i < (int) (ParallelGCThreads == 0 ? 1 : ParallelGCThreads), "incorrect worker id");
|
||||
into_cset_dcq->enqueue(entry);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// The arrays used to hold the card counts and the epochs must have
|
||||
// a 1:1 correspondence. Hence they are allocated and freed together
|
||||
// Returns true if the allocations of both the counts and epochs
|
||||
// were successful; false otherwise.
|
||||
bool ConcurrentG1Refine::allocate_card_count_cache(size_t n,
|
||||
CardCountCacheEntry** counts,
|
||||
CardEpochCacheEntry** epochs) {
|
||||
// We call the allocation/free routines directly for the counts
|
||||
// and epochs arrays. The NEW_C_HEAP_ARRAY/FREE_C_HEAP_ARRAY
|
||||
// macros call AllocateHeap and FreeHeap respectively.
|
||||
// AllocateHeap will call vm_exit_out_of_memory in the event
|
||||
// of an allocation failure and abort the JVM. With the
|
||||
// _counts/epochs arrays we only need to abort the JVM if the
|
||||
// initial allocation of these arrays fails.
|
||||
//
|
||||
// Additionally AllocateHeap/FreeHeap do some tracing of
|
||||
// allocate/free calls so calling one without calling the
|
||||
// other can cause inconsistencies in the tracing. So we
|
||||
// call neither.
|
||||
|
||||
assert(*counts == NULL, "out param");
|
||||
assert(*epochs == NULL, "out param");
|
||||
|
||||
size_t counts_size = n * sizeof(CardCountCacheEntry);
|
||||
size_t epochs_size = n * sizeof(CardEpochCacheEntry);
|
||||
|
||||
*counts = (CardCountCacheEntry*) os::malloc(counts_size, mtGC);
|
||||
if (*counts == NULL) {
|
||||
// allocation was unsuccessful
|
||||
return false;
|
||||
}
|
||||
|
||||
*epochs = (CardEpochCacheEntry*) os::malloc(epochs_size, mtGC);
|
||||
if (*epochs == NULL) {
|
||||
// allocation was unsuccessful - free counts array
|
||||
assert(*counts != NULL, "must be");
|
||||
os::free(*counts, mtGC);
|
||||
*counts = NULL;
|
||||
return false;
|
||||
}
|
||||
|
||||
// We successfully allocated both counts and epochs
|
||||
return true;
|
||||
}
|
||||
|
||||
// Returns true if the card counts/epochs cache was
|
||||
// successfully expanded; false otherwise.
|
||||
bool ConcurrentG1Refine::expand_card_count_cache(int cache_size_idx) {
|
||||
// Can we expand the card count and epoch tables?
|
||||
if (_n_card_counts < _max_n_card_counts) {
|
||||
assert(cache_size_idx >= 0 && cache_size_idx < MAX_CC_CACHE_INDEX, "oob");
|
||||
|
||||
size_t cache_size = _cc_cache_sizes[cache_size_idx];
|
||||
// Make sure we don't go bigger than we will ever need
|
||||
cache_size = MIN2(cache_size, _max_n_card_counts);
|
||||
|
||||
// Should we expand the card count and card epoch tables?
|
||||
if (cache_size > _n_card_counts) {
|
||||
// We have been asked to allocate new, larger, arrays for
|
||||
// the card counts and the epochs. Attempt the allocation
|
||||
// of both before we free the existing arrays in case
|
||||
// the allocation is unsuccessful...
|
||||
CardCountCacheEntry* counts = NULL;
|
||||
CardEpochCacheEntry* epochs = NULL;
|
||||
|
||||
if (allocate_card_count_cache(cache_size, &counts, &epochs)) {
|
||||
// Allocation was successful.
|
||||
// We can just free the old arrays; we're
|
||||
// not interested in preserving the contents
|
||||
if (_card_counts != NULL) os::free(_card_counts, mtGC);
|
||||
if (_card_epochs != NULL) os::free(_card_epochs, mtGC);
|
||||
|
||||
// Cache the size of the arrays and the index that got us there.
|
||||
_n_card_counts = cache_size;
|
||||
_cache_size_index = cache_size_idx;
|
||||
|
||||
_card_counts = counts;
|
||||
_card_epochs = epochs;
|
||||
|
||||
// We successfully allocated/expanded the caches.
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// We did not successfully expand the caches.
|
||||
return false;
|
||||
}
|
||||
|
||||
void ConcurrentG1Refine::clear_and_record_card_counts() {
|
||||
if (G1ConcRSLogCacheSize == 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
double start = os::elapsedTime();
|
||||
|
||||
if (_expand_card_counts) {
|
||||
int new_idx = _cache_size_index + 1;
|
||||
|
||||
if (expand_card_count_cache(new_idx)) {
|
||||
// Allocation was successful and _n_card_counts has
|
||||
// been updated to the new size. We only need to clear
|
||||
// the epochs so we don't read a bogus epoch value
|
||||
// when inserting a card into the hot card cache.
|
||||
Copy::fill_to_bytes(&_card_epochs[0], _n_card_counts * sizeof(CardEpochCacheEntry));
|
||||
}
|
||||
_expand_card_counts = false;
|
||||
}
|
||||
|
||||
int this_epoch = (int) _n_periods;
|
||||
assert((this_epoch+1) <= max_jint, "to many periods");
|
||||
// Update epoch
|
||||
_n_periods++;
|
||||
double cc_clear_time_ms = (os::elapsedTime() - start) * 1000;
|
||||
_g1h->g1_policy()->phase_times()->record_cc_clear_time_ms(cc_clear_time_ms);
|
||||
int ConcurrentG1Refine::thread_num() {
|
||||
int n_threads = (G1ConcRefinementThreads > 0) ? G1ConcRefinementThreads
|
||||
: ParallelGCThreads;
|
||||
return MAX2<int>(n_threads, 1);
|
||||
}
|
||||
|
||||
void ConcurrentG1Refine::print_worker_threads_on(outputStream* st) const {
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -25,13 +25,15 @@
|
||||
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTG1REFINE_HPP
|
||||
#define SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTG1REFINE_HPP
|
||||
|
||||
#include "gc_implementation/g1/g1HotCardCache.hpp"
|
||||
#include "memory/allocation.hpp"
|
||||
#include "memory/cardTableModRefBS.hpp"
|
||||
#include "runtime/thread.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
|
||||
// Forward decl
|
||||
class ConcurrentG1RefineThread;
|
||||
class G1CollectedHeap;
|
||||
class G1HotCardCache;
|
||||
class G1RemSet;
|
||||
|
||||
class ConcurrentG1Refine: public CHeapObj<mtGC> {
|
||||
@ -61,141 +63,14 @@ class ConcurrentG1Refine: public CHeapObj<mtGC> {
|
||||
|
||||
int _thread_threshold_step;
|
||||
|
||||
// We delay the refinement of 'hot' cards using the hot card cache.
|
||||
G1HotCardCache _hot_card_cache;
|
||||
|
||||
// Reset the threshold step value based of the current zone boundaries.
|
||||
void reset_threshold_step();
|
||||
|
||||
// The cache for card refinement.
|
||||
bool _use_cache;
|
||||
bool _def_use_cache;
|
||||
|
||||
size_t _n_periods; // Used as clearing epoch
|
||||
|
||||
// An evicting cache of the number of times each card
|
||||
// is accessed. Reduces, but does not eliminate, the amount
|
||||
// of duplicated processing of dirty cards.
|
||||
|
||||
enum SomePrivateConstants {
|
||||
epoch_bits = 32,
|
||||
card_num_shift = epoch_bits,
|
||||
epoch_mask = AllBits,
|
||||
card_num_mask = AllBits,
|
||||
|
||||
// The initial cache size is approximately this fraction
|
||||
// of a maximal cache (i.e. the size needed for all cards
|
||||
// in the heap)
|
||||
InitialCacheFraction = 512
|
||||
};
|
||||
|
||||
const static julong card_num_mask_in_place =
|
||||
(julong) card_num_mask << card_num_shift;
|
||||
|
||||
typedef struct {
|
||||
julong _value; // | card_num | epoch |
|
||||
} CardEpochCacheEntry;
|
||||
|
||||
julong make_epoch_entry(unsigned int card_num, unsigned int epoch) {
|
||||
assert(0 <= card_num && card_num < _max_cards, "Bounds");
|
||||
assert(0 <= epoch && epoch <= _n_periods, "must be");
|
||||
|
||||
return ((julong) card_num << card_num_shift) | epoch;
|
||||
}
|
||||
|
||||
unsigned int extract_epoch(julong v) {
|
||||
return (v & epoch_mask);
|
||||
}
|
||||
|
||||
unsigned int extract_card_num(julong v) {
|
||||
return (v & card_num_mask_in_place) >> card_num_shift;
|
||||
}
|
||||
|
||||
typedef struct {
|
||||
unsigned char _count;
|
||||
unsigned char _evict_count;
|
||||
} CardCountCacheEntry;
|
||||
|
||||
CardCountCacheEntry* _card_counts;
|
||||
CardEpochCacheEntry* _card_epochs;
|
||||
|
||||
// The current number of buckets in the card count cache
|
||||
size_t _n_card_counts;
|
||||
|
||||
// The number of cards for the entire reserved heap
|
||||
size_t _max_cards;
|
||||
|
||||
// The max number of buckets for the card counts and epochs caches.
|
||||
// This is the maximum that the counts and epochs will grow to.
|
||||
// It is specified as a fraction or percentage of _max_cards using
|
||||
// G1MaxHotCardCountSizePercent.
|
||||
size_t _max_n_card_counts;
|
||||
|
||||
// Possible sizes of the cache: odd primes that roughly double in size.
|
||||
// (See jvmtiTagMap.cpp).
|
||||
enum {
|
||||
MAX_CC_CACHE_INDEX = 15 // maximum index into the cache size array.
|
||||
};
|
||||
|
||||
static size_t _cc_cache_sizes[MAX_CC_CACHE_INDEX];
|
||||
|
||||
// The index in _cc_cache_sizes corresponding to the size of
|
||||
// _card_counts.
|
||||
int _cache_size_index;
|
||||
|
||||
bool _expand_card_counts;
|
||||
|
||||
const jbyte* _ct_bot;
|
||||
|
||||
jbyte** _hot_cache;
|
||||
int _hot_cache_size;
|
||||
int _n_hot;
|
||||
int _hot_cache_idx;
|
||||
|
||||
int _hot_cache_par_chunk_size;
|
||||
volatile int _hot_cache_par_claimed_idx;
|
||||
|
||||
// Needed to workaround 6817995
|
||||
CardTableModRefBS* _ct_bs;
|
||||
G1CollectedHeap* _g1h;
|
||||
|
||||
// Helper routine for expand_card_count_cache().
|
||||
// The arrays used to hold the card counts and the epochs must have
|
||||
// a 1:1 correspondence. Hence they are allocated and freed together.
|
||||
// Returns true if the allocations of both the counts and epochs
|
||||
// were successful; false otherwise.
|
||||
bool allocate_card_count_cache(size_t n,
|
||||
CardCountCacheEntry** counts,
|
||||
CardEpochCacheEntry** epochs);
|
||||
|
||||
// Expands the arrays that hold the card counts and epochs
|
||||
// to the cache size at index. Returns true if the expansion/
|
||||
// allocation was successful; false otherwise.
|
||||
bool expand_card_count_cache(int index);
|
||||
|
||||
// hash a given key (index of card_ptr) with the specified size
|
||||
static unsigned int hash(size_t key, size_t size) {
|
||||
return (unsigned int) (key % size);
|
||||
}
|
||||
|
||||
// hash a given key (index of card_ptr)
|
||||
unsigned int hash(size_t key) {
|
||||
return hash(key, _n_card_counts);
|
||||
}
|
||||
|
||||
unsigned int ptr_2_card_num(jbyte* card_ptr) {
|
||||
return (unsigned int) (card_ptr - _ct_bot);
|
||||
}
|
||||
|
||||
jbyte* card_num_2_ptr(unsigned int card_num) {
|
||||
return (jbyte*) (_ct_bot + card_num);
|
||||
}
|
||||
|
||||
// Returns the count of this card after incrementing it.
|
||||
jbyte* add_card_count(jbyte* card_ptr, int* count, bool* defer);
|
||||
|
||||
// Returns true if this card is in a young region
|
||||
bool is_young_card(jbyte* card_ptr);
|
||||
|
||||
public:
|
||||
ConcurrentG1Refine();
|
||||
ConcurrentG1Refine(G1CollectedHeap* g1h);
|
||||
~ConcurrentG1Refine();
|
||||
|
||||
void init(); // Accomplish some initialization that has to wait.
|
||||
@ -206,34 +81,6 @@ class ConcurrentG1Refine: public CHeapObj<mtGC> {
|
||||
// Iterate over the conc refine threads
|
||||
void threads_do(ThreadClosure *tc);
|
||||
|
||||
// If this is the first entry for the slot, writes into the cache and
|
||||
// returns NULL. If it causes an eviction, returns the evicted pointer.
|
||||
// Otherwise, its a cache hit, and returns NULL.
|
||||
jbyte* cache_insert(jbyte* card_ptr, bool* defer);
|
||||
|
||||
// Process the cached entries.
|
||||
void clean_up_cache(int worker_i, G1RemSet* g1rs, DirtyCardQueue* into_cset_dcq);
|
||||
|
||||
// Set up for parallel processing of the cards in the hot cache
|
||||
void clear_hot_cache_claimed_index() {
|
||||
_hot_cache_par_claimed_idx = 0;
|
||||
}
|
||||
|
||||
// Discard entries in the hot cache.
|
||||
void clear_hot_cache() {
|
||||
_hot_cache_idx = 0; _n_hot = 0;
|
||||
}
|
||||
|
||||
bool hot_cache_is_empty() { return _n_hot == 0; }
|
||||
|
||||
bool use_cache() { return _use_cache; }
|
||||
void set_use_cache(bool b) {
|
||||
if (b) _use_cache = _def_use_cache;
|
||||
else _use_cache = false;
|
||||
}
|
||||
|
||||
void clear_and_record_card_counts();
|
||||
|
||||
static int thread_num();
|
||||
|
||||
void print_worker_threads_on(outputStream* st) const;
|
||||
@ -250,6 +97,8 @@ class ConcurrentG1Refine: public CHeapObj<mtGC> {
|
||||
int worker_thread_num() const { return _n_worker_threads; }
|
||||
|
||||
int thread_threshold_step() const { return _thread_threshold_step; }
|
||||
|
||||
G1HotCardCache* hot_card_cache() { return &_hot_card_cache; }
|
||||
};
|
||||
|
||||
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTG1REFINE_HPP
|
||||
|
@ -4515,7 +4515,8 @@ G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name)
|
||||
_total_used_bytes(0), _total_capacity_bytes(0),
|
||||
_total_prev_live_bytes(0), _total_next_live_bytes(0),
|
||||
_hum_used_bytes(0), _hum_capacity_bytes(0),
|
||||
_hum_prev_live_bytes(0), _hum_next_live_bytes(0) {
|
||||
_hum_prev_live_bytes(0), _hum_next_live_bytes(0),
|
||||
_total_remset_bytes(0) {
|
||||
G1CollectedHeap* g1h = G1CollectedHeap::heap();
|
||||
MemRegion g1_committed = g1h->g1_committed();
|
||||
MemRegion g1_reserved = g1h->g1_reserved();
|
||||
@ -4533,23 +4534,25 @@ G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name)
|
||||
HeapRegion::GrainBytes);
|
||||
_out->print_cr(G1PPRL_LINE_PREFIX);
|
||||
_out->print_cr(G1PPRL_LINE_PREFIX
|
||||
G1PPRL_TYPE_H_FORMAT
|
||||
G1PPRL_ADDR_BASE_H_FORMAT
|
||||
G1PPRL_BYTE_H_FORMAT
|
||||
G1PPRL_BYTE_H_FORMAT
|
||||
G1PPRL_BYTE_H_FORMAT
|
||||
G1PPRL_DOUBLE_H_FORMAT,
|
||||
"type", "address-range",
|
||||
"used", "prev-live", "next-live", "gc-eff");
|
||||
G1PPRL_TYPE_H_FORMAT
|
||||
G1PPRL_ADDR_BASE_H_FORMAT
|
||||
G1PPRL_BYTE_H_FORMAT
|
||||
G1PPRL_BYTE_H_FORMAT
|
||||
G1PPRL_BYTE_H_FORMAT
|
||||
G1PPRL_DOUBLE_H_FORMAT
|
||||
G1PPRL_BYTE_H_FORMAT,
|
||||
"type", "address-range",
|
||||
"used", "prev-live", "next-live", "gc-eff", "remset");
|
||||
_out->print_cr(G1PPRL_LINE_PREFIX
|
||||
G1PPRL_TYPE_H_FORMAT
|
||||
G1PPRL_ADDR_BASE_H_FORMAT
|
||||
G1PPRL_BYTE_H_FORMAT
|
||||
G1PPRL_BYTE_H_FORMAT
|
||||
G1PPRL_BYTE_H_FORMAT
|
||||
G1PPRL_DOUBLE_H_FORMAT,
|
||||
"", "",
|
||||
"(bytes)", "(bytes)", "(bytes)", "(bytes/ms)");
|
||||
G1PPRL_TYPE_H_FORMAT
|
||||
G1PPRL_ADDR_BASE_H_FORMAT
|
||||
G1PPRL_BYTE_H_FORMAT
|
||||
G1PPRL_BYTE_H_FORMAT
|
||||
G1PPRL_BYTE_H_FORMAT
|
||||
G1PPRL_DOUBLE_H_FORMAT
|
||||
G1PPRL_BYTE_H_FORMAT,
|
||||
"", "",
|
||||
"(bytes)", "(bytes)", "(bytes)", "(bytes/ms)", "(bytes)");
|
||||
}
|
||||
|
||||
// It takes as a parameter a reference to one of the _hum_* fields, it
|
||||
@ -4591,6 +4594,7 @@ bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
|
||||
size_t prev_live_bytes = r->live_bytes();
|
||||
size_t next_live_bytes = r->next_live_bytes();
|
||||
double gc_eff = r->gc_efficiency();
|
||||
size_t remset_bytes = r->rem_set()->mem_size();
|
||||
if (r->used() == 0) {
|
||||
type = "FREE";
|
||||
} else if (r->is_survivor()) {
|
||||
@ -4624,6 +4628,7 @@ bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
|
||||
_total_capacity_bytes += capacity_bytes;
|
||||
_total_prev_live_bytes += prev_live_bytes;
|
||||
_total_next_live_bytes += next_live_bytes;
|
||||
_total_remset_bytes += remset_bytes;
|
||||
|
||||
// Print a line for this particular region.
|
||||
_out->print_cr(G1PPRL_LINE_PREFIX
|
||||
@ -4632,14 +4637,17 @@ bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
|
||||
G1PPRL_BYTE_FORMAT
|
||||
G1PPRL_BYTE_FORMAT
|
||||
G1PPRL_BYTE_FORMAT
|
||||
G1PPRL_DOUBLE_FORMAT,
|
||||
G1PPRL_DOUBLE_FORMAT
|
||||
G1PPRL_BYTE_FORMAT,
|
||||
type, bottom, end,
|
||||
used_bytes, prev_live_bytes, next_live_bytes, gc_eff);
|
||||
used_bytes, prev_live_bytes, next_live_bytes, gc_eff , remset_bytes);
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
|
||||
// add static memory usages to remembered set sizes
|
||||
_total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size();
|
||||
// Print the footer of the output.
|
||||
_out->print_cr(G1PPRL_LINE_PREFIX);
|
||||
_out->print_cr(G1PPRL_LINE_PREFIX
|
||||
@ -4647,13 +4655,15 @@ G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
|
||||
G1PPRL_SUM_MB_FORMAT("capacity")
|
||||
G1PPRL_SUM_MB_PERC_FORMAT("used")
|
||||
G1PPRL_SUM_MB_PERC_FORMAT("prev-live")
|
||||
G1PPRL_SUM_MB_PERC_FORMAT("next-live"),
|
||||
G1PPRL_SUM_MB_PERC_FORMAT("next-live")
|
||||
G1PPRL_SUM_MB_FORMAT("remset"),
|
||||
bytes_to_mb(_total_capacity_bytes),
|
||||
bytes_to_mb(_total_used_bytes),
|
||||
perc(_total_used_bytes, _total_capacity_bytes),
|
||||
bytes_to_mb(_total_prev_live_bytes),
|
||||
perc(_total_prev_live_bytes, _total_capacity_bytes),
|
||||
bytes_to_mb(_total_next_live_bytes),
|
||||
perc(_total_next_live_bytes, _total_capacity_bytes));
|
||||
perc(_total_next_live_bytes, _total_capacity_bytes),
|
||||
bytes_to_mb(_total_remset_bytes));
|
||||
_out->cr();
|
||||
}
|
||||
|
@ -1257,6 +1257,9 @@ private:
|
||||
size_t _hum_prev_live_bytes;
|
||||
size_t _hum_next_live_bytes;
|
||||
|
||||
// Accumulator for the remembered set size
|
||||
size_t _total_remset_bytes;
|
||||
|
||||
static double perc(size_t val, size_t total) {
|
||||
if (total == 0) {
|
||||
return 0.0;
|
||||
|
hotspot/src/share/vm/gc_implementation/g1/g1CardCounts.cpp (new file, 215 lines)
@ -0,0 +1,215 @@
/*
 * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/g1/g1CardCounts.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1GCPhaseTimes.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "services/memTracker.hpp"
#include "utilities/copy.hpp"

void G1CardCounts::clear_range(size_t from_card_num, size_t to_card_num) {
  if (has_count_table()) {
    check_card_num(from_card_num,
                   err_msg("from card num out of range: "SIZE_FORMAT, from_card_num));
    assert(from_card_num < to_card_num,
           err_msg("Wrong order? from: " SIZE_FORMAT ", to: "SIZE_FORMAT,
                   from_card_num, to_card_num));
    assert(to_card_num <= _committed_max_card_num,
           err_msg("to card num out of range: "
                   "to: "SIZE_FORMAT ", "
                   "max: "SIZE_FORMAT,
                   to_card_num, _committed_max_card_num));

    to_card_num = MIN2(_committed_max_card_num, to_card_num);

    Copy::fill_to_bytes(&_card_counts[from_card_num], (to_card_num - from_card_num));
  }
}

G1CardCounts::G1CardCounts(G1CollectedHeap *g1h):
  _g1h(g1h), _card_counts(NULL),
  _reserved_max_card_num(0), _committed_max_card_num(0),
  _committed_size(0) {}

void G1CardCounts::initialize() {
  assert(_g1h->max_capacity() > 0, "initialization order");
  assert(_g1h->capacity() == 0, "initialization order");

  if (G1ConcRSHotCardLimit > 0) {
    // The max value we can store in the counts table is
    // max_jubyte. Guarantee the value of the hot
    // threshold limit is no more than this.
    guarantee(G1ConcRSHotCardLimit <= max_jubyte, "sanity");

    ModRefBarrierSet* bs = _g1h->mr_bs();
    guarantee(bs->is_a(BarrierSet::CardTableModRef), "Precondition");
    _ct_bs = (CardTableModRefBS*)bs;
    _ct_bot = _ct_bs->byte_for_const(_g1h->reserved_region().start());

    // Allocate/Reserve the counts table
    size_t reserved_bytes = _g1h->max_capacity();
    _reserved_max_card_num = reserved_bytes >> CardTableModRefBS::card_shift;

    size_t reserved_size = _reserved_max_card_num * sizeof(jbyte);
    ReservedSpace rs(ReservedSpace::allocation_align_size_up(reserved_size));
    if (!rs.is_reserved()) {
      warning("Could not reserve enough space for the card counts table");
|
||||
guarantee(!has_reserved_count_table(), "should be NULL");
|
||||
return;
|
||||
}
|
||||
|
||||
MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
|
||||
|
||||
_card_counts_storage.initialize(rs, 0);
|
||||
_card_counts = (jubyte*) _card_counts_storage.low();
|
||||
}
|
||||
}
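The sizing arithmetic in initialize() comes down to one count byte per card of the maximum heap, reserved up front and committed lazily as the heap grows. A standalone sketch of that arithmetic, assuming the usual 512-byte G1 card size and illustrative heap sizes (the real code takes the shift from CardTableModRefBS::card_shift and the capacities from the heap):

#include <iostream>

int main() {
  // Assumed values for illustration only: 512-byte cards, -Xmx4g,
  // 512m currently committed.
  const unsigned long long card_shift = 9;                        // 2^9 = 512-byte cards
  const unsigned long long max_heap   = 4ULL * 1024 * 1024 * 1024;
  const unsigned long long cur_heap   = 512ULL * 1024 * 1024;

  // One count byte per card is reserved for the whole reservable heap...
  unsigned long long reserved_count_bytes  = max_heap >> card_shift;   // 8 MB
  // ...but only the prefix covering the committed heap is committed.
  unsigned long long committed_count_bytes = cur_heap >> card_shift;   // 1 MB

  std::cout << "reserved:  " << reserved_count_bytes  << " bytes\n"
            << "committed: " << committed_count_bytes << " bytes\n";
  return 0;
}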
|
||||
|
||||
void G1CardCounts::resize(size_t heap_capacity) {
|
||||
// Expand the card counts table to handle a heap with the given capacity.
|
||||
|
||||
if (!has_reserved_count_table()) {
|
||||
// Don't expand if we failed to reserve the card counts table.
|
||||
return;
|
||||
}
|
||||
|
||||
assert(_committed_size ==
|
||||
ReservedSpace::allocation_align_size_up(_committed_size),
|
||||
err_msg("Unaligned? committed_size: " SIZE_FORMAT, _committed_size));
|
||||
|
||||
// Verify that the committed space for the card counts matches our
|
||||
// committed max card num. Note for some allocation alignments, the
|
||||
// amount of space actually committed for the counts table will be able
|
||||
// to span more cards than the number spanned by the maximum heap.
|
||||
size_t prev_committed_size = _committed_size;
|
||||
size_t prev_committed_card_num = committed_to_card_num(prev_committed_size);
|
||||
|
||||
assert(prev_committed_card_num == _committed_max_card_num,
|
||||
err_msg("Card mismatch: "
|
||||
"prev: " SIZE_FORMAT ", "
|
||||
"committed: "SIZE_FORMAT", "
|
||||
"reserved: "SIZE_FORMAT,
|
||||
prev_committed_card_num, _committed_max_card_num, _reserved_max_card_num));
|
||||
|
||||
size_t new_size = (heap_capacity >> CardTableModRefBS::card_shift) * sizeof(jbyte);
|
||||
size_t new_committed_size = ReservedSpace::allocation_align_size_up(new_size);
|
||||
size_t new_committed_card_num = committed_to_card_num(new_committed_size);
|
||||
|
||||
if (_committed_max_card_num < new_committed_card_num) {
|
||||
// we need to expand the backing store for the card counts
|
||||
size_t expand_size = new_committed_size - prev_committed_size;
|
||||
|
||||
if (!_card_counts_storage.expand_by(expand_size)) {
|
||||
warning("Card counts table backing store commit failure");
|
||||
return;
|
||||
}
|
||||
assert(_card_counts_storage.committed_size() == new_committed_size,
|
||||
"expansion commit failure");
|
||||
|
||||
_committed_size = new_committed_size;
|
||||
_committed_max_card_num = new_committed_card_num;
|
||||
|
||||
clear_range(prev_committed_card_num, _committed_max_card_num);
|
||||
}
|
||||
}
|
||||
|
||||
uint G1CardCounts::add_card_count(jbyte* card_ptr) {
|
||||
// Returns the number of times the card has been refined.
|
||||
// If we failed to reserve/commit the counts table, return 0.
|
||||
// If card_ptr is beyond the committed end of the counts table,
|
||||
// return 0.
|
||||
// Otherwise return the actual count.
|
||||
// Unless G1ConcRSHotCardLimit has been set appropriately,
|
||||
// returning 0 will result in the card being considered
|
||||
// cold and will be refined immediately.
|
||||
uint count = 0;
|
||||
if (has_count_table()) {
|
||||
size_t card_num = ptr_2_card_num(card_ptr);
|
||||
if (card_num < _committed_max_card_num) {
|
||||
count = (uint) _card_counts[card_num];
|
||||
if (count < G1ConcRSHotCardLimit) {
|
||||
_card_counts[card_num] += 1;
|
||||
}
|
||||
assert(_card_counts[card_num] <= G1ConcRSHotCardLimit,
|
||||
err_msg("Refinement count overflow? "
|
||||
"new count: "UINT32_FORMAT,
|
||||
(uint) _card_counts[card_num]));
|
||||
}
|
||||
}
|
||||
return count;
|
||||
}
|
||||
|
||||
bool G1CardCounts::is_hot(uint count) {
|
||||
return (count >= G1ConcRSHotCardLimit);
|
||||
}
|
||||
|
||||
void G1CardCounts::clear_region(HeapRegion* hr) {
|
||||
assert(!hr->isHumongous(), "Should have been cleared");
|
||||
if (has_count_table()) {
|
||||
HeapWord* bottom = hr->bottom();
|
||||
|
||||
// We use the last address in hr as hr could be the
|
||||
// last region in the heap. In which case trying to find
|
||||
// the card for hr->end() will be an OOB access to the
|
||||
// card table.
|
||||
HeapWord* last = hr->end() - 1;
|
||||
assert(_g1h->g1_committed().contains(last),
|
||||
err_msg("last not in committed: "
|
||||
"last: " PTR_FORMAT ", "
|
||||
"committed: [" PTR_FORMAT ", " PTR_FORMAT ")",
|
||||
last,
|
||||
_g1h->g1_committed().start(),
|
||||
_g1h->g1_committed().end()));
|
||||
|
||||
const jbyte* from_card_ptr = _ct_bs->byte_for_const(bottom);
|
||||
const jbyte* last_card_ptr = _ct_bs->byte_for_const(last);
|
||||
|
||||
#ifdef ASSERT
|
||||
HeapWord* start_addr = _ct_bs->addr_for(from_card_ptr);
|
||||
assert(start_addr == hr->bottom(), "alignment");
|
||||
HeapWord* last_addr = _ct_bs->addr_for(last_card_ptr);
|
||||
assert((last_addr + CardTableModRefBS::card_size_in_words) == hr->end(), "alignment");
|
||||
#endif // ASSERT
|
||||
|
||||
// Clear the counts for the (exclusive) card range.
|
||||
size_t from_card_num = ptr_2_card_num(from_card_ptr);
|
||||
size_t to_card_num = ptr_2_card_num(last_card_ptr) + 1;
|
||||
clear_range(from_card_num, to_card_num);
|
||||
}
|
||||
}
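The bottom/last arithmetic above is easiest to check with concrete numbers; the addresses, the 8-byte HeapWord and the 512-byte card size below are purely illustrative:

// Hypothetical region [0x10000, 0x20000) with 512-byte cards:
//   bottom = 0x10000              -> card index 0x10000 >> 9 = 128
//   end    = 0x20000              -> card index 256 (first card past the region)
//   last   = 0x20000 - 8 (1 word) -> card index 255 (still inside the region)
// clear_range(128, 255 + 1) therefore clears exactly cards [128, 256)
// without ever forming a card pointer for 0x20000, which may lie beyond
// the committed card table when this is the last region in the heap.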
|
||||
|
||||
void G1CardCounts::clear_all() {
|
||||
assert(SafepointSynchronize::is_at_safepoint(), "don't call this otherwise");
|
||||
clear_range((size_t)0, _committed_max_card_num);
|
||||
}
|
||||
|
||||
G1CardCounts::~G1CardCounts() {
|
||||
if (has_reserved_count_table()) {
|
||||
_card_counts_storage.release();
|
||||
}
|
||||
}
|
||||
|
134
hotspot/src/share/vm/gc_implementation/g1/g1CardCounts.hpp
Normal file
134
hotspot/src/share/vm/gc_implementation/g1/g1CardCounts.hpp
Normal file
@ -0,0 +1,134 @@
|
||||
/*
|
||||
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1CARDCOUNTS_HPP
|
||||
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1CARDCOUNTS_HPP
|
||||
|
||||
#include "memory/allocation.hpp"
|
||||
#include "runtime/virtualspace.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
|
||||
class CardTableModRefBS;
|
||||
class G1CollectedHeap;
|
||||
class HeapRegion;
|
||||
|
||||
// Table to track the number of times a card has been refined. Once
|
||||
// a card has been refined a certain number of times, it is
|
||||
// considered 'hot' and its refinement is delayed by inserting the
|
||||
// card into the hot card cache. The card will then be refined when
|
||||
// it is evicted from the hot card cache, or when the hot card cache
|
||||
// is 'drained' during the next evacuation pause.
|
||||
|
||||
class G1CardCounts: public CHeapObj<mtGC> {
|
||||
G1CollectedHeap* _g1h;
|
||||
|
||||
// The table of counts
|
||||
jubyte* _card_counts;
|
||||
|
||||
// Max capacity of the reserved space for the counts table
|
||||
size_t _reserved_max_card_num;
|
||||
|
||||
// Max capacity of the committed space for the counts table
|
||||
size_t _committed_max_card_num;
|
||||
|
||||
// Size of committed space for the counts table
|
||||
size_t _committed_size;
|
||||
|
||||
// CardTable bottom.
|
||||
const jbyte* _ct_bot;
|
||||
|
||||
// Barrier set
|
||||
CardTableModRefBS* _ct_bs;
|
||||
|
||||
// The virtual memory backing the counts table
|
||||
VirtualSpace _card_counts_storage;
|
||||
|
||||
// Returns true if the card counts table has been reserved.
|
||||
bool has_reserved_count_table() { return _card_counts != NULL; }
|
||||
|
||||
// Returns true if the card counts table has been reserved and committed.
|
||||
bool has_count_table() {
|
||||
return has_reserved_count_table() && _committed_max_card_num > 0;
|
||||
}
|
||||
|
||||
void check_card_num(size_t card_num, const char* msg) {
|
||||
assert(card_num >= 0 && card_num < _committed_max_card_num, msg);
|
||||
}
|
||||
|
||||
size_t ptr_2_card_num(const jbyte* card_ptr) {
|
||||
assert(card_ptr >= _ct_bot,
|
||||
err_msg("Inavalied card pointer: "
|
||||
"card_ptr: " PTR_FORMAT ", "
|
||||
"_ct_bot: " PTR_FORMAT,
|
||||
card_ptr, _ct_bot));
|
||||
size_t card_num = pointer_delta(card_ptr, _ct_bot, sizeof(jbyte));
|
||||
check_card_num(card_num,
|
||||
err_msg("card pointer out of range: " PTR_FORMAT, card_ptr));
|
||||
return card_num;
|
||||
}
|
||||
|
||||
jbyte* card_num_2_ptr(size_t card_num) {
|
||||
check_card_num(card_num,
|
||||
err_msg("card num out of range: "SIZE_FORMAT, card_num));
|
||||
return (jbyte*) (_ct_bot + card_num);
|
||||
}
|
||||
|
||||
// Helper routine.
|
||||
// Returns the number of cards that can be counted by the given committed
|
||||
// table size, with a maximum of the number of cards spanned by the max
|
||||
// capacity of the heap.
|
||||
size_t committed_to_card_num(size_t committed_size) {
|
||||
return MIN2(_reserved_max_card_num, committed_size / sizeof(jbyte));
|
||||
}
|
||||
|
||||
// Clear the counts table for the given (exclusive) index range.
|
||||
void clear_range(size_t from_card_num, size_t to_card_num);
|
||||
|
||||
public:
|
||||
G1CardCounts(G1CollectedHeap* g1h);
|
||||
~G1CardCounts();
|
||||
|
||||
void initialize();
|
||||
|
||||
// Resize the committed space for the card counts table in
|
||||
// response to a resize of the committed space for the heap.
|
||||
void resize(size_t heap_capacity);
|
||||
|
||||
// Increments the refinement count for the given card.
|
||||
// Returns the pre-increment count value.
|
||||
uint add_card_count(jbyte* card_ptr);
|
||||
|
||||
// Returns true if the given count is high enough to be considered
|
||||
// 'hot'; false otherwise.
|
||||
bool is_hot(uint count);
|
||||
|
||||
// Clears the card counts for the cards spanned by the region
|
||||
void clear_region(HeapRegion* hr);
|
||||
|
||||
// Clear the entire card counts table during GC.
|
||||
// Updates the policy stats with the duration.
|
||||
void clear_all();
|
||||
};
|
||||
|
||||
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1CARDCOUNTS_HPP
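The class comment above describes a saturating, one-byte-per-card refinement counter. As a rough standalone sketch of that idea, not the HotSpot code (plain C++ types; the hot limit plays the role of G1ConcRSHotCardLimit and the card-to-index mapping is assumed to be done by the caller):

#include <cstddef>
#include <cstdint>
#include <vector>

// Minimal saturating per-card refinement counter, mirroring the idea of
// G1CardCounts: one byte per card, incremented until it reaches the hot
// threshold, after which the stored value no longer changes.
class CardRefinementCounts {
  std::vector<uint8_t> counts_;   // one entry per card
  const uint8_t hot_limit_;       // analogous to G1ConcRSHotCardLimit
public:
  CardRefinementCounts(size_t num_cards, uint8_t hot_limit)
    : counts_(num_cards, 0), hot_limit_(hot_limit) {}

  // Returns the pre-increment count, like add_card_count().
  unsigned add(size_t card_index) {
    unsigned count = counts_[card_index];
    if (count < hot_limit_) {
      counts_[card_index] = static_cast<uint8_t>(count + 1);
    }
    return count;
  }

  bool is_hot(unsigned count) const { return count >= hot_limit_; }
};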
|
@ -96,7 +96,7 @@ public:
|
||||
_sts(sts), _g1rs(g1rs), _cg1r(cg1r), _concurrent(true)
|
||||
{}
|
||||
bool do_card_ptr(jbyte* card_ptr, int worker_i) {
|
||||
bool oops_into_cset = _g1rs->concurrentRefineOneCard(card_ptr, worker_i, false);
|
||||
bool oops_into_cset = _g1rs->refine_card(card_ptr, worker_i, false);
|
||||
// This path is executed by the concurrent refine or mutator threads,
|
||||
// concurrently, and so we do not care if card_ptr contains references
|
||||
// that point into the collection set.
|
||||
@ -1452,9 +1452,10 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
|
||||
_hr_printer.end_gc(true /* full */, (size_t) total_collections());
|
||||
}
|
||||
|
||||
if (_cg1r->use_cache()) {
|
||||
_cg1r->clear_and_record_card_counts();
|
||||
_cg1r->clear_hot_cache();
|
||||
G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
|
||||
if (hot_card_cache->use_cache()) {
|
||||
hot_card_cache->reset_card_counts();
|
||||
hot_card_cache->reset_hot_cache();
|
||||
}
|
||||
|
||||
// Rebuild remembered sets of all regions.
|
||||
@ -1548,7 +1549,7 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
|
||||
}
|
||||
|
||||
if (G1Log::finer()) {
|
||||
g1_policy()->print_detailed_heap_transition();
|
||||
g1_policy()->print_detailed_heap_transition(true /* full */);
|
||||
}
|
||||
|
||||
print_heap_after_gc();
|
||||
@ -1767,6 +1768,8 @@ void G1CollectedHeap::update_committed_space(HeapWord* old_end,
|
||||
Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);
|
||||
// Tell the BOT about the update.
|
||||
_bot_shared->resize(_g1_committed.word_size());
|
||||
// Tell the hot card cache about the update
|
||||
_cg1r->hot_card_cache()->resize_card_counts(capacity());
|
||||
}
|
||||
|
||||
bool G1CollectedHeap::expand(size_t expand_bytes) {
|
||||
@ -1843,33 +1846,32 @@ void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
|
||||
ReservedSpace::page_align_size_down(shrink_bytes);
|
||||
aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
|
||||
HeapRegion::GrainBytes);
|
||||
uint num_regions_deleted = 0;
|
||||
MemRegion mr = _hrs.shrink_by(aligned_shrink_bytes, &num_regions_deleted);
|
||||
uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes);
|
||||
|
||||
uint num_regions_removed = _hrs.shrink_by(num_regions_to_remove);
|
||||
HeapWord* old_end = (HeapWord*) _g1_storage.high();
|
||||
assert(mr.end() == old_end, "post-condition");
|
||||
size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;
|
||||
|
||||
ergo_verbose3(ErgoHeapSizing,
|
||||
"shrink the heap",
|
||||
ergo_format_byte("requested shrinking amount")
|
||||
ergo_format_byte("aligned shrinking amount")
|
||||
ergo_format_byte("attempted shrinking amount"),
|
||||
shrink_bytes, aligned_shrink_bytes, mr.byte_size());
|
||||
if (mr.byte_size() > 0) {
|
||||
shrink_bytes, aligned_shrink_bytes, shrunk_bytes);
|
||||
if (num_regions_removed > 0) {
|
||||
_g1_storage.shrink_by(shrunk_bytes);
|
||||
HeapWord* new_end = (HeapWord*) _g1_storage.high();
|
||||
|
||||
if (_hr_printer.is_active()) {
|
||||
HeapWord* curr = mr.end();
|
||||
while (curr > mr.start()) {
|
||||
HeapWord* curr = old_end;
|
||||
while (curr > new_end) {
|
||||
HeapWord* curr_end = curr;
|
||||
curr -= HeapRegion::GrainWords;
|
||||
_hr_printer.uncommit(curr, curr_end);
|
||||
}
|
||||
assert(curr == mr.start(), "post-condition");
|
||||
}
|
||||
|
||||
_g1_storage.shrink_by(mr.byte_size());
|
||||
HeapWord* new_end = (HeapWord*) _g1_storage.high();
|
||||
assert(mr.start() == new_end, "post-condition");
|
||||
|
||||
_expansion_regions += num_regions_deleted;
|
||||
_expansion_regions += num_regions_removed;
|
||||
update_committed_space(old_end, new_end);
|
||||
HeapRegionRemSet::shrink_heap(n_regions());
|
||||
g1_policy()->record_new_heap_size(n_regions());
|
||||
@ -2000,7 +2002,7 @@ jint G1CollectedHeap::initialize() {
|
||||
Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
|
||||
Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
|
||||
|
||||
_cg1r = new ConcurrentG1Refine();
|
||||
_cg1r = new ConcurrentG1Refine(this);
|
||||
|
||||
// Reserve the maximum.
|
||||
|
||||
@ -2061,6 +2063,9 @@ jint G1CollectedHeap::initialize() {
|
||||
(HeapWord*) _g1_reserved.end(),
|
||||
_expansion_regions);
|
||||
|
||||
// Do later initialization work for concurrent refinement.
|
||||
_cg1r->init();
|
||||
|
||||
// 6843694 - ensure that the maximum region index can fit
|
||||
// in the remembered set structures.
|
||||
const uint max_region_idx = (1U << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
|
||||
@ -2078,20 +2083,20 @@ jint G1CollectedHeap::initialize() {
|
||||
|
||||
_g1h = this;
|
||||
|
||||
_in_cset_fast_test_length = max_regions();
|
||||
_in_cset_fast_test_base =
|
||||
_in_cset_fast_test_length = max_regions();
|
||||
_in_cset_fast_test_base =
|
||||
NEW_C_HEAP_ARRAY(bool, (size_t) _in_cset_fast_test_length, mtGC);
|
||||
|
||||
// We're biasing _in_cset_fast_test to avoid subtracting the
|
||||
// beginning of the heap every time we want to index; basically
|
||||
// it's the same with what we do with the card table.
|
||||
_in_cset_fast_test = _in_cset_fast_test_base -
|
||||
// We're biasing _in_cset_fast_test to avoid subtracting the
|
||||
// beginning of the heap every time we want to index; basically
|
||||
// it's the same with what we do with the card table.
|
||||
_in_cset_fast_test = _in_cset_fast_test_base -
|
||||
((uintx) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes);
|
||||
|
||||
// Clear the _cset_fast_test bitmap in anticipation of adding
|
||||
// regions to the incremental collection set for the first
|
||||
// evacuation pause.
|
||||
clear_cset_fast_test();
|
||||
// Clear the _cset_fast_test bitmap in anticipation of adding
|
||||
// regions to the incremental collection set for the first
|
||||
// evacuation pause.
|
||||
clear_cset_fast_test();
|
||||
|
||||
// Create the ConcurrentMark data structure and thread.
|
||||
// (Must do this late, so that "max_regions" is defined.)
|
||||
@ -2153,9 +2158,6 @@ jint G1CollectedHeap::initialize() {
|
||||
// counts and that mechanism.
|
||||
SpecializationStats::clear();
|
||||
|
||||
// Do later initialization work for concurrent refinement.
|
||||
_cg1r->init();
|
||||
|
||||
// Here we allocate the dummy full region that is required by the
|
||||
// G1AllocRegion class. If we don't pass an address in the reserved
|
||||
// space here, lots of asserts fire.
|
||||
@ -2314,7 +2316,8 @@ void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl,
|
||||
bool concurrent,
|
||||
int worker_i) {
|
||||
// Clean cards in the hot card cache
|
||||
concurrent_g1_refine()->clean_up_cache(worker_i, g1_rem_set(), into_cset_dcq);
|
||||
G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
|
||||
hot_card_cache->drain(worker_i, g1_rem_set(), into_cset_dcq);
|
||||
|
||||
DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
|
||||
int n_completed_buffers = 0;
|
||||
@ -5604,8 +5607,11 @@ void G1CollectedHeap::evacuate_collection_set() {
|
||||
NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)
|
||||
|
||||
g1_rem_set()->prepare_for_oops_into_collection_set_do();
|
||||
concurrent_g1_refine()->set_use_cache(false);
|
||||
concurrent_g1_refine()->clear_hot_cache_claimed_index();
|
||||
|
||||
// Disable the hot card cache.
|
||||
G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
|
||||
hot_card_cache->reset_hot_cache_claimed_index();
|
||||
hot_card_cache->set_use_cache(false);
|
||||
|
||||
uint n_workers;
|
||||
if (G1CollectedHeap::use_parallel_gc_threads()) {
|
||||
@ -5687,8 +5693,11 @@ void G1CollectedHeap::evacuate_collection_set() {
|
||||
release_gc_alloc_regions(n_workers);
|
||||
g1_rem_set()->cleanup_after_oops_into_collection_set_do();
|
||||
|
||||
concurrent_g1_refine()->clear_hot_cache();
|
||||
concurrent_g1_refine()->set_use_cache(true);
|
||||
// Reset and re-enable the hot card cache.
|
||||
// Note the counts for the cards in the regions in the
|
||||
// collection set are reset when the collection set is freed.
|
||||
hot_card_cache->reset_hot_cache();
|
||||
hot_card_cache->set_use_cache(true);
|
||||
|
||||
finalize_for_evac_failure();
|
||||
|
||||
@ -5750,6 +5759,12 @@ void G1CollectedHeap::free_region(HeapRegion* hr,
|
||||
assert(!hr->is_empty(), "the region should not be empty");
|
||||
assert(free_list != NULL, "pre-condition");
|
||||
|
||||
// Clear the card counts for this region.
|
||||
// Note: we only need to do this if the region is not young
|
||||
// (since we don't refine cards in young regions).
|
||||
if (!hr->is_young()) {
|
||||
_cg1r->hot_card_cache()->reset_card_counts(hr);
|
||||
}
|
||||
*pre_used += hr->used();
|
||||
hr->hr_clear(par, true /* clear_space */);
|
||||
free_list->add_as_head(hr);
|
||||
|
@ -124,9 +124,12 @@ G1CollectorPolicy::G1CollectorPolicy() :
|
||||
_last_young_gc(false),
|
||||
_last_gc_was_young(false),
|
||||
|
||||
_eden_bytes_before_gc(0),
|
||||
_survivor_bytes_before_gc(0),
|
||||
_capacity_before_gc(0),
|
||||
_eden_used_bytes_before_gc(0),
|
||||
_survivor_used_bytes_before_gc(0),
|
||||
_heap_used_bytes_before_gc(0),
|
||||
_metaspace_used_bytes_before_gc(0),
|
||||
_eden_capacity_bytes_before_gc(0),
|
||||
_heap_capacity_bytes_before_gc(0),
|
||||
|
||||
_eden_cset_region_length(0),
|
||||
_survivor_cset_region_length(0),
|
||||
@ -309,7 +312,8 @@ G1CollectorPolicy::G1CollectorPolicy() :
|
||||
|
||||
void G1CollectorPolicy::initialize_flags() {
|
||||
set_min_alignment(HeapRegion::GrainBytes);
|
||||
set_max_alignment(GenRemSet::max_alignment_constraint(rem_set_name()));
|
||||
size_t card_table_alignment = GenRemSet::max_alignment_constraint(rem_set_name());
|
||||
set_max_alignment(MAX2(card_table_alignment, min_alignment()));
|
||||
if (SurvivorRatio < 1) {
|
||||
vm_exit_during_initialization("Invalid survivor ratio specified");
|
||||
}
|
||||
@ -745,7 +749,7 @@ G1CollectorPolicy::verify_young_ages(HeapRegion* head,
|
||||
|
||||
void G1CollectorPolicy::record_full_collection_start() {
|
||||
_full_collection_start_sec = os::elapsedTime();
|
||||
record_heap_size_info_at_start();
|
||||
record_heap_size_info_at_start(true /* full */);
|
||||
// Release the future to-space so that it is available for compaction into.
|
||||
_g1->set_full_collection();
|
||||
}
|
||||
@ -802,7 +806,7 @@ void G1CollectorPolicy::record_collection_pause_start(double start_time_sec) {
|
||||
_trace_gen0_time_data.record_start_collection(s_w_t_ms);
|
||||
_stop_world_start = 0.0;
|
||||
|
||||
record_heap_size_info_at_start();
|
||||
record_heap_size_info_at_start(false /* full */);
|
||||
|
||||
phase_times()->record_cur_collection_start_sec(start_time_sec);
|
||||
_pending_cards = _g1->pending_card_num();
|
||||
@ -937,14 +941,6 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms) {
|
||||
_mmu_tracker->add_pause(end_time_sec - pause_time_ms/1000.0,
|
||||
end_time_sec, false);
|
||||
|
||||
size_t freed_bytes =
|
||||
_cur_collection_pause_used_at_start_bytes - cur_used_bytes;
|
||||
size_t surviving_bytes = _collection_set_bytes_used_before - freed_bytes;
|
||||
|
||||
double survival_fraction =
|
||||
(double)surviving_bytes/
|
||||
(double)_collection_set_bytes_used_before;
|
||||
|
||||
if (update_stats) {
|
||||
_trace_gen0_time_data.record_end_collection(pause_time_ms, phase_times());
|
||||
// this is where we update the allocation rate of the application
|
||||
@ -997,6 +993,7 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
bool new_in_marking_window = _in_marking_window;
|
||||
bool new_in_marking_window_im = false;
|
||||
if (during_initial_mark_pause()) {
|
||||
@ -1082,8 +1079,10 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms) {
|
||||
}
|
||||
_rs_length_diff_seq->add((double) rs_length_diff);
|
||||
|
||||
size_t copied_bytes = surviving_bytes;
|
||||
size_t freed_bytes = _heap_used_bytes_before_gc - cur_used_bytes;
|
||||
size_t copied_bytes = _collection_set_bytes_used_before - freed_bytes;
|
||||
double cost_per_byte_ms = 0.0;
|
||||
|
||||
if (copied_bytes > 0) {
|
||||
cost_per_byte_ms = phase_times()->average_last_obj_copy_time() / (double) copied_bytes;
|
||||
if (_in_marking_window) {
|
||||
@ -1147,51 +1146,61 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms) {
|
||||
byte_size_in_proper_unit((double)(bytes)), \
|
||||
proper_unit_for_byte_size((bytes))
|
||||
|
||||
void G1CollectorPolicy::record_heap_size_info_at_start() {
|
||||
void G1CollectorPolicy::record_heap_size_info_at_start(bool full) {
|
||||
YoungList* young_list = _g1->young_list();
|
||||
_eden_bytes_before_gc = young_list->eden_used_bytes();
|
||||
_survivor_bytes_before_gc = young_list->survivor_used_bytes();
|
||||
_capacity_before_gc = _g1->capacity();
|
||||
|
||||
_cur_collection_pause_used_at_start_bytes = _g1->used();
|
||||
_eden_used_bytes_before_gc = young_list->eden_used_bytes();
|
||||
_survivor_used_bytes_before_gc = young_list->survivor_used_bytes();
|
||||
_heap_capacity_bytes_before_gc = _g1->capacity();
|
||||
_heap_used_bytes_before_gc = _g1->used();
|
||||
_cur_collection_pause_used_regions_at_start = _g1->used_regions();
|
||||
|
||||
size_t eden_capacity_before_gc =
|
||||
(_young_list_target_length * HeapRegion::GrainBytes) - _survivor_bytes_before_gc;
|
||||
_eden_capacity_bytes_before_gc =
|
||||
(_young_list_target_length * HeapRegion::GrainBytes) - _survivor_used_bytes_before_gc;
|
||||
|
||||
_prev_eden_capacity = eden_capacity_before_gc;
|
||||
if (full) {
|
||||
_metaspace_used_bytes_before_gc = MetaspaceAux::allocated_used_bytes();
|
||||
}
|
||||
}
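The eden capacity recorded above is derived rather than measured: the young target length in regions times the region size, minus the bytes currently held by survivors. With illustrative numbers only (1 MB regions, a target of 120 young regions, 8 MB of survivor data):

// _eden_capacity_bytes_before_gc
//   = 120 * 1M - 8M
//   = 112 MB
// and this is the "(capacity)" figure printed next to eden usage in the
// detailed heap transition line.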
|
||||
|
||||
void G1CollectorPolicy::print_heap_transition() {
|
||||
_g1->print_size_transition(gclog_or_tty,
|
||||
_cur_collection_pause_used_at_start_bytes, _g1->used(), _g1->capacity());
|
||||
_heap_used_bytes_before_gc,
|
||||
_g1->used(),
|
||||
_g1->capacity());
|
||||
}
|
||||
|
||||
void G1CollectorPolicy::print_detailed_heap_transition() {
|
||||
YoungList* young_list = _g1->young_list();
|
||||
size_t eden_bytes = young_list->eden_used_bytes();
|
||||
size_t survivor_bytes = young_list->survivor_used_bytes();
|
||||
size_t used_before_gc = _cur_collection_pause_used_at_start_bytes;
|
||||
size_t used = _g1->used();
|
||||
size_t capacity = _g1->capacity();
|
||||
size_t eden_capacity =
|
||||
(_young_list_target_length * HeapRegion::GrainBytes) - survivor_bytes;
|
||||
void G1CollectorPolicy::print_detailed_heap_transition(bool full) {
|
||||
YoungList* young_list = _g1->young_list();
|
||||
|
||||
gclog_or_tty->print_cr(
|
||||
" [Eden: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT") "
|
||||
"Survivors: "EXT_SIZE_FORMAT"->"EXT_SIZE_FORMAT" "
|
||||
"Heap: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"
|
||||
EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")]",
|
||||
EXT_SIZE_PARAMS(_eden_bytes_before_gc),
|
||||
EXT_SIZE_PARAMS(_prev_eden_capacity),
|
||||
EXT_SIZE_PARAMS(eden_bytes),
|
||||
EXT_SIZE_PARAMS(eden_capacity),
|
||||
EXT_SIZE_PARAMS(_survivor_bytes_before_gc),
|
||||
EXT_SIZE_PARAMS(survivor_bytes),
|
||||
EXT_SIZE_PARAMS(used_before_gc),
|
||||
EXT_SIZE_PARAMS(_capacity_before_gc),
|
||||
EXT_SIZE_PARAMS(used),
|
||||
EXT_SIZE_PARAMS(capacity));
|
||||
size_t eden_used_bytes_after_gc = young_list->eden_used_bytes();
|
||||
size_t survivor_used_bytes_after_gc = young_list->survivor_used_bytes();
|
||||
size_t heap_used_bytes_after_gc = _g1->used();
|
||||
|
||||
size_t heap_capacity_bytes_after_gc = _g1->capacity();
|
||||
size_t eden_capacity_bytes_after_gc =
|
||||
(_young_list_target_length * HeapRegion::GrainBytes) - survivor_used_bytes_after_gc;
|
||||
|
||||
gclog_or_tty->print(
|
||||
" [Eden: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT") "
|
||||
"Survivors: "EXT_SIZE_FORMAT"->"EXT_SIZE_FORMAT" "
|
||||
"Heap: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"
|
||||
EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")]",
|
||||
EXT_SIZE_PARAMS(_eden_used_bytes_before_gc),
|
||||
EXT_SIZE_PARAMS(_eden_capacity_bytes_before_gc),
|
||||
EXT_SIZE_PARAMS(eden_used_bytes_after_gc),
|
||||
EXT_SIZE_PARAMS(eden_capacity_bytes_after_gc),
|
||||
EXT_SIZE_PARAMS(_survivor_used_bytes_before_gc),
|
||||
EXT_SIZE_PARAMS(survivor_used_bytes_after_gc),
|
||||
EXT_SIZE_PARAMS(_heap_used_bytes_before_gc),
|
||||
EXT_SIZE_PARAMS(_heap_capacity_bytes_before_gc),
|
||||
EXT_SIZE_PARAMS(heap_used_bytes_after_gc),
|
||||
EXT_SIZE_PARAMS(heap_capacity_bytes_after_gc));
|
||||
|
||||
if (full) {
|
||||
MetaspaceAux::print_metaspace_change(_metaspace_used_bytes_before_gc);
|
||||
}
|
||||
|
||||
gclog_or_tty->cr();
|
||||
}
|
||||
|
||||
void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
|
||||
|
@ -175,7 +175,6 @@ private:
|
||||
CollectionSetChooser* _collectionSetChooser;
|
||||
|
||||
double _full_collection_start_sec;
|
||||
size_t _cur_collection_pause_used_at_start_bytes;
|
||||
uint _cur_collection_pause_used_regions_at_start;
|
||||
|
||||
// These exclude marking times.
|
||||
@ -194,7 +193,6 @@ private:
|
||||
|
||||
uint _young_list_target_length;
|
||||
uint _young_list_fixed_length;
|
||||
size_t _prev_eden_capacity; // used for logging
|
||||
|
||||
// The max number of regions we can extend the eden by while the GC
|
||||
// locker is active. This should be >= _young_list_target_length;
|
||||
@ -693,11 +691,11 @@ public:
|
||||
|
||||
// Records the information about the heap size for reporting in
|
||||
// print_detailed_heap_transition
|
||||
void record_heap_size_info_at_start();
|
||||
void record_heap_size_info_at_start(bool full);
|
||||
|
||||
// Print heap sizing transition (with less and more detail).
|
||||
void print_heap_transition();
|
||||
void print_detailed_heap_transition();
|
||||
void print_detailed_heap_transition(bool full = false);
|
||||
|
||||
void record_stop_world_start();
|
||||
void record_concurrent_pause();
|
||||
@ -861,9 +859,16 @@ private:
|
||||
uint _max_survivor_regions;
|
||||
|
||||
// For reporting purposes.
|
||||
size_t _eden_bytes_before_gc;
|
||||
size_t _survivor_bytes_before_gc;
|
||||
size_t _capacity_before_gc;
|
||||
// The value of _heap_bytes_before_gc is also used to calculate
|
||||
// the cost of copying.
|
||||
|
||||
size_t _eden_used_bytes_before_gc; // Eden occupancy before GC
|
||||
size_t _survivor_used_bytes_before_gc; // Survivor occupancy before GC
|
||||
size_t _heap_used_bytes_before_gc; // Heap occupancy before GC
|
||||
size_t _metaspace_used_bytes_before_gc; // Metaspace occupancy before GC
|
||||
|
||||
size_t _eden_capacity_bytes_before_gc; // Eden capacity before GC
|
||||
size_t _heap_capacity_bytes_before_gc; // Heap capacity before GC
|
||||
|
||||
// The amount of survivor regions after a collection.
|
||||
uint _recorded_survivor_regions;
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -155,11 +155,6 @@ void WorkerDataArray<T>::verify() {
|
||||
|
||||
G1GCPhaseTimes::G1GCPhaseTimes(uint max_gc_threads) :
|
||||
_max_gc_threads(max_gc_threads),
|
||||
_min_clear_cc_time_ms(-1.0),
|
||||
_max_clear_cc_time_ms(-1.0),
|
||||
_cur_clear_cc_time_ms(0.0),
|
||||
_cum_clear_cc_time_ms(0.0),
|
||||
_num_cc_clears(0L),
|
||||
_last_gc_worker_start_times_ms(_max_gc_threads, "%.1lf", false),
|
||||
_last_ext_root_scan_times_ms(_max_gc_threads, "%.1lf"),
|
||||
_last_satb_filtering_times_ms(_max_gc_threads, "%.1lf"),
|
||||
@ -212,11 +207,11 @@ void G1GCPhaseTimes::note_gc_end() {
|
||||
_last_gc_worker_times_ms.set(i, worker_time);
|
||||
|
||||
double worker_known_time = _last_ext_root_scan_times_ms.get(i) +
|
||||
_last_satb_filtering_times_ms.get(i) +
|
||||
_last_update_rs_times_ms.get(i) +
|
||||
_last_scan_rs_times_ms.get(i) +
|
||||
_last_obj_copy_times_ms.get(i) +
|
||||
_last_termination_times_ms.get(i);
|
||||
_last_satb_filtering_times_ms.get(i) +
|
||||
_last_update_rs_times_ms.get(i) +
|
||||
_last_scan_rs_times_ms.get(i) +
|
||||
_last_obj_copy_times_ms.get(i) +
|
||||
_last_termination_times_ms.get(i);
|
||||
|
||||
double worker_other_time = worker_time - worker_known_time;
|
||||
_last_gc_worker_other_times_ms.set(i, worker_other_time);
|
||||
@ -285,15 +280,6 @@ void G1GCPhaseTimes::print(double pause_time_sec) {
|
||||
}
|
||||
print_stats(1, "Code Root Fixup", _cur_collection_code_root_fixup_time_ms);
|
||||
print_stats(1, "Clear CT", _cur_clear_ct_time_ms);
|
||||
if (Verbose && G1Log::finest()) {
|
||||
print_stats(1, "Cur Clear CC", _cur_clear_cc_time_ms);
|
||||
print_stats(1, "Cum Clear CC", _cum_clear_cc_time_ms);
|
||||
print_stats(1, "Min Clear CC", _min_clear_cc_time_ms);
|
||||
print_stats(1, "Max Clear CC", _max_clear_cc_time_ms);
|
||||
if (_num_cc_clears > 0) {
|
||||
print_stats(1, "Avg Clear CC", _cum_clear_cc_time_ms / ((double)_num_cc_clears));
|
||||
}
|
||||
}
|
||||
double misc_time_ms = pause_time_sec * MILLIUNITS - accounted_time_ms();
|
||||
print_stats(1, "Other", misc_time_ms);
|
||||
if (_cur_verify_before_time_ms > 0.0) {
|
||||
@ -311,19 +297,3 @@ void G1GCPhaseTimes::print(double pause_time_sec) {
|
||||
print_stats(2, "Verify After", _cur_verify_after_time_ms);
|
||||
}
|
||||
}
|
||||
|
||||
void G1GCPhaseTimes::record_cc_clear_time_ms(double ms) {
|
||||
if (!(Verbose && G1Log::finest())) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (_min_clear_cc_time_ms < 0.0 || ms <= _min_clear_cc_time_ms) {
|
||||
_min_clear_cc_time_ms = ms;
|
||||
}
|
||||
if (_max_clear_cc_time_ms < 0.0 || ms >= _max_clear_cc_time_ms) {
|
||||
_max_clear_cc_time_ms = ms;
|
||||
}
|
||||
_cur_clear_cc_time_ms = ms;
|
||||
_cum_clear_cc_time_ms += ms;
|
||||
_num_cc_clears++;
|
||||
}
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -133,13 +133,6 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
|
||||
double _cur_ref_proc_time_ms;
|
||||
double _cur_ref_enq_time_ms;
|
||||
|
||||
// Card Table Count Cache stats
|
||||
double _min_clear_cc_time_ms; // min
|
||||
double _max_clear_cc_time_ms; // max
|
||||
double _cur_clear_cc_time_ms; // clearing time during current pause
|
||||
double _cum_clear_cc_time_ms; // cummulative clearing time
|
||||
jlong _num_cc_clears; // number of times the card count cache has been cleared
|
||||
|
||||
double _cur_collection_start_sec;
|
||||
double _root_region_scan_wait_time_ms;
|
||||
|
||||
@ -227,8 +220,6 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
|
||||
_root_region_scan_wait_time_ms = time_ms;
|
||||
}
|
||||
|
||||
void record_cc_clear_time_ms(double ms);
|
||||
|
||||
void record_young_free_cset_time_ms(double time_ms) {
|
||||
_recorded_young_free_cset_time_ms = time_ms;
|
||||
}
|
||||
|
148
hotspot/src/share/vm/gc_implementation/g1/g1HotCardCache.cpp
Normal file
148
hotspot/src/share/vm/gc_implementation/g1/g1HotCardCache.cpp
Normal file
@ -0,0 +1,148 @@
|
||||
/*
|
||||
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "gc_implementation/g1/dirtyCardQueue.hpp"
|
||||
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
|
||||
#include "gc_implementation/g1/g1HotCardCache.hpp"
|
||||
#include "gc_implementation/g1/g1RemSet.hpp"
|
||||
#include "gc_implementation/g1/heapRegion.hpp"
|
||||
#include "runtime/atomic.hpp"
|
||||
|
||||
G1HotCardCache::G1HotCardCache(G1CollectedHeap *g1h):
|
||||
_g1h(g1h), _hot_cache(NULL), _use_cache(false), _card_counts(g1h) {}
|
||||
|
||||
void G1HotCardCache::initialize() {
|
||||
if (default_use_cache()) {
|
||||
_use_cache = true;
|
||||
|
||||
_hot_cache_size = (1 << G1ConcRSLogCacheSize);
|
||||
_hot_cache = NEW_C_HEAP_ARRAY(jbyte*, _hot_cache_size, mtGC);
|
||||
|
||||
_n_hot = 0;
|
||||
_hot_cache_idx = 0;
|
||||
|
||||
// For refining the cards in the hot cache in parallel
|
||||
int n_workers = (ParallelGCThreads > 0 ?
|
||||
_g1h->workers()->total_workers() : 1);
|
||||
_hot_cache_par_chunk_size = MAX2(1, _hot_cache_size / n_workers);
|
||||
_hot_cache_par_claimed_idx = 0;
|
||||
|
||||
_card_counts.initialize();
|
||||
}
|
||||
}
|
||||
|
||||
G1HotCardCache::~G1HotCardCache() {
|
||||
if (default_use_cache()) {
|
||||
assert(_hot_cache != NULL, "Logic");
|
||||
FREE_C_HEAP_ARRAY(jbyte*, _hot_cache, mtGC);
|
||||
}
|
||||
}
|
||||
|
||||
jbyte* G1HotCardCache::insert(jbyte* card_ptr) {
|
||||
uint count = _card_counts.add_card_count(card_ptr);
|
||||
if (!_card_counts.is_hot(count)) {
|
||||
// The card is not hot so do not store it in the cache;
|
||||
// return it for immediate refining.
|
||||
return card_ptr;
|
||||
}
|
||||
|
||||
// Otherwise, the card is hot.
|
||||
jbyte* res = NULL;
|
||||
MutexLockerEx x(HotCardCache_lock, Mutex::_no_safepoint_check_flag);
|
||||
if (_n_hot == _hot_cache_size) {
|
||||
res = _hot_cache[_hot_cache_idx];
|
||||
_n_hot--;
|
||||
}
|
||||
|
||||
// Now _n_hot < _hot_cache_size, and we can insert at _hot_cache_idx.
|
||||
_hot_cache[_hot_cache_idx] = card_ptr;
|
||||
_hot_cache_idx++;
|
||||
|
||||
if (_hot_cache_idx == _hot_cache_size) {
|
||||
// Wrap around
|
||||
_hot_cache_idx = 0;
|
||||
}
|
||||
_n_hot++;
|
||||
|
||||
return res;
|
||||
}
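The drain() routine that follows hands these cached cards to the refinement code in parallel, with workers claiming fixed-size chunks of the array through a compare-and-swap on a shared index. A self-contained sketch of that claiming pattern, using std::atomic in place of HotSpot's Atomic::cmpxchg (the chunk size and the per-item work are placeholders):

#include <algorithm>
#include <atomic>
#include <vector>

// Each worker repeatedly tries to claim [start, start + chunk) by CAS on a
// shared index; only the CAS winner processes that chunk, so every element
// is handled exactly once even when several workers drain concurrently.
void drain_in_chunks(const std::vector<int>& items,
                     std::atomic<int>& claimed_idx,
                     int chunk,
                     void (*process)(int)) {
  const int n = static_cast<int>(items.size());
  int start;
  while ((start = claimed_idx.load()) < n) {
    int expected = start;
    if (claimed_idx.compare_exchange_strong(expected, start + chunk)) {
      int end = std::min(start + chunk, n);
      for (int i = start; i < end; i++) {
        process(items[i]);   // stands in for g1rs->refine_card(...)
      }
    }
    // On CAS failure another worker claimed this chunk; loop and re-read.
  }
}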
|
||||
|
||||
void G1HotCardCache::drain(int worker_i,
|
||||
G1RemSet* g1rs,
|
||||
DirtyCardQueue* into_cset_dcq) {
|
||||
if (!default_use_cache()) {
|
||||
assert(_hot_cache == NULL, "Logic");
|
||||
return;
|
||||
}
|
||||
|
||||
assert(_hot_cache != NULL, "Logic");
|
||||
assert(!use_cache(), "cache should be disabled");
|
||||
int start_idx;
|
||||
|
||||
while ((start_idx = _hot_cache_par_claimed_idx) < _n_hot) { // read once
|
||||
int end_idx = start_idx + _hot_cache_par_chunk_size;
|
||||
|
||||
if (start_idx ==
|
||||
Atomic::cmpxchg(end_idx, &_hot_cache_par_claimed_idx, start_idx)) {
|
||||
// The current worker has successfully claimed the chunk [start_idx..end_idx)
|
||||
end_idx = MIN2(end_idx, _n_hot);
|
||||
for (int i = start_idx; i < end_idx; i++) {
|
||||
jbyte* card_ptr = _hot_cache[i];
|
||||
if (card_ptr != NULL) {
|
||||
if (g1rs->refine_card(card_ptr, worker_i, true)) {
|
||||
// The part of the heap spanned by the card contains references
|
||||
// that point into the current collection set.
|
||||
// We need to record the card pointer in the DirtyCardQueueSet
|
||||
// that we use for such cards.
|
||||
//
|
||||
// The only time we care about recording cards that contain
|
||||
// references that point into the collection set is during
|
||||
// RSet updating while within an evacuation pause.
|
||||
// In this case worker_i should be the id of a GC worker thread
|
||||
assert(SafepointSynchronize::is_at_safepoint(), "Should be at a safepoint");
|
||||
assert(worker_i < (int) (ParallelGCThreads == 0 ? 1 : ParallelGCThreads),
|
||||
err_msg("incorrect worker id: "INT32_FORMAT, worker_i));
|
||||
|
||||
into_cset_dcq->enqueue(card_ptr);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// The existing entries in the hot card cache, which were just refined
|
||||
// above, are discarded prior to re-enabling the cache near the end of the GC.
|
||||
}
|
||||
|
||||
void G1HotCardCache::resize_card_counts(size_t heap_capacity) {
|
||||
_card_counts.resize(heap_capacity);
|
||||
}
|
||||
|
||||
void G1HotCardCache::reset_card_counts(HeapRegion* hr) {
|
||||
_card_counts.clear_region(hr);
|
||||
}
|
||||
|
||||
void G1HotCardCache::reset_card_counts() {
|
||||
_card_counts.clear_all();
|
||||
}
|
128
hotspot/src/share/vm/gc_implementation/g1/g1HotCardCache.hpp
Normal file
128
hotspot/src/share/vm/gc_implementation/g1/g1HotCardCache.hpp
Normal file
@ -0,0 +1,128 @@
|
||||
/*
|
||||
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1HOTCARDCACHE_HPP
|
||||
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1HOTCARDCACHE_HPP
|
||||
|
||||
#include "gc_implementation/g1/g1_globals.hpp"
|
||||
#include "gc_implementation/g1/g1CardCounts.hpp"
|
||||
#include "memory/allocation.hpp"
|
||||
#include "runtime/safepoint.hpp"
|
||||
#include "runtime/thread.inline.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
|
||||
class DirtyCardQueue;
|
||||
class G1CollectedHeap;
|
||||
class G1RemSet;
|
||||
class HeapRegion;
|
||||
|
||||
// An evicting cache of cards that have been logged by the G1 post
|
||||
// write barrier. Placing a card in the cache delays the refinement
|
||||
// of the card until the card is evicted, or the cache is drained
|
||||
// during the next evacuation pause.
|
||||
//
|
||||
// The first thing the G1 post write barrier does is to check whether
|
||||
// the card containing the updated pointer is already dirty and, if
|
||||
// so, skips the remaining code in the barrier.
|
||||
//
|
||||
// Delaying the refinement of a card will make the card fail the
|
||||
// first is_dirty check in the write barrier, skipping the remainder
|
||||
// of the write barrier.
|
||||
//
|
||||
// This can significantly reduce the overhead of the write barrier
|
||||
// code, increasing throughput.
|
||||
|
||||
class G1HotCardCache: public CHeapObj<mtGC> {
|
||||
G1CollectedHeap* _g1h;
|
||||
|
||||
// The card cache table
|
||||
jbyte** _hot_cache;
|
||||
|
||||
int _hot_cache_size;
|
||||
int _n_hot;
|
||||
int _hot_cache_idx;
|
||||
|
||||
int _hot_cache_par_chunk_size;
|
||||
volatile int _hot_cache_par_claimed_idx;
|
||||
|
||||
bool _use_cache;
|
||||
|
||||
G1CardCounts _card_counts;
|
||||
|
||||
bool default_use_cache() const {
|
||||
return (G1ConcRSLogCacheSize > 0);
|
||||
}
|
||||
|
||||
public:
|
||||
G1HotCardCache(G1CollectedHeap* g1h);
|
||||
~G1HotCardCache();
|
||||
|
||||
void initialize();
|
||||
|
||||
bool use_cache() { return _use_cache; }
|
||||
|
||||
void set_use_cache(bool b) {
|
||||
_use_cache = (b ? default_use_cache() : false);
|
||||
}
|
||||
|
||||
// Returns the card to be refined or NULL.
|
||||
//
|
||||
// Increments the count for the given card. If the card is not 'hot',
|
||||
// it is returned for immediate refining. Otherwise the card is
|
||||
// added to the hot card cache.
|
||||
// If there is enough room in the hot card cache for the card we're
|
||||
// adding, NULL is returned and no further action is needed.
|
||||
// If we evict a card from the cache to make room for the new card,
|
||||
// the evicted card is then returned for refinement.
|
||||
jbyte* insert(jbyte* card_ptr);
|
||||
|
||||
// Refine the cards that have delayed as a result of
|
||||
// being in the cache.
|
||||
void drain(int worker_i, G1RemSet* g1rs, DirtyCardQueue* into_cset_dcq);
|
||||
|
||||
// Set up for parallel processing of the cards in the hot cache
|
||||
void reset_hot_cache_claimed_index() {
|
||||
_hot_cache_par_claimed_idx = 0;
|
||||
}
|
||||
|
||||
// Resets the hot card cache and discards the entries.
|
||||
void reset_hot_cache() {
|
||||
assert(SafepointSynchronize::is_at_safepoint(), "Should be at a safepoint");
|
||||
assert(Thread::current()->is_VM_thread(), "Current thread should be the VMthread");
|
||||
_hot_cache_idx = 0; _n_hot = 0;
|
||||
}
|
||||
|
||||
bool hot_cache_is_empty() { return _n_hot == 0; }
|
||||
|
||||
// Resizes the card counts table to match the given capacity
|
||||
void resize_card_counts(size_t heap_capacity);
|
||||
|
||||
// Zeros the values in the card counts table for entire committed heap
|
||||
void reset_card_counts();
|
||||
|
||||
// Zeros the values in the card counts table for the given region
|
||||
void reset_card_counts(HeapRegion* hr);
|
||||
};
|
||||
|
||||
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1HOTCARDCACHE_HPP
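Reading the declarations and comments above together, insert() behaves like a fixed-size ring buffer that absorbs new entries until it is full and then returns the overwritten (oldest) entry to the caller. A rough single-threaded sketch of that contract, not the HotSpot code (the real class stores jbyte* card pointers and serializes insertions with HotCardCache_lock):

#include <cstddef>
#include <vector>

// Evicting ring buffer with the same calling convention as
// G1HotCardCache::insert(): returns -1 ("NULL") when the new entry was
// absorbed, otherwise returns the evicted entry that must be processed now.
class EvictingCache {
  std::vector<long> slots_;
  size_t size_;   // number of live entries, like _n_hot
  size_t idx_;    // next slot to use, like _hot_cache_idx
public:
  explicit EvictingCache(size_t capacity)
    : slots_(capacity, -1), size_(0), idx_(0) {}

  long insert(long value) {
    long evicted = -1;
    if (size_ == slots_.size()) {
      evicted = slots_[idx_];   // buffer full: the slot we are about to
      size_--;                  // overwrite holds the oldest entry
    }
    slots_[idx_] = value;
    idx_ = (idx_ + 1) % slots_.size();
    size_++;
    return evicted;             // -1 means "nothing to refine right now"
  }
};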
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -29,6 +29,7 @@
|
||||
#include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
|
||||
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
|
||||
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
|
||||
#include "gc_implementation/g1/g1HotCardCache.hpp"
|
||||
#include "gc_implementation/g1/g1GCPhaseTimes.hpp"
|
||||
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
|
||||
#include "gc_implementation/g1/g1RemSet.inline.hpp"
|
||||
@ -247,7 +248,7 @@ public:
|
||||
assert(SafepointSynchronize::is_at_safepoint(), "not during an evacuation pause");
|
||||
assert(worker_i < (int) (ParallelGCThreads == 0 ? 1 : ParallelGCThreads), "should be a GC worker");
|
||||
|
||||
if (_g1rs->concurrentRefineOneCard(card_ptr, worker_i, true)) {
|
||||
if (_g1rs->refine_card(card_ptr, worker_i, true)) {
|
||||
// 'card_ptr' contains references that point into the collection
|
||||
// set. We need to record the card in the DCQS
|
||||
// (G1CollectedHeap::into_cset_dirty_card_queue_set())
|
||||
@ -288,9 +289,6 @@ void G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
|
||||
#if CARD_REPEAT_HISTO
|
||||
ct_freq_update_histo_and_reset();
|
||||
#endif
|
||||
if (worker_i == 0) {
|
||||
_cg1r->clear_and_record_card_counts();
|
||||
}
|
||||
|
||||
// We cache the value of 'oc' closure into the appropriate slot in the
|
||||
// _cset_rs_update_cl for this worker
|
||||
@ -396,7 +394,7 @@ public:
|
||||
// RSet updating,
|
||||
// * the post-write barrier shouldn't be logging updates to young
|
||||
// regions (but there is a situation where this can happen - see
|
||||
// the comment in G1RemSet::concurrentRefineOneCard below -
|
||||
// the comment in G1RemSet::refine_card() below -
|
||||
// that should not be applicable here), and
|
||||
// * during actual RSet updating, the filtering of cards in young
|
||||
// regions in HeapRegion::oops_on_card_seq_iterate_careful is
|
||||
@ -502,8 +500,6 @@ void G1RemSet::scrub_par(BitMap* region_bm, BitMap* card_bm,
|
||||
claim_val);
|
||||
}
|
||||
|
||||
|
||||
|
||||
G1TriggerClosure::G1TriggerClosure() :
|
||||
_triggered(false) { }
|
||||
|
||||
@ -524,13 +520,91 @@ G1UpdateRSOrPushRefOopClosure(G1CollectedHeap* g1h,
|
||||
_record_refs_into_cset(record_refs_into_cset),
|
||||
_push_ref_cl(push_ref_cl), _worker_i(worker_i) { }
|
||||
|
||||
bool G1RemSet::concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i,
|
||||
bool check_for_refs_into_cset) {
|
||||
// Returns true if the given card contains references that point
|
||||
// into the collection set, if we're checking for such references;
|
||||
// false otherwise.
|
||||
|
||||
bool G1RemSet::refine_card(jbyte* card_ptr, int worker_i,
|
||||
bool check_for_refs_into_cset) {
|
||||
|
||||
// If the card is no longer dirty, nothing to do.
|
||||
if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
|
||||
// No need to return that this card contains refs that point
|
||||
// into the collection set.
|
||||
return false;
|
||||
}
|
||||
|
||||
// Construct the region representing the card.
|
||||
HeapWord* start = _ct_bs->addr_for(card_ptr);
|
||||
// And find the region containing it.
|
||||
HeapRegion* r = _g1->heap_region_containing(start);
|
||||
assert(r != NULL, "unexpected null");
|
||||
if (r == NULL) {
|
||||
// Again no need to return that this card contains refs that
|
||||
// point into the collection set.
|
||||
return false; // Not in the G1 heap (might be in perm, for example.)
|
||||
}
|
||||
|
||||
// Why do we have to check here whether a card is on a young region,
|
||||
// given that we dirty young regions and, as a result, the
|
||||
// post-barrier is supposed to filter them out and never to enqueue
|
||||
// them? When we allocate a new region as the "allocation region" we
|
||||
// actually dirty its cards after we release the lock, since card
|
||||
// dirtying while holding the lock was a performance bottleneck. So,
|
||||
// as a result, it is possible for other threads to actually
|
||||
// allocate objects in the region (after they acquire the lock)
|
||||
// before all the cards on the region are dirtied. This is unlikely,
|
||||
// and it doesn't happen often, but it can happen. So, the extra
|
||||
// check below filters out those cards.
|
||||
if (r->is_young()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// While we are processing RSet buffers during the collection, we
|
||||
// actually don't want to scan any cards on the collection set,
|
||||
// since we don't want to update remembered sets with entries that
|
||||
// point into the collection set, given that live objects from the
|
||||
// collection set are about to move and such entries will be stale
|
||||
// very soon. This change also deals with a reliability issue which
|
||||
// involves scanning a card in the collection set and coming across
|
||||
// an array that was being chunked and looking malformed. Note,
|
||||
// however, that if evacuation fails, we have to scan any objects
|
||||
// that were not moved and create any missing entries.
|
||||
if (r->in_collection_set()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// The result from the hot card cache insert call is either:
|
||||
// * pointer to the current card
|
||||
// (implying that the current card is not 'hot'),
|
||||
// * null
|
||||
// (meaning we had inserted the card ptr into the "hot" card cache,
|
||||
// which had some headroom),
|
||||
// * a pointer to a "hot" card that was evicted from the "hot" cache.
|
||||
//
|
||||
|
||||
G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
|
||||
if (hot_card_cache->use_cache()) {
|
||||
assert(!check_for_refs_into_cset, "sanity");
|
||||
assert(!SafepointSynchronize::is_at_safepoint(), "sanity");
|
||||
|
||||
card_ptr = hot_card_cache->insert(card_ptr);
|
||||
if (card_ptr == NULL) {
|
||||
// There was no eviction. Nothing to do.
|
||||
return false;
|
||||
}
|
||||
|
||||
start = _ct_bs->addr_for(card_ptr);
|
||||
r = _g1->heap_region_containing(start);
|
||||
if (r == NULL) {
|
||||
// Not in the G1 heap
|
||||
return false;
|
||||
}
|
||||
|
||||
// Checking whether the region we got back from the cache
|
||||
// is young here is inappropriate. The region could have been
|
||||
// freed, reallocated and tagged as young while in the cache.
|
||||
// Hence we could see its young type change at any time.
|
||||
}
|
||||
|
||||
// Don't use addr_for(card_ptr + 1) which can ask for
|
||||
// a card beyond the heap. This is not safe without a perm
|
||||
@ -610,140 +684,17 @@ bool G1RemSet::concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i,
    _conc_refine_cards++;
  }

  return trigger_cl.triggered();
}
  // This gets set to true if the card being refined has
  // references that point into the collection set.
  bool has_refs_into_cset = trigger_cl.triggered();

bool G1RemSet::concurrentRefineOneCard(jbyte* card_ptr, int worker_i,
                                       bool check_for_refs_into_cset) {
  // If the card is no longer dirty, nothing to do.
  if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
    // No need to return that this card contains refs that point
    // into the collection set.
    return false;
  }

  // Construct the region representing the card.
  HeapWord* start = _ct_bs->addr_for(card_ptr);
  // And find the region containing it.
  HeapRegion* r = _g1->heap_region_containing(start);
  if (r == NULL) {
    // Again no need to return that this card contains refs that
    // point into the collection set.
    return false; // Not in the G1 heap (might be in perm, for example.)
  }
  // Why do we have to check here whether a card is on a young region,
  // given that we dirty young regions and, as a result, the
  // post-barrier is supposed to filter them out and never to enqueue
  // them? When we allocate a new region as the "allocation region" we
  // actually dirty its cards after we release the lock, since card
  // dirtying while holding the lock was a performance bottleneck. So,
  // as a result, it is possible for other threads to actually
  // allocate objects in the region (after the acquire the lock)
  // before all the cards on the region are dirtied. This is unlikely,
  // and it doesn't happen often, but it can happen. So, the extra
  // check below filters out those cards.
  if (r->is_young()) {
    return false;
  }
  // While we are processing RSet buffers during the collection, we
  // actually don't want to scan any cards on the collection set,
  // since we don't want to update remebered sets with entries that
  // point into the collection set, given that live objects from the
  // collection set are about to move and such entries will be stale
  // very soon. This change also deals with a reliability issue which
  // involves scanning a card in the collection set and coming across
  // an array that was being chunked and looking malformed. Note,
  // however, that if evacuation fails, we have to scan any objects
  // that were not moved and create any missing entries.
  if (r->in_collection_set()) {
    return false;
  }

  // Should we defer processing the card?
  //
  // Previously the result from the insert_cache call would be
  // either card_ptr (implying that card_ptr was currently "cold"),
  // null (meaning we had inserted the card ptr into the "hot"
  // cache, which had some headroom), or a "hot" card ptr
  // extracted from the "hot" cache.
  //
  // Now that the _card_counts cache in the ConcurrentG1Refine
  // instance is an evicting hash table, the result we get back
  // could be from evicting the card ptr in an already occupied
  // bucket (in which case we have replaced the card ptr in the
  // bucket with card_ptr and "defer" is set to false). To avoid
  // having a data structure (updates to which would need a lock)
  // to hold these unprocessed dirty cards, we need to immediately
  // process card_ptr. The actions needed to be taken on return
  // from cache_insert are summarized in the following table:
  //
  // res      defer   action
  // --------------------------------------------------------------
  // null     false   card evicted from _card_counts & replaced with
  //                  card_ptr; evicted ptr added to hot cache.
  //                  No need to process res; immediately process card_ptr
  //
  // null     true    card not evicted from _card_counts; card_ptr added
  //                  to hot cache.
  //                  Nothing to do.
  //
  // non-null false   card evicted from _card_counts & replaced with
  //                  card_ptr; evicted ptr is currently "cold" or
  //                  caused an eviction from the hot cache.
  //                  Immediately process res; process card_ptr.
  //
  // non-null true    card not evicted from _card_counts; card_ptr is
  //                  currently cold, or caused an eviction from hot
  //                  cache.
  //                  Immediately process res; no need to process card_ptr.

  jbyte* res = card_ptr;
  bool defer = false;

  // This gets set to true if the card being refined has references
  // that point into the collection set.
  bool oops_into_cset = false;

  if (_cg1r->use_cache()) {
    jbyte* res = _cg1r->cache_insert(card_ptr, &defer);
    if (res != NULL && (res != card_ptr || defer)) {
      start = _ct_bs->addr_for(res);
      r = _g1->heap_region_containing(start);
      if (r != NULL) {
        // Checking whether the region we got back from the cache
        // is young here is inappropriate. The region could have been
        // freed, reallocated and tagged as young while in the cache.
        // Hence we could see its young type change at any time.
        //
        // Process card pointer we get back from the hot card cache. This
        // will check whether the region containing the card is young
        // _after_ checking that the region has been allocated from.
        oops_into_cset = concurrentRefineOneCard_impl(res, worker_i,
                                                      false /* check_for_refs_into_cset */);
        // The above call to concurrentRefineOneCard_impl is only
        // performed if the hot card cache is enabled. This cache is
        // disabled during an evacuation pause - which is the only
        // time when we need know if the card contains references
        // that point into the collection set. Also when the hot card
        // cache is enabled, this code is executed by the concurrent
        // refine threads - rather than the GC worker threads - and
        // concurrentRefineOneCard_impl will return false.
        assert(!oops_into_cset, "should not see true here");
      }
    }
  }

  if (!defer) {
    oops_into_cset =
      concurrentRefineOneCard_impl(card_ptr, worker_i, check_for_refs_into_cset);
    // We should only be detecting that the card contains references
    // that point into the collection set if the current thread is
    // a GC worker thread.
    assert(!oops_into_cset || SafepointSynchronize::is_at_safepoint(),
  // We should only be detecting that the card contains references
  // that point into the collection set if the current thread is
  // a GC worker thread.
  assert(!has_refs_into_cset || SafepointSynchronize::is_at_safepoint(),
           "invalid result at non safepoint");
  }
  return oops_into_cset;

  return has_refs_into_cset;
}

class HRRSStatsIter: public HeapRegionClosure {
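The res/defer table in the removed comment above boils down to two independent decisions: refine res if it is a real eviction, and refine card_ptr unless defer is set. A hedged sketch of that dispatch follows; process_card() and refine_with_old_cache() are assumed stand-ins for concurrentRefineOneCard_impl() and the removed caller, not HotSpot code.

// Illustrative dispatch for the old (res, defer) contract of cache_insert
// summarized in the table above. Both functions are assumed stubs.
typedef signed char jbyte;

static bool process_card(jbyte* card_ptr, int worker_i,
                         bool check_for_refs_into_cset) {
  // Stub: real code would scan the card and report cset references.
  (void)card_ptr; (void)worker_i; (void)check_for_refs_into_cset;
  return false;
}

static bool refine_with_old_cache(jbyte* card_ptr, int worker_i,
                                  bool check_for_refs_into_cset,
                                  jbyte* res, bool defer) {
  bool oops_into_cset = false;

  // A non-null res that is not card_ptr itself (or that comes back with
  // defer set) is a card evicted from the caches: nothing else holds a
  // reference to it any more, so it must be refined immediately.
  if (res != nullptr && (res != card_ptr || defer)) {
    process_card(res, worker_i, false /* check_for_refs_into_cset */);
  }

  // card_ptr itself is only skipped when it was parked in the hot cache.
  if (!defer) {
    oops_into_cset = process_card(card_ptr, worker_i, check_for_refs_into_cset);
  }
  return oops_into_cset;
}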
@ -846,13 +797,16 @@ void G1RemSet::prepare_for_verify() {
      DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
      dcqs.concatenate_logs();
    }
    bool cg1r_use_cache = _cg1r->use_cache();
    _cg1r->set_use_cache(false);

    G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
    bool use_hot_card_cache = hot_card_cache->use_cache();
    hot_card_cache->set_use_cache(false);

    DirtyCardQueue into_cset_dcq(&_g1->into_cset_dirty_card_queue_set());
    updateRS(&into_cset_dcq, 0);
    _g1->into_cset_dirty_card_queue_set().clear();
    _cg1r->set_use_cache(cg1r_use_cache);

    hot_card_cache->set_use_cache(use_hot_card_cache);
    assert(JavaThread::dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
  }
}
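The hunk above moves the save/disable/restore dance from ConcurrentG1Refine's use_cache flag to the new G1HotCardCache object. The same pattern could also be packaged as a small scope guard; this is a hypothetical sketch, with ToyCache and CacheDisabler as illustrative names, not something the patch introduces.

// Hypothetical scope guard for the save/disable/restore pattern used in
// prepare_for_verify(); all names here are illustrative assumptions.
class ToyCache {
  bool _use_cache = true;
 public:
  bool use_cache() const     { return _use_cache; }
  void set_use_cache(bool b) { _use_cache = b; }
};

class CacheDisabler {
  ToyCache* _cache;
  bool      _saved;
 public:
  explicit CacheDisabler(ToyCache* cache)
    : _cache(cache), _saved(cache->use_cache()) {
    _cache->set_use_cache(false);   // disable for the scope
  }
  ~CacheDisabler() {
    _cache->set_use_cache(_saved);  // restore on scope exit
  }
};

void do_verification_update(ToyCache* hot_card_cache) {
  CacheDisabler no_cache(hot_card_cache);
  // ... the remembered-set update would run here with the cache disabled ...
}                                   // previous setting restored here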
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -66,14 +66,6 @@ protected:
  // references into the collection set.
  OopsInHeapRegionClosure** _cset_rs_update_cl;

  // The routine that performs the actual work of refining a dirty
  // card.
  // If check_for_refs_into_refs is true then a true result is returned
  // if the card contains oops that have references into the current
  // collection set.
  bool concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i,
                                    bool check_for_refs_into_cset);

public:
  // This is called to reset dual hash tables after the gc pause
  // is finished and the initial hash table is no longer being
@ -90,8 +82,7 @@ public:
  // function can be helpful in partitioning the work to be done. It
  // should be the same as the "i" passed to the calling thread's
  // work(i) function. In the sequential case this param will be ingored.
  void oops_into_collection_set_do(OopsInHeapRegionClosure* blk,
                                   int worker_i);
  void oops_into_collection_set_do(OopsInHeapRegionClosure* blk, int worker_i);

  // Prepare for and cleanup after an oops_into_collection_set_do
  // call. Must call each of these once before and after (in sequential
@ -124,14 +115,13 @@ public:
  void scrub_par(BitMap* region_bm, BitMap* card_bm,
                 uint worker_num, int claim_val);

  // Refine the card corresponding to "card_ptr". If "sts" is non-NULL,
  // join and leave around parts that must be atomic wrt GC. (NULL means
  // being done at a safepoint.)
  // Refine the card corresponding to "card_ptr".
  // If check_for_refs_into_cset is true, a true result is returned
  // if the given card contains oops that have references into the
  // current collection set.
  virtual bool concurrentRefineOneCard(jbyte* card_ptr, int worker_i,
                                       bool check_for_refs_into_cset);
  virtual bool refine_card(jbyte* card_ptr,
                           int worker_i,
                           bool check_for_refs_into_cset);

  // Print any relevant summary info.
  virtual void print_summary_info();
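The renamed refine_card keeps the check_for_refs_into_cset flag that distinguishes its two kinds of callers, as described in the new comment above. A hedged sketch of those call sites follows; G1RemSetLike, its stub body, and the redirty callback are illustrative assumptions, not the real refinement thread or DCQS closures.

// Illustrative callers of the renamed refine_card(); names are assumptions.
typedef signed char jbyte;

struct G1RemSetLike {
  bool refine_card(jbyte* card_ptr, int worker_i, bool check_for_refs_into_cset) {
    // Stub: the real method scans the card and can only report references
    // into the collection set when asked to check for them.
    (void)card_ptr; (void)worker_i; (void)check_for_refs_into_cset;
    return false;
  }
};

// Concurrent refinement thread: runs outside a pause, never needs to know
// about collection-set references, so it passes false and ignores the result.
void concurrent_refine(G1RemSetLike* rs, jbyte* card_ptr, int worker_i) {
  rs->refine_card(card_ptr, worker_i, false /* check_for_refs_into_cset */);
}

// GC worker during an evacuation pause: a true result means the card still
// holds references into the collection set and must be handled, for example
// by redirtying it for a later pass.
void pause_time_refine(G1RemSetLike* rs, jbyte* card_ptr, int worker_i,
                       void (*redirty)(jbyte*)) {
  if (rs->refine_card(card_ptr, worker_i, true /* check_for_refs_into_cset */)) {
    redirty(card_ptr);
  }
}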
@ -163,16 +163,12 @@
          "Select green, yellow and red zones adaptively to meet the " \
          "the pause requirements.") \
  \
  develop(intx, G1ConcRSLogCacheSize, 10, \
  product(uintx, G1ConcRSLogCacheSize, 10, \
          "Log base 2 of the length of conc RS hot-card cache.") \
  \
  develop(intx, G1ConcRSHotCardLimit, 4, \
  product(uintx, G1ConcRSHotCardLimit, 4, \
          "The threshold that defines (>=) a hot card.") \
  \
  develop(intx, G1MaxHotCardCountSizePercent, 25, \
          "The maximum size of the hot card count cache as a " \
          "percentage of the number of cards for the maximum heap.") \
  \
  develop(bool, G1PrintOopAppls, false, \
          "When true, print applications of closures to external locs.") \
  \
@ -247,10 +243,6 @@
          "If non-0 is the number of parallel rem set update threads, " \
          "otherwise the value is determined ergonomically.") \
  \
  develop(intx, G1CardCountCacheExpandThreshold, 16, \
          "Expand the card count cache if the number of collisions for " \
          "a particular entry exceeds this value.") \
  \
  develop(bool, G1VerifyCTCleanup, false, \
          "Verify card table cleanup.") \
  \
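The two hunks above promote G1ConcRSLogCacheSize and G1ConcRSHotCardLimit from develop to product flags, so they become tunable on release builds, while the card-count sizing flags go away. The sketch below shows the assumed relationship between the two surviving knobs; the constants mirror the flag names but this is an illustration, not the HotSpot implementation.

// Illustrative reading of the two newly productized knobs; the variable
// names mirror the -XX flags, the defaults come from the table above.
#include <cstddef>

static const unsigned G1ConcRSLogCacheSize = 10; // log2 of hot-card cache length
static const unsigned G1ConcRSHotCardLimit = 4;  // count >= this => "hot" card

// Assumed relationship: the hot-card cache holds 2^G1ConcRSLogCacheSize entries.
static const size_t hot_card_cache_length = (size_t)1 << G1ConcRSLogCacheSize;

// A card that has been refined at least G1ConcRSHotCardLimit times is treated
// as hot and parked in the cache instead of being refined again right away.
inline bool is_hot(unsigned refinement_count) {
  return refinement_count >= G1ConcRSHotCardLimit;
}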
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -282,7 +282,8 @@ OtherRegionsTable::OtherRegionsTable(HeapRegion* hr) :
    _fine_eviction_stride = _max_fine_entries / _fine_eviction_sample_size;
  }

  _fine_grain_regions = new PerRegionTablePtr[_max_fine_entries];
  _fine_grain_regions = NEW_C_HEAP_ARRAY3(PerRegionTablePtr, _max_fine_entries,
                                          mtGC, 0, AllocFailStrategy::RETURN_NULL);

  if (_fine_grain_regions == NULL) {
    vm_exit_out_of_memory(sizeof(void*)*_max_fine_entries, OOM_MALLOC_ERROR,
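The allocation of _fine_grain_regions switches from plain new[] to a C-heap array requested with AllocFailStrategy::RETURN_NULL, so the out-of-memory case can be reported explicitly via vm_exit_out_of_memory instead of an uncontrolled failure. A generic sketch of the same fail-soft-then-report pattern in plain C++ follows; report_oom() is an assumed stand-in for the VM error routine.

// Generic allocate-with-RETURN_NULL-then-report sketch; report_oom() is an
// assumption standing in for vm_exit_out_of_memory().
#include <cstdio>
#include <cstdlib>
#include <new>

struct PerRegionTable;                 // opaque here
typedef PerRegionTable* PerRegionTablePtr;

static void report_oom(size_t bytes, const char* where) {
  std::fprintf(stderr, "out of memory allocating %zu bytes in %s\n", bytes, where);
  std::abort();
}

PerRegionTablePtr* allocate_fine_grain_regions(size_t max_fine_entries) {
  // nothrow new returns null on failure instead of throwing, mirroring
  // AllocFailStrategy::RETURN_NULL in the patch.
  PerRegionTablePtr* arr =
      new (std::nothrow) PerRegionTablePtr[max_fine_entries]();
  if (arr == nullptr) {
    report_oom(sizeof(PerRegionTablePtr) * max_fine_entries,
               "fine-grain region table");
  }
  return arr;
}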
@ -706,10 +707,11 @@ size_t OtherRegionsTable::mem_size() const {
  // Cast away const in this case.
  MutexLockerEx x((Mutex*)&_m, Mutex::_no_safepoint_check_flag);
  size_t sum = 0;
  PerRegionTable * cur = _first_all_fine_prts;
  while (cur != NULL) {
    sum += cur->mem_size();
    cur = cur->next();
  // all PRTs are of the same size so it is sufficient to query only one of them.
  if (_first_all_fine_prts != NULL) {
    assert(_last_all_fine_prts != NULL &&
           _first_all_fine_prts->mem_size() == _last_all_fine_prts->mem_size(), "check that mem_size() is constant");
    sum += _first_all_fine_prts->mem_size() * _n_fine_entries;
  }
  sum += (sizeof(PerRegionTable*) * _max_fine_entries);
  sum += (_coarse_map.size_in_words() * HeapWordSize);
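The hunk above replaces a walk over every fine-grained PerRegionTable with a single multiplication, relying on all PRTs reporting the same mem_size(). A small before/after sketch of that accounting follows; Node and its list layout are simplified stand-ins, not the real PerRegionTable.

// Before/after sketch of the mem_size() change: summing a linked list of
// equally sized nodes versus multiplying a count by one node's size.
#include <cstddef>

struct Node {
  Node*  next;
  size_t mem_size() const { return sizeof(Node); }  // identical for every node
};

// Old approach: O(n) walk over all fine-grained tables while holding the lock.
size_t sum_by_walking(const Node* first) {
  size_t sum = 0;
  for (const Node* cur = first; cur != nullptr; cur = cur->next) {
    sum += cur->mem_size();
  }
  return sum;
}

// New approach: O(1), valid because every node reports the same size.
size_t sum_by_counting(const Node* first, size_t n_entries) {
  return (first != nullptr) ? first->mem_size() * n_entries : 0;
}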