Commit 6adfa342cf by J. Duke, 2017-07-05 20:46:25 +02:00
257 changed files with 4155 additions and 25626 deletions
.hgtags-top-repo
common/bin
corba
hotspot
jdk

@ -320,3 +320,4 @@ c706ef5ea5da00078dc5e4334660315f7d99c15b jdk9-b71
8fd6eeb878606e39c908f12535f34ebbfd225a4a jdk9-b75
d82072b699b880a1f647a5e2d7c0f86cec958941 jdk9-b76
7972dc8f2a47f0c4cd8f02fa5662af41f028aa14 jdk9-b77
8c40d4143ee13bdf8170c68cc384c36ab1e9fadb jdk9-b78

@ -42,7 +42,6 @@ STRIP_BEFORE_COMPARE="
./demo/jvmti/gctest/lib/libgctest.so
./demo/jvmti/heapTracker/lib/libheapTracker.so
./demo/jvmti/heapViewer/lib/libheapViewer.so
./demo/jvmti/hprof/lib/libhprof.so
./demo/jvmti/minst/lib/libminst.so
./demo/jvmti/mtrace/lib/libmtrace.so
./demo/jvmti/versionCheck/lib/libversionCheck.so
@ -54,7 +53,6 @@ ACCEPTED_BIN_DIFF="
./demo/jvmti/gctest/lib/libgctest.so
./demo/jvmti/heapTracker/lib/libheapTracker.so
./demo/jvmti/heapViewer/lib/libheapViewer.so
./demo/jvmti/hprof/lib/libhprof.so
./demo/jvmti/minst/lib/libminst.so
./demo/jvmti/mtrace/lib/libmtrace.so
./demo/jvmti/versionCheck/lib/libversionCheck.so
@ -62,9 +60,7 @@ ACCEPTED_BIN_DIFF="
./lib/i386/client/libjvm.so
./lib/i386/libattach.so
./lib/i386/libdt_socket.so
./lib/i386/libhprof.so
./lib/i386/libinstrument.so
./lib/i386/libjava_crw_demo.so
./lib/i386/libjsdt.so
./lib/i386/libmanagement.so
./lib/i386/libnpt.so
@ -118,7 +114,6 @@ STRIP_BEFORE_COMPARE="
./demo/jvmti/gctest/lib/libgctest.so
./demo/jvmti/heapTracker/lib/libheapTracker.so
./demo/jvmti/heapViewer/lib/libheapViewer.so
./demo/jvmti/hprof/lib/libhprof.so
./demo/jvmti/minst/lib/libminst.so
./demo/jvmti/mtrace/lib/libmtrace.so
./demo/jvmti/versionCheck/lib/libversionCheck.so
@ -130,16 +125,13 @@ ACCEPTED_BIN_DIFF="
./demo/jvmti/gctest/lib/libgctest.so
./demo/jvmti/heapTracker/lib/libheapTracker.so
./demo/jvmti/heapViewer/lib/libheapViewer.so
./demo/jvmti/hprof/lib/libhprof.so
./demo/jvmti/minst/lib/libminst.so
./demo/jvmti/mtrace/lib/libmtrace.so
./demo/jvmti/versionCheck/lib/libversionCheck.so
./demo/jvmti/waiters/lib/libwaiters.so
./lib/amd64/libattach.so
./lib/amd64/libdt_socket.so
./lib/amd64/libhprof.so
./lib/amd64/libinstrument.so
./lib/amd64/libjava_crw_demo.so
./lib/amd64/libjsdt.so
./lib/amd64/libjsig.so
./lib/amd64/libmanagement.so
@ -197,7 +189,6 @@ STRIP_BEFORE_COMPARE="
./demo/jvmti/gctest/lib/libgctest.so
./demo/jvmti/heapTracker/lib/libheapTracker.so
./demo/jvmti/heapViewer/lib/libheapViewer.so
./demo/jvmti/hprof/lib/libhprof.so
./demo/jvmti/minst/lib/libminst.so
./demo/jvmti/mtrace/lib/libmtrace.so
./demo/jvmti/versionCheck/lib/libversionCheck.so
@ -217,7 +208,6 @@ ACCEPTED_SMALL_SIZE_DIFF="
./demo/jvmti/gctest/lib/libgctest.so
./demo/jvmti/heapTracker/lib/libheapTracker.so
./demo/jvmti/heapViewer/lib/libheapViewer.so
./demo/jvmti/hprof/lib/libhprof.so
./demo/jvmti/minst/lib/libminst.so
./demo/jvmti/mtrace/lib/libmtrace.so
./demo/jvmti/versionCheck/lib/libversionCheck.so
@ -232,7 +222,6 @@ ACCEPTED_SMALL_SIZE_DIFF="
./lib/amd64/libdcpr.so
./lib/amd64/libdt_socket.so
./lib/amd64/libfontmanager.so
./lib/amd64/libhprof.so
./lib/amd64/libinstrument.so
./lib/amd64/libj2gss.so
./lib/amd64/libj2pcsc.so
@ -240,7 +229,6 @@ ACCEPTED_SMALL_SIZE_DIFF="
./lib/amd64/libj2ucrypto.so
./lib/amd64/libjaas_unix.so
./lib/amd64/libjava.so
./lib/amd64/libjava_crw_demo.so
./lib/amd64/libjawt.so
./lib/amd64/libjdwp.so
./lib/amd64/libjfr.so
@ -330,7 +318,6 @@ STRIP_BEFORE_COMPARE="
./demo/jvmti/gctest/lib/libgctest.so
./demo/jvmti/heapTracker/lib/libheapTracker.so
./demo/jvmti/heapViewer/lib/libheapViewer.so
./demo/jvmti/hprof/lib/libhprof.so
./demo/jvmti/minst/lib/libminst.so
./demo/jvmti/mtrace/lib/libmtrace.so
./demo/jvmti/versionCheck/lib/libversionCheck.so
@ -353,7 +340,6 @@ ACCEPTED_SMALL_SIZE_DIFF="
./demo/jvmti/gctest/lib/libgctest.so
./demo/jvmti/heapTracker/lib/libheapTracker.so
./demo/jvmti/heapViewer/lib/libheapViewer.so
./demo/jvmti/hprof/lib/libhprof.so
./demo/jvmti/minst/lib/libminst.so
./demo/jvmti/mtrace/lib/libmtrace.so
./demo/jvmti/versionCheck/lib/libversionCheck.so
@ -369,7 +355,6 @@ ACCEPTED_SMALL_SIZE_DIFF="
./lib/sparcv9/libdcpr.so
./lib/sparcv9/libdt_socket.so
./lib/sparcv9/libfontmanager.so
./lib/sparcv9/libhprof.so
./lib/sparcv9/libinstrument.so
./lib/sparcv9/libj2gss.so
./lib/sparcv9/libj2pcsc.so
@ -377,7 +362,6 @@ ACCEPTED_SMALL_SIZE_DIFF="
./lib/sparcv9/libj2ucrypto.so
./lib/sparcv9/libjaas_unix.so
./lib/sparcv9/libjava.so
./lib/sparcv9/libjava_crw_demo.so
./lib/sparcv9/libjawt.so
./lib/sparcv9/libjdwp.so
./lib/sparcv9/libjfr.so
@ -473,7 +457,6 @@ ACCEPTED_SMALL_SIZE_DIFF="
./demo/jvmti/heapTracker/lib/heapTracker.dll
./demo/jvmti/minst/lib/minst.dll
./bin/attach.dll
./bin/java_crw_demo.dll
./bin/jsoundds.dll
./bin/server/jvm.dll
./bin/appletviewer.exe
@ -611,9 +594,7 @@ ACCEPTED_BIN_DIFF="
./Contents/Home/lib/libawt_lwawt.dylib
./Contents/Home/lib/libdeploy.dylib
./Contents/Home/lib/libdt_socket.dylib
./Contents/Home/lib/libhprof.dylib
./Contents/Home/lib/libinstrument.dylib
./Contents/Home/lib/libjava_crw_demo.dylib
./Contents/Home/lib/libjdwp.dylib
./Contents/Home/lib/libjsdt.dylib
./Contents/Home/lib/libjsig.dylib
@ -635,9 +616,7 @@ ACCEPTED_BIN_DIFF="
./lib/libawt_lwawt.dylib
./lib/libdeploy.dylib
./lib/libdt_socket.dylib
./lib/libhprof.dylib
./lib/libinstrument.dylib
./lib/libjava_crw_demo.dylib
./lib/libjdwp.dylib
./lib/libjsdt.dylib
./lib/libjsig.dylib

@ -320,3 +320,4 @@ f9f3706bd24c42c07cb260fe05730a749b8e52f4 jdk9-b72
960b56805abd8460598897481820bd6a75f979e7 jdk9-b75
d8126bc88fa5cd1ae4e44d86a4b1280ca1c9e2aa jdk9-b76
8bb2441c0fec8b28f7bf11a0ca3ec1642e7ef457 jdk9-b77
182bb7accc5253bcfefd8edc1d4997ec8f9f8694 jdk9-b78

@ -480,3 +480,4 @@ fff6b54e9770ac4c12c2fb4cab5aa7672affa4bd jdk9-b74
2f354281e9915275693c4e519a959b8a6f22d3a3 jdk9-b75
0bc8d1656d6f2b1fdfe803c1305a108bb9939f35 jdk9-b76
e66c3813789debfc06f206afde1bf7a84cb08451 jdk9-b77
20dc06b04fe5ec373879414d60ef82ac70faef98 jdk9-b78

@ -416,7 +416,8 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
int jmp_off = __ offset();
__ jmp(_patch_site_entry);
// Add enough nops so deoptimization can overwrite the jmp above with a call
// and not destroy the world.
// and not destroy the world. We cannot use fat nops here, since the concurrent
// code rewrite may transiently create the illegal instruction sequence.
for (int j = __ offset() ; j < jmp_off + 5 ; j++ ) {
__ nop();
}
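
The comment above carries the rationale for this hunk: the short jmp must be padded out to the 5 bytes a call needs, and only single-byte nops are safe while another thread may be rewriting the site concurrently. A minimal standalone sketch of that padding loop over a plain byte buffer, with assumed instruction sizes (not HotSpot code):

```cpp
#include <cassert>
#include <cstdint>
#include <vector>

int main() {
  const size_t kCallSize = 5;                 // assumed x86 call-rel32 size
  std::vector<uint8_t> code = {0xEB, 0x00};   // placeholder 2-byte short jmp
  const size_t jmp_off = 0;                   // offset of the jmp in the buffer
  // Pad with single-byte 0x90 nops: every intermediate byte sequence stays
  // decodable, which multi-byte "fat" nops cannot guarantee while another
  // thread rewrites the patch site.
  while (code.size() < jmp_off + kCallSize) {
    code.push_back(0x90);
  }
  assert(code.size() - jmp_off >= kCallSize);
  return 0;
}
```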

@ -345,9 +345,7 @@ int LIR_Assembler::check_icache() {
const bool do_post_padding = VerifyOops || UseCompressedClassPointers;
if (!do_post_padding) {
// insert some nops so that the verified entry point is aligned on CodeEntryAlignment
while ((__ offset() + ic_cmp_size) % CodeEntryAlignment != 0) {
__ nop();
}
__ align(CodeEntryAlignment, __ offset() + ic_cmp_size);
}
int offset = __ offset();
__ inline_cache_check(receiver, IC_Klass);
@ -2861,9 +2859,7 @@ void LIR_Assembler::align_call(LIR_Code code) {
case lir_virtual_call: // currently, sparc-specific for niagara
default: ShouldNotReachHere();
}
while (offset++ % BytesPerWord != 0) {
__ nop();
}
__ align(BytesPerWord, offset);
}
}
@ -2902,10 +2898,7 @@ void LIR_Assembler::emit_static_call_stub() {
int start = __ offset();
if (os::is_MP()) {
// make sure that the displacement word of the call ends up word aligned
int offset = __ offset() + NativeMovConstReg::instruction_size + NativeCall::displacement_offset;
while (offset++ % BytesPerWord != 0) {
__ nop();
}
__ align(BytesPerWord, __ offset() + NativeMovConstReg::instruction_size + NativeCall::displacement_offset);
}
__ relocate(static_stub_Relocation::spec(call_pc));
__ mov_metadata(rbx, (Metadata*)NULL);

@ -970,8 +970,12 @@ void MacroAssembler::addss(XMMRegister dst, AddressLiteral src) {
}
void MacroAssembler::align(int modulus) {
if (offset() % modulus != 0) {
nop(modulus - (offset() % modulus));
align(modulus, offset());
}
void MacroAssembler::align(int modulus, int target) {
if (target % modulus != 0) {
nop(modulus - (target % modulus));
}
}
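
The hunks above replace open-coded `while (offset % modulus != 0) nop()` loops with a two-argument align() that can pre-pad for a point beyond the current offset. A standalone sketch of the padding arithmetic, using an assumed CodeEntryAlignment value (not the HotSpot MacroAssembler itself):

```cpp
#include <cassert>

// Padding needed at the current position so that `target`, a point at or
// beyond the current offset, lands on a multiple of `modulus`.
static int padding_for(int modulus, int target) {
  return (target % modulus == 0) ? 0 : modulus - (target % modulus);
}

int main() {
  // e.g. align the end of a 9-byte inline-cache check to CodeEntryAlignment
  const int CodeEntryAlignment = 32;   // assumed value, platform dependent
  int offset = 7, ic_cmp_size = 9;
  int pad = padding_for(CodeEntryAlignment, offset + ic_cmp_size);
  assert((offset + pad + ic_cmp_size) % CodeEntryAlignment == 0);
  return 0;
}
```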

@ -192,6 +192,7 @@ class MacroAssembler: public Assembler {
// Alignment
void align(int modulus);
void align(int modulus, int target);
// A 5 byte nop that is safe for patching (see patch_verified_entry)
void fat_nop();

@ -108,7 +108,7 @@ StubQueue* AbstractInterpreter::_code = NULL;
#define GEN_SIZE(Type) \
switch(gen_variant) { \
case GEN_OFFSET: \
printf("#define SIZE_%-35s %ld\n", \
printf("#define SIZE_%-35s %ld\n", \
#Type, sizeof(Type)); \
break; \
case GEN_INDEX: \
@ -134,7 +134,7 @@ StubQueue* AbstractInterpreter::_code = NULL;
}
void gen_prologue(GEN_variant gen_variant) {
const char *suffix;
const char *suffix = "Undefined-Suffix";
switch(gen_variant) {
case GEN_OFFSET: suffix = ".h"; break;
@ -228,10 +228,10 @@ int generateJvmOffsets(GEN_variant gen_variant) {
printf("\n");
GEN_OFFS(Method, _constMethod);
GEN_OFFS(Method, _constants);
GEN_OFFS(Method, _access_flags);
printf("\n");
GEN_OFFS(ConstMethod, _constants);
GEN_OFFS(ConstMethod, _flags);
GEN_OFFS(ConstMethod, _code_size);
GEN_OFFS(ConstMethod, _name_index);
@ -264,7 +264,7 @@ int generateJvmOffsets(GEN_variant gen_variant) {
GEN_OFFS(nmethod, _method);
GEN_OFFS(nmethod, _dependencies_offset);
GEN_OFFS(nmethod, _oops_offset);
GEN_OFFS(nmethod, _metadata_offset);
GEN_OFFS(nmethod, _scopes_data_offset);
GEN_OFFS(nmethod, _scopes_pcs_offset);
GEN_OFFS(nmethod, _handler_table_offset);

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -49,7 +49,7 @@ extern pointer __1cJCodeCacheG_heaps_;
extern pointer __1cIUniverseO_collectedHeap_;
extern pointer __1cHnmethodG__vtbl_;
extern pointer __1cNMethodG__vtbl_;
extern pointer __1cGMethodG__vtbl_;
extern pointer __1cKBufferBlobG__vtbl_;
#define copyin_ptr(ADDR) *(pointer*) copyin((pointer) (ADDR), sizeof(pointer))
@ -164,7 +164,7 @@ dtrace:helper:ustack:
this->number_of_heaps = copyin_uint32(this->code_heaps_address + OFFSET_GrowableArray_CodeHeap_len);
this->Method_vtbl = (pointer) &``__1cGMethodG__vtbl_;
/*
* Get Java heap bounds
*/
@ -457,12 +457,15 @@ dtrace:helper:ustack:
this->nameSymbol = copyin_ptr(this->constantPool +
this->nameIndex * sizeof (pointer) + SIZE_ConstantPool);
/* The symbol is a CPSlot and has lower bit set to indicate metadata */
this->nameSymbol &= (~1); /* remove metadata lsb */
this->nameSymbolLength = copyin_uint16(this->nameSymbol +
OFFSET_Symbol_length);
this->signatureSymbol = copyin_ptr(this->constantPool +
this->signatureIndex * sizeof (pointer) + SIZE_ConstantPool);
this->signatureSymbol &= (~1); /* remove metadata lsb */
this->signatureSymbolLength = copyin_uint16(this->signatureSymbol +
OFFSET_Symbol_length);
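
The two `&= (~1)` lines added above strip the CPSlot metadata tag before the value is used as a Symbol address. A small illustrative sketch of that tag-bit handling (values are made up):

```cpp
#include <cassert>
#include <cstdint>

int main() {
  uintptr_t slot = 0x1001;                    // tagged slot (low bit set)
  bool is_metadata = (slot & 1) != 0;         // low bit marks metadata
  uintptr_t symbol = slot & ~uintptr_t(1);    // remove metadata lsb
  assert(is_metadata && symbol == 0x1000);
  return 0;
}
```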

@ -1,5 +1,5 @@
/*
* Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2006, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -227,7 +227,7 @@ static void delete_attach_file(pid_t pid) {
/* attach to given JVM */
jvm_t* jvm_attach(pid_t pid) {
jvm_t* jvm;
int door_fd, attach_fd, i;
int door_fd, attach_fd, i = 0;
jvm = (jvm_t*) calloc(1, sizeof(jvm_t));
if (jvm == NULL) {
@ -292,14 +292,13 @@ const char* jvm_get_last_error() {
/* detach the given JVM */
int jvm_detach(jvm_t* jvm) {
if (jvm) {
int res;
int res = 0;
if (jvm->door_fd != -1) {
if (file_close(jvm->door_fd) != 0) {
set_jvm_error(JVM_ERR_CANT_CLOSE_DOOR);
res = -1;
} else {
clear_jvm_error();
res = 0;
}
}
free(jvm);

@ -882,7 +882,7 @@ get_real_pc(Nmethod_t *N, uint64_t pc_desc, uint64_t *real_pc)
/* Finds a PcDesc with real-pc equal to N->pc */
static int pc_desc_at(Nmethod_t *N)
{
uint64_t pc_diff;
uint64_t pc_diff = 999;
int32_t offs;
int32_t err;

@ -217,9 +217,9 @@ static bool is_statbuf_secure(struct stat *statp) {
//
return false;
}
// See if the uid of the directory matches the effective uid of the process.
//
if (statp->st_uid != geteuid()) {
// If user is not root then see if the uid of the directory matches the effective uid of the process.
uid_t euid = geteuid();
if ((euid != 0) && (statp->st_uid != euid)) {
// The directory was not created by this user, declare it insecure.
//
return false;
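
The new condition lets a process running as root accept a directory owned by another user, while non-root users still require an exact uid match. A hedged sketch of just that ownership test (not the full is_statbuf_secure() logic):

```cpp
#include <sys/stat.h>
#include <unistd.h>
#include <cstdio>

// Mirrors the new condition: insecure only when the caller is not root
// AND the directory owner differs from the effective uid.
static bool ownership_ok(const struct stat* statp) {
  uid_t euid = geteuid();
  return !((euid != 0) && (statp->st_uid != euid));
}

int main() {
  struct stat sb;
  if (stat("/tmp", &sb) == 0) {
    printf("ownership_ok(/tmp) = %d\n", ownership_ok(&sb) ? 1 : 0);
  }
  return 0;
}
```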

@ -85,7 +85,7 @@ StubQueue* AbstractInterpreter::_code = NULL;
#define GEN_OFFS_NAME(Type,Name,OutputType) \
switch(gen_variant) { \
case GEN_OFFSET: \
printf("#define OFFSET_%-33s %d\n", \
printf("#define OFFSET_%-33s %ld\n", \
#OutputType #Name, offset_of(Type, Name)); \
break; \
case GEN_INDEX: \
@ -103,7 +103,7 @@ StubQueue* AbstractInterpreter::_code = NULL;
#define GEN_SIZE(Type) \
switch(gen_variant) { \
case GEN_OFFSET: \
printf("#define SIZE_%-35s %d\n", \
printf("#define SIZE_%-35s %ld\n", \
#Type, sizeof(Type)); \
break; \
case GEN_INDEX: \
@ -129,7 +129,7 @@ StubQueue* AbstractInterpreter::_code = NULL;
}
void gen_prologue(GEN_variant gen_variant) {
const char *suffix;
const char *suffix = "Undefined-Suffix";
switch(gen_variant) {
case GEN_OFFSET: suffix = ".h"; break;
@ -211,7 +211,7 @@ int generateJvmOffsets(GEN_variant gen_variant) {
GEN_OFFS(ConstantPool, _pool_holder);
printf("\n");
GEN_VALUE(OFFSET_HeapBlockHeader_used, offset_of(HeapBlock::Header, _used));
GEN_VALUE(OFFSET_HeapBlockHeader_used, (int) offset_of(HeapBlock::Header, _used));
GEN_OFFS(oopDesc, _metadata);
printf("\n");
@ -275,7 +275,7 @@ int generateJvmOffsets(GEN_variant gen_variant) {
GEN_OFFS(NarrowPtrStruct, _shift);
printf("\n");
GEN_VALUE(SIZE_HeapBlockHeader, sizeof(HeapBlock::Header));
GEN_VALUE(SIZE_HeapBlockHeader, (int) sizeof(HeapBlock::Header));
GEN_SIZE(oopDesc);
GEN_SIZE(ConstantPool);
printf("\n");
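
Several printf changes in this file widen %d to %ld, or add explicit (int) casts, because sizeof and offset_of produce 64-bit values on LP64 Solaris. A tiny sketch of the underlying format-width issue (Demo is a made-up type):

```cpp
#include <cstddef>
#include <cstdio>

struct Demo { char pad[4]; long field; };

int main() {
  // Printing a 64-bit size with %d would read the wrong argument width;
  // casting to a type that matches the specifier keeps the generator portable.
  printf("#define SIZE_Demo          %ld\n", (long) sizeof(Demo));
  printf("#define OFFSET_Demo_field  %ld\n", (long) offsetof(Demo, field));
  return 0;
}
```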

@ -1,5 +1,5 @@
/*
* Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2006, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -227,7 +227,7 @@ static void delete_attach_file(pid_t pid) {
/* attach to given JVM */
jvm_t* jvm_attach(pid_t pid) {
jvm_t* jvm;
int door_fd, attach_fd, i;
int door_fd, attach_fd, i = 0;
jvm = (jvm_t*) calloc(1, sizeof(jvm_t));
if (jvm == NULL) {
@ -292,14 +292,13 @@ const char* jvm_get_last_error() {
/* detach the given JVM */
int jvm_detach(jvm_t* jvm) {
if (jvm) {
int res;
int res = 0;
if (jvm->door_fd != -1) {
if (file_close(jvm->door_fd) != 0) {
set_jvm_error(JVM_ERR_CANT_CLOSE_DOOR);
res = -1;
} else {
clear_jvm_error();
res = 0;
}
}
free(jvm);

@ -882,7 +882,7 @@ get_real_pc(Nmethod_t *N, uint64_t pc_desc, uint64_t *real_pc)
/* Finds a PcDesc with real-pc equal to N->pc */
static int pc_desc_at(Nmethod_t *N)
{
uint64_t pc_diff;
uint64_t pc_diff = 999;
int32_t offs;
int32_t err;

@ -182,75 +182,6 @@ extern "C" {
static void unpackTime(timespec* absTime, bool isAbsolute, jlong time);
// Thread Local Storage
// This is common to all Solaris platforms so it is defined here,
// in this common file.
// The declarations are in the os_cpu threadLS*.hpp files.
//
// Static member initialization for TLS
Thread* ThreadLocalStorage::_get_thread_cache[ThreadLocalStorage::_pd_cache_size] = {NULL};
#ifndef PRODUCT
#define _PCT(n,d) ((100.0*(double)(n))/(double)(d))
int ThreadLocalStorage::_tcacheHit = 0;
int ThreadLocalStorage::_tcacheMiss = 0;
void ThreadLocalStorage::print_statistics() {
int total = _tcacheMiss+_tcacheHit;
tty->print_cr("Thread cache hits %d misses %d total %d percent %f\n",
_tcacheHit, _tcacheMiss, total, _PCT(_tcacheHit, total));
}
#undef _PCT
#endif // PRODUCT
Thread* ThreadLocalStorage::get_thread_via_cache_slowly(uintptr_t raw_id,
int index) {
Thread *thread = get_thread_slow();
if (thread != NULL) {
address sp = os::current_stack_pointer();
guarantee(thread->_stack_base == NULL ||
(sp <= thread->_stack_base &&
sp >= thread->_stack_base - thread->_stack_size) ||
is_error_reported(),
"sp must be inside of selected thread stack");
thread->set_self_raw_id(raw_id); // mark for quick retrieval
_get_thread_cache[index] = thread;
}
return thread;
}
static const double all_zero[sizeof(Thread) / sizeof(double) + 1] = {0};
#define NO_CACHED_THREAD ((Thread*)all_zero)
void ThreadLocalStorage::pd_set_thread(Thread* thread) {
// Store the new value before updating the cache to prevent a race
// between get_thread_via_cache_slowly() and this store operation.
os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);
// Update thread cache with new thread if setting on thread create,
// or NO_CACHED_THREAD (zeroed) thread if resetting thread on exit.
uintptr_t raw = pd_raw_thread_id();
int ix = pd_cache_index(raw);
_get_thread_cache[ix] = thread == NULL ? NO_CACHED_THREAD : thread;
}
void ThreadLocalStorage::pd_init() {
for (int i = 0; i < _pd_cache_size; i++) {
_get_thread_cache[i] = NO_CACHED_THREAD;
}
}
// Invalidate all the caches (happens to be the same as pd_init).
void ThreadLocalStorage::pd_invalidate_all() { pd_init(); }
#undef NO_CACHED_THREAD
// END Thread Local Storage
static inline size_t adjust_stack_size(address base, size_t size) {
if ((ssize_t)size < 0) {
// 4759953: Compensate for ridiculous stack size.
@ -1289,67 +1220,6 @@ int os::current_process_id() {
return (int)(_initial_pid ? _initial_pid : getpid());
}
int os::allocate_thread_local_storage() {
// %%% in Win32 this allocates a memory segment pointed to by a
// register. Dan Stein can implement a similar feature in
// Solaris. Alternatively, the VM can do the same thing
// explicitly: malloc some storage and keep the pointer in a
// register (which is part of the thread's context) (or keep it
// in TLS).
// %%% In current versions of Solaris, thr_self and TSD can
// be accessed via short sequences of displaced indirections.
// The value of thr_self is available as %g7(36).
// The value of thr_getspecific(k) is stored in %g7(12)(4)(k*4-4),
// assuming that the current thread already has a value bound to k.
// It may be worth experimenting with such access patterns,
// and later having the parameters formally exported from a Solaris
// interface. I think, however, that it will be faster to
// maintain the invariant that %g2 always contains the
// JavaThread in Java code, and have stubs simply
// treat %g2 as a caller-save register, preserving it in a %lN.
thread_key_t tk;
if (thr_keycreate(&tk, NULL)) {
fatal(err_msg("os::allocate_thread_local_storage: thr_keycreate failed "
"(%s)", strerror(errno)));
}
return int(tk);
}
void os::free_thread_local_storage(int index) {
// %%% don't think we need anything here
// if (pthread_key_delete((pthread_key_t) tk)) {
// fatal("os::free_thread_local_storage: pthread_key_delete failed");
// }
}
// libthread allocate for tsd_common is a version specific
// small number - point is NO swap space available
#define SMALLINT 32
void os::thread_local_storage_at_put(int index, void* value) {
// %%% this is used only in threadLocalStorage.cpp
if (thr_setspecific((thread_key_t)index, value)) {
if (errno == ENOMEM) {
vm_exit_out_of_memory(SMALLINT, OOM_MALLOC_ERROR,
"thr_setspecific: out of swap space");
} else {
fatal(err_msg("os::thread_local_storage_at_put: thr_setspecific failed "
"(%s)", strerror(errno)));
}
} else {
ThreadLocalStorage::set_thread_in_slot((Thread *) value);
}
}
// This function could be called before TLS is initialized, for example, when
// VM receives an async signal or when VM causes a fatal error during
// initialization. Return NULL if thr_getspecific() fails.
void* os::thread_local_storage_at(int index) {
// %%% this is used only in threadLocalStorage.cpp
void* r = NULL;
return thr_getspecific((thread_key_t)index, &r) != 0 ? NULL : r;
}
// gethrtime() should be monotonic according to the documentation,
// but some virtualized platforms are known to break this guarantee.
// getTimeNanos() must be guaranteed not to move backwards, so we

@ -219,9 +219,9 @@ static bool is_statbuf_secure(struct stat *statp) {
//
return false;
}
// See if the uid of the directory matches the effective uid of the process.
//
if (statp->st_uid != geteuid()) {
// If user is not root then see if the uid of the directory matches the effective uid of the process.
uid_t euid = geteuid();
if ((euid != 0) && (statp->st_uid != euid)) {
// The directory was not created by this user, declare it insecure.
//
return false;

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -39,19 +39,12 @@
// For SPARC, to avoid excessive register window spill-fill faults,
// we aggressively inline these routines.
inline Thread* ThreadLocalStorage::thread() {
// don't use specialized code if +UseMallocOnly -- may confuse Purify et al.
debug_only(if (UseMallocOnly) return get_thread_slow(););
inline void ThreadLocalStorage::set_thread(Thread* thread) {
_thr_current = thread;
}
uintptr_t raw = pd_raw_thread_id();
int ix = pd_cache_index(raw);
Thread* candidate = ThreadLocalStorage::_get_thread_cache[ix];
if (candidate->self_raw_id() == raw) {
// hit
return candidate;
} else {
return ThreadLocalStorage::get_thread_via_cache_slowly(raw, ix);
}
inline Thread* ThreadLocalStorage::thread() {
return _thr_current;
}
#endif // OS_SOLARIS_VM_THREAD_SOLARIS_INLINE_HPP

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,19 +26,26 @@
#include "runtime/thread.inline.hpp"
#include "runtime/threadLocalStorage.hpp"
// Provides an entry point we can link against and
// a buffer we can emit code into. The buffer is
// filled by ThreadLocalStorage::generate_code_for_get_thread
// and called from ThreadLocalStorage::thread()
// True thread-local variable
__thread Thread * ThreadLocalStorage::_thr_current = NULL;
#include <sys/systeminfo.h>
// Implementations needed to support the shared API
// The portable TLS mechanism (get_thread_via_cache) is enough on SPARC.
// There is no need for hand-assembling a special function.
void ThreadLocalStorage::generate_code_for_get_thread() {
void ThreadLocalStorage::pd_invalidate_all() {} // nothing to do
bool ThreadLocalStorage::_initialized = false;
void ThreadLocalStorage::init() {
_initialized = true;
}
void ThreadLocalStorage::set_thread_in_slot (Thread * self) {}
bool ThreadLocalStorage::is_initialized() {
return _initialized;
}
Thread* ThreadLocalStorage::get_thread_slow() {
return thread();
}
extern "C" Thread* get_thread() {
return ThreadLocalStorage::thread();
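
The Solaris port now stores the current Thread* in a genuine compiler-managed thread-local variable instead of the old %g7-keyed cache. A minimal sketch of that pattern with illustrative names (not the real ThreadLocalStorage class):

```cpp
#include <cassert>

class Thread {};

class TLS {
 public:
  static void set_thread(Thread* t) { _thr_current = t; }
  static Thread* thread()           { return _thr_current; }
 private:
  // True thread-local variable, one slot per OS thread.
  static __thread Thread* _thr_current;
};

__thread Thread* TLS::_thr_current = nullptr;

int main() {
  Thread t;
  TLS::set_thread(&t);
  assert(TLS::thread() == &t);
  return 0;
}
```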

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,47 +25,15 @@
#ifndef OS_CPU_SOLARIS_SPARC_VM_THREADLS_SOLARIS_SPARC_HPP
#define OS_CPU_SOLARIS_SPARC_VM_THREADLS_SOLARIS_SPARC_HPP
public:
// Java Thread - force inlining
static inline Thread* thread() ;
// Solaris specific implementation involves simple, direct use
// of a compiler-based thread-local variable
private:
static Thread* _get_thread_cache[]; // index by [(raw_id>>9)^(raw_id>>20) % _pd_cache_size]
static Thread* get_thread_via_cache_slowly(uintptr_t raw_id, int index);
static __thread Thread * _thr_current;
NOT_PRODUCT(static int _tcacheHit;)
NOT_PRODUCT(static int _tcacheMiss;)
static bool _initialized; // needed for shared API
public:
// Print cache hit/miss statistics
static void print_statistics() PRODUCT_RETURN;
enum Constants {
_pd_cache_size = 256*2 // projected typical # of threads * 2
};
static void set_thread_in_slot (Thread *) ;
static uintptr_t pd_raw_thread_id() {
return _raw_thread_id();
}
static int pd_cache_index(uintptr_t raw_id) {
// Hash function: From email from Dave:
// The hash function deserves an explanation. %g7 points to libthread's
// "thread" structure. On T1 the thread structure is allocated on the
// user's stack (yes, really!) so the ">>20" handles T1 where the JVM's
// stack size is usually >= 1Mb. The ">>9" is for T2 where Roger allocates
// globs of thread blocks contiguously. The "9" has to do with the
// expected size of the T2 thread structure. If these constants are wrong
// the worst thing that'll happen is that the hit rate for heavily threaded
// apps won't be as good as it could be. If you want to burn another
// shift+xor you could mix together _all of the %g7 bits to form the hash,
// but I think that's excessive. Making the change above changed the
// T$ miss rate on SpecJBB (on a 16X system) from about 3% to imperceptible.
uintptr_t ix = (int) (((raw_id >> 9) ^ (raw_id >> 20)) % _pd_cache_size);
return ix;
}
static inline Thread* thread();
#endif // OS_CPU_SOLARIS_SPARC_VM_THREADLS_SOLARIS_SPARC_HPP

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -23,11 +23,10 @@
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/threadLocalStorage.hpp"
#include "runtime/thread.inline.hpp"
void MacroAssembler::int3() {
push(rax);
@ -39,98 +38,32 @@ void MacroAssembler::int3() {
pop(rax);
}
#define __ _masm->
#ifndef _LP64
static void slow_call_thr_specific(MacroAssembler* _masm, Register thread) {
// slow call to of thr_getspecific
// int thr_getspecific(thread_key_t key, void **value);
// Consider using pthread_getspecific instead.
__ push(0); // allocate space for return value
if (thread != rax) __ push(rax); // save rax, if caller still wants it
__ push(rcx); // save caller save
__ push(rdx); // save caller save
if (thread != rax) {
__ lea(thread, Address(rsp, 3 * sizeof(int))); // address of return value
} else {
__ lea(thread, Address(rsp, 2 * sizeof(int))); // address of return value
}
__ push(thread); // and pass the address
__ push(ThreadLocalStorage::thread_index()); // the key
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, thr_getspecific)));
__ increment(rsp, 2 * wordSize);
__ pop(rdx);
__ pop(rcx);
if (thread != rax) __ pop(rax);
__ pop(thread);
}
#else
static void slow_call_thr_specific(MacroAssembler* _masm, Register thread) {
// slow call to of thr_getspecific
// int thr_getspecific(thread_key_t key, void **value);
// Consider using pthread_getspecific instead.
if (thread != rax) {
__ push(rax);
}
__ push(0); // space for return value
__ push(rdi);
__ push(rsi);
__ lea(rsi, Address(rsp, 16)); // pass return value address
__ push(rdx);
__ push(rcx);
__ push(r8);
__ push(r9);
__ push(r10);
// XXX
__ mov(r10, rsp);
__ andptr(rsp, -16);
__ push(r10);
__ push(r11);
__ movl(rdi, ThreadLocalStorage::thread_index());
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, thr_getspecific)));
__ pop(r11);
__ pop(rsp);
__ pop(r10);
__ pop(r9);
__ pop(r8);
__ pop(rcx);
__ pop(rdx);
__ pop(rsi);
__ pop(rdi);
__ pop(thread); // load return value
if (thread != rax) {
__ pop(rax);
}
}
#endif //LP64
// This is simply a call to ThreadLocalStorage::thread()
void MacroAssembler::get_thread(Register thread) {
int segment = NOT_LP64(Assembler::GS_segment) LP64_ONLY(Assembler::FS_segment);
// Try to emit a Solaris-specific fast TSD/TLS accessor.
ThreadLocalStorage::pd_tlsAccessMode tlsMode = ThreadLocalStorage::pd_getTlsAccessMode ();
if (tlsMode == ThreadLocalStorage::pd_tlsAccessIndirect) { // T1
// Use thread as a temporary: mov r, gs:[0]; mov r, [r+tlsOffset]
emit_int8 (segment);
// ExternalAddress doesn't work because it can't take NULL
AddressLiteral null(0, relocInfo::none);
movptr (thread, null);
movptr(thread, Address(thread, ThreadLocalStorage::pd_getTlsOffset())) ;
return ;
} else
if (tlsMode == ThreadLocalStorage::pd_tlsAccessDirect) { // T2
// mov r, gs:[tlsOffset]
emit_int8 (segment);
AddressLiteral tls_off((address)ThreadLocalStorage::pd_getTlsOffset(), relocInfo::none);
movptr (thread, tls_off);
return ;
if (thread != rax) {
push(rax);
}
push(rdi);
push(rsi);
push(rdx);
push(rcx);
push(r8);
push(r9);
push(r10);
push(r11);
slow_call_thr_specific(this, thread);
call(RuntimeAddress(CAST_FROM_FN_PTR(address, ThreadLocalStorage::thread)));
pop(r11);
pop(r10);
pop(r9);
pop(r8);
pop(rcx);
pop(rdx);
pop(rsi);
pop(rdi);
if (thread != rax) {
movl(thread, rax);
pop(rax);
}
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,167 +26,27 @@
#include "runtime/thread.inline.hpp"
#include "runtime/threadLocalStorage.hpp"
#ifdef AMD64
extern "C" Thread* fs_load(ptrdiff_t tlsOffset);
extern "C" intptr_t fs_thread();
#else
// From solaris_i486.s
extern "C" Thread* gs_load(ptrdiff_t tlsOffset);
extern "C" intptr_t gs_thread();
#endif // AMD64
// True thread-local variable
__thread Thread * ThreadLocalStorage::_thr_current = NULL;
// tlsMode encoding:
//
// pd_tlsAccessUndefined : uninitialized
// pd_tlsAccessSlow : not available
// pd_tlsAccessIndirect :
// old-style indirect access - present in "T1" libthread.
// use thr_slot_sync_allocate() to attempt to allocate a slot.
// pd_tlsAccessDirect :
// new-style direct access - present in late-model "T2" libthread.
// Allocate the offset (slot) via _thr_slot_offset() or by
// defining an IE- or LE-mode TLS/TSD slot in the launcher and then passing
// that offset into libjvm.so.
// See http://sac.eng/Archives/CaseLog/arc/PSARC/2003/159/.
//
// Note that we have a capability gap - some early model T2 forms
// (e.g., unpatched S9) have neither _thr_slot_sync_allocate() nor
// _thr_slot_offset(). In that case we revert to the usual
// thr_getspecific accessor.
//
// Implementations needed to support the shared API
static ThreadLocalStorage::pd_tlsAccessMode tlsMode = ThreadLocalStorage::pd_tlsAccessUndefined ;
static ptrdiff_t tlsOffset = 0 ;
static thread_key_t tlsKey ;
void ThreadLocalStorage::pd_invalidate_all() {} // nothing to do
typedef int (*TSSA_Entry) (ptrdiff_t *, int, int) ;
typedef ptrdiff_t (*TSO_Entry) (int) ;
bool ThreadLocalStorage::_initialized = false;
ThreadLocalStorage::pd_tlsAccessMode ThreadLocalStorage::pd_getTlsAccessMode ()
{
guarantee (tlsMode != pd_tlsAccessUndefined, "tlsMode not set") ;
return tlsMode ;
void ThreadLocalStorage::init() {
_initialized = true;
}
ptrdiff_t ThreadLocalStorage::pd_getTlsOffset () {
guarantee (tlsMode != pd_tlsAccessUndefined, "tlsMode not set") ;
return tlsOffset ;
bool ThreadLocalStorage::is_initialized() {
return _initialized;
}
// TODO: Consider the following improvements:
//
// 1. Convert from thr_*specific* to pthread_*specific*.
// The pthread_ forms are slightly faster. Also, the
// pthread_ forms have a pthread_key_delete() API which
// would aid in clean JVM shutdown and the eventual goal
// of permitting a JVM to reinstantiate itself withing a process.
//
// 2. See ThreadLocalStorage::init(). We end up allocating
// two TLS keys during VM startup. That's benign, but we could collapse
// down to one key without too much trouble.
//
// 3. MacroAssembler::get_thread() currently emits calls to thr_getspecific().
// Modify get_thread() to call Thread::current() instead.
//
// 4. Thread::current() currently uses a cache keyed by %gs:[0].
// (The JVM has PSARC permission to use %g7/%gs:[0]
// as an opaque temporally unique thread identifier).
// For C++ access to a thread's reflexive "self" pointer we
// should consider using one of the following:
// a. a radix tree keyed by %esp - as in EVM.
// This requires two loads (the 2nd dependent on the 1st), but
// is easily inlined and doesn't require a "miss" slow path.
// b. a fast TLS/TSD slot allocated by _thr_slot_offset
// or _thr_slot_sync_allocate.
//
// 5. 'generate_code_for_get_thread' is a misnomer.
// We should change it to something more general like
// pd_ThreadSelf_Init(), for instance.
//
static void AllocateTLSOffset ()
{
int rslt ;
TSSA_Entry tssa ;
TSO_Entry tso ;
ptrdiff_t off ;
guarantee (tlsMode == ThreadLocalStorage::pd_tlsAccessUndefined, "tlsMode not set") ;
tlsMode = ThreadLocalStorage::pd_tlsAccessSlow ;
tlsOffset = 0 ;
#ifndef AMD64
tssa = (TSSA_Entry) dlsym (RTLD_DEFAULT, "thr_slot_sync_allocate") ;
if (tssa != NULL) {
off = -1 ;
rslt = (*tssa)(&off, NULL, NULL) ; // (off,dtor,darg)
if (off != -1) {
tlsOffset = off ;
tlsMode = ThreadLocalStorage::pd_tlsAccessIndirect ;
return ;
}
}
rslt = thr_keycreate (&tlsKey, NULL) ;
if (rslt != 0) {
tlsMode = ThreadLocalStorage::pd_tlsAccessSlow ; // revert to slow mode
return ;
}
tso = (TSO_Entry) dlsym (RTLD_DEFAULT, "_thr_slot_offset") ;
if (tso != NULL) {
off = (*tso)(tlsKey) ;
if (off >= 0) {
tlsOffset = off ;
tlsMode = ThreadLocalStorage::pd_tlsAccessDirect ;
return ;
}
}
// Failure: Too bad ... we've allocated a TLS slot we don't need and there's
// no provision in the ABI for returning the slot.
//
// If we didn't find a slot then then:
// 1. We might be on liblwp.
// 2. We might be on T2 libthread, but all "fast" slots are already
// consumed
// 3. We might be on T1, and all TSD (thr_slot_sync_allocate) slots are
// consumed.
// 4. We might be on T2 libthread, but it's be re-architected
// so that fast slots are no longer g7-relative.
//
tlsMode = ThreadLocalStorage::pd_tlsAccessSlow ;
return ;
#endif // AMD64
Thread* ThreadLocalStorage::get_thread_slow() {
return thread();
}
void ThreadLocalStorage::generate_code_for_get_thread() {
AllocateTLSOffset() ;
}
void ThreadLocalStorage::set_thread_in_slot(Thread *thread) {
guarantee (tlsMode != pd_tlsAccessUndefined, "tlsMode not set") ;
if (tlsMode == pd_tlsAccessIndirect) {
#ifdef AMD64
intptr_t tbase = fs_thread();
#else
intptr_t tbase = gs_thread();
#endif // AMD64
*((Thread**) (tbase + tlsOffset)) = thread ;
} else
if (tlsMode == pd_tlsAccessDirect) {
thr_setspecific (tlsKey, (void *) thread) ;
// set with thr_setspecific and then readback with gs_load to validate.
#ifdef AMD64
guarantee (thread == fs_load(tlsOffset), "tls readback failure") ;
#else
guarantee (thread == gs_load(tlsOffset), "tls readback failure") ;
#endif // AMD64
}
}
extern "C" Thread* get_thread() {
return ThreadLocalStorage::thread();
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,61 +25,15 @@
#ifndef OS_CPU_SOLARIS_X86_VM_THREADLS_SOLARIS_X86_HPP
#define OS_CPU_SOLARIS_X86_VM_THREADLS_SOLARIS_X86_HPP
// Processor dependent parts of ThreadLocalStorage
// Solaris specific implementation involves simple, direct use
// of a compiler-based thread-local variable
private:
static Thread* _get_thread_cache[]; // index by [(raw_id>>9)^(raw_id>>20) % _pd_cache_size]
static Thread* get_thread_via_cache_slowly(uintptr_t raw_id, int index);
static __thread Thread * _thr_current;
NOT_PRODUCT(static int _tcacheHit;)
NOT_PRODUCT(static int _tcacheMiss;)
static bool _initialized; // needed for shared API
public:
// Cache hit/miss statistics
static void print_statistics() PRODUCT_RETURN;
enum Constants {
#ifdef AMD64
_pd_cache_size = 256*2 // projected typical # of threads * 2
#else
_pd_cache_size = 128*2 // projected typical # of threads * 2
#endif // AMD64
};
enum pd_tlsAccessMode {
pd_tlsAccessUndefined = -1,
pd_tlsAccessSlow = 0,
pd_tlsAccessIndirect = 1,
pd_tlsAccessDirect = 2
} ;
static void set_thread_in_slot (Thread *) ;
static pd_tlsAccessMode pd_getTlsAccessMode () ;
static ptrdiff_t pd_getTlsOffset () ;
static uintptr_t pd_raw_thread_id() {
#ifdef _GNU_SOURCE
#ifdef AMD64
uintptr_t rv;
__asm__ __volatile__ ("movq %%fs:0, %0" : "=r"(rv));
return rv;
#else
return gs_thread();
#endif // AMD64
#else //_GNU_SOURCE
return _raw_thread_id();
#endif //_GNU_SOURCE
}
static int pd_cache_index(uintptr_t raw_id) {
// Copied from the sparc version. Dave said it should also work fine
// for solx86.
int ix = (int) (((raw_id >> 9) ^ (raw_id >> 20)) % _pd_cache_size);
return ix;
}
// Java Thread
static inline Thread* thread();
#endif // OS_CPU_SOLARIS_X86_VM_THREADLS_SOLARIS_X86_HPP

@ -33,7 +33,9 @@
#include "runtime/os.hpp"
void LIR_Assembler::patching_epilog(PatchingStub* patch, LIR_PatchCode patch_code, Register obj, CodeEmitInfo* info) {
// we must have enough patching space so that call can be inserted
// We must have enough patching space so that call can be inserted.
// We cannot use fat nops here, since the concurrent code rewrite may transiently
// create the illegal instruction sequence.
while ((intx) _masm->pc() - (intx) patch->pc_start() < NativeCall::instruction_size) {
_masm->nop();
}
@ -592,9 +594,7 @@ void LIR_Assembler::emit_op1(LIR_Op1* op) {
void LIR_Assembler::emit_op0(LIR_Op0* op) {
switch (op->code()) {
case lir_word_align: {
while (code_offset() % BytesPerWord != 0) {
_masm->nop();
}
_masm->align(BytesPerWord);
break;
}

@ -620,12 +620,12 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
// Support for parallelizing survivor space rescan
if ((CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) || CMSParallelInitialMarkEnabled) {
const size_t max_plab_samples =
((DefNewGeneration*)_young_gen)->max_survivor_size() / plab_sample_minimum_size();
_young_gen->max_survivor_size() / (ThreadLocalAllocBuffer::min_size() * HeapWordSize);
_survivor_plab_array = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads, mtGC);
_survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, 2*max_plab_samples, mtGC);
_survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
_cursor = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads, mtGC);
_survivor_chunk_capacity = 2*max_plab_samples;
_survivor_chunk_capacity = max_plab_samples;
for (uint i = 0; i < ParallelGCThreads; i++) {
HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
ChunkArray* cur = ::new (&_survivor_plab_array[i]) ChunkArray(vec, max_plab_samples);
@ -641,12 +641,6 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
_inter_sweep_timer.start(); // start of time
}
size_t CMSCollector::plab_sample_minimum_size() {
// The default value of MinTLABSize is 2k, but there is
// no way to get the default value if the flag has been overridden.
return MAX2(ThreadLocalAllocBuffer::min_size() * HeapWordSize, 2 * K);
}
const char* ConcurrentMarkSweepGeneration::name() const {
return "concurrent mark-sweep generation";
}
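
With plab_sample_minimum_size() gone, the constructor above sizes the survivor sampling arrays directly from ThreadLocalAllocBuffer::min_size() and drops the 2x slack on the chunk array. A worked sketch of that arithmetic with assumed numbers:

```cpp
#include <cassert>
#include <cstddef>

int main() {
  const size_t HeapWordSize   = 8;              // 64-bit heap words
  const size_t min_tlab_words = 2 * 1024 / 8;   // assume a 2K minimum TLAB
  const size_t max_survivor   = 64 * 1024 * 1024;
  // Samples = survivor capacity / minimum TLAB size in bytes.
  size_t max_plab_samples = max_survivor / (min_tlab_words * HeapWordSize);
  size_t survivor_chunk_capacity = max_plab_samples;   // was 2 * max_plab_samples
  assert(survivor_chunk_capacity == 32 * 1024);
  return 0;
}
```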

@ -739,10 +739,6 @@ class CMSCollector: public CHeapObj<mtGC> {
size_t* _cursor;
ChunkArray* _survivor_plab_array;
// A bounded minimum size of PLABs, should not return too small values since
// this will affect the size of the data structures used for parallel young gen rescan
size_t plab_sample_minimum_size();
// Support for marking stack overflow handling
bool take_from_overflow_list(size_t num, CMSMarkStack* to_stack);
bool par_take_from_overflow_list(size_t num,

@ -300,5 +300,3 @@ HeapRegion* OldGCAllocRegion::release() {
}
return G1AllocRegion::release();
}

@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
#include "gc/g1/g1Allocator.hpp"
#include "gc/g1/g1Allocator.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1MarkSweep.hpp"
@ -67,11 +67,11 @@ void G1Allocator::reuse_retained_old_region(EvacuationInfo& evacuation_info,
// retired. We have to remove it now, since we don't allow regions
// we allocate to in the region sets. We'll re-add it later, when
// it's retired again.
_g1h->_old_set.remove(retained_region);
_g1h->old_set_remove(retained_region);
bool during_im = _g1h->collector_state()->during_initial_mark_pause();
retained_region->note_start_of_copying(during_im);
old->set(retained_region);
_g1h->_hr_printer.reuse(retained_region);
_g1h->hr_printer()->reuse(retained_region);
evacuation_info.set_alloc_regions_used_before(retained_region->used());
}
}
@ -116,15 +116,85 @@ void G1DefaultAllocator::abandon_gc_alloc_regions() {
G1PLAB::G1PLAB(size_t gclab_word_size) :
PLAB(gclab_word_size), _retired(true) { }
HeapWord* G1ParGCAllocator::allocate_direct_or_new_plab(InCSetState dest,
size_t word_sz,
AllocationContext_t context) {
size_t G1Allocator::unsafe_max_tlab_alloc(AllocationContext_t context) {
// Return the remaining space in the cur alloc region, but not less than
// the min TLAB size.
// Also, this value can be at most the humongous object threshold,
// since we can't allow tlabs to grow big enough to accommodate
// humongous objects.
HeapRegion* hr = mutator_alloc_region(context)->get();
size_t max_tlab = _g1h->max_tlab_size() * wordSize;
if (hr == NULL) {
return max_tlab;
} else {
return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab);
}
}
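
unsafe_max_tlab_alloc() moves into G1Allocator but keeps the same clamp: the current mutator region's free space, raised to MinTLABSize and capped at the humongous threshold. A standalone sketch of the clamp with illustrative sizes:

```cpp
#include <algorithm>
#include <cassert>
#include <cstddef>

int main() {
  const size_t MinTLABSize = 2 * 1024;    // assumed minimum TLAB, bytes
  const size_t max_tlab    = 512 * 1024;  // assumed humongous-object cap
  size_t region_free       = 300;         // nearly full region
  // Never report less than the minimum, never more than the cap.
  size_t result = std::min(std::max(region_free, MinTLABSize), max_tlab);
  assert(result == MinTLABSize);
  return 0;
}
```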
HeapWord* G1Allocator::par_allocate_during_gc(InCSetState dest,
size_t word_size,
AllocationContext_t context) {
switch (dest.value()) {
case InCSetState::Young:
return survivor_attempt_allocation(word_size, context);
case InCSetState::Old:
return old_attempt_allocation(word_size, context);
default:
ShouldNotReachHere();
return NULL; // Keep some compilers happy
}
}
HeapWord* G1Allocator::survivor_attempt_allocation(size_t word_size,
AllocationContext_t context) {
assert(!_g1h->is_humongous(word_size),
"we should not be seeing humongous-size allocations in this path");
HeapWord* result = survivor_gc_alloc_region(context)->attempt_allocation(word_size,
false /* bot_updates */);
if (result == NULL) {
MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
result = survivor_gc_alloc_region(context)->attempt_allocation_locked(word_size,
false /* bot_updates */);
}
if (result != NULL) {
_g1h->dirty_young_block(result, word_size);
}
return result;
}
HeapWord* G1Allocator::old_attempt_allocation(size_t word_size,
AllocationContext_t context) {
assert(!_g1h->is_humongous(word_size),
"we should not be seeing humongous-size allocations in this path");
HeapWord* result = old_gc_alloc_region(context)->attempt_allocation(word_size,
true /* bot_updates */);
if (result == NULL) {
MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
result = old_gc_alloc_region(context)->attempt_allocation_locked(word_size,
true /* bot_updates */);
}
return result;
}
G1PLABAllocator::G1PLABAllocator(G1Allocator* allocator) :
_g1h(G1CollectedHeap::heap()),
_allocator(allocator),
_survivor_alignment_bytes(calc_survivor_alignment_bytes()) {
}
HeapWord* G1PLABAllocator::allocate_direct_or_new_plab(InCSetState dest,
size_t word_sz,
AllocationContext_t context) {
size_t gclab_word_size = _g1h->desired_plab_sz(dest);
if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
G1PLAB* alloc_buf = alloc_buffer(dest, context);
alloc_buf->retire();
HeapWord* buf = _g1h->par_allocate_during_gc(dest, gclab_word_size, context);
HeapWord* buf = _allocator->par_allocate_during_gc(dest, gclab_word_size, context);
if (buf == NULL) {
return NULL; // Let caller handle allocation failure.
}
@ -136,14 +206,18 @@ HeapWord* G1ParGCAllocator::allocate_direct_or_new_plab(InCSetState dest,
assert(obj != NULL, "buffer was definitely big enough...");
return obj;
} else {
return _g1h->par_allocate_during_gc(dest, word_sz, context);
return _allocator->par_allocate_during_gc(dest, word_sz, context);
}
}
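
allocate_direct_or_new_plab() keeps its waste heuristic: refill a PLAB only when the object is small relative to ParallelGCBufferWastePct of a fresh buffer, otherwise allocate the object directly. A worked example with assumed values:

```cpp
#include <cassert>
#include <cstddef>

int main() {
  const size_t gclab_word_size          = 4096;  // assumed desired PLAB size
  const size_t ParallelGCBufferWastePct = 10;    // assumed waste threshold, %
  size_t small_obj = 128, large_obj = 1024;
  // 128 * 100 < 4096 * 10  -> retire the PLAB, refill, allocate inside it
  assert(small_obj * 100 < gclab_word_size * ParallelGCBufferWastePct);
  // 1024 * 100 >= 4096 * 10 -> allocate this object directly in the heap
  assert(!(large_obj * 100 < gclab_word_size * ParallelGCBufferWastePct));
  return 0;
}
```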
G1DefaultParGCAllocator::G1DefaultParGCAllocator(G1CollectedHeap* g1h) :
G1ParGCAllocator(g1h),
_surviving_alloc_buffer(g1h->desired_plab_sz(InCSetState::Young)),
_tenured_alloc_buffer(g1h->desired_plab_sz(InCSetState::Old)) {
void G1PLABAllocator::undo_allocation(InCSetState dest, HeapWord* obj, size_t word_sz, AllocationContext_t context) {
alloc_buffer(dest, context)->undo_allocation(obj, word_sz);
}
G1DefaultPLABAllocator::G1DefaultPLABAllocator(G1Allocator* allocator) :
G1PLABAllocator(allocator),
_surviving_alloc_buffer(_g1h->desired_plab_sz(InCSetState::Young)),
_tenured_alloc_buffer(_g1h->desired_plab_sz(InCSetState::Old)) {
for (uint state = 0; state < InCSetState::Num; state++) {
_alloc_buffers[state] = NULL;
}
@ -151,7 +225,7 @@ G1DefaultParGCAllocator::G1DefaultParGCAllocator(G1CollectedHeap* g1h) :
_alloc_buffers[InCSetState::Old] = &_tenured_alloc_buffer;
}
void G1DefaultParGCAllocator::retire_alloc_buffers() {
void G1DefaultPLABAllocator::retire_alloc_buffers() {
for (uint state = 0; state < InCSetState::Num; state++) {
G1PLAB* const buf = _alloc_buffers[state];
if (buf != NULL) {
@ -160,7 +234,7 @@ void G1DefaultParGCAllocator::retire_alloc_buffers() {
}
}
void G1DefaultParGCAllocator::waste(size_t& wasted, size_t& undo_wasted) {
void G1DefaultPLABAllocator::waste(size_t& wasted, size_t& undo_wasted) {
wasted = 0;
undo_wasted = 0;
for (uint state = 0; state < InCSetState::Num; state++) {
@ -190,8 +264,8 @@ bool G1ArchiveAllocator::alloc_new_region() {
}
assert(hr->is_empty(), err_msg("expected empty region (index %u)", hr->hrm_index()));
hr->set_archive();
_g1h->_old_set.add(hr);
_g1h->_hr_printer.alloc(hr, G1HRPrinter::Archive);
_g1h->old_set_add(hr);
_g1h->hr_printer()->alloc(hr, G1HRPrinter::Archive);
_allocated_regions.append(hr);
_allocation_region = hr;

@ -33,17 +33,36 @@
class EvacuationInfo;
// Base class for G1 allocators.
// Interface to keep track of which regions G1 is currently allocating into. Provides
// some accessors (e.g. allocating into them, or getting their occupancy).
// Also keeps track of retained regions across GCs.
class G1Allocator : public CHeapObj<mtGC> {
friend class VMStructs;
protected:
G1CollectedHeap* _g1h;
virtual MutatorAllocRegion* mutator_alloc_region(AllocationContext_t context) = 0;
// Accessors to the allocation regions.
virtual SurvivorGCAllocRegion* survivor_gc_alloc_region(AllocationContext_t context) = 0;
virtual OldGCAllocRegion* old_gc_alloc_region(AllocationContext_t context) = 0;
// Allocation attempt during GC for a survivor object / PLAB.
inline HeapWord* survivor_attempt_allocation(size_t word_size,
AllocationContext_t context);
// Allocation attempt during GC for an old object / PLAB.
inline HeapWord* old_attempt_allocation(size_t word_size,
AllocationContext_t context);
public:
G1Allocator(G1CollectedHeap* heap) : _g1h(heap) { }
virtual ~G1Allocator() { }
static G1Allocator* create_allocator(G1CollectedHeap* g1h);
#ifdef ASSERT
// Do we currently have an active mutator region to allocate into?
bool has_mutator_alloc_region(AllocationContext_t context) { return mutator_alloc_region(context)->get() != NULL; }
#endif
virtual void init_mutator_alloc_region() = 0;
virtual void release_mutator_alloc_region() = 0;
@ -51,24 +70,35 @@ public:
virtual void release_gc_alloc_regions(EvacuationInfo& evacuation_info) = 0;
virtual void abandon_gc_alloc_regions() = 0;
virtual MutatorAllocRegion* mutator_alloc_region(AllocationContext_t context) = 0;
virtual SurvivorGCAllocRegion* survivor_gc_alloc_region(AllocationContext_t context) = 0;
virtual OldGCAllocRegion* old_gc_alloc_region(AllocationContext_t context) = 0;
virtual size_t used_in_alloc_regions() = 0;
virtual bool is_retained_old_region(HeapRegion* hr) = 0;
// Management of retained regions.
void reuse_retained_old_region(EvacuationInfo& evacuation_info,
OldGCAllocRegion* old,
HeapRegion** retained);
virtual bool is_retained_old_region(HeapRegion* hr) = 0;
void reuse_retained_old_region(EvacuationInfo& evacuation_info,
OldGCAllocRegion* old,
HeapRegion** retained);
virtual HeapRegion* new_heap_region(uint hrs_index,
G1BlockOffsetSharedArray* sharedOffsetArray,
MemRegion mr) {
return new HeapRegion(hrs_index, sharedOffsetArray, mr);
}
// Allocate blocks of memory during mutator time.
inline HeapWord* attempt_allocation(size_t word_size, AllocationContext_t context);
inline HeapWord* attempt_allocation_locked(size_t word_size, AllocationContext_t context);
inline HeapWord* attempt_allocation_force(size_t word_size, AllocationContext_t context);
size_t unsafe_max_tlab_alloc(AllocationContext_t context);
// Allocate blocks of memory during garbage collection. Will ensure an
// allocation region, either by picking one or expanding the
// heap, and then allocate a block of the given size. The block
// may not be a humongous - it must fit into a single heap region.
HeapWord* par_allocate_during_gc(InCSetState dest,
size_t word_size,
AllocationContext_t context);
virtual size_t used_in_alloc_regions() = 0;
};
// The default allocator for G1.
// The default allocation region manager for G1. Provides a single mutator, survivor
// and old generation allocation region.
// Can retain the (single) old generation allocation region across GCs.
class G1DefaultAllocator : public G1Allocator {
protected:
// Alloc region used to satisfy mutator allocation requests.
@ -152,10 +182,14 @@ public:
}
};
class G1ParGCAllocator : public CHeapObj<mtGC> {
// Manages the PLABs used during garbage collection. Interface for allocation from PLABs.
// Needs to handle multiple contexts, extra alignment in any "survivor" area and some
// statistics.
class G1PLABAllocator : public CHeapObj<mtGC> {
friend class G1ParScanThreadState;
protected:
G1CollectedHeap* _g1h;
G1Allocator* _allocator;
// The survivor alignment in effect in bytes.
// == 0 : don't align survivors
@ -182,11 +216,10 @@ protected:
}
public:
G1ParGCAllocator(G1CollectedHeap* g1h) :
_g1h(g1h), _survivor_alignment_bytes(calc_survivor_alignment_bytes()) { }
virtual ~G1ParGCAllocator() { }
G1PLABAllocator(G1Allocator* allocator);
virtual ~G1PLABAllocator() { }
static G1ParGCAllocator* create_allocator(G1CollectedHeap* g1h);
static G1PLABAllocator* create_allocator(G1Allocator* allocator);
virtual void waste(size_t& wasted, size_t& undo_wasted) = 0;
@ -219,18 +252,18 @@ public:
return allocate_direct_or_new_plab(dest, word_sz, context);
}
void undo_allocation(InCSetState dest, HeapWord* obj, size_t word_sz, AllocationContext_t context) {
alloc_buffer(dest, context)->undo_allocation(obj, word_sz);
}
void undo_allocation(InCSetState dest, HeapWord* obj, size_t word_sz, AllocationContext_t context);
};
class G1DefaultParGCAllocator : public G1ParGCAllocator {
// The default PLAB allocator for G1. Keeps the current (single) PLAB for survivor
// and old generation allocation.
class G1DefaultPLABAllocator : public G1PLABAllocator {
G1PLAB _surviving_alloc_buffer;
G1PLAB _tenured_alloc_buffer;
G1PLAB* _alloc_buffers[InCSetState::Num];
public:
G1DefaultParGCAllocator(G1CollectedHeap* g1h);
G1DefaultPLABAllocator(G1Allocator* _allocator);
virtual G1PLAB* alloc_buffer(InCSetState dest, AllocationContext_t context) {
assert(dest.is_valid(),

@ -0,0 +1,46 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_G1_G1ALLOCATOR_INLINE_HPP
#define SHARE_VM_GC_G1_G1ALLOCATOR_INLINE_HPP
#include "gc/g1/g1Allocator.hpp"
#include "gc/g1/g1AllocRegion.inline.hpp"
HeapWord* G1Allocator::attempt_allocation(size_t word_size, AllocationContext_t context) {
return mutator_alloc_region(context)->attempt_allocation(word_size, false /* bot_updates */);
}
HeapWord* G1Allocator::attempt_allocation_locked(size_t word_size, AllocationContext_t context) {
HeapWord* result = mutator_alloc_region(context)->attempt_allocation_locked(word_size, false /* bot_updates */);
assert(result != NULL || mutator_alloc_region(context)->get() == NULL,
err_msg("Must not have a mutator alloc region if there is no memory, but is " PTR_FORMAT, p2i(mutator_alloc_region(context)->get())));
return result;
}
HeapWord* G1Allocator::attempt_allocation_force(size_t word_size, AllocationContext_t context) {
return mutator_alloc_region(context)->attempt_allocation_force(word_size, false /* bot_updates */);
}
#endif // SHARE_VM_GC_G1_G1ALLOCATOR_INLINE_HPP

@ -30,6 +30,6 @@ G1Allocator* G1Allocator::create_allocator(G1CollectedHeap* g1h) {
return new G1DefaultAllocator(g1h);
}
G1ParGCAllocator* G1ParGCAllocator::create_allocator(G1CollectedHeap* g1h) {
return new G1DefaultParGCAllocator(g1h);
G1PLABAllocator* G1PLABAllocator::create_allocator(G1Allocator* allocator) {
return new G1DefaultPLABAllocator(allocator);
}

@ -31,7 +31,7 @@
#include "gc/g1/concurrentG1Refine.hpp"
#include "gc/g1/concurrentG1RefineThread.hpp"
#include "gc/g1/concurrentMarkThread.inline.hpp"
#include "gc/g1/g1AllocRegion.inline.hpp"
#include "gc/g1/g1Allocator.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1CollectorState.hpp"
@ -815,22 +815,16 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
{
MutexLockerEx x(Heap_lock);
result = _allocator->mutator_alloc_region(context)->attempt_allocation_locked(word_size,
false /* bot_updates */);
result = _allocator->attempt_allocation_locked(word_size, context);
if (result != NULL) {
return result;
}
// If we reach here, attempt_allocation_locked() above failed to
// allocate a new region. So the mutator alloc region should be NULL.
assert(_allocator->mutator_alloc_region(context)->get() == NULL, "only way to get here");
if (GC_locker::is_active_and_needs_gc()) {
if (g1_policy()->can_expand_young_list()) {
// No need for an ergo verbose message here,
// can_expand_young_list() does this when it returns true.
result = _allocator->mutator_alloc_region(context)->attempt_allocation_force(word_size,
false /* bot_updates */);
result = _allocator->attempt_allocation_force(word_size, context);
if (result != NULL) {
return result;
}
@ -890,8 +884,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
// first attempt (without holding the Heap_lock) here and the
// follow-on attempt will be at the start of the next loop
// iteration (after taking the Heap_lock).
result = _allocator->mutator_alloc_region(context)->attempt_allocation(word_size,
false /* bot_updates */);
result = _allocator->attempt_allocation(word_size, context);
if (result != NULL) {
return result;
}
@ -1109,6 +1102,29 @@ void G1CollectedHeap::fill_archive_regions(MemRegion* ranges, size_t count) {
}
}
inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size,
uint* gc_count_before_ret,
uint* gclocker_retry_count_ret) {
assert_heap_not_locked_and_not_at_safepoint();
assert(!is_humongous(word_size), "attempt_allocation() should not "
"be called for humongous allocation requests");
AllocationContext_t context = AllocationContext::current();
HeapWord* result = _allocator->attempt_allocation(word_size, context);
if (result == NULL) {
result = attempt_allocation_slow(word_size,
context,
gc_count_before_ret,
gclocker_retry_count_ret);
}
assert_heap_not_locked();
if (result != NULL) {
dirty_young_block(result, word_size);
}
return result;
}
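
The newly inlined attempt_allocation() follows the usual fast-path/slow-path split: an unsynchronized attempt first, the locked slow path on failure, and dirty_young_block() only on success. An illustrative sketch of that shape (not the real G1 API):

```cpp
#include <cassert>
#include <cstddef>
#include <new>

struct FakeAllocator {
  bool fast_succeeds = false;
  void* attempt_fast(size_t n) { return fast_succeeds ? ::operator new(n) : nullptr; }
  void* attempt_slow(size_t n) { return ::operator new(n); }  // may "take locks"
};

static bool dirtied = false;
static void dirty_block(void*, size_t) { dirtied = true; }    // post-processing

void* attempt_allocation(FakeAllocator& a, size_t n) {
  void* result = a.attempt_fast(n);
  if (result == nullptr) {
    result = a.attempt_slow(n);
  }
  if (result != nullptr) {
    dirty_block(result, n);
  }
  return result;
}

int main() {
  FakeAllocator a;
  void* p = attempt_allocation(a, 64);
  assert(p != nullptr && dirtied);
  ::operator delete(p);
  return 0;
}
```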
HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
uint* gc_count_before_ret,
uint* gclocker_retry_count_ret) {
@ -1231,13 +1247,11 @@ HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
AllocationContext_t context,
bool expect_null_mutator_alloc_region) {
assert_at_safepoint(true /* should_be_vm_thread */);
assert(_allocator->mutator_alloc_region(context)->get() == NULL ||
!expect_null_mutator_alloc_region,
assert(!_allocator->has_mutator_alloc_region(context) || !expect_null_mutator_alloc_region,
"the current alloc region was unexpectedly found to be non-NULL");
if (!is_humongous(word_size)) {
return _allocator->mutator_alloc_region(context)->attempt_allocation_locked(word_size,
false /* bot_updates */);
return _allocator->attempt_allocation_locked(word_size, context);
} else {
HeapWord* result = humongous_obj_allocate(word_size, context);
if (result != NULL && g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {
@ -2373,7 +2387,6 @@ void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl,
assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
}
// Computes the sum of the storage used by the various regions.
size_t G1CollectedHeap::used() const {
size_t result = _summary_bytes_used + _allocator->used_in_alloc_regions();
@ -2632,6 +2645,11 @@ bool G1CollectedHeap::is_in_exact(const void* p) const {
}
#endif
bool G1CollectedHeap::obj_in_cs(oop obj) {
HeapRegion* r = _hrm.addr_to_region((HeapWord*) obj);
return r != NULL && r->in_collection_set();
}
// Iteration functions.
// Applies an ExtendedOopClosure onto all references of objects within a HeapRegion.
@ -2833,20 +2851,8 @@ size_t G1CollectedHeap::max_tlab_size() const {
}
size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
// Return the remaining space in the cur alloc region, but not less than
// the min TLAB size.
// Also, this value can be at most the humongous object threshold,
// since we can't allow tlabs to grow big enough to accommodate
// humongous objects.
HeapRegion* hr = _allocator->mutator_alloc_region(AllocationContext::current())->get();
size_t max_tlab = max_tlab_size() * wordSize;
if (hr == NULL) {
return max_tlab;
} else {
return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab);
}
AllocationContext_t context = AllocationContext::current();
return _allocator->unsafe_max_tlab_alloc(context);
}
size_t G1CollectedHeap::max_capacity() const {
@ -4279,18 +4285,18 @@ void G1CollectedHeap::remove_self_forwarding_pointers() {
g1_policy()->phase_times()->record_evac_fail_remove_self_forwards((os::elapsedTime() - remove_self_forwards_start) * 1000.0);
}
void G1CollectedHeap::preserve_mark_during_evac_failure(uint queue_num, oop obj, markOop m) {
void G1CollectedHeap::preserve_mark_during_evac_failure(uint worker_id, oop obj, markOop m) {
if (!_evacuation_failed) {
_evacuation_failed = true;
}
_evacuation_failed_info_array[queue_num].register_copy_failure(obj->size());
_evacuation_failed_info_array[worker_id].register_copy_failure(obj->size());
// We want to call the "for_promotion_failure" version only in the
// case of a promotion failure.
if (m->must_be_preserved_for_promotion_failure(obj)) {
OopAndMarkOop elem(obj, m);
_preserved_objs[queue_num].push(elem);
_preserved_objs[worker_id].push(elem);
}
}
@ -4334,7 +4340,7 @@ void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
assert(_worker_id == _par_scan_state->queue_num(), "sanity");
assert(_worker_id == _par_scan_state->worker_id(), "sanity");
const InCSetState state = _g1->in_cset_state(obj);
if (state.is_in_cset()) {
@ -4443,9 +4449,6 @@ protected:
ParallelTaskTerminator _terminator;
uint _n_workers;
Mutex _stats_lock;
Mutex* stats_lock() { return &_stats_lock; }
public:
G1ParTask(G1CollectedHeap* g1h, RefToScanQueueSet *task_queues, G1RootProcessor* root_processor, uint n_workers)
: AbstractGangTask("G1 collection"),
@ -4453,8 +4456,7 @@ public:
_queues(task_queues),
_root_processor(root_processor),
_terminator(n_workers, _queues),
_n_workers(n_workers),
_stats_lock(Mutex::leaf, "parallel G1 stats lock", true)
_n_workers(n_workers)
{}
RefToScanQueueSet* queues() { return _queues; }
@ -4581,8 +4583,8 @@ public:
_g1h->update_surviving_young_words(pss.surviving_young_words()+1);
if (PrintTerminationStats) {
MutexLocker x(stats_lock());
pss.print_termination_stats(worker_id);
MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
pss.print_termination_stats();
}
assert(pss.queue_is_empty(), "should be empty");
@ -5009,7 +5011,7 @@ public:
bool G1STWIsAliveClosure::do_object_b(oop p) {
// An object is reachable if it is outside the collection set,
// or is inside and copied.
return !_g1->obj_in_cs(p) || p->is_forwarded();
return !_g1->is_in_cset(p) || p->is_forwarded();
}
// Non Copying Keep Alive closure
@ -5498,7 +5500,9 @@ void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info) {
}
// The individual threads will set their evac-failure closures.
if (PrintTerminationStats) G1ParScanThreadState::print_termination_stats_hdr();
if (PrintTerminationStats) {
G1ParScanThreadState::print_termination_stats_hdr();
}
workers()->run_task(&g1_par_task);
end_par_time_sec = os::elapsedTime();
@ -6491,7 +6495,6 @@ HeapRegion* G1CollectedHeap::alloc_highest_free_region() {
return NULL;
}
// Heap region set verification
class VerifyRegionListsClosure : public HeapRegionClosure {

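The hunks above route every mutator allocation through the G1Allocator facade (attempt_allocation / attempt_allocation_locked / attempt_allocation_force) instead of dereferencing mutator_alloc_region(context) and passing bot_updates flags at each call site. A minimal, self-contained sketch of that delegation shape follows; all class and member names here are simplified stand-ins, not the real HotSpot types.

#include <cstddef>
#include <mutex>

typedef char* Word;   // stand-in for HeapWord*

class MutatorAllocRegion {
public:
  // Lock-free bump allocation in the current region (modeled trivially).
  Word attempt_allocation(size_t word_size) { return try_bump(word_size); }
  // Called with the heap lock held; may also retire/refill the region.
  Word attempt_allocation_locked(size_t word_size) { return try_bump(word_size); }
private:
  Word try_bump(size_t) { return nullptr; /* out of space in this toy model */ }
};

// After the refactoring the heap talks only to this facade; the bot_updates
// flag and the region object become an implementation detail of the allocator.
class SimpleG1Allocator {
public:
  Word attempt_allocation(size_t word_size) {
    return _mutator_region.attempt_allocation(word_size);
  }
  Word attempt_allocation_locked(size_t word_size) {
    return _mutator_region.attempt_allocation_locked(word_size);
  }
private:
  MutatorAllocRegion _mutator_region;
};

// Shape of the mutator path after the change: fast path first, then the
// locked attempt (the real slow path also retries around GC pauses).
class SimpleHeap {
public:
  Word attempt_allocation(size_t word_size) {
    Word result = _allocator.attempt_allocation(word_size);        // fast path
    if (result == nullptr) {
      std::lock_guard<std::mutex> x(_heap_lock);                   // Heap_lock role
      result = _allocator.attempt_allocation_locked(word_size);    // slow path
    }
    return result;
  }
private:
  SimpleG1Allocator _allocator;
  std::mutex _heap_lock;
};

int main() {
  SimpleHeap heap;
  (void)heap.attempt_allocation(16);
  return 0;
}

The point of the facade is that G1CollectedHeap no longer needs to know which alloc region backs a request or whether BOT updates apply to it.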
@ -27,7 +27,6 @@
#include "gc/g1/concurrentMark.hpp"
#include "gc/g1/evacuationInfo.hpp"
#include "gc/g1/g1AllocRegion.hpp"
#include "gc/g1/g1AllocationContext.hpp"
#include "gc/g1/g1Allocator.hpp"
#include "gc/g1/g1BiasedArray.hpp"
@ -187,13 +186,11 @@ class G1CollectedHeap : public CollectedHeap {
friend class MutatorAllocRegion;
friend class SurvivorGCAllocRegion;
friend class OldGCAllocRegion;
friend class G1Allocator;
friend class G1ArchiveAllocator;
// Closures used in implementation.
friend class G1ParScanThreadState;
friend class G1ParTask;
friend class G1ParGCAllocator;
friend class G1PLABAllocator;
friend class G1PrepareCompactClosure;
// Other related classes.
@ -248,7 +245,7 @@ private:
// The sequence of all heap regions in the heap.
HeapRegionManager _hrm;
// Class that handles the different kinds of allocations.
// Handles non-humongous allocations in the G1CollectedHeap.
G1Allocator* _allocator;
// Outside of GC pauses, the number of bytes used in all regions other
@ -280,22 +277,6 @@ private:
// start of each GC.
bool _expand_heap_after_alloc_failure;
// It resets the mutator alloc region before new allocations can take place.
void init_mutator_alloc_region();
// It releases the mutator alloc region.
void release_mutator_alloc_region();
// It initializes the GC alloc regions at the start of a GC.
void init_gc_alloc_regions(EvacuationInfo& evacuation_info);
// It releases the GC alloc regions at the end of a GC.
void release_gc_alloc_regions(EvacuationInfo& evacuation_info);
// It does any cleanup that needs to be done on the GC alloc regions
// before a Full GC.
void abandon_gc_alloc_regions();
// Helper for monitoring and management support.
G1MonitoringSupport* _g1mm;
@ -551,31 +532,6 @@ protected:
AllocationContext_t context,
bool expect_null_mutator_alloc_region);
// It dirties the cards that cover the block so that the post
// write barrier never queues anything when updating objects on this
// block. It is assumed (and in fact we assert) that the block
// belongs to a young region.
inline void dirty_young_block(HeapWord* start, size_t word_size);
// Allocate blocks during garbage collection. Will ensure an
// allocation region, either by picking one or expanding the
// heap, and then allocate a block of the given size. The block
// may not be a humongous - it must fit into a single heap region.
inline HeapWord* par_allocate_during_gc(InCSetState dest,
size_t word_size,
AllocationContext_t context);
// Ensure that no further allocations can happen in "r", bearing in mind
// that parallel threads might be attempting allocations.
void par_allocate_remaining_space(HeapRegion* r);
// Allocation attempt during GC for a survivor object / PLAB.
inline HeapWord* survivor_attempt_allocation(size_t word_size,
AllocationContext_t context);
// Allocation attempt during GC for an old object / PLAB.
inline HeapWord* old_attempt_allocation(size_t word_size,
AllocationContext_t context);
// These methods are the "callbacks" from the G1AllocRegion class.
// For mutator alloc regions.
@ -589,10 +545,6 @@ protected:
void retire_gc_alloc_region(HeapRegion* alloc_region,
size_t allocated_bytes, InCSetState dest);
// Allocate the highest free region in the reserved heap. This will commit
// regions as necessary.
HeapRegion* alloc_highest_free_region();
// - if explicit_gc is true, the GC is for a System.gc() or a heap
// inspection request and should collect the entire heap
// - if clear_all_soft_refs is true, all soft references should be
@ -725,6 +677,13 @@ public:
G1HRPrinter* hr_printer() { return &_hr_printer; }
// Allocates a new heap region instance.
HeapRegion* new_heap_region(uint hrs_index, MemRegion mr);
// Allocate the highest free region in the reserved heap. This will commit
// regions as necessary.
HeapRegion* alloc_highest_free_region();
// Frees a non-humongous region by initializing its contents and
// adding it to the free list that's passed as a parameter (this is
// usually a local list which will be appended to the master free
@ -738,6 +697,12 @@ public:
bool par,
bool locked = false);
// It dirties the cards that cover the block so that the post
// write barrier never queues anything when updating objects on this
// block. It is assumed (and in fact we assert) that the block
// belongs to a young region.
inline void dirty_young_block(HeapWord* start, size_t word_size);
// Frees a humongous region by collapsing it into individual regions
// and calling free_region() for each of them. The freed regions
// will be added to the free list that's passed as a parameter (this
@ -887,7 +852,7 @@ protected:
// Preserve the mark of "obj", if necessary, in preparation for its mark
// word being overwritten with a self-forwarding-pointer.
void preserve_mark_during_evac_failure(uint queue, oop obj, markOop m);
void preserve_mark_during_evac_failure(uint worker_id, oop obj, markOop m);
#ifndef PRODUCT
// Support for forcing evacuation failures. Analogous to
@ -1216,6 +1181,7 @@ public:
}
}
inline void old_set_add(HeapRegion* hr);
inline void old_set_remove(HeapRegion* hr);
size_t non_young_capacity_bytes() {
@ -1263,7 +1229,7 @@ public:
// Return "TRUE" iff the given object address is within the collection
// set. Slow implementation.
inline bool obj_in_cs(oop obj);
bool obj_in_cs(oop obj);
inline bool is_in_cset(const HeapRegion *hr);
inline bool is_in_cset(oop obj);

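The dirty_young_block() comment retained above explains why freshly allocated young blocks get their covering cards pre-dirtied: the post write barrier then never enqueues those cards for refinement. A toy card table illustrating that marking is sketched here; the card size and mark values are illustrative, not the real G1SATBCardTableModRefBS constants.

#include <cassert>
#include <cstdint>
#include <vector>

class ToyCardTable {
public:
  enum { card_shift = 9, clean_card = 0xff, g1_young_card = 0x02 };  // 512-byte cards

  explicit ToyCardTable(size_t heap_bytes)
    : _cards((heap_bytes >> card_shift) + 1, clean_card) {}

  // Mirrors the intent of dirty_young_block(): pre-mark every card that
  // covers [start, start + bytes) so the post write barrier sees a
  // non-clean card and never enqueues it.
  void dirty_young_block(size_t start, size_t bytes) {
    size_t first = start >> card_shift;
    size_t last  = (start + bytes - 1) >> card_shift;
    for (size_t i = first; i <= last; ++i) {
      _cards[i] = g1_young_card;
    }
  }

  bool is_clean(size_t addr) const { return _cards[addr >> card_shift] == clean_card; }

private:
  std::vector<uint8_t> _cards;
};

int main() {
  ToyCardTable ct(1 << 20);
  ct.dirty_young_block(4096, 1500);        // a block spanning multiple cards
  assert(!ct.is_clean(4096));
  assert(!ct.is_clean(4096 + 1499));
  return 0;
}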
@ -26,7 +26,6 @@
#define SHARE_VM_GC_G1_G1COLLECTEDHEAP_INLINE_HPP
#include "gc/g1/concurrentMark.hpp"
#include "gc/g1/g1AllocRegion.inline.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1CollectorState.hpp"
@ -57,20 +56,6 @@ size_t G1CollectedHeap::desired_plab_sz(InCSetState dest) {
return MIN2(_humongous_object_threshold_in_words, gclab_word_size);
}
HeapWord* G1CollectedHeap::par_allocate_during_gc(InCSetState dest,
size_t word_size,
AllocationContext_t context) {
switch (dest.value()) {
case InCSetState::Young:
return survivor_attempt_allocation(word_size, context);
case InCSetState::Old:
return old_attempt_allocation(word_size, context);
default:
ShouldNotReachHere();
return NULL; // Keep some compilers happy
}
}
// Inline functions for G1CollectedHeap
inline AllocationContextStats& G1CollectedHeap::allocation_context_stats() {
@ -122,71 +107,14 @@ inline void G1CollectedHeap::increment_gc_time_stamp() {
OrderAccess::fence();
}
inline void G1CollectedHeap::old_set_add(HeapRegion* hr) {
_old_set.add(hr);
}
inline void G1CollectedHeap::old_set_remove(HeapRegion* hr) {
_old_set.remove(hr);
}
inline bool G1CollectedHeap::obj_in_cs(oop obj) {
HeapRegion* r = _hrm.addr_to_region((HeapWord*) obj);
return r != NULL && r->in_collection_set();
}
inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size,
uint* gc_count_before_ret,
uint* gclocker_retry_count_ret) {
assert_heap_not_locked_and_not_at_safepoint();
assert(!is_humongous(word_size), "attempt_allocation() should not "
"be called for humongous allocation requests");
AllocationContext_t context = AllocationContext::current();
HeapWord* result = _allocator->mutator_alloc_region(context)->attempt_allocation(word_size,
false /* bot_updates */);
if (result == NULL) {
result = attempt_allocation_slow(word_size,
context,
gc_count_before_ret,
gclocker_retry_count_ret);
}
assert_heap_not_locked();
if (result != NULL) {
dirty_young_block(result, word_size);
}
return result;
}
inline HeapWord* G1CollectedHeap::survivor_attempt_allocation(size_t word_size,
AllocationContext_t context) {
assert(!is_humongous(word_size),
"we should not be seeing humongous-size allocations in this path");
HeapWord* result = _allocator->survivor_gc_alloc_region(context)->attempt_allocation(word_size,
false /* bot_updates */);
if (result == NULL) {
MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
result = _allocator->survivor_gc_alloc_region(context)->attempt_allocation_locked(word_size,
false /* bot_updates */);
}
if (result != NULL) {
dirty_young_block(result, word_size);
}
return result;
}
inline HeapWord* G1CollectedHeap::old_attempt_allocation(size_t word_size,
AllocationContext_t context) {
assert(!is_humongous(word_size),
"we should not be seeing humongous-size allocations in this path");
HeapWord* result = _allocator->old_gc_alloc_region(context)->attempt_allocation(word_size,
true /* bot_updates */);
if (result == NULL) {
MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
result = _allocator->old_gc_alloc_region(context)->attempt_allocation_locked(word_size,
true /* bot_updates */);
}
return result;
}
// It dirties the cards that cover the block so that the post
// write barrier never queues anything when updating objects on this
// block. It is assumed (and in fact we assert) that the block

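The code removed from g1CollectedHeap.inline.hpp above (par_allocate_during_gc, survivor_attempt_allocation, old_attempt_allocation) dispatched on the destination state and retried under FreeList_lock; that responsibility now lives in the allocator. A self-contained sketch of the dispatch-then-retry pattern, using simplified stand-in types rather than the real APIs:

#include <cstddef>
#include <mutex>

typedef char* Word;
enum class Dest { Young, Old };   // stand-in for InCSetState

class ToyAllocRegion {
public:
  Word attempt_allocation(size_t) { return nullptr; }          // lock-free fast path
  Word attempt_allocation_locked(size_t) { return nullptr; }   // refill under lock
};

// Shape of the logic that moved from G1CollectedHeap into the allocator:
// pick the region by destination, try lock-free first, then retry while
// holding the free-list lock so the region can be refilled.
class ToyGCAllocator {
public:
  Word par_allocate_during_gc(Dest dest, size_t word_size) {
    switch (dest) {
      case Dest::Young: return attempt(_survivor_region, word_size);
      case Dest::Old:   return attempt(_old_region, word_size);
    }
    return nullptr;   // keep compilers happy
  }
private:
  Word attempt(ToyAllocRegion& r, size_t word_size) {
    Word result = r.attempt_allocation(word_size);
    if (result == nullptr) {
      std::lock_guard<std::mutex> x(_free_list_lock);   // FreeList_lock role
      result = r.attempt_allocation_locked(word_size);
    }
    return result;
  }
  ToyAllocRegion _survivor_region;
  ToyAllocRegion _old_region;
  std::mutex _free_list_lock;
};

int main() {
  ToyGCAllocator a;
  (void)a.par_allocate_during_gc(Dest::Young, 8);
  return 0;
}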
@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/heapRegion.inline.hpp"
bool G1CollectedHeap::copy_allocation_context_stats(const jint* contexts,
jlong* totals,
@ -31,3 +32,8 @@ bool G1CollectedHeap::copy_allocation_context_stats(const jint* contexts,
jint len) {
return false;
}
HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index,
MemRegion mr) {
return new HeapRegion(hrs_index, bot_shared(), mr);
}

@ -31,6 +31,7 @@
#include "gc/g1/g1ErgoVerbose.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1Log.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "runtime/arguments.hpp"

@ -48,7 +48,7 @@ void G1ParClosureSuper::set_par_scan_thread_state(G1ParScanThreadState* par_scan
assert(par_scan_state != NULL, "Must set par_scan_state to non-NULL.");
_par_scan_state = par_scan_state;
_worker_id = par_scan_state->queue_num();
_worker_id = par_scan_state->worker_id();
assert(_worker_id < ParallelGCThreads,
err_msg("The given worker id %u must be less than the number of threads %u", _worker_id, ParallelGCThreads));

@ -31,6 +31,7 @@
#include "gc/g1/g1ParScanThreadState.inline.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "gc/g1/g1RemSet.inline.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "memory/iterator.inline.hpp"
#include "runtime/prefetch.inline.hpp"

@ -31,13 +31,13 @@
#include "oops/oop.inline.hpp"
#include "runtime/prefetch.inline.hpp"
G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp)
G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint worker_id, ReferenceProcessor* rp)
: _g1h(g1h),
_refs(g1h->task_queue(queue_num)),
_refs(g1h->task_queue(worker_id)),
_dcq(&g1h->dirty_card_queue_set()),
_ct_bs(g1h->g1_barrier_set()),
_g1_rem(g1h->g1_rem_set()),
_hash_seed(17), _queue_num(queue_num),
_hash_seed(17), _worker_id(worker_id),
_term_attempts(0),
_tenuring_threshold(g1h->g1_policy()->tenuring_threshold()),
_age_table(false), _scanner(g1h, rp),
@ -59,7 +59,7 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num,
_surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
memset(_surviving_young_words, 0, (size_t) real_length * sizeof(size_t));
_g1_par_allocator = G1ParGCAllocator::create_allocator(_g1h);
_plab_allocator = G1PLABAllocator::create_allocator(_g1h->allocator());
_dest[InCSetState::NotInCSet] = InCSetState::NotInCSet;
// The dest for Young is used when the objects are aged enough to
@ -71,37 +71,29 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num,
}
G1ParScanThreadState::~G1ParScanThreadState() {
_g1_par_allocator->retire_alloc_buffers();
delete _g1_par_allocator;
_plab_allocator->retire_alloc_buffers();
delete _plab_allocator;
FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base);
}
void
G1ParScanThreadState::print_termination_stats_hdr(outputStream* const st)
{
void G1ParScanThreadState::print_termination_stats_hdr(outputStream* const st) {
st->print_raw_cr("GC Termination Stats");
st->print_raw_cr(" elapsed --strong roots-- -------termination-------"
" ------waste (KiB)------");
st->print_raw_cr("thr ms ms % ms % attempts"
" total alloc undo");
st->print_raw_cr("--- --------- --------- ------ --------- ------ --------"
" ------- ------- -------");
st->print_raw_cr(" elapsed --strong roots-- -------termination------- ------waste (KiB)------");
st->print_raw_cr("thr ms ms % ms % attempts total alloc undo");
st->print_raw_cr("--- --------- --------- ------ --------- ------ -------- ------- ------- -------");
}
void
G1ParScanThreadState::print_termination_stats(int i,
outputStream* const st) const
{
void G1ParScanThreadState::print_termination_stats(outputStream* const st) const {
const double elapsed_ms = elapsed_time() * 1000.0;
const double s_roots_ms = strong_roots_time() * 1000.0;
const double term_ms = term_time() * 1000.0;
size_t alloc_buffer_waste = 0;
size_t undo_waste = 0;
_g1_par_allocator->waste(alloc_buffer_waste, undo_waste);
st->print_cr("%3d %9.2f %9.2f %6.2f "
_plab_allocator->waste(alloc_buffer_waste, undo_waste);
st->print_cr("%3u %9.2f %9.2f %6.2f "
"%9.2f %6.2f " SIZE_FORMAT_W(8) " "
SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7),
i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,
_worker_id, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,
term_ms, term_ms * 100 / elapsed_ms, term_attempts(),
(alloc_buffer_waste + undo_waste) * HeapWordSize / K,
alloc_buffer_waste * HeapWordSize / K,
@ -167,8 +159,9 @@ HeapWord* G1ParScanThreadState::allocate_in_next_plab(InCSetState const state,
// Right now we only have two types of regions (young / old) so
// let's keep the logic here simple. We can generalize it when necessary.
if (dest->is_young()) {
HeapWord* const obj_ptr = _g1_par_allocator->allocate(InCSetState::Old,
word_sz, context);
HeapWord* const obj_ptr = _plab_allocator->allocate(InCSetState::Old,
word_sz,
context);
if (obj_ptr == NULL) {
return NULL;
}
@ -209,12 +202,12 @@ oop G1ParScanThreadState::copy_to_survivor_space(InCSetState const state,
uint age = 0;
InCSetState dest_state = next_state(state, old_mark, age);
HeapWord* obj_ptr = _g1_par_allocator->plab_allocate(dest_state, word_sz, context);
HeapWord* obj_ptr = _plab_allocator->plab_allocate(dest_state, word_sz, context);
// PLAB allocations should succeed most of the time, so we'll
// normally check against NULL once and that's it.
if (obj_ptr == NULL) {
obj_ptr = _g1_par_allocator->allocate_direct_or_new_plab(dest_state, word_sz, context);
obj_ptr = _plab_allocator->allocate_direct_or_new_plab(dest_state, word_sz, context);
if (obj_ptr == NULL) {
obj_ptr = allocate_in_next_plab(state, &dest_state, word_sz, context);
if (obj_ptr == NULL) {
@ -233,7 +226,7 @@ oop G1ParScanThreadState::copy_to_survivor_space(InCSetState const state,
if (_g1h->evacuation_should_fail()) {
// Doing this after all the allocation attempts also tests the
// undo_allocation() method too.
_g1_par_allocator->undo_allocation(dest_state, obj_ptr, word_sz, context);
_plab_allocator->undo_allocation(dest_state, obj_ptr, word_sz, context);
return handle_evacuation_failure_par(old, old_mark);
}
#endif // !PRODUCT
@ -274,7 +267,7 @@ oop G1ParScanThreadState::copy_to_survivor_space(InCSetState const state,
"sanity");
G1StringDedup::enqueue_from_evacuation(is_from_young,
is_to_young,
queue_num(),
_worker_id,
obj);
}
@ -295,7 +288,7 @@ oop G1ParScanThreadState::copy_to_survivor_space(InCSetState const state,
}
return obj;
} else {
_g1_par_allocator->undo_allocation(dest_state, obj_ptr, word_sz, context);
_plab_allocator->undo_allocation(dest_state, obj_ptr, word_sz, context);
return forward_ptr;
}
}
@ -314,7 +307,7 @@ oop G1ParScanThreadState::handle_evacuation_failure_par(oop old, markOop m) {
_g1h->hr_printer()->evac_failure(r);
}
_g1h->preserve_mark_during_evac_failure(_queue_num, old, m);
_g1h->preserve_mark_during_evac_failure(_worker_id, old, m);
_scanner.set_region(r);
old->oop_iterate_backwards(&_scanner);

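copy_to_survivor_space() above tries the current PLAB first, then a direct or refilled PLAB, then the next destination's PLAB (allocate_in_next_plab), and only then takes the evacuation-failure path; undo_allocation() rolls a slot back when the forwarding race is lost. A compact model of that fallback chain follows; the types are stand-ins and the allocation bodies are placeholders.

#include <cstddef>
#include <iostream>

typedef char* Word;
enum class Dest { Young, Old };

// Minimal stand-in for G1PLABAllocator: the names mirror the calls in the
// hunks above, but the bodies are placeholders.
struct ToyPLABAllocator {
  Word plab_allocate(Dest, size_t)               { return nullptr; } // in-PLAB bump
  Word allocate_direct_or_new_plab(Dest, size_t) { return nullptr; } // refill or direct
  void undo_allocation(Dest, Word, size_t)       {}                  // lost the race
};

struct ToyScanState {
  ToyPLABAllocator plab;

  // Fallback order used when copying an object during evacuation:
  // current PLAB -> new/direct PLAB -> next destination's PLAB -> failure.
  Word allocate_copy_slot(Dest dest, size_t word_sz) {
    Word p = plab.plab_allocate(dest, word_sz);
    if (p == nullptr) {
      p = plab.allocate_direct_or_new_plab(dest, word_sz);
      if (p == nullptr && dest == Dest::Young) {
        // Models allocate_in_next_plab(): a survivor-bound object may still
        // be copied into an old-gen PLAB when the young PLABs are exhausted.
        p = plab.allocate_direct_or_new_plab(Dest::Old, word_sz);
      }
    }
    return p;   // nullptr here would trigger the evacuation-failure path
  }
};

int main() {
  ToyScanState pss;
  if (pss.allocate_copy_slot(Dest::Young, 16) == nullptr) {
    std::cout << "evacuation failure path\n";
  }
  return 0;
}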
@ -46,7 +46,7 @@ class G1ParScanThreadState : public StackObj {
G1SATBCardTableModRefBS* _ct_bs;
G1RemSet* _g1_rem;
G1ParGCAllocator* _g1_par_allocator;
G1PLABAllocator* _plab_allocator;
ageTable _age_table;
InCSetState _dest[InCSetState::Num];
@ -55,7 +55,7 @@ class G1ParScanThreadState : public StackObj {
G1ParScanClosure _scanner;
int _hash_seed;
uint _queue_num;
uint _worker_id;
size_t _term_attempts;
@ -85,7 +85,7 @@ class G1ParScanThreadState : public StackObj {
}
public:
G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp);
G1ParScanThreadState(G1CollectedHeap* g1h, uint worker_id, ReferenceProcessor* rp);
~G1ParScanThreadState();
ageTable* age_table() { return &_age_table; }
@ -112,8 +112,7 @@ class G1ParScanThreadState : public StackObj {
}
}
int* hash_seed() { return &_hash_seed; }
uint queue_num() { return _queue_num; }
uint worker_id() { return _worker_id; }
size_t term_attempts() const { return _term_attempts; }
void note_term_attempt() { _term_attempts++; }
@ -139,8 +138,11 @@ class G1ParScanThreadState : public StackObj {
return os::elapsedTime() - _start;
}
// Print the header for the per-thread termination statistics.
static void print_termination_stats_hdr(outputStream* const st = gclog_or_tty);
void print_termination_stats(int i, outputStream* const st = gclog_or_tty) const;
// Print actual per-thread termination statistics.
void print_termination_stats(outputStream* const st = gclog_or_tty) const;
size_t* surviving_young_words() {
// We add on to hide entry 0 which accumulates surviving words for

@ -56,7 +56,7 @@ template <class T> void G1ParScanThreadState::do_oop_evac(T* p, HeapRegion* from
}
assert(obj != NULL, "Must be");
update_rs(from, p, queue_num());
update_rs(from, p, _worker_id);
}
template <class T> inline void G1ParScanThreadState::push_on_queue(T* ref) {
@ -136,7 +136,7 @@ inline void G1ParScanThreadState::dispatch_reference(StarTask ref) {
void G1ParScanThreadState::steal_and_trim_queue(RefToScanQueueSet *task_queues) {
StarTask stolen_task;
while (task_queues->steal(queue_num(), hash_seed(), stolen_task)) {
while (task_queues->steal(_worker_id, &_hash_seed, stolen_task)) {
assert(verify_task(stolen_task), "sanity");
dispatch_reference(stolen_task);

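steal_and_trim_queue() above keeps stealing references from other workers' queues (now keyed by _worker_id) and dispatching each stolen task until no victim has work. The sketch below models that drain loop with std::deque queues and a trivial victim scan; the real GenericTaskQueueSet uses lock-free queues and randomized victim selection, which this does not attempt to reproduce.

#include <cstddef>
#include <deque>
#include <iostream>
#include <vector>

typedef int StarTask;   // stand-in for the real StarTask

// A toy task-queue set: each worker owns one deque; steal() takes from the
// back of some other worker's queue (victim selection is simplified).
class ToyTaskQueueSet {
public:
  explicit ToyTaskQueueSet(size_t n) : _queues(n) {}
  std::deque<StarTask>& queue(size_t i) { return _queues[i]; }

  bool steal(size_t thief, StarTask& out) {
    for (size_t v = 0; v < _queues.size(); ++v) {
      if (v != thief && !_queues[v].empty()) {
        out = _queues[v].back();
        _queues[v].pop_back();
        return true;
      }
    }
    return false;
  }
private:
  std::vector<std::deque<StarTask>> _queues;
};

// Shape of steal_and_trim_queue(): steal until no victim has work and
// dispatch (here: print) each stolen reference.
static void steal_and_trim_queue(ToyTaskQueueSet& qs, size_t worker_id) {
  StarTask stolen;
  while (qs.steal(worker_id, stolen)) {
    std::cout << "worker " << worker_id << " processes task " << stolen << "\n";
  }
}

int main() {
  ToyTaskQueueSet qs(2);
  qs.queue(1).push_back(7);
  qs.queue(1).push_back(8);
  steal_and_trim_queue(qs, 0);
  return 0;
}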
@ -34,6 +34,7 @@
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1RemSet.inline.hpp"
#include "gc/g1/g1RootProcessor.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/mutex.hpp"

@ -497,20 +497,10 @@ class HeapRegion: public G1OffsetTableContigSpace {
return _rem_set;
}
bool in_collection_set() const;
inline bool in_collection_set() const;
HeapRegion* next_in_collection_set() {
assert(in_collection_set(), "should only invoke on member of CS.");
assert(_next_in_special_set == NULL ||
_next_in_special_set->in_collection_set(),
"Malformed CS.");
return _next_in_special_set;
}
void set_next_in_collection_set(HeapRegion* r) {
assert(in_collection_set(), "should only invoke on member of CS.");
assert(r == NULL || r->in_collection_set(), "Malformed CS.");
_next_in_special_set = r;
}
inline HeapRegion* next_in_collection_set() const;
inline void set_next_in_collection_set(HeapRegion* r);
void set_allocation_context(AllocationContext_t context) {
_allocation_context = context;

@ -26,7 +26,7 @@
#define SHARE_VM_GC_G1_HEAPREGION_INLINE_HPP
#include "gc/g1/g1BlockOffsetTable.inline.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/shared/space.hpp"
#include "oops/oop.inline.hpp"
@ -200,4 +200,18 @@ inline bool HeapRegion::in_collection_set() const {
return G1CollectedHeap::heap()->is_in_cset(this);
}
inline HeapRegion* HeapRegion::next_in_collection_set() const {
assert(in_collection_set(), "should only invoke on member of CS.");
assert(_next_in_special_set == NULL ||
_next_in_special_set->in_collection_set(),
"Malformed CS.");
return _next_in_special_set;
}
void HeapRegion::set_next_in_collection_set(HeapRegion* r) {
assert(in_collection_set(), "should only invoke on member of CS.");
assert(r == NULL || r->in_collection_set(), "Malformed CS.");
_next_in_special_set = r;
}
#endif // SHARE_VM_GC_G1_HEAPREGION_INLINE_HPP

@ -70,7 +70,7 @@ HeapRegion* HeapRegionManager::new_heap_region(uint hrm_index) {
HeapWord* bottom = g1h->bottom_addr_for_region(hrm_index);
MemRegion mr(bottom, bottom + HeapRegion::GrainWords);
assert(reserved().contains(mr), "invariant");
return g1h->allocator()->new_heap_region(hrm_index, g1h->bot_shared(), mr);
return g1h->new_heap_region(hrm_index, mr);
}
void HeapRegionManager::commit_regions(uint index, size_t num_regions) {

@ -94,8 +94,9 @@ void VM_G1IncCollectionPause::doit() {
if (_word_size > 0) {
// An allocation has been requested. So, try to do that first.
_result = g1h->attempt_allocation_at_safepoint(_word_size, allocation_context(),
false /* expect_null_cur_alloc_region */);
_result = g1h->attempt_allocation_at_safepoint(_word_size,
allocation_context(),
false /* expect_null_cur_alloc_region */);
if (_result != NULL) {
// If we can successfully allocate before we actually do the
// pause then we will consider this pause successful.
@ -147,8 +148,9 @@ void VM_G1IncCollectionPause::doit() {
g1h->do_collection_pause_at_safepoint(_target_pause_time_ms);
if (_pause_succeeded && _word_size > 0) {
// An allocation had been requested.
_result = g1h->attempt_allocation_at_safepoint(_word_size, allocation_context(),
true /* expect_null_cur_alloc_region */);
_result = g1h->attempt_allocation_at_safepoint(_word_size,
allocation_context(),
true /* expect_null_cur_alloc_region */);
} else {
assert(_result == NULL, "invariant");
if (!_pause_succeeded) {

@ -1303,7 +1303,7 @@ void SignatureHandlerLibrary::add(uint64_t fingerprint, address handler) {
if (handler_index < 0) {
if (PrintSignatureHandlers && (handler != Interpreter::slow_signature_handler())) {
tty->cr();
tty->print_cr("argument handler #%d at "PTR_FORMAT" for fingerprint " UINT64_FORMAT,
tty->print_cr("argument handler #%d at " PTR_FORMAT " for fingerprint " UINT64_FORMAT,
_handlers->length(),
handler,
fingerprint);
@ -1313,7 +1313,7 @@ void SignatureHandlerLibrary::add(uint64_t fingerprint, address handler) {
} else {
if (PrintSignatureHandlers) {
tty->cr();
tty->print_cr("duplicate argument handler #%d for fingerprint " UINT64_FORMAT "(old: "PTR_FORMAT", new : "PTR_FORMAT")",
tty->print_cr("duplicate argument handler #%d for fingerprint " UINT64_FORMAT "(old: " PTR_FORMAT ", new : " PTR_FORMAT ")",
_handlers->length(),
fingerprint,
_handlers->at(handler_index),

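The interpreterRuntime hunks above only insert whitespace around PTR_FORMAT inside format strings. The reason is lexical: in C++11, "at "PTR_FORMAT with no space is scanned as a single user-defined string literal, so the macro never expands. A minimal illustration, with PRIxPTR standing in for the real PTR_FORMAT definition:

#include <cinttypes>
#include <cstdint>
#include <cstdio>

// PTR_FORMAT in HotSpot expands to a printf conversion for a pointer-sized
// integer; <cinttypes> PRIxPTR plays that role in this sketch.
#define TOY_PTR_FORMAT "0x%016" PRIxPTR

int main() {
  uintptr_t handler = 0xdeadbeef;
  // In C++11, "at "TOY_PTR_FORMAT (no space) would lex as one user-defined
  // string literal, so the macro would never be expanded. With the space,
  // TOY_PTR_FORMAT is a separate token, gets expanded, and the adjacent
  // string literals are then concatenated as usual.
  std::printf("argument handler at " TOY_PTR_FORMAT "\n", handler);
  return 0;
}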
@ -379,7 +379,8 @@ int LinkResolver::vtable_index_of_interface_method(KlassHandle klass,
if (!resolved_method->is_abstract() &&
(InstanceKlass::cast(klass())->default_methods() != NULL)) {
int index = InstanceKlass::find_method_index(InstanceKlass::cast(klass())->default_methods(),
name, signature, Klass::find_overpass, Klass::find_static);
name, signature, Klass::find_overpass,
Klass::find_static, Klass::find_private);
if (index >= 0 ) {
vtable_index = InstanceKlass::cast(klass())->default_vtable_indices()->at(index);
}
@ -1189,7 +1190,7 @@ void LinkResolver::runtime_resolve_virtual_method(CallInfo& result,
assert(resolved_method->method_holder()->is_linked(), "must be linked");
// do lookup based on receiver klass using the vtable index
if (resolved_method->method_holder()->is_interface()) { // miranda method
if (resolved_method->method_holder()->is_interface()) { // default or miranda method
vtable_index = vtable_index_of_interface_method(resolved_klass,
resolved_method);
assert(vtable_index >= 0 , "we should have valid vtable index at this point");
@ -1198,7 +1199,7 @@ void LinkResolver::runtime_resolve_virtual_method(CallInfo& result,
selected_method = methodHandle(THREAD, inst->method_at_vtable(vtable_index));
} else {
// at this point we are sure that resolved_method is virtual and not
// a miranda method; therefore, it must have a valid vtable index.
// a default or miranda method; therefore, it must have a valid vtable index.
assert(!resolved_method->has_itable_index(), "");
vtable_index = resolved_method->vtable_index();
// We could get a negative vtable_index for final methods,

@ -1381,12 +1381,14 @@ static int binary_search(Array<Method*>* methods, Symbol* name) {
// find_method looks up the name/signature in the local methods array
Method* InstanceKlass::find_method(Symbol* name, Symbol* signature) const {
return find_method_impl(name, signature, find_overpass, find_static);
return find_method_impl(name, signature, find_overpass, find_static, find_private);
}
Method* InstanceKlass::find_method_impl(Symbol* name, Symbol* signature,
OverpassLookupMode overpass_mode, StaticLookupMode static_mode) const {
return InstanceKlass::find_method_impl(methods(), name, signature, overpass_mode, static_mode);
OverpassLookupMode overpass_mode,
StaticLookupMode static_mode,
PrivateLookupMode private_mode) const {
return InstanceKlass::find_method_impl(methods(), name, signature, overpass_mode, static_mode, private_mode);
}
// find_instance_method looks up the name/signature in the local methods array
@ -1394,7 +1396,7 @@ Method* InstanceKlass::find_method_impl(Symbol* name, Symbol* signature,
Method* InstanceKlass::find_instance_method(
Array<Method*>* methods, Symbol* name, Symbol* signature) {
Method* meth = InstanceKlass::find_method_impl(methods, name, signature,
find_overpass, skip_static);
find_overpass, skip_static, find_private);
assert(((meth == NULL) || !meth->is_static()), "find_instance_method should have skipped statics");
return meth;
}
@ -1405,22 +1407,51 @@ Method* InstanceKlass::find_instance_method(Symbol* name, Symbol* signature) {
return InstanceKlass::find_instance_method(methods(), name, signature);
}
// Find looks up the name/signature in the local methods array
// and filters on the overpass, static and private flags
// This returns the first one found
// note that the local methods array can have up to one overpass, one static
// and one instance (private or not) with the same name/signature
Method* InstanceKlass::find_local_method(Symbol* name, Symbol* signature,
OverpassLookupMode overpass_mode,
StaticLookupMode static_mode,
PrivateLookupMode private_mode) const {
return InstanceKlass::find_method_impl(methods(), name, signature, overpass_mode, static_mode, private_mode);
}
// Find looks up the name/signature in the local methods array
// and filters on the overpass, static and private flags
// This returns the first one found
// note that the local methods array can have up to one overpass, one static
// and one instance (private or not) with the same name/signature
Method* InstanceKlass::find_local_method(Array<Method*>* methods,
Symbol* name, Symbol* signature,
OverpassLookupMode overpass_mode,
StaticLookupMode static_mode,
PrivateLookupMode private_mode) {
return InstanceKlass::find_method_impl(methods, name, signature, overpass_mode, static_mode, private_mode);
}
// find_method looks up the name/signature in the local methods array
Method* InstanceKlass::find_method(
Array<Method*>* methods, Symbol* name, Symbol* signature) {
return InstanceKlass::find_method_impl(methods, name, signature, find_overpass, find_static);
return InstanceKlass::find_method_impl(methods, name, signature, find_overpass, find_static, find_private);
}
Method* InstanceKlass::find_method_impl(
Array<Method*>* methods, Symbol* name, Symbol* signature, OverpassLookupMode overpass_mode, StaticLookupMode static_mode) {
int hit = find_method_index(methods, name, signature, overpass_mode, static_mode);
Array<Method*>* methods, Symbol* name, Symbol* signature,
OverpassLookupMode overpass_mode, StaticLookupMode static_mode,
PrivateLookupMode private_mode) {
int hit = find_method_index(methods, name, signature, overpass_mode, static_mode, private_mode);
return hit >= 0 ? methods->at(hit): NULL;
}
bool InstanceKlass::method_matches(Method* m, Symbol* signature, bool skipping_overpass, bool skipping_static) {
return (m->signature() == signature) &&
bool InstanceKlass::method_matches(Method* m, Symbol* signature, bool skipping_overpass, bool skipping_static, bool skipping_private) {
return ((m->signature() == signature) &&
(!skipping_overpass || !m->is_overpass()) &&
(!skipping_static || !m->is_static());
(!skipping_static || !m->is_static()) &&
(!skipping_private || !m->is_private()));
}
// Used directly for default_methods to find the index into the
@ -1430,17 +1461,25 @@ bool InstanceKlass::method_matches(Method* m, Symbol* signature, bool skipping_o
// the search continues to find a potential non-overpass match. This capability
// is important during method resolution to prefer a static method, for example,
// over an overpass method.
// There is the possibility in any _method's array to have the same name/signature
// for a static method, an overpass method and a local instance method
// To correctly catch a given method, the search criteria may need
// to explicitly skip the other two. For local instance methods, it
// is often necessary to skip private methods
int InstanceKlass::find_method_index(
Array<Method*>* methods, Symbol* name, Symbol* signature, OverpassLookupMode overpass_mode, StaticLookupMode static_mode) {
Array<Method*>* methods, Symbol* name, Symbol* signature,
OverpassLookupMode overpass_mode, StaticLookupMode static_mode,
PrivateLookupMode private_mode) {
bool skipping_overpass = (overpass_mode == skip_overpass);
bool skipping_static = (static_mode == skip_static);
bool skipping_private = (private_mode == skip_private);
int hit = binary_search(methods, name);
if (hit != -1) {
Method* m = methods->at(hit);
// Do linear search to find matching signature. First, quick check
// for common case, ignoring overpasses if requested.
if (method_matches(m, signature, skipping_overpass, skipping_static)) return hit;
if (method_matches(m, signature, skipping_overpass, skipping_static, skipping_private)) return hit;
// search downwards through overloaded methods
int i;
@ -1448,18 +1487,18 @@ int InstanceKlass::find_method_index(
Method* m = methods->at(i);
assert(m->is_method(), "must be method");
if (m->name() != name) break;
if (method_matches(m, signature, skipping_overpass, skipping_static)) return i;
if (method_matches(m, signature, skipping_overpass, skipping_static, skipping_private)) return i;
}
// search upwards
for (i = hit + 1; i < methods->length(); ++i) {
Method* m = methods->at(i);
assert(m->is_method(), "must be method");
if (m->name() != name) break;
if (method_matches(m, signature, skipping_overpass, skipping_static)) return i;
if (method_matches(m, signature, skipping_overpass, skipping_static, skipping_private)) return i;
}
// not found
#ifdef ASSERT
int index = (skipping_overpass || skipping_static) ? -1 : linear_search(methods, name, signature);
int index = (skipping_overpass || skipping_static || skipping_private) ? -1 : linear_search(methods, name, signature);
assert(index == -1, err_msg("binary search should have found entry %d", index));
#endif
}
@ -1489,7 +1528,7 @@ Method* InstanceKlass::uncached_lookup_method(Symbol* name, Symbol* signature, O
OverpassLookupMode overpass_local_mode = overpass_mode;
Klass* klass = const_cast<InstanceKlass*>(this);
while (klass != NULL) {
Method* method = InstanceKlass::cast(klass)->find_method_impl(name, signature, overpass_local_mode, find_static);
Method* method = InstanceKlass::cast(klass)->find_method_impl(name, signature, overpass_local_mode, find_static, find_private);
if (method != NULL) {
return method;
}

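The InstanceKlass changes above thread a PrivateLookupMode through find_method_impl()/find_method_index() so callers can skip private methods in addition to overpasses and statics. A simplified, self-contained model of method_matches() and the filtered lookup is sketched below; it uses a linear scan where the real code binary-searches a name-sorted array, and all types are stand-ins.

#include <cstddef>
#include <string>
#include <vector>

struct ToyMethod {
  std::string name, signature;
  bool is_overpass, is_static, is_private;
};

enum OverpassLookupMode { find_overpass, skip_overpass };
enum StaticLookupMode   { find_static,   skip_static   };
enum PrivateLookupMode  { find_private,  skip_private  };

// Mirrors the shape of InstanceKlass::method_matches() after the change:
// the signature must match and each skip_* flag filters out one flavor.
static bool method_matches(const ToyMethod& m, const std::string& signature,
                           bool skipping_overpass, bool skipping_static,
                           bool skipping_private) {
  return m.signature == signature &&
         (!skipping_overpass || !m.is_overpass) &&
         (!skipping_static   || !m.is_static)   &&
         (!skipping_private  || !m.is_private);
}

// Linear stand-in for find_method_index(); the filtering is the point here.
static int find_method_index(const std::vector<ToyMethod>& methods,
                             const std::string& name, const std::string& signature,
                             OverpassLookupMode om, StaticLookupMode sm,
                             PrivateLookupMode pm) {
  for (size_t i = 0; i < methods.size(); ++i) {
    if (methods[i].name == name &&
        method_matches(methods[i], signature,
                       om == skip_overpass, sm == skip_static, pm == skip_private)) {
      return (int)i;
    }
  }
  return -1;
}

int main() {
  std::vector<ToyMethod> methods = {
    {"m", "()V", /*overpass*/false, /*static*/false, /*private*/true},
    {"m", "()V", /*overpass*/false, /*static*/true,  /*private*/false},
  };
  // Skipping privates and statics finds nothing; allowing statics finds index 1.
  int a = find_method_index(methods, "m", "()V", find_overpass, skip_static, skip_private);
  int b = find_method_index(methods, "m", "()V", find_overpass, find_static, skip_private);
  return (a == -1 && b == 1) ? 0 : 1;
}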
@ -503,12 +503,28 @@ class InstanceKlass: public Klass {
Method* find_instance_method(Symbol* name, Symbol* signature);
static Method* find_instance_method(Array<Method*>* methods, Symbol* name, Symbol* signature);
// true if method matches signature and conforms to skipping_X conditions.
static bool method_matches(Method* m, Symbol* signature, bool skipping_overpass, bool skipping_static);
// find a local method (returns NULL if not found)
Method* find_local_method(Symbol* name, Symbol* signature,
OverpassLookupMode overpass_mode,
StaticLookupMode static_mode,
PrivateLookupMode private_mode) const;
// find a local method index in default_methods (returns -1 if not found)
static int find_method_index(Array<Method*>* methods, Symbol* name, Symbol* signature,
OverpassLookupMode overpass_mode, StaticLookupMode static_mode);
// find a local method from given methods array (returns NULL if not found)
static Method* find_local_method(Array<Method*>* methods,
Symbol* name, Symbol* signature,
OverpassLookupMode overpass_mode,
StaticLookupMode static_mode,
PrivateLookupMode private_mode);
// true if method matches signature and conforms to skipping_X conditions.
static bool method_matches(Method* m, Symbol* signature, bool skipping_overpass, bool skipping_static, bool skipping_private);
// find a local method index in methods or default_methods (returns -1 if not found)
static int find_method_index(Array<Method*>* methods,
Symbol* name, Symbol* signature,
OverpassLookupMode overpass_mode,
StaticLookupMode static_mode,
PrivateLookupMode private_mode);
// lookup operation (returns NULL if not found)
Method* uncached_lookup_method(Symbol* name, Symbol* signature, OverpassLookupMode overpass_mode) const;
@ -1153,9 +1169,14 @@ private:
// find a local method (returns NULL if not found)
Method* find_method_impl(Symbol* name, Symbol* signature,
OverpassLookupMode overpass_mode, StaticLookupMode static_mode) const;
static Method* find_method_impl(Array<Method*>* methods, Symbol* name, Symbol* signature,
OverpassLookupMode overpass_mode, StaticLookupMode static_mode);
OverpassLookupMode overpass_mode,
StaticLookupMode static_mode,
PrivateLookupMode private_mode) const;
static Method* find_method_impl(Array<Method*>* methods,
Symbol* name, Symbol* signature,
OverpassLookupMode overpass_mode,
StaticLookupMode static_mode,
PrivateLookupMode private_mode);
// Free CHeap allocated fields.
void release_C_heap_structures();

@ -161,6 +161,7 @@ protected:
enum DefaultsLookupMode { find_defaults, skip_defaults };
enum OverpassLookupMode { find_overpass, skip_overpass };
enum StaticLookupMode { find_static, skip_static };
enum PrivateLookupMode { find_private, skip_private };
bool is_klass() const volatile { return true; }

@ -683,7 +683,6 @@ bool klassVtable::is_miranda_entry_at(int i) {
if (mhk->is_interface()) {
assert(m->is_public(), "should be public");
assert(ik()->implements_interface(method_holder) , "this class should implement the interface");
// the search could find a miranda or a default method
if (is_miranda(m, ik()->methods(), ik()->default_methods(), ik()->super())) {
return true;
}
@ -691,25 +690,57 @@ bool klassVtable::is_miranda_entry_at(int i) {
return false;
}
// check if a method is a miranda method, given a class's methods table,
// its default_method table and its super
// Miranda methods are calculated twice:
// first: before vtable size calculation: including abstract and superinterface default
// Check if a method is a miranda method, given a class's methods array,
// its default_method table and its super class.
// "Miranda" means an abstract non-private method that would not be
// overridden for the local class.
// A "miranda" method should only include non-private interface
// instance methods, i.e. not private methods, not static methods,
// not default methods (concrete interface methods), not overpass methods.
// If a given class already has a local (including overpass) method, a
// default method, or any of its superclasses has the same which would have
// overridden an abstract method, then this is not a miranda method.
//
// Miranda methods are checked multiple times.
// Pass 1: during class load/class file parsing: before vtable size calculation:
// include superinterface abstract and default methods (non-private instance).
// We include potential default methods to give them space in the vtable.
// During the first run, the default_methods list is empty
// This is seen by default method creation
// Second: recalculated during vtable initialization: only include abstract methods.
// During the first run, the current instanceKlass has not yet been
// created, the superclasses and superinterfaces do have instanceKlasses
// but may not have vtables, the default_methods list is empty, no overpasses.
// This is seen by default method creation.
//
// Pass 2: recalculated during vtable initialization: only include abstract methods.
// The goal of pass 2 is to walk through the superinterfaces to see if any of
// the superinterface methods (which were all abstract pre-default methods)
// need to be added to the vtable.
// With the addition of default methods, we have three new challenges:
// overpasses, static interface methods and private interface methods.
// Static and private interface methods do not get added to the vtable and
// are not seen by the method resolution process, so we skip those.
// Overpass methods are already in the vtable, so vtable lookup will
// find them and we don't need to add a miranda method to the end of
// the vtable. So we look for overpass methods and if they are found we
// return false. Note that we inherit our superclasses vtable, so
// the superclass' search also needs to use find_overpass so that if
// one is found we return false.
// False means - we don't need a miranda method added to the vtable.
//
// During the second run, default_methods is set up, so concrete methods from
// superinterfaces with matching names/signatures to default_methods are already
// in the default_methods list and do not need to be appended to the vtable
// as mirandas
// This is seen by link resolution and selection.
// "miranda" means not static, not defined by this class.
// private methods in interfaces do not belong in the miranda list.
// the caller must make sure that the method belongs to an interface implemented by the class
// Miranda methods only include public interface instance methods
// Not private methods, not static methods, not default == concrete abstract
// Miranda methods also do not include overpass methods in interfaces
// as mirandas. Abstract methods may already have been handled via
// overpasses - either local or superclass overpasses, which may be
// in the vtable already.
//
// Pass 3: They are also checked by link resolution and selection,
// for invocation on a method (not interface method) reference that
// resolves to a method with an interface as its method_holder.
// Used as part of walking from the bottom of the vtable to find
// the vtable index for the miranda method.
//
// Part of the Miranda Rights in the US mean that if you do not have
// an attorney one will be appointed for you.
bool klassVtable::is_miranda(Method* m, Array<Method*>* class_methods,
Array<Method*>* default_methods, Klass* super) {
if (m->is_static() || m->is_private() || m->is_overpass()) {
@ -717,44 +748,36 @@ bool klassVtable::is_miranda(Method* m, Array<Method*>* class_methods,
}
Symbol* name = m->name();
Symbol* signature = m->signature();
Method* mo;
if ((mo = InstanceKlass::find_instance_method(class_methods, name, signature)) == NULL) {
// did not find it in the method table of the current class
if ((default_methods == NULL) ||
InstanceKlass::find_method(default_methods, name, signature) == NULL) {
if (super == NULL) {
// super doesn't exist
return true;
}
mo = InstanceKlass::cast(super)->lookup_method(name, signature);
while (mo != NULL && mo->access_flags().is_static()
&& mo->method_holder() != NULL
&& mo->method_holder()->super() != NULL)
{
mo = mo->method_holder()->super()->uncached_lookup_method(name, signature, Klass::find_overpass);
}
if (mo == NULL || mo->access_flags().is_private() ) {
// super class hierarchy does not implement it or protection is different
return true;
}
}
} else {
// if the local class has a private method, the miranda will not
// override it, so a vtable slot is needed
if (mo->access_flags().is_private()) {
// Second round, weed out any superinterface methods that turned
// into default methods, i.e. were concrete not abstract in the end
if ((default_methods == NULL) ||
InstanceKlass::find_method(default_methods, name, signature) == NULL) {
return true;
}
}
// First look in local methods to see if already covered
if (InstanceKlass::find_local_method(class_methods, name, signature,
Klass::find_overpass, Klass::skip_static, Klass::skip_private) != NULL)
{
return false;
}
return false;
// Check local default methods
if ((default_methods != NULL) &&
(InstanceKlass::find_method(default_methods, name, signature) != NULL))
{
return false;
}
InstanceKlass* cursuper;
// Iterate on all superclasses, which should have instanceKlasses
// Note that we explicitly look for overpasses at each level.
// Overpasses may or may not exist for supers for pass 1,
// they should have been created for pass 2 and later.
for (cursuper = InstanceKlass::cast(super); cursuper != NULL; cursuper = (InstanceKlass*)cursuper->super())
{
if (cursuper->find_local_method(name, signature,
Klass::find_overpass, Klass::skip_static, Klass::skip_private) != NULL) {
return false;
}
}
return true;
}
// Scans current_interface_methods for miranda methods that do not

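The rewritten is_miranda() above reduces to three covering checks: a suitable local method (overpasses count, statics and privates do not), a local default method, or a covering method somewhere up the superclass chain; only if all three fail does the interface method need a miranda slot. A compact sketch of that decision, with stand-in types and the default_methods filtering simplified:

#include <string>
#include <vector>

struct ToyMethod {
  std::string name, signature;
  bool is_static, is_private, is_overpass;
};

struct ToyKlass {
  std::vector<ToyMethod> methods;          // local methods (may contain overpasses)
  std::vector<ToyMethod> default_methods;  // filled in on the second pass
  const ToyKlass* super;
};

// "Covered": any non-static, non-private method (overpasses count) with the
// same name/signature means no miranda slot is needed for that selector.
static bool has_cover(const std::vector<ToyMethod>& ms,
                      const std::string& n, const std::string& s) {
  for (const ToyMethod& m : ms) {
    if (m.name == n && m.signature == s && !m.is_static && !m.is_private) return true;
  }
  return false;
}

// Sketch of the decision shape in klassVtable::is_miranda() after this change.
static bool is_miranda(const ToyMethod& m, const ToyKlass& klass) {
  if (m.is_static || m.is_private || m.is_overpass) return false;
  if (has_cover(klass.methods, m.name, m.signature)) return false;          // local cover
  if (has_cover(klass.default_methods, m.name, m.signature)) return false;  // default cover
  // Walk every superclass; an inherited cover (including an overpass already
  // in the inherited vtable) also means no new miranda entry is required.
  for (const ToyKlass* s = klass.super; s != nullptr; s = s->super) {
    if (has_cover(s->methods, m.name, m.signature)) return false;
  }
  return true;
}

int main() {
  ToyKlass object{{}, {}, nullptr};
  ToyKlass klass{{}, {}, &object};
  ToyMethod iface_m{"run", "()V", false, false, false};
  return is_miranda(iface_m, klass) ? 0 : 1;   // no cover anywhere -> miranda
}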
@ -393,7 +393,7 @@ uint PhaseCFG::build_cfg() {
VectorSet visited(a);
// Allocate stack with enough space to avoid frequent realloc
Node_Stack nstack(a, C->unique() >> 1);
Node_Stack nstack(a, C->live_nodes() >> 1);
nstack.push(_root, 0);
uint sum = 0; // Counter for blocks

@ -802,7 +802,7 @@ PhiNode* PhiNode::split_out_instance(const TypePtr* at, PhaseIterGVN *igvn) cons
Compile *C = igvn->C;
Arena *a = Thread::current()->resource_area();
Node_Array node_map = new Node_Array(a);
Node_Stack stack(a, C->unique() >> 4);
Node_Stack stack(a, C->live_nodes() >> 4);
PhiNode *nphi = slice_memory(at);
igvn->register_new_node_with_optimizer( nphi );
node_map.map(_idx, nphi);

@ -3315,7 +3315,7 @@ bool Compile::final_graph_reshaping() {
// Visit everybody reachable!
// Allocate stack of size C->unique()/2 to avoid frequent realloc
Node_Stack nstack(unique() >> 1);
Node_Stack nstack(live_nodes() >> 1);
final_graph_reshaping_walk(nstack, root(), frc);
// Check for unreachable (from below) code (i.e., infinite loops).

@ -507,7 +507,7 @@ void PhaseIdealLoop::Dominators() {
// 'semi' as vertex to DFS mapping. Set 'parent' to DFS parent.
int NTarjan::DFS( NTarjan *ntarjan, VectorSet &visited, PhaseIdealLoop *pil, uint *dfsorder) {
// Allocate stack of size C->unique()/8 to avoid frequent realloc
GrowableArray <Node *> dfstack(pil->C->unique() >> 3);
GrowableArray <Node *> dfstack(pil->C->live_nodes() >> 3);
Node *b = pil->C->root();
int dfsnum = 1;
dfsorder[b->_idx] = dfsnum; // Cache parent's dfsnum for a later use

@ -107,8 +107,8 @@ static bool is_dominator(Block* d, Block* n) {
//------------------------------schedule_pinned_nodes--------------------------
// Set the basic block for Nodes pinned into blocks
void PhaseCFG::schedule_pinned_nodes(VectorSet &visited) {
// Allocate node stack of size C->unique()+8 to avoid frequent realloc
GrowableArray <Node *> spstack(C->unique() + 8);
// Allocate node stack of size C->live_nodes()+8 to avoid frequent realloc
GrowableArray <Node *> spstack(C->live_nodes() + 8);
spstack.push(_root);
while (spstack.is_nonempty()) {
Node* node = spstack.pop();
@ -1310,7 +1310,7 @@ void PhaseCFG::global_code_motion() {
visited.Clear();
Node_List stack(arena);
// Pre-grow the list
stack.map((C->unique() >> 1) + 16, NULL);
stack.map((C->live_nodes() >> 1) + 16, NULL);
if (!schedule_early(visited, stack)) {
// Bailout without retry
C->record_method_not_compilable("early schedule failed");

@ -1282,7 +1282,7 @@ void PhaseIdealLoop::do_unroll( IdealLoopTree *loop, Node_List &old_new, bool ad
if (C->do_vector_loop() && (PrintOpto && VerifyLoopOptimizations || TraceLoopOpts)) {
Arena* arena = Thread::current()->resource_area();
Node_Stack stack(arena, C->unique() >> 2);
Node_Stack stack(arena, C->live_nodes() >> 2);
Node_List rpo_list;
VectorSet visited(arena);
visited.set(loop_head->_idx);

@ -2231,7 +2231,7 @@ void PhaseIdealLoop::build_and_optimize(bool do_split_ifs, bool skip_loop_opts)
// _nodes array holds the earliest legal controlling CFG node.
// Allocate stack with enough space to avoid frequent realloc
int stack_size = (C->unique() >> 1) + 16; // (unique>>1)+16 from Java2D stats
int stack_size = (C->live_nodes() >> 1) + 16; // (live_nodes>>1)+16 from Java2D stats
Node_Stack nstack( a, stack_size );
visited.Clear();
@ -2691,7 +2691,7 @@ void PhaseIdealLoop::recompute_dom_depth() {
}
}
if (_dom_stk == NULL) {
uint init_size = C->unique() / 100; // Guess that 1/100 is a reasonable initial size.
uint init_size = C->live_nodes() / 100; // Guess that 1/100 is a reasonable initial size.
if (init_size < 10) init_size = 10;
_dom_stk = new GrowableArray<uint>(init_size);
}
@ -2781,8 +2781,8 @@ IdealLoopTree *PhaseIdealLoop::sort( IdealLoopTree *loop, IdealLoopTree *innermo
// The sort is of size number-of-control-children, which generally limits
// it to size 2 (i.e., I just choose between my 2 target loops).
void PhaseIdealLoop::build_loop_tree() {
// Allocate stack of size C->unique()/2 to avoid frequent realloc
GrowableArray <Node *> bltstack(C->unique() >> 1);
// Allocate stack of size C->live_nodes()/2 to avoid frequent realloc
GrowableArray <Node *> bltstack(C->live_nodes() >> 1);
Node *n = C->root();
bltstack.push(n);
int pre_order = 1;
@ -3672,7 +3672,7 @@ void PhaseIdealLoop::dump_bad_graph(const char* msg, Node* n, Node* early, Node*
void PhaseIdealLoop::dump( ) const {
ResourceMark rm;
Arena* arena = Thread::current()->resource_area();
Node_Stack stack(arena, C->unique() >> 2);
Node_Stack stack(arena, C->live_nodes() >> 2);
Node_List rpo_list;
VectorSet visited(arena);
visited.set(C->top()->_idx);

@ -2050,7 +2050,7 @@ bool Matcher::is_bmi_pattern(Node *n, Node *m) {
// Set bits if Node is shared or otherwise a root
void Matcher::find_shared( Node *n ) {
// Allocate stack of size C->unique() * 2 to avoid frequent realloc
MStack mstack(C->unique() * 2);
MStack mstack(C->live_nodes() * 2);
// Mark nodes as address_visited if they are inputs to an address expression
VectorSet address_visited(Thread::current()->resource_area());
mstack.push(n, Visit); // Don't need to pre-visit root node

@ -1799,7 +1799,7 @@ static void collect_nodes_i(GrowableArray<Node*> *nstack, const Node* start, int
static void dump_nodes(const Node* start, int d, bool only_ctrl) {
if (NotANode(start)) return;
GrowableArray <Node *> nstack(Compile::current()->unique());
GrowableArray <Node *> nstack(Compile::current()->live_nodes());
collect_nodes_i(&nstack, start, d, (uint) ABS(d), true, only_ctrl, false);
int end = nstack.length();

@ -791,7 +791,7 @@ void PhaseGVN::dead_loop_check( Node *n ) {
//------------------------------PhaseIterGVN-----------------------------------
// Initialize hash table to fresh and clean for +VerifyOpto
PhaseIterGVN::PhaseIterGVN( PhaseIterGVN *igvn, const char *dummy ) : PhaseGVN(igvn,dummy), _worklist( ),
_stack(C->unique() >> 1),
_stack(C->live_nodes() >> 1),
_delay_transform(false) {
}
@ -808,7 +808,11 @@ PhaseIterGVN::PhaseIterGVN( PhaseIterGVN *igvn ) : PhaseGVN(igvn),
// Initialize with previous PhaseGVN info from Parser
PhaseIterGVN::PhaseIterGVN( PhaseGVN *gvn ) : PhaseGVN(gvn),
_worklist(*C->for_igvn()),
_stack(C->unique() >> 1),
// TODO: Before incremental inlining it was allocated only once and it was fine. Now that
// the constructor is used in incremental inlining, this consumes too much memory:
// _stack(C->live_nodes() >> 1),
// So, as a band-aid, we replace this by:
_stack(C->comp_arena(), 32),
_delay_transform(false)
{
uint max;
@ -1638,7 +1642,7 @@ Node *PhaseCCP::transform( Node *n ) {
_nodes.map( n->_idx, new_node ); // Flag as having been cloned
// Allocate stack of size _nodes.Size()/2 to avoid frequent realloc
GrowableArray <Node *> trstack(C->unique() >> 1);
GrowableArray <Node *> trstack(C->live_nodes() >> 1);
trstack.push(new_node); // Process children of cloned node
while ( trstack.is_nonempty() ) {

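Several compiler hunks above switch worklist pre-sizing from C->unique() (every node index ever issued, dead ones included) to C->live_nodes() (nodes still in the graph), so the initial reservation tracks what a traversal can actually push. A toy illustration of the difference; the counts and the arena model are made up for the example:

#include <cstddef>
#include <iostream>
#include <vector>

// Toy compile-time node arena: indices are never reused, so the peak index
// ("unique") keeps growing even as nodes die.
class ToyCompile {
public:
  size_t unique() const     { return _next_idx; }            // all ids ever issued
  size_t live_nodes() const { return _next_idx - _dead; }    // ids still in the graph
  void make_nodes(size_t n) { _next_idx += n; }
  void kill_nodes(size_t n) { _dead += n; }
private:
  size_t _next_idx = 0;
  size_t _dead = 0;
};

int main() {
  ToyCompile C;
  C.make_nodes(100000);
  C.kill_nodes(70000);       // heavy IGVN can leave most ids dead

  std::vector<void*> old_stack;
  old_stack.reserve(C.unique() >> 1);      // old sizing: all ids ever allocated

  std::vector<void*> new_stack;
  new_stack.reserve(C.live_nodes() >> 1);  // new sizing: what a walk can visit

  std::cout << "reserved " << old_stack.capacity()
            << " vs " << new_stack.capacity() << " slots\n";
  return 0;
}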
@ -2295,13 +2295,13 @@ jint Arguments::parse_vm_init_args(const JavaVMInitArgs *java_tool_options_args,
}
// Checks if name in command-line argument -agent{lib,path}:name[=options]
// represents a valid HPROF or JDWP agent. is_path==true denotes that we
// represents a valid JDWP agent. is_path==true denotes that we
// are dealing with -agentpath (case where name is a path), otherwise with
// -agentlib
bool valid_hprof_or_jdwp_agent(char *name, bool is_path) {
bool valid_jdwp_agent(char *name, bool is_path) {
char *_name;
const char *_hprof = "hprof", *_jdwp = "jdwp";
size_t _len_hprof, _len_jdwp, _len_prefix;
const char *_jdwp = "jdwp";
size_t _len_jdwp, _len_prefix;
if (is_path) {
if ((_name = strrchr(name, (int) *os::file_separator())) == NULL) {
@ -2316,13 +2316,9 @@ bool valid_hprof_or_jdwp_agent(char *name, bool is_path) {
}
_name += _len_prefix;
_len_hprof = strlen(_hprof);
_len_jdwp = strlen(_jdwp);
if (strncmp(_name, _hprof, _len_hprof) == 0) {
_name += _len_hprof;
}
else if (strncmp(_name, _jdwp, _len_jdwp) == 0) {
if (strncmp(_name, _jdwp, _len_jdwp) == 0) {
_name += _len_jdwp;
}
else {
@ -2336,7 +2332,7 @@ bool valid_hprof_or_jdwp_agent(char *name, bool is_path) {
return true;
}
if (strcmp(name, _hprof) == 0 || strcmp(name, _jdwp) == 0) {
if (strcmp(name, _jdwp) == 0) {
return true;
}
@ -2427,9 +2423,9 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,
options = (char*)memcpy(NEW_C_HEAP_ARRAY(char, len2, mtInternal), pos+1, len2);
}
#if !INCLUDE_JVMTI
if ((strcmp(name, "hprof") == 0) || (strcmp(name, "jdwp") == 0)) {
if (strcmp(name, "jdwp") == 0) {
jio_fprintf(defaultStream::error_stream(),
"Profiling and debugging agents are not supported in this VM\n");
"Debugging agents are not supported in this VM\n");
return JNI_ERR;
}
#endif // !INCLUDE_JVMTI
@ -2449,9 +2445,9 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,
options = os::strdup_check_oom(pos + 1, mtInternal);
}
#if !INCLUDE_JVMTI
if (valid_hprof_or_jdwp_agent(name, is_absolute_path)) {
if (valid_jdwp_agent(name, is_absolute_path)) {
jio_fprintf(defaultStream::error_stream(),
"Profiling and debugging agents are not supported in this VM\n");
"Debugging agents are not supported in this VM\n");
return JNI_ERR;
}
#endif // !INCLUDE_JVMTI
@ -3305,7 +3301,9 @@ jint Arguments::finalize_vm_init_args(SysClassPath* scp_p, bool scp_assembly_req
if (scp_assembly_required) {
// Assemble the bootclasspath elements into the final path.
Arguments::set_sysclasspath(scp_p->combined_path());
char *combined_path = scp_p->combined_path();
Arguments::set_sysclasspath(combined_path);
FREE_C_HEAP_ARRAY(char, combined_path);
}
// This must be done after all arguments have been processed.

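With hprof gone, valid_jdwp_agent() reduces to: for -agentlib, the bare name must be exactly "jdwp" (options were already split off at '='); for -agentpath, the file name must be the platform library prefix + "jdwp" + the library suffix. A standalone sketch of that check, with the separator, prefix, and suffix hardwired to unix-style values rather than the os:: queries the real code uses:

#include <cstring>
#include <iostream>

// Simplified model of valid_jdwp_agent(name, is_path) from arguments.cpp.
static bool valid_jdwp_agent(const char* name, bool is_path) {
  const char* jdwp = "jdwp";
  if (!is_path) {
    return std::strcmp(name, jdwp) == 0;          // -agentlib: exact name match
  }
  const char* p = std::strrchr(name, '/');        // last file separator (unix-style)
  if (p == nullptr) return false;
  p++;
  const char* prefix = "lib";                     // shared-library prefix, simplified
  if (std::strncmp(p, prefix, std::strlen(prefix)) != 0) return false;
  p += std::strlen(prefix);
  if (std::strncmp(p, jdwp, std::strlen(jdwp)) != 0) return false;
  p += std::strlen(jdwp);
  return std::strcmp(p, ".so") == 0;              // platform library suffix, simplified
}

int main() {
  std::cout << valid_jdwp_agent("jdwp", false) << "\n";                   // 1
  std::cout << valid_jdwp_agent("hprof", false) << "\n";                  // 0
  std::cout << valid_jdwp_agent("/opt/agents/libjdwp.so", true) << "\n";  // 1
  return 0;
}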
@ -1271,6 +1271,7 @@ bool os::set_boot_path(char fileSep, char pathSep) {
bool has_jimage = (os::stat(jimage, &st) == 0);
if (has_jimage) {
Arguments::set_sysclasspath(jimage);
FREE_C_HEAP_ARRAY(char, jimage);
return true;
}
FREE_C_HEAP_ARRAY(char, jimage);
@ -1282,6 +1283,7 @@ bool os::set_boot_path(char fileSep, char pathSep) {
sysclasspath = expand_entries_to_path(modules_dir, fileSep, pathSep);
}
}
FREE_C_HEAP_ARRAY(char, modules_dir);
// fallback to classes
if (sysclasspath == NULL)
@ -1289,6 +1291,7 @@ bool os::set_boot_path(char fileSep, char pathSep) {
if (sysclasspath == NULL) return false;
Arguments::set_sysclasspath(sysclasspath);
FREE_C_HEAP_ARRAY(char, sysclasspath);
return true;
}

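The os.cpp and arguments.cpp hunks above add FREE_C_HEAP_ARRAY calls after Arguments::set_sysclasspath(), which only makes sense if the setter keeps its own copy of the string and the caller still owns its temporary buffer. A toy model of that ownership convention (the copying setter here is an assumption stated for the sketch, not verified against the real property code):

#include <cstdlib>
#include <cstring>

// set_sysclasspath() keeps its own copy, so callers free any temporary
// buffer they built (combined_path, jimage, modules_dir, sysclasspath
// in the hunks above).
class ToyArguments {
public:
  static void set_sysclasspath(const char* value) {
    std::free(_sysclasspath);
    _sysclasspath = value ? dup_or_die(value) : nullptr;
  }
  static const char* sysclasspath() { return _sysclasspath; }
private:
  static char* dup_or_die(const char* s) {
    char* copy = static_cast<char*>(std::malloc(std::strlen(s) + 1));
    if (copy == nullptr) std::abort();
    std::strcpy(copy, s);
    return copy;
  }
  static char* _sysclasspath;
};

char* ToyArguments::_sysclasspath = nullptr;

int main() {
  char* combined_path = static_cast<char*>(std::malloc(32));
  std::strcpy(combined_path, "/jdk/lib/modules");
  ToyArguments::set_sysclasspath(combined_path);
  std::free(combined_path);   // mirrors FREE_C_HEAP_ARRAY(char, combined_path)
  return 0;
}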
@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,6 +27,11 @@
#include "runtime/thread.inline.hpp"
#include "runtime/threadLocalStorage.hpp"
// Solaris no longer has this kind of ThreadLocalStorage implementation.
// This will be removed from all platforms in the near future.
#ifndef SOLARIS
// static member initialization
int ThreadLocalStorage::_thread_index = -1;
@ -54,3 +59,5 @@ void ThreadLocalStorage::init() {
bool ThreadLocalStorage::is_initialized() {
return (thread_index() != -1);
}
#endif // SOLARIS

@ -38,10 +38,14 @@ extern "C" Thread* get_thread();
extern "C" uintptr_t _raw_thread_id();
class ThreadLocalStorage : AllStatic {
// Exported API
public:
static void set_thread(Thread* thread);
static Thread* get_thread_slow();
static void invalidate_all() { pd_invalidate_all(); }
static void init();
static bool is_initialized();
// Machine dependent stuff
#ifdef TARGET_OS_ARCH_linux_x86
@ -81,17 +85,12 @@ class ThreadLocalStorage : AllStatic {
# include "threadLS_bsd_zero.hpp"
#endif
#ifndef SOLARIS
public:
// Accessor
static inline int thread_index() { return _thread_index; }
static inline void set_thread_index(int index) { _thread_index = index; }
// Initialization
// Called explicitly from VMThread::activate_system instead of init_globals.
static void init();
static bool is_initialized();
private:
static int _thread_index;
@ -100,6 +99,9 @@ class ThreadLocalStorage : AllStatic {
// Processor dependent parts of set_thread and initialization
static void pd_set_thread(Thread* thread);
static void pd_init();
#endif // SOLARIS
// Invalidate any thread caching or optimization schemes.
static void pd_invalidate_all();

@ -0,0 +1,86 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @summary Simple jar builder
* Input: jarName className1 className2 ...
* do not specify extensions, just the names
* E.g. prot_domain ProtDomainA ProtDomainB
* Output: A jar containing compiled classes, placed in a test classes folder
*/
import jdk.test.lib.*;
import java.io.File;
import java.util.ArrayList;
import sun.tools.jar.Main;
public class BasicJarBuilder {
private static final String classDir = System.getProperty("test.classes");
public static void build(String jarName, String ...classNames)
throws Exception {
createSimpleJar(classDir, classDir + File.separator + jarName +
".jar", classNames);
}
private static void createSimpleJar(String jarclassDir, String jarName,
String[] classNames) throws Exception {
ArrayList<String> args = new ArrayList<String>();
args.add("cf");
args.add(jarName);
addClassArgs(args, jarclassDir, classNames);
createJar(args);
}
private static void addClassArgs(ArrayList<String> args, String jarclassDir,
String[] classNames) {
for (String name : classNames) {
args.add("-C");
args.add(jarclassDir);
args.add(name + ".class");
}
}
private static void createJar(ArrayList<String> args) {
Main jarTool = new Main(System.out, System.err, "jar");
if (!jarTool.run(args.toArray(new String[1]))) {
throw new RuntimeException("jar operation failed");
}
}
// helpers
public static String getTestJar(String jar) {
File dir = new File(System.getProperty("test.classes", "."));
File jarFile = new File(dir, jar);
if (!jarFile.exists()) {
throw new RuntimeException("Cannot find " + jarFile.getPath());
}
if (!jarFile.isFile()) {
throw new RuntimeException("Not a regular file: " + jarFile.getPath());
}
return jarFile.getPath();
}
}
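A minimal usage sketch of the helper above, with a hypothetical caller and jar/class names (illustrative only, not part of this change):

// Hypothetical test code; assumes ExampleHelper.class was already compiled into test.classes.
public class ExampleUsage {
    public static void main(String[] args) throws Exception {
        BasicJarBuilder.build("example", "ExampleHelper");          // writes example.jar into test.classes
        String jarPath = BasicJarBuilder.getTestJar("example.jar"); // resolves and validates the jar path
        System.out.println("Built test jar at: " + jarPath);
    }
}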

@ -0,0 +1,80 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @summary Check to make sure that shared strings in the bootstrap CDS archive
* are actually shared
* Feature support: G1GC only, compressed oops/kptrs, 64-bit os, not on windows
* @requires (sun.arch.data.model != "32") & (os.family != "windows")
* @requires (vm.opt.UseCompressedOops == null) | (vm.opt.UseCompressedOops == true)
* @requires (vm.gc=="G1" | vm.gc=="null")
* @library /testlibrary /../../test/lib
* @modules java.base/sun.misc
* java.management
* @ignore - 8133180
* @build SharedStringsWb SharedStrings BasicJarBuilder
* @run main ClassFileInstaller sun.hotspot.WhiteBox
* @run main SharedStrings
*/
import jdk.test.lib.*;
public class SharedStrings {
public static void main(String[] args) throws Exception {
BasicJarBuilder.build("whitebox", "sun/hotspot/WhiteBox");
ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
"-XX:+UnlockDiagnosticVMOptions",
"-XX:SharedArchiveFile=./SharedStrings.jsa",
"-XX:+PrintSharedSpaces",
// Needed for bootclasspath match, for CDS to work with WhiteBox API
"-Xbootclasspath/a:" + BasicJarBuilder.getTestJar("whitebox.jar"),
"-Xshare:dump");
new OutputAnalyzer(pb.start())
.shouldContain("Loading classes to share")
.shouldContain("Shared string table stats")
.shouldHaveExitValue(0);
pb = ProcessTools.createJavaProcessBuilder(
"-XX:+UnlockDiagnosticVMOptions",
"-XX:SharedArchiveFile=./SharedStrings.jsa",
// these are required modes for shared strings
"-XX:+UseCompressedOops", "-XX:+UseG1GC",
// needed for access to white box test API
"-Xbootclasspath/a:" + BasicJarBuilder.getTestJar("whitebox.jar"),
"-XX:+UnlockDiagnosticVMOptions", "-XX:+WhiteBoxAPI",
"-Xshare:on", "-showversion", "SharedStringsWb");
OutputAnalyzer output = new OutputAnalyzer(pb.start());
try {
output.shouldContain("sharing");
output.shouldHaveExitValue(0);
} catch (RuntimeException e) {
output.shouldContain("Unable to use shared archive");
output.shouldHaveExitValue(1);
}
}
}

@ -0,0 +1,49 @@
/*
* Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
import sun.hotspot.WhiteBox;
// This class is used by the test SharedStrings.java
// It should be launched in CDS mode
public class SharedStringsWb {
public static void main(String[] args) throws Exception {
WhiteBox wb = WhiteBox.getWhiteBox();
if (wb.areSharedStringsIgnored()) {
System.out.println("Shared strings are ignored, assuming PASS");
return;
}
// The string "java" is known to be interened and added to CDS archive
String s = "java";
String internedS = s.intern();
if (wb.isShared(internedS)) {
System.out.println("Found shared string, result: PASS");
} else {
throw new RuntimeException("String is not shared, result: FAIL");
}
}
}

@ -0,0 +1,272 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
/*
* @test
* @bug 8087342
* @summary Test that the link resolver searches static, instance and overpass duplicates
* @run main/othervm -Xverify:none TestStaticandInstance
*/
import java.util.*;
import jdk.internal.org.objectweb.asm.*;
import static jdk.internal.org.objectweb.asm.Opcodes.*;
public class TestStaticandInstance {
static final String stringC = "C";
static final String stringD = "D";
static final String stringI = "I";
public static void main(String args[]) throws Throwable {
ClassLoader cl = new ClassLoader() {
public Class<?> loadClass(String name) throws ClassNotFoundException {
Class retClass;
if ((retClass = findLoadedClass(name)) != null) {
return retClass;
}
if (stringC.equals(name)) {
byte[] classFile=dumpC();
return defineClass(stringC, classFile, 0, classFile.length);
}
if (stringD.equals(name)) {
byte[] classFile=dumpD();
return defineClass(stringD, classFile, 0, classFile.length);
}
if (stringI.equals(name)) {
byte[] classFile=dumpI();
return defineClass(stringI, classFile, 0, classFile.length);
}
return super.loadClass(name);
}
};
Class classC = cl.loadClass(stringC);
Class classI = cl.loadClass(stringI);
try {
int staticret = (Integer)cl.loadClass(stringD).getDeclaredMethod("CallStatic").invoke(null);
if (staticret != 1) {
throw new RuntimeException("invokestatic failed to call correct method");
}
System.out.println("staticret: " + staticret); // should be 1
int invokeinterfaceret = (Integer)cl.loadClass(stringD).getDeclaredMethod("CallInterface").invoke(null);
if (invokeinterfaceret != 0) {
throw new RuntimeException(String.format("Expected java.lang.AbstractMethodError, got %d", invokeinterfaceret));
}
System.out.println("invokeinterfaceret: AbstractMethodError");
int invokevirtualret = (Integer)cl.loadClass(stringD).getDeclaredMethod("CallVirtual").invoke(null);
if (invokevirtualret != 0) {
throw new RuntimeException(String.format("Expected java.lang.IncompatibleClassChangeError, got %d", invokevirtualret));
}
System.out.println("invokevirtualret: IncompatibleClassChangeError");
} catch (java.lang.Throwable e) {
throw new RuntimeException("Unexpected exception: " + e.getMessage());
}
}
/*
interface I {
public int m(); // abstract
default int q() { return 3; } // trigger defmeth processing: C gets AME overpass
}
// C gets static, private and AME overpass m()I with -Xverify:none
class C implements I {
static int m() { return 1;} // javac with "n()" and patch to "m()"
private int m() { return 2;} // javac with public and patch to private
}
public class D {
public static int CallStatic() {
int staticret = C.m(); // javac with "C.n" and patch to "C.m"
return staticret;
}
public static int CallInterface() throws AbstractMethodError{
try {
I myI = new C();
return myI.m();
} catch (java.lang.AbstractMethodError e) {
return 0; // for success
}
}
public static int CallVirtual() {
try {
C myC = new C();
return myC.m();
} catch (java.lang.IncompatibleClassChangeError e) {
return 0; // for success
}
}
}
*/
public static byte[] dumpC() {
ClassWriter cw = new ClassWriter(0);
FieldVisitor fv;
MethodVisitor mv;
AnnotationVisitor av0;
cw.visit(52, ACC_SUPER, "C", null, "java/lang/Object", new String[] { "I" });
{
mv = cw.visitMethod(0, "<init>", "()V", null, null);
mv.visitCode();
mv.visitVarInsn(ALOAD, 0);
mv.visitMethodInsn(INVOKESPECIAL, "java/lang/Object", "<init>", "()V", false);
mv.visitInsn(RETURN);
mv.visitMaxs(1, 1);
mv.visitEnd();
}
{
mv = cw.visitMethod(ACC_STATIC, "m", "()I", null, null);
mv.visitCode();
mv.visitInsn(ICONST_1);
mv.visitInsn(IRETURN);
mv.visitMaxs(1, 0);
mv.visitEnd();
}
{
mv = cw.visitMethod(ACC_PRIVATE, "m", "()I", null, null);
mv.visitCode();
mv.visitInsn(ICONST_2);
mv.visitInsn(IRETURN);
mv.visitMaxs(1, 1);
mv.visitEnd();
}
cw.visitEnd();
return cw.toByteArray();
}
public static byte[] dumpD () {
ClassWriter cw = new ClassWriter(0);
FieldVisitor fv;
MethodVisitor mv;
AnnotationVisitor av0;
cw.visit(52, ACC_PUBLIC + ACC_SUPER, "D", null, "java/lang/Object", null);
{
mv = cw.visitMethod(ACC_PUBLIC, "<init>", "()V", null, null);
mv.visitCode();
mv.visitVarInsn(ALOAD, 0);
mv.visitMethodInsn(INVOKESPECIAL, "java/lang/Object", "<init>", "()V", false);
mv.visitInsn(RETURN);
mv.visitMaxs(1, 1);
mv.visitEnd();
}
{
mv = cw.visitMethod(ACC_PUBLIC + ACC_STATIC, "CallStatic", "()I", null, null);
mv.visitCode();
mv.visitMethodInsn(INVOKESTATIC, "C", "m", "()I", false);
mv.visitVarInsn(ISTORE, 0);
mv.visitVarInsn(ILOAD, 0);
mv.visitInsn(IRETURN);
mv.visitMaxs(1, 1);
mv.visitEnd();
}
{
mv = cw.visitMethod(ACC_PUBLIC + ACC_STATIC, "CallInterface", "()I", null, new String[] { "java/lang/AbstractMethodError" });
mv.visitCode();
Label l0 = new Label();
Label l1 = new Label();
Label l2 = new Label();
mv.visitTryCatchBlock(l0, l1, l2, "java/lang/AbstractMethodError");
mv.visitLabel(l0);
mv.visitTypeInsn(NEW, "C");
mv.visitInsn(DUP);
mv.visitMethodInsn(INVOKESPECIAL, "C", "<init>", "()V", false);
mv.visitVarInsn(ASTORE, 0);
mv.visitVarInsn(ALOAD, 0);
mv.visitMethodInsn(INVOKEINTERFACE, "I", "m", "()I", true);
mv.visitLabel(l1);
mv.visitInsn(IRETURN);
mv.visitLabel(l2);
mv.visitFrame(Opcodes.F_SAME1, 0, null, 1, new Object[] {"java/lang/AbstractMethodError"});
mv.visitVarInsn(ASTORE, 0);
mv.visitInsn(ICONST_0);
mv.visitInsn(IRETURN);
mv.visitMaxs(2, 1);
mv.visitEnd();
}
{
mv = cw.visitMethod(ACC_PUBLIC + ACC_STATIC, "CallVirtual", "()I", null, null);
mv.visitCode();
Label l0 = new Label();
Label l1 = new Label();
Label l2 = new Label();
mv.visitTryCatchBlock(l0, l1, l2, "java/lang/IncompatibleClassChangeError");
mv.visitLabel(l0);
mv.visitTypeInsn(NEW, "C");
mv.visitInsn(DUP);
mv.visitMethodInsn(INVOKESPECIAL, "C", "<init>", "()V", false);
mv.visitVarInsn(ASTORE, 0);
mv.visitVarInsn(ALOAD, 0);
mv.visitMethodInsn(INVOKEVIRTUAL, "C", "m", "()I", false);
mv.visitLabel(l1);
mv.visitInsn(IRETURN);
mv.visitLabel(l2);
mv.visitFrame(Opcodes.F_SAME1, 0, null, 1, new Object[] {"java/lang/IncompatibleClassChangeError"});
mv.visitVarInsn(ASTORE, 0);
mv.visitInsn(ICONST_0);
mv.visitInsn(IRETURN);
mv.visitMaxs(2, 1);
mv.visitEnd();
}
cw.visitEnd();
return cw.toByteArray();
}
public static byte[] dumpI() {
ClassWriter cw = new ClassWriter(0);
FieldVisitor fv;
MethodVisitor mv;
AnnotationVisitor av0;
cw.visit(52, ACC_ABSTRACT + ACC_INTERFACE, "I", null, "java/lang/Object", null);
{
mv = cw.visitMethod(ACC_PUBLIC + ACC_ABSTRACT, "m", "()I", null, null);
mv.visitEnd();
}
{
mv = cw.visitMethod(ACC_PUBLIC, "q", "()I", null, null);
mv.visitCode();
mv.visitInsn(ICONST_3);
mv.visitInsn(IRETURN);
mv.visitMaxs(1, 1);
mv.visitEnd();
}
cw.visitEnd();
return cw.toByteArray();
}
}

@ -320,3 +320,4 @@ f376824d4940f45719d91838f3f6249f873440db jdk9-b72
4dd09cb5f7c2a2a23a9958ea7a602dd74d5709b2 jdk9-b75
4526c0da8fb362eebd7e88f4d44e86858cf9b80b jdk9-b76
7fd081100f48828431e7c1bff65c906ee759069b jdk9-b77
0940ce86c614458f5bdd72278b190abbf36b7b45 jdk9-b78

@ -1,37 +0,0 @@
#
# Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation. Oracle designates this
# particular file as subject to the "Classpath" exception as provided
# by Oracle in the LICENSE file that accompanied this code.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
include CopyCommon.gmk
################################################################################
HPROF_SRC := $(JDK_TOPDIR)/src/jdk.hprof.agent/share/native/libhprof/jvm.hprof.txt
$(LIB_DST_DIR)/jvm.hprof.txt: $(HPROF_SRC)
$(call install-file)
TARGETS := $(LIB_DST_DIR)/jvm.hprof.txt
################################################################################

@ -21,4 +21,4 @@
# or visit www.oracle.com if you need additional information or have any
# questions.
#
tzdata2015e
tzdata2015f

@ -561,7 +561,7 @@ Zone Africa/Tripoli 0:52:44 - LMT 1920
# From Alex Krivenyshev (2008-07-11):
# Seems that English language article "The revival of daylight saving
# time: Energy conservation?"-# No. 16578 (07/11/2008) was originally
# time: Energy conservation?"- No. 16578 (07/11/2008) was originally
# published on Monday, June 30, 2008...
#
# I guess that article in French "Le gouvernement avance l'introduction
@ -693,7 +693,7 @@ Zone Indian/Mauritius 3:50:00 - LMT 1907 # Port Louis
# Here is a link to official document from Royaume du Maroc Premier Ministre,
# Ministère de la Modernisation des Secteurs Publics
#
# Under Article 1 of Royal Decree No. 455-67 of Act 23 safar 1387 (2 june 1967)
# Under Article 1 of Royal Decree No. 455-67 of Act 23 safar 1387 (2 June 1967)
# concerning the amendment of the legal time, the Ministry of Modernization of
# Public Sectors announced that the official time in the Kingdom will be
# advanced 60 minutes from Sunday 31 May 2009 at midnight.

@ -29,7 +29,7 @@
# tz@iana.org for general use in the future). For more, please see
# the file CONTRIBUTING in the tz distribution.
# From Paul Eggert (2014-10-31):
# From Paul Eggert (2015-08-08):
#
# Unless otherwise specified, the source for data through 1990 is:
# Thomas G. Shanks and Rique Pottenger, The International Atlas (6th edition),
@ -66,7 +66,7 @@
# 2:00 EET EEST Eastern European Time
# 2:00 IST IDT Israel
# 3:00 AST ADT Arabia*
# 3:30 IRST IRDT Iran
# 3:30 IRST IRDT Iran*
# 4:00 GST Gulf*
# 5:30 IST India
# 7:00 ICT Indochina, most times and locations*
@ -75,10 +75,11 @@
# 8:00 CST China
# 8:00 IDT Indochina, 1943-45, 1947-55, 1960-75 (some locations)*
# 8:00 JWST Western Standard Time (Japan, 1896/1937)*
# 8:30 KST KDT Korea when at +0830*
# 9:00 JCST Central Standard Time (Japan, 1896/1937)
# 9:00 WIT east Indonesia (Waktu Indonesia Timur)
# 9:00 JST JDT Japan
# 9:00 KST KDT Korea
# 9:00 KST KDT Korea when at +09
# 9:30 ACST Australian Central Standard Time
#
# See the 'europe' file for Russia and Turkey in Asia.
@ -1050,7 +1051,7 @@ Zone Asia/Jayapura 9:22:48 - LMT 1932 Nov
#
# From Roozbeh Pournader (2007-11-05):
# This is quoted from Official Gazette of the Islamic Republic of
# Iran, Volume 63, Number 18242, dated Tuesday 1386/6/24
# Iran, Volume 63, No. 18242, dated Tuesday 1386/6/24
# [2007-10-16]. I am doing the best translation I can:...
# The official time of the country will be moved forward for one hour
# on the 24 hours of the first day of the month of Farvardin and will
@ -1580,7 +1581,7 @@ Zone Asia/Amman 2:23:44 - LMT 1931
# - Qyzylorda switched from +5:00 to +6:00 on 1992-01-19 02:00.
# - Oral switched from +5:00 to +4:00 in spring 1989.
# From Kazakhstan Embassy's News Bulletin #11
# From Kazakhstan Embassy's News Bulletin No. 11
# <http://www.kazsociety.org.uk/news/2005/03/30.htm> (2005-03-21):
# The Government of Kazakhstan passed a resolution March 15 abolishing
# daylight saving time citing lack of economic benefits and health
@ -1734,6 +1735,17 @@ Rule ROK 1987 1988 - Oct Sun>=8 3:00 0 S
#
# For Pyongyang we have no information; guess no changes since World War II.
# From Steffen Thorsen (2015-08-07):
# According to many news sources, North Korea is going to change to
# the 8:30 time zone on August 15, one example:
# http://www.bbc.com/news/world-asia-33815049
#
# From Paul Eggert (2015-08-07):
# No transition time is specified; assume 00:00.
# There is no common English-language abbreviation for this time zone.
# Use %z rather than invent one. We can't assume %z works everywhere yet,
# so for now substitute its output manually.
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Seoul 8:27:52 - LMT 1908 Apr 1
8:30 - KST 1912 Jan 1
@ -1746,7 +1758,8 @@ Zone Asia/Pyongyang 8:23:00 - LMT 1908 Apr 1
8:30 - KST 1912 Jan 1
9:00 - JCST 1937 Oct 1
9:00 - JST 1945 Aug 24
9:00 - KST
9:00 - KST 2015 Aug 15
8:30 - KST
###############################################################################
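The Pyongyang change above can be sanity-checked from java.time once a JDK picks up tzdata2015f; this is an illustrative check, not part of the data change:

import java.time.Instant;
import java.time.ZoneId;

public class PyongyangOffsetCheck {
    public static void main(String[] args) {
        ZoneId zone = ZoneId.of("Asia/Pyongyang");
        // Prints +09:00 before the 2015-08-15 transition...
        System.out.println(zone.getRules().getOffset(Instant.parse("2015-08-01T00:00:00Z")));
        // ...and +08:30 afterwards (with tzdata2015f or later installed).
        System.out.println(zone.getRules().getOffset(Instant.parse("2015-09-01T00:00:00Z")));
    }
}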

@ -216,11 +216,14 @@
# republished in Finest Hour (Spring 2002) 1(114):26
# http://www.winstonchurchill.org/images/finesthour/Vol.01%20No.114.pdf
# From Paul Eggert (1996-09-03):
# From Paul Eggert (2015-08-08):
# The OED Supplement says that the English originally said "Daylight Saving"
# when they were debating the adoption of DST in 1908; but by 1916 this
# term appears only in quotes taken from DST's opponents, whereas the
# proponents (who eventually won the argument) are quoted as using "Summer".
# The term "Summer Time" was introduced by Herbert Samuel, Home Secretary; see:
# Viscount Samuel. Leisure in a Democracy. Cambridge University Press
# ISBN 978-1-107-49471-8 (1949, reissued 2015), p 8.
# From Arthur David Olson (1989-01-19):
# A source at the British Information Office in New York avers that it's
@ -366,7 +369,7 @@
# From an anonymous contributor (1996-06-02):
# The law governing time in Ireland is under Statutory Instrument SI 395/94,
# which gives force to European Union 7th Council Directive # 94/21/EC.
# which gives force to European Union 7th Council Directive No. 94/21/EC.
# Under this directive, the Minister for Justice in Ireland makes appropriate
# regulations. I spoke this morning with the Secretary of the Department of
# Justice (tel +353 1 678 9711) who confirmed to me that the correct name is
@ -615,11 +618,11 @@ Rule Russia 1921 only - Feb 14 23:00 1:00 MSD
Rule Russia 1921 only - Mar 20 23:00 2:00 MSM # Midsummer
Rule Russia 1921 only - Sep 1 0:00 1:00 MSD
Rule Russia 1921 only - Oct 1 0:00 0 -
# Act No.925 of the Council of Ministers of the USSR (1980-10-24):
# Act No. 925 of the Council of Ministers of the USSR (1980-10-24):
Rule Russia 1981 1984 - Apr 1 0:00 1:00 S
Rule Russia 1981 1983 - Oct 1 0:00 0 -
# Act No.967 of the Council of Ministers of the USSR (1984-09-13), repeated in
# Act No.227 of the Council of Ministers of the USSR (1989-03-14):
# Act No. 967 of the Council of Ministers of the USSR (1984-09-13), repeated in
# Act No. 227 of the Council of Ministers of the USSR (1989-03-14):
Rule Russia 1984 1991 - Sep lastSun 2:00s 0 -
Rule Russia 1985 1991 - Mar lastSun 2:00s 1:00 S
#
@ -851,7 +854,7 @@ Zone Europe/Brussels 0:17:30 - LMT 1880
# Bulgaria
#
# From Plamen Simenov via Steffen Thorsen (1999-09-09):
# A document of Government of Bulgaria (No.94/1997) says:
# A document of Government of Bulgaria (No. 94/1997) says:
# EET -> EETDST is in 03:00 Local time in last Sunday of March ...
# EETDST -> EET is in 04:00 Local time in last Sunday of October
#
@ -868,7 +871,7 @@ Zone Europe/Sofia 1:33:16 - LMT 1880
1:00 C-Eur CE%sT 1945
1:00 - CET 1945 Apr 2 3:00
2:00 - EET 1979 Mar 31 23:00
2:00 Bulg EE%sT 1982 Sep 26 2:00
2:00 Bulg EE%sT 1982 Sep 26 3:00
2:00 C-Eur EE%sT 1991
2:00 E-Eur EE%sT 1997
2:00 EU EE%sT
@ -1085,8 +1088,8 @@ Zone America/Thule -4:35:08 - LMT 1916 Jul 28 # Pituffik air base
# after that.
# From Mart Oruaas (2000-01-29):
# Regulation no. 301 (1999-10-12) obsoletes previous regulation
# no. 206 (1998-09-22) and thus sticks Estonia to +02:00 GMT for all
# Regulation No. 301 (1999-10-12) obsoletes previous regulation
# No. 206 (1998-09-22) and thus sticks Estonia to +02:00 GMT for all
# the year round. The regulation is effective 1999-11-01.
# From Toomas Soome (2002-02-21):
@ -1107,7 +1110,7 @@ Zone Europe/Tallinn 1:39:00 - LMT 1880
3:00 Russia MSK/MSD 1989 Mar 26 2:00s
2:00 1:00 EEST 1989 Sep 24 2:00s
2:00 C-Eur EE%sT 1998 Sep 22
2:00 EU EE%sT 1999 Nov 1
2:00 EU EE%sT 1999 Oct 31 4:00
2:00 - EET 2002 Feb 21
2:00 EU EE%sT
@ -1550,21 +1553,21 @@ Link Europe/Rome Europe/San_Marino
# correct data in juridical acts and I found some juridical documents about
# changes in the counting of time in Latvia from 1981....
#
# Act No.35 of the Council of Ministers of Latvian SSR of 1981-01-22 ...
# according to the Act No.925 of the Council of Ministers of USSR of 1980-10-24
# Act No. 35 of the Council of Ministers of Latvian SSR of 1981-01-22 ...
# according to the Act No. 925 of the Council of Ministers of USSR of 1980-10-24
# ...: all year round the time of 2nd time zone + 1 hour, in addition turning
# the hands of the clock 1 hour forward on 1 April at 00:00 (GMT 31 March 21:00)
# and 1 hour backward on the 1 October at 00:00 (GMT 30 September 20:00).
#
# Act No.592 of the Council of Ministers of Latvian SSR of 1984-09-24 ...
# according to the Act No.967 of the Council of Ministers of USSR of 1984-09-13
# Act No. 592 of the Council of Ministers of Latvian SSR of 1984-09-24 ...
# according to the Act No. 967 of the Council of Ministers of USSR of 1984-09-13
# ...: all year round the time of 2nd time zone + 1 hour, in addition turning
# the hands of the clock 1 hour forward on the last Sunday of March at 02:00
# (GMT 23:00 on the previous day) and 1 hour backward on the last Sunday of
# September at 03:00 (GMT 23:00 on the previous day).
#
# Act No.81 of the Council of Ministers of Latvian SSR of 1989-03-22 ...
# according to the Act No.227 of the Council of Ministers of USSR of 1989-03-14
# Act No. 81 of the Council of Ministers of Latvian SSR of 1989-03-22 ...
# according to the Act No. 227 of the Council of Ministers of USSR of 1989-03-14
# ...: since the last Sunday of March 1989 in Lithuanian SSR, Latvian SSR,
# Estonian SSR and Kaliningrad region of Russian Federation all year round the
# time of 2nd time zone (Moscow time minus one hour). On the territory of Latvia
@ -1581,7 +1584,7 @@ Link Europe/Rome Europe/San_Marino
# From Andrei Ivanov (2000-03-06):
# This year Latvia will not switch to Daylight Savings Time (as specified in
# The Regulations of the Cabinet of Ministers of the Rep. of Latvia of
# 29-Feb-2000 (#79) <http://www.lv-laiks.lv/wwwraksti/2000/071072/vd4.htm>,
# 29-Feb-2000 (No. 79) <http://www.lv-laiks.lv/wwwraksti/2000/071072/vd4.htm>,
# in Latvian for subscribers only).
# From RFE/RL Newsline
@ -1786,6 +1789,18 @@ Zone Europe/Malta 0:58:04 - LMT 1893 Nov 2 0:00s # Valletta
# News from Moldova (in russian):
# http://ru.publika.md/link_317061.html
# From Roman Tudos (2015-07-02):
# http://lex.justice.md/index.php?action=view&view=doc&lang=1&id=355077
# From Paul Eggert (2015-07-01):
# The abovementioned official link to IGO1445-868/2014 states that
# 2014-10-26's fallback transition occurred at 03:00 local time. Also,
# http://www.trm.md/en/social/la-30-martie-vom-trece-la-ora-de-vara
# says the 2014-03-30 spring-forward transition was at 02:00 local time.
# Guess that since 1997 Moldova has switched one hour before the EU.
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Moldova 1997 max - Mar lastSun 2:00 1:00 S
Rule Moldova 1997 max - Oct lastSun 3:00 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Europe/Chisinau 1:55:20 - LMT 1880
@ -1800,7 +1815,7 @@ Zone Europe/Chisinau 1:55:20 - LMT 1880
2:00 Russia EE%sT 1992
2:00 E-Eur EE%sT 1997
# See Romania commentary for the guessed 1997 transition to EU rules.
2:00 EU EE%sT
2:00 Moldova EE%sT
# Monaco
# Shanks & Pottenger give 0:09:20 for Paris Mean Time; go with Howse's
@ -2146,7 +2161,7 @@ Zone Europe/Bucharest 1:44:24 - LMT 1891 Oct
# Russia
# From Alexander Krivenyshev (2011-09-15):
# Based on last Russian Government Decree # 725 on August 31, 2011
# Based on last Russian Government Decree No. 725 on August 31, 2011
# (Government document
# http://www.government.ru/gov/results/16355/print/
# in Russian)
@ -2156,7 +2171,7 @@ Zone Europe/Bucharest 1:44:24 - LMT 1891 Oct
# http://www.worldtimezone.com/dst_news/dst_news_russia36.htm
# From Sanjeev Gupta (2011-09-27):
# Scans of [Decree #23 of January 8, 1992] are available at:
# Scans of [Decree No. 23 of January 8, 1992] are available at:
# http://government.consultant.ru/page.aspx?1223966
# They are in Cyrillic letters (presumably Russian).
@ -2167,19 +2182,19 @@ Zone Europe/Bucharest 1:44:24 - LMT 1891 Oct
# One source is
# http://government.ru/gov/results/16355/
# which, according to translate.google.com, begins "Decree of August 31,
# 2011 No 725" and contains no other dates or "effective date" information.
# 2011 No. 725" and contains no other dates or "effective date" information.
#
# Another source is
# http://www.rg.ru/2011/09/06/chas-zona-dok.html
# which, according to translate.google.com, begins "Resolution of the
# Government of the Russian Federation on August 31, 2011 N 725" and also
# contains "Date first official publication: September 6, 2011 Posted on:
# in the 'RG' - Federal Issue number 5573 September 6, 2011" but which
# in the 'RG' - Federal Issue No. 5573 September 6, 2011" but which
# does not contain any "effective date" information.
#
# Another source is
# http://en.wikipedia.org/wiki/Oymyakonsky_District#cite_note-RuTime-7
# which, in note 8, contains "Resolution #725 of August 31, 2011...
# which, in note 8, contains "Resolution No. 725 of August 31, 2011...
# Effective as of after 7 days following the day of the official publication"
# but which does not contain any reference to September 6, 2011.
#
@ -2387,7 +2402,7 @@ Zone Europe/Simferopol 2:16:24 - LMT 1880
# changed in May.
2:00 E-Eur EE%sT 1994 May
# From IATA SSIM (1994/1997), which also says that Kerch is still like Kiev.
3:00 E-Eur MSK/MSD 1996 Mar 31 3:00s
3:00 E-Eur MSK/MSD 1996 Mar 31 0:00s
3:00 1:00 MSD 1996 Oct 27 3:00s
# IATA SSIM (1997-09) says Crimea switched to EET/EEST.
# Assume it happened in March by not changing the clocks.
@ -2522,7 +2537,7 @@ Zone Asia/Novosibirsk 5:31:40 - LMT 1919 Dec 14 6:00
# from current Russia Zone 6 - Krasnoyarsk Time Zone (KRA) UTC +0700
# to Russia Zone 5 - Novosibirsk Time Zone (NOV) UTC +0600
#
# This is according to Government of Russia decree # 740, on September
# This is according to Government of Russia decree No. 740, on September
# 14, 2009 "Application in the territory of the Kemerovo region the Fifth
# time zone." ("Russia Zone 5" or old "USSR Zone 5" is GMT +0600)
#
@ -2945,7 +2960,7 @@ Zone Africa/Ceuta -0:21:16 - LMT 1901
Zone Atlantic/Canary -1:01:36 - LMT 1922 Mar # Las Palmas de Gran C.
-1:00 - CANT 1946 Sep 30 1:00 # Canaries T
0:00 - WET 1980 Apr 6 0:00s
0:00 1:00 WEST 1980 Sep 28 0:00s
0:00 1:00 WEST 1980 Sep 28 1:00u
0:00 EU WE%sT
# IATA SSIM (1996-09) says the Canaries switch at 2:00u, not 1:00u.
# Ignore this for now, as the Canaries are part of the EU.
@ -3235,7 +3250,7 @@ Link Europe/Istanbul Asia/Istanbul # Istanbul is in both continents.
# From Igor Karpov, who works for the Ukrainian Ministry of Justice,
# via Garrett Wollman (2003-01-27):
# BTW, I've found the official document on this matter. It's government
# regulations number 509, May 13, 1996. In my poor translation it says:
# regulations No. 509, May 13, 1996. In my poor translation it says:
# "Time in Ukraine is set to second timezone (Kiev time). Each last Sunday
# of March at 3am the time is changing to 4am and each last Sunday of
# October the time at 4am is changing to 3am"
@ -3244,7 +3259,7 @@ Link Europe/Istanbul Asia/Istanbul # Istanbul is in both continents.
# On September 20, 2011 the deputies of the Verkhovna Rada agreed to
# abolish the transfer clock to winter time.
#
# Bill number 8330 of MP from the Party of Regions Oleg Nadoshi got
# Bill No. 8330 of MP from the Party of Regions Oleg Nadoshi got
# approval from 266 deputies.
#
# Ukraine abolishes transfer back to the winter time (in Russian)

@ -79,5 +79,5 @@ Leap 2008 Dec 31 23:59:60 + S
Leap 2012 Jun 30 23:59:60 + S
Leap 2015 Jun 30 23:59:60 + S
# Updated through IERS Bulletin C49
# File expires on: 28 December 2015
# Updated through IERS Bulletin C50
# File expires on: 28 June 2016

@ -1258,10 +1258,19 @@ Zone America/Goose_Bay -4:01:40 - LMT 1884 # Happy Valley-Goose Bay
# west Labrador, Nova Scotia, Prince Edward I
# From Paul Eggert (2006-03-22):
# From Brian Inglis (2015-07-20):
# From the historical weather station records available at:
# https://weatherspark.com/history/28351/1971/Sydney-Nova-Scotia-Canada
# Sydney shares the same time history as Glace Bay, so was
# likely to be the same across the island....
# Sydney, as the capital and most populous location, or Cape Breton, would
# have been better names for the zone had we known this in 1996.
# From Paul Eggert (2015-07-20):
# Shanks & Pottenger write that since 1970 most of this region has been like
# Halifax. Many locales did not observe peacetime DST until 1972;
# Glace Bay, NS is the largest that we know of.
# the Cape Breton area, represented by Glace Bay, is the largest we know of
# (Glace Bay was perhaps not the best name choice but no point changing now).
# Shanks & Pottenger also write that Liverpool, NS was the only town
# in Canada to observe DST in 1971 but not 1970; for now we'll assume
# this is a typo.
@ -1819,13 +1828,13 @@ Zone America/Edmonton -7:33:52 - LMT 1906 Sep
# Exact date in October unknown; Sunday October 1 is a reasonable guess.
# 3. June 1918: switch to Pacific Daylight Time (GMT-7)
# Exact date in June unknown; Sunday June 2 is a reasonable guess.
# note#1:
# note 1:
# On Oct 27/1918 when daylight saving ended in the rest of Canada,
# Creston did not change its clocks.
# note#2:
# note 2:
# During WWII when the Federal Government legislated a mandatory clock change,
# Creston did not oblige.
# note#3:
# note 3:
# There is no guarantee that Creston will remain on Mountain Standard Time
# (UTC-7) forever.
# The subject was debated at least once this year by the town Council.

@ -154,7 +154,7 @@ Rule Arg 2000 only - Mar 3 0:00 0 -
# Timezone Law (which never was effectively applied) will (would?) be
# in effect.... The article is at
# http://ar.clarin.com/diario/2001-06-06/e-01701.htm
# ... The Law itself is "Ley No 25155", sanctioned on 1999-08-25, enacted
# ... The Law itself is "Ley No. 25155", sanctioned on 1999-08-25, enacted
# 1999-09-17, and published 1999-09-21. The official publication is at:
# http://www.boletin.jus.gov.ar/BON/Primera/1999/09-Septiembre/21/PDF/BO21-09-99LEG.PDF
# Regretfully, you have to subscribe (and pay) for the on-line version....
@ -198,15 +198,11 @@ Rule Arg 2000 only - Mar 3 0:00 0 -
# http://www.worldtimezone.com/dst_news/dst_news_argentina03.html
# http://www.impulsobaires.com.ar/nota.php?id=57832 (in spanish)
# From Rodrigo Severo (2008-10-06):
# Here is some info available at a Gentoo bug related to TZ on Argentina's DST:
# ...
# ------- Comment #1 from [jmdocile] 2008-10-06 16:28 0000 -------
# Hi, there is a problem with timezone-data-2008e and maybe with
# timezone-data-2008f
# Argentinian law [Number] 25.155 is no longer valid.
# From Juan Manuel Docile in https://bugs.gentoo.org/240339 (2008-10-07)
# via Rodrigo Severo:
# Argentinian law No. 25.155 is no longer valid.
# http://www.infoleg.gov.ar/infolegInternet/anexos/60000-64999/60036/norma.htm
# The new one is law [Number] 26.350
# The new one is law No. 26.350
# http://www.infoleg.gov.ar/infolegInternet/anexos/135000-139999/136191/norma.htm
# So there is no summer time in Argentina for now.
@ -794,7 +790,7 @@ Zone America/La_Paz -4:32:36 - LMT 1890
# [ and in a second message (same day): ]
# I found the decree.
#
# DECRETO No- 7.584, DE 13 DE OUTUBRO DE 2011
# DECRETO No. 7.584, DE 13 DE OUTUBRO DE 2011
# Link :
# http://www.in.gov.br/visualiza/index.jsp?data=13/10/2011&jornal=1000&pagina=6&totalArquivos=6
@ -1148,7 +1144,7 @@ Zone America/Rio_Branco -4:31:12 - LMT 1914
# Conflicts between [1] and [2] were resolved as follows:
#
# - [1] says the 1910 transition was Jan 1, [2] says Jan 10 and cites
# Boletín Nº 1, Aviso Nº 1 (1910). Go with [2].
# Boletín No. 1, Aviso No. 1 (1910). Go with [2].
#
# - [1] says SMT was -4:42:45, [2] says Chile's official time from
# 1916 to 1919 was -4:42:46.3, the meridian of Chile's National
@ -1156,7 +1152,7 @@ Zone America/Rio_Branco -4:31:12 - LMT 1914
# Quinta Normal in Santiago. Go with [2], rounding it to -4:42:46.
#
# - [1] says the 1918 transition was Sep 1, [2] says Sep 10 and cites
# Boletín Nº 22, Aviso Nº 129/1918 (1918-08-23). Go with [2].
# Boletín No. 22, Aviso No. 129/1918 (1918-08-23). Go with [2].
#
# - [1] does not give times for transitions; assume they occur
# at midnight mainland time, the current common practice. However,
@ -1556,7 +1552,7 @@ Rule Para 1997 only - Feb lastSun 0:00 0 -
# (1999-09) reports no date; go with above sources and Gerd Knops (2001-02-27).
Rule Para 1998 2001 - Mar Sun>=1 0:00 0 -
# From Rives McDow (2002-02-28):
# A decree was issued in Paraguay (no. 16350) on 2002-02-26 that changed the
# A decree was issued in Paraguay (No. 16350) on 2002-02-26 that changed the
# dst method to be from the first Sunday in September to the first Sunday in
# April.
Rule Para 2002 2004 - Apr Sun>=1 0:00 0 -
@ -1736,8 +1732,19 @@ Rule Uruguay 2005 only - Oct 9 2:00 1:00 S
Rule Uruguay 2006 only - Mar 12 2:00 0 -
# From Jesper Nørgaard Welen (2006-09-06):
# http://www.presidencia.gub.uy/_web/decretos/2006/09/CM%20210_08%2006%202006_00001.PDF
Rule Uruguay 2006 max - Oct Sun>=1 2:00 1:00 S
Rule Uruguay 2007 max - Mar Sun>=8 2:00 0 -
#
# From Steffen Thorsen (2015-06-30):
# ... it looks like they will not be using DST the coming summer:
# http://www.elobservador.com.uy/gobierno-resolvio-que-no-habra-cambio-horario-verano-n656787
# http://www.republica.com.uy/este-ano-no-se-modificara-el-huso-horario-en-uruguay/523760/
# From Paul Eggert (2015-06-30):
# Apparently restaurateurs complained that DST caused people to go to the beach
# instead of out to dinner.
# From Pablo Camargo (2015-07-13):
# http://archivo.presidencia.gub.uy/sci/decretos/2015/06/cons_min_201.pdf
# [dated 2015-06-29; repeals Decree 311/006 dated 2006-09-04]
Rule Uruguay 2006 2014 - Oct Sun>=1 2:00 1:00 S
Rule Uruguay 2007 2015 - Mar Sun>=8 2:00 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Montevideo -3:44:44 - LMT 1898 Jun 28
-3:44:44 - MMT 1920 May 1 # Montevideo MT
@ -1746,6 +1753,10 @@ Zone America/Montevideo -3:44:44 - LMT 1898 Jun 28
# Venezuela
#
# From Paul Eggert (2015-07-28):
# For the 1965 transition see Gaceta Oficial No. 27.619 (1964-12-15), p 205.533
# http://www.pgr.gob.ve/dmdocuments/1964/27619.pdf
#
# From John Stainforth (2007-11-28):
# ... the change for Venezuela originally expected for 2007-12-31 has
# been brought forward to 2007-12-09. The official announcement was
@ -1757,6 +1768,6 @@ Zone America/Montevideo -3:44:44 - LMT 1898 Jun 28
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Caracas -4:27:44 - LMT 1890
-4:27:40 - CMT 1912 Feb 12 # Caracas Mean Time?
-4:30 - VET 1965 # Venezuela Time
-4:30 - VET 1965 Jan 1 0:00 # Venezuela T.
-4:00 - VET 2007 Dec 9 3:00
-4:30 - VET

@ -129,8 +129,8 @@ BW -2439+02555 Africa/Gaborone
BY +5354+02734 Europe/Minsk
BZ +1730-08812 America/Belize
CA +4734-05243 America/St_Johns Newfoundland Time, including SE Labrador
CA +4439-06336 America/Halifax Atlantic Time - Nova Scotia (most places), PEI
CA +4612-05957 America/Glace_Bay Atlantic Time - Nova Scotia - places that did not observe DST 1966-1971
CA +4439-06336 America/Halifax Atlantic Time - Nova Scotia (peninsula), PEI
CA +4612-05957 America/Glace_Bay Atlantic Time - Nova Scotia (Cape Breton)
CA +4606-06447 America/Moncton Atlantic Time - New Brunswick
CA +5320-06025 America/Goose_Bay Atlantic Time - Labrador - most locations
CA +5125-05707 America/Blanc-Sablon Atlantic Standard Time - Quebec - Lower North Shore

@ -32,3 +32,4 @@ $(eval $(call FillCacheFind, $(wildcard $(JDK_TOPDIR)/src/java.base/*/native \
include CoreLibraries.gmk
include NetworkingLibraries.gmk
include NioLibraries.gmk
include SecurityLibraries.gmk

@ -80,7 +80,6 @@ ifeq ($(OPENJDK_TARGET_OS), macosx)
-framework ApplicationServices \
-framework JavaNativeFoundation \
-framework JavaRuntimeSupport \
-framework Security \
-framework SystemConfiguration \
$(LDFLAGS_JDKLIB_SUFFIX), \
OBJECT_DIR := $(SUPPORT_OUTPUTDIR)/native/$(MODULE)/libosx, \

@ -1,94 +0,0 @@
#
# Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation. Oracle designates this
# particular file as subject to the "Classpath" exception as provided
# by Oracle in the LICENSE file that accompanied this code.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
include LibCommon.gmk
################################################################################
BUILD_LIBHPROF_SRC := $(call FindSrcDirsForLib, jdk.hprof.agent, hprof)
BUILD_LIBHPROF_CFLAGS := $(addprefix -I, $(BUILD_LIBHPROF_SRC)) \
-I$(JDK_TOPDIR)/src/demo/share/jvmti/java_crw_demo
BUILD_LIBHPROF_LDFLAGS :=
LIBHPROF_OPTIMIZATION := HIGHEST
ifneq ($(findstring $(OPENJDK_TARGET_OS), solaris linux), )
ifeq ($(ENABLE_DEBUG_SYMBOLS), true)
LIBHPROF_OPTIMIZATION := LOW
endif
endif
$(eval $(call SetupNativeCompilation,BUILD_LIBHPROF, \
LIBRARY := hprof, \
OUTPUT_DIR := $(INSTALL_LIBRARIES_HERE), \
SRC := $(BUILD_LIBHPROF_SRC), \
OPTIMIZATION := $(LIBHPROF_OPTIMIZATION), \
CFLAGS := $(CFLAGS_JDKLIB) \
$(BUILD_LIBHPROF_CFLAGS), \
CFLAGS_debug := -DHPROF_LOGGING, \
MAPFILE := $(JDK_TOPDIR)/make/mapfiles/libhprof/mapfile-vers, \
LDFLAGS := $(LDFLAGS_JDKLIB) \
$(call SET_SHARED_LIBRARY_ORIGIN), \
LDFLAGS_windows := wsock32.lib winmm.lib advapi32.lib, \
LDFLAGS_SUFFIX_linux := $(LIBDL), \
LDFLAGS_SUFFIX_macosx := $(LIBDL), \
LDFLAGS_SUFFIX_solaris := -lsocket -lnsl $(LIBDL) -lc, \
VERSIONINFO_RESOURCE := $(GLOBAL_VERSION_INFO_RESOURCE), \
RC_FLAGS := $(RC_FLAGS) \
-D "JDK_FNAME=hprof.dll" \
-D "JDK_INTERNAL_NAME=hprof" \
-D "JDK_FTYPE=0x2L", \
OBJECT_DIR := $(SUPPORT_OUTPUTDIR)/native/$(MODULE)/libhprof_jvmti, \
DEBUG_SYMBOLS := true))
TARGETS += $(BUILD_LIBHPROF)
################################################################################
LIBJAVA_CRW_DEMO_SRC := $(JDK_TOPDIR)/src/demo/share/jvmti/java_crw_demo
$(eval $(call SetupNativeCompilation,BUILD_LIBJAVA_CRW_DEMO, \
LIBRARY := java_crw_demo, \
OUTPUT_DIR := $(INSTALL_LIBRARIES_HERE), \
SRC := $(LIBJAVA_CRW_DEMO_SRC), \
OPTIMIZATION := LOW, \
CFLAGS := $(CFLAGS_JDKLIB) \
$(addprefix -I, $(LIBJAVA_CRW_DEMO_SRC)), \
MAPFILE := $(JDK_TOPDIR)/make/mapfiles/libjava_crw_demo/mapfile-vers, \
LDFLAGS := $(LDFLAGS_JDKLIB) \
$(call SET_SHARED_LIBRARY_ORIGIN), \
LDFLAGS_SUFFIX_solaris := -lc, \
VERSIONINFO_RESOURCE := $(GLOBAL_VERSION_INFO_RESOURCE), \
RC_FLAGS := $(RC_FLAGS) \
-D "JDK_FNAME=java_crw_demo.dll" \
-D "JDK_INTERNAL_NAME=java_crw_demo" \
-D "JDK_FTYPE=0x2L", \
OBJECT_DIR := $(SUPPORT_OUTPUTDIR)/native/$(MODULE)/libjava_crw_demo, \
DEBUG_SYMBOLS := true))
TARGETS += $(BUILD_LIBJAVA_CRW_DEMO)
################################################################################

@ -69,9 +69,6 @@ $(eval $(call SetupNativeCompilation,BUILD_LIBNIO, \
OPTIMIZATION := HIGH, \
CFLAGS := $(CFLAGS_JDKLIB) \
$(BUILD_LIBNIO_CFLAGS), \
DISABLED_WARNINGS_gcc := type-limits, \
DISABLED_WARNINGS_clang := tautological-compare, \
DISABLED_WARNINGS_microsoft := 4244 4996, \
MAPFILE := $(BUILD_LIBNIO_MAPFILE), \
LDFLAGS := $(LDFLAGS_JDKLIB) $(BUILD_LIBNIO_LDFLAGS) \
$(call SET_SHARED_LIBRARY_ORIGIN), \

@ -0,0 +1,63 @@
#
# Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation. Oracle designates this
# particular file as subject to the "Classpath" exception as provided
# by Oracle in the LICENSE file that accompanied this code.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
include LibCommon.gmk
ifeq ($(OPENJDK_TARGET_OS), macosx)
################################################################################
LIBOSXSECURITY_DIRS := $(JDK_TOPDIR)/src/java.base/macosx/native/libosxsecurity
LIBOSXSECURITY_CFLAGS := -I$(LIBOSXSECURITY_DIRS) \
$(LIBJAVA_HEADER_FLAGS) \
-I$(SUPPORT_OUTPUTDIR)/headers/java.base \
$(eval $(call SetupNativeCompilation,BUILD_LIBOSXSECURITY, \
LIBRARY := osxsecurity, \
OUTPUT_DIR := $(INSTALL_LIBRARIES_HERE), \
SRC := $(LIBOSXSECURITY_DIRS), \
OPTIMIZATION := LOW, \
CFLAGS := $(CFLAGS_JDKLIB) \
$(LIBOSXSECURITY_CFLAGS), \
DISABLED_WARNINGS_clang := deprecated-declarations, \
LDFLAGS := $(LDFLAGS_JDKLIB) \
-L$(SUPPORT_OUTPUTDIR)/modules_libs/java.base \
$(call SET_SHARED_LIBRARY_ORIGIN), \
LDFLAGS_SUFFIX_macosx := \
-fobjc-link-runtime \
-framework JavaNativeFoundation \
-framework CoreServices \
-framework Security \
$(LDFLAGS_JDKLIB_SUFFIX), \
OBJECT_DIR := $(SUPPORT_OUTPUTDIR)/native/$(MODULE)/libosxsecurity, \
DEBUG_SYMBOLS := $(DEBUG_ALL_BINARIES)))
$(BUILD_LIBOSXSECURITY): $(BUILD_LIBJAVA)
TARGETS += $(BUILD_LIBOSXSECURITY)
################################################################################
endif

@ -1,34 +0,0 @@
#
# Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation. Oracle designates this
# particular file as subject to the "Classpath" exception as provided
# by Oracle in the LICENSE file that accompanied this code.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
# Define public interface.
SUNWprivate_1.1 {
global:
Agent_OnLoad;
Agent_OnUnload;
local:
*;
};

@ -1,34 +0,0 @@
#
# Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation. Oracle designates this
# particular file as subject to the "Classpath" exception as provided
# by Oracle in the LICENSE file that accompanied this code.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
# Define public interface.
SUNWprivate_1.1 {
global:
java_crw_demo;
java_crw_demo_classname;
local:
*;
};

@ -19,7 +19,6 @@ java.xml.crypto
jdk.charsets
jdk.deploy
jdk.deploy.osx
jdk.hprof.agent
jdk.httpserver
jdk.jfr
jdk.management

@ -0,0 +1,50 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "jni.h"
#include "ProcessHandleImpl_unix.h"
#include <sys/procfs.h>
/*
* Implementation of native ProcessHandleImpl functions for AIX.
* See ProcessHandleImpl_unix.c for more details.
*/
void os_initNative(JNIEnv *env, jclass clazz) {}
jint os_getChildren(JNIEnv *env, jlong jpid, jlongArray jarray,
jlongArray jparentArray, jlongArray jstimesArray) {
return unix_getChildren(env, jpid, jarray, jparentArray, jstimesArray);
}
pid_t os_getParentPidAndTimings(JNIEnv *env, pid_t pid, jlong *total, jlong *start) {
return unix_getParentPidAndTimings(env, pid, total, start);
}
void os_getCmdlineAndUserInfo(JNIEnv *env, jobject jinfo, pid_t pid) {
unix_getCmdlineAndUserInfo(env, jinfo, pid);
}

@ -0,0 +1,266 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "jni.h"
#include "jni_util.h"
#include "java_lang_ProcessHandleImpl.h"
#include "java_lang_ProcessHandleImpl_Info.h"
#include "ProcessHandleImpl_unix.h"
#include <fcntl.h>
#include <limits.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <string.h>
#include <ctype.h>
/*
* Implementation of native ProcessHandleImpl functions for Linux.
* See ProcessHandleImpl_unix.c for more details.
*/
/* Signatures for internal OS specific functions. */
static long long getBoottime(JNIEnv *env);
/* A static offset in milliseconds since boot. */
static long long bootTime_ms;
static long clock_ticks_per_second;
static int pageSize;
void os_initNative(JNIEnv *env, jclass clazz) {
bootTime_ms = getBoottime(env);
clock_ticks_per_second = sysconf(_SC_CLK_TCK);
pageSize = sysconf(_SC_PAGESIZE);
}
jint os_getChildren(JNIEnv *env, jlong jpid, jlongArray jarray,
jlongArray jparentArray, jlongArray jstimesArray) {
return unix_getChildren(env, jpid, jarray, jparentArray, jstimesArray);
}
/**
* Read /proc/<pid>/stat and return the ppid, total cputime and start time.
* -1 is fail; >= 0 is parent pid
* 'total' will contain the running time of 'pid' in nanoseconds.
* 'start' will contain the start time of 'pid' in milliseconds since epoch.
*/
pid_t os_getParentPidAndTimings(JNIEnv *env, pid_t pid,
jlong *totalTime, jlong* startTime) {
FILE* fp;
char buffer[2048];
int statlen;
char fn[32];
char* s;
int parentPid;
long unsigned int utime = 0; // clock ticks
long unsigned int stime = 0; // clock ticks
long long unsigned int start = 0; // clock ticks since boot (see proc(5), field starttime)
/*
* Try to stat and then open /proc/%d/stat
*/
snprintf(fn, sizeof fn, "/proc/%d/stat", pid);
fp = fopen(fn, "r");
if (fp == NULL) {
return -1; // fail, no such /proc/pid/stat
}
/*
* The format is: pid (command) state ppid ...
* As the command could be anything, we must find the rightmost
* ")" and then skip the whitespace that follows it.
*/
statlen = fread(buffer, 1, (sizeof buffer - 1), fp);
fclose(fp);
if (statlen < 0) {
return -1; // parent pid is not available
}
buffer[statlen] = '\0';
s = strchr(buffer, '(');
if (s == NULL) {
return -1; // parent pid is not available
}
// Found start of command, skip to end
s++;
s = strrchr(s, ')');
if (s == NULL) {
return -1; // parent pid is not available
}
s++;
// Scan the needed fields from status, retaining only ppid(4),
// utime (14), stime(15), starttime(22)
if (4 != sscanf(s, " %*c %d %*d %*d %*d %*d %*d %*u %*u %*u %*u %lu %lu %*d %*d %*d %*d %*d %*d %llu",
&parentPid, &utime, &stime, &start)) {
return 0; // not all values parsed; return error
}
*totalTime = (utime + stime) * (jlong)(1000000000 / clock_ticks_per_second);
*startTime = bootTime_ms + ((start * 1000) / clock_ticks_per_second);
return parentPid;
}
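For illustration only (this sketch is not part of the patch): a minimal Java version of the same /proc/<pid>/stat strategy used above. It skips past the rightmost ')' of the command field, then reads ppid (field 4), utime (14), stime (15) and starttime (22). Assumptions not taken from the patch: a USER_HZ of 100 stands in for sysconf(_SC_CLK_TCK), and /proc/self/stat is read so no pid lookup is needed; the boot-time helper mirrors getBoottime() defined further below.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;

public class ProcStatSketch {
    // Assumed USER_HZ; the native code queries sysconf(_SC_CLK_TCK) instead.
    private static final long CLK_TCK = 100L;

    public static void main(String[] args) throws IOException {
        String stat = new String(Files.readAllBytes(Paths.get("/proc/self/stat")));
        // "pid (command) state ppid ..." -- the command may contain spaces and ')',
        // so skip past the rightmost ')' exactly as the C code above does.
        String[] f = stat.substring(stat.lastIndexOf(')') + 2).split("\\s+");
        long ppid       = Long.parseLong(f[1]);   // field 4
        long utime      = Long.parseLong(f[11]);  // field 14, clock ticks
        long stime      = Long.parseLong(f[12]);  // field 15, clock ticks
        long startTicks = Long.parseLong(f[19]);  // field 22, ticks since boot

        long totalNanos  = (utime + stime) * (1_000_000_000L / CLK_TCK);
        long startMillis = bootTimeMillis() + (startTicks * 1000L) / CLK_TCK;
        System.out.println("ppid=" + ppid + " cpu=" + totalNanos + "ns start=" + startMillis + "ms");
    }

    // Mirrors getBoottime() further below: the "btime" line of /proc/stat holds
    // the boot time in seconds since the epoch.
    private static long bootTimeMillis() throws IOException {
        for (String line : Files.readAllLines(Paths.get("/proc/stat"))) {
            if (line.startsWith("btime ")) {
                return Long.parseLong(line.substring(6).trim()) * 1000L;
            }
        }
        return -1L;
    }
}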
void os_getCmdlineAndUserInfo(JNIEnv *env, jobject jinfo, pid_t pid) {
int fd;
int cmdlen = 0;
char *cmdline = NULL, *cmdEnd = NULL; // used for command line args and exe
char *args = NULL;
jstring cmdexe = NULL;
char fn[32];
struct stat stat_buf;
/*
* Try to open /proc/<pid>/cmdline
*/
snprintf(fn, sizeof fn, "/proc/%d/cmdline", pid);
if ((fd = open(fn, O_RDONLY)) < 0) {
return;
}
if (fstat(fd, &stat_buf) == 0) {
unix_getUserInfo(env, jinfo, stat_buf.st_uid);
}
do { // Block to break out of on errors
int i, truncated = 0;
int count;
char *s;
/*
* The path name read by readlink() is limited to PATH_MAX characters.
* The content of /proc/<pid>/cmdline is limited to PAGE_SIZE characters.
*/
cmdline = (char*)malloc((PATH_MAX > pageSize ? PATH_MAX : pageSize) + 1);
if (cmdline == NULL) {
break;
}
/*
* On Linux, the full path to the executable command is the link in
* /proc/<pid>/exe. But it is only readable for processes we own.
*/
snprintf(fn, sizeof fn, "/proc/%d/exe", pid);
if ((cmdlen = readlink(fn, cmdline, PATH_MAX)) > 0) {
// null terminate and create String to store for command
cmdline[cmdlen] = '\0';
cmdexe = JNU_NewStringPlatform(env, cmdline);
(*env)->ExceptionClear(env); // unconditionally clear any exception
}
/*
* The command-line arguments appear as a set of strings separated by
* null bytes ('\0'), with a further null byte after the last
* string. The last string is only null-terminated if the whole command
* line does not exceed (PAGE_SIZE - 1) characters.
*/
cmdlen = 0;
s = cmdline;
while ((count = read(fd, s, pageSize - cmdlen)) > 0) {
cmdlen += count;
s += count;
}
if (count < 0) {
break;
}
// We have to null-terminate because the process may have changed argv[]
// or because the content in /proc/<pid>/cmdline is truncated.
cmdline[cmdlen] = '\0';
if (cmdlen == pageSize && cmdline[pageSize - 1] != '\0') {
truncated = 1;
} else if (cmdlen == 0) {
// /proc/<pid>/cmdline was empty. This usually happens for kernel processes
// like '[kthreadd]'. We could try to read /proc/<pid>/comm in the future.
}
if (cmdlen > 0 && (cmdexe == NULL || truncated)) {
// We have no exact command or the arguments are truncated.
// In this case we save the command line from /proc/<pid>/cmdline.
args = (char*)malloc(pageSize + 1);
if (args != NULL) {
memcpy(args, cmdline, cmdlen + 1);
for (i = 0; i < cmdlen; i++) {
if (args[i] == '\0') {
args[i] = ' ';
}
}
}
}
i = 0;
if (!truncated) {
// Count the arguments
cmdEnd = &cmdline[cmdlen];
for (s = cmdline; *s != '\0' && (s < cmdEnd); i++) {
s += strnlen(s, (cmdEnd - s)) + 1;
}
}
unix_fillArgArray(env, jinfo, i, cmdline, cmdEnd, cmdexe, args);
} while (0);
if (cmdline != NULL) {
free(cmdline);
}
if (args != NULL) {
free(args);
}
if (fd >= 0) {
close(fd);
}
}
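Similarly, a hedged Java-only sketch of the /proc/<pid>/cmdline handling above: the file holds the arguments as a run of '\0'-separated strings, so splitting on the NUL bytes yields the argument vector, and joining with spaces yields the displayable command line the native code stores in 'args'. /proc/self/cmdline is read here because it is always accessible to the process itself; the truncation check against PAGE_SIZE is omitted.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.List;

public class ProcCmdlineSketch {
    public static void main(String[] args) throws IOException {
        byte[] raw = Files.readAllBytes(Paths.get("/proc/self/cmdline"));
        // Arguments are separated (and normally terminated) by '\0' bytes.
        List<String> argv = new ArrayList<>();
        int start = 0;
        for (int i = 0; i < raw.length; i++) {
            if (raw[i] == 0) {
                argv.add(new String(raw, start, i - start));
                start = i + 1;
            }
        }
        if (start < raw.length) {            // no trailing '\0': possibly truncated
            argv.add(new String(raw, start, raw.length - start));
        }
        // A displayable command line, analogous to the 'args' buffer above.
        String commandLine = String.join(" ", argv);
        System.out.println(argv.size() + " argument(s): " + commandLine);
    }
}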
/**
* Read the boottime from /proc/stat.
*/
static long long getBoottime(JNIEnv *env) {
FILE *fp;
char *line = NULL;
size_t len = 0;
long long bootTime = 0;
fp = fopen("/proc/stat", "r");
if (fp == NULL) {
return -1;
}
while (getline(&line, &len, fp) != -1) {
if (sscanf(line, "btime %lld", &bootTime) == 1) {
break;
}
}
free(line);
if (fp != 0) {
fclose(fp);
}
return bootTime * 1000;
}

@ -106,7 +106,7 @@ public final class KeychainStore extends KeyStoreSpi {
AccessController.doPrivileged(
new PrivilegedAction<Void>() {
public Void run() {
System.loadLibrary("osx");
System.loadLibrary("osxsecurity");
return null;
}
});

@ -28,6 +28,8 @@
#include "java_lang_ProcessHandleImpl.h"
#include "java_lang_ProcessHandleImpl_Info.h"
#include "ProcessHandleImpl_unix.h"
#include <stdio.h>
#include <errno.h>
#include <signal.h>
@ -38,144 +40,15 @@
#include <sys/sysctl.h>
/**
* Implementations of ProcessHandleImpl functions for MAC OS X;
* are NOT common to all Unix variants.
* Implementation of native ProcessHandleImpl functions for MAC OS X.
* See ProcessHandleImpl_unix.c for more details.
*/
static void getStatInfo(JNIEnv *env, jobject jinfo, pid_t pid);
static void getCmdlineInfo(JNIEnv *env, jobject jinfo, pid_t pid);
/*
* Common Unix function to lookup the uid and return the user name.
*/
extern jstring uidToUser(JNIEnv* env, uid_t uid);
/* Field id for jString 'command' in java.lang.ProcessHandle.Info */
static jfieldID ProcessHandleImpl_Info_commandID;
/* Field id for jString[] 'arguments' in java.lang.ProcessHandle.Info */
static jfieldID ProcessHandleImpl_Info_argumentsID;
/* Field id for jlong 'totalTime' in java.lang.ProcessHandle.Info */
static jfieldID ProcessHandleImpl_Info_totalTimeID;
/* Field id for jlong 'startTime' in java.lang.ProcessHandle.Info */
static jfieldID ProcessHandleImpl_Info_startTimeID;
/* Field id for jString 'user' in java.lang.ProcessHandleImpl.Info */
static jfieldID ProcessHandleImpl_Info_userID;
/* static value for clock ticks per second. */
static long clock_ticks_per_second;
/**************************************************************
* Static method to initialize field IDs and the ticks per second rate.
*
* Class: java_lang_ProcessHandleImpl_Info
* Method: initIDs
* Signature: ()V
*/
JNIEXPORT void JNICALL
Java_java_lang_ProcessHandleImpl_00024Info_initIDs(JNIEnv *env, jclass clazz) {
CHECK_NULL(ProcessHandleImpl_Info_commandID =
(*env)->GetFieldID(env, clazz, "command", "Ljava/lang/String;"));
CHECK_NULL(ProcessHandleImpl_Info_argumentsID =
(*env)->GetFieldID(env, clazz, "arguments", "[Ljava/lang/String;"));
CHECK_NULL(ProcessHandleImpl_Info_totalTimeID =
(*env)->GetFieldID(env, clazz, "totalTime", "J"));
CHECK_NULL(ProcessHandleImpl_Info_startTimeID =
(*env)->GetFieldID(env, clazz, "startTime", "J"));
CHECK_NULL(ProcessHandleImpl_Info_userID =
(*env)->GetFieldID(env, clazz, "user", "Ljava/lang/String;"));
}
/**************************************************************
* Static method to initialize the ticks per second rate.
*
* Class: java_lang_ProcessHandleImpl
* Method: initNative
* Signature: ()V
*/
JNIEXPORT void JNICALL
Java_java_lang_ProcessHandleImpl_initNative(JNIEnv *env, jclass clazz) {
clock_ticks_per_second = sysconf(_SC_CLK_TCK);
}
/*
* Check if a process is alive.
* Return the start time (ms since 1970) if it is available.
* If the start time is not available return 0.
* If the pid is invalid, return -1.
*
* Class: java_lang_ProcessHandleImpl
* Method: isAlive0
* Signature: (J)J
*/
JNIEXPORT jlong JNICALL
Java_java_lang_ProcessHandleImpl_isAlive0(JNIEnv *env, jobject obj, jlong jpid) {
pid_t pid = (pid_t) jpid;
struct kinfo_proc kp;
size_t bufSize = sizeof kp;
// Read the process info for the specific pid
int mib[4] = {CTL_KERN, KERN_PROC, KERN_PROC_PID, pid};
if (sysctl(mib, 4, &kp, &bufSize, NULL, 0) < 0) {
return (errno == EINVAL) ? -1 : 0;
} else {
return (bufSize == 0) ? -1 :
(jlong) (kp.kp_proc.p_starttime.tv_sec * 1000
+ kp.kp_proc.p_starttime.tv_usec / 1000);
}
}
/*
* Returns the parent pid of the requested pid.
*
* Class: java_lang_ProcessHandleImpl
* Method: parent0
* Signature: (J)J
*/
JNIEXPORT jlong JNICALL
Java_java_lang_ProcessHandleImpl_parent0(JNIEnv *env,
jobject obj,
jlong jpid,
jlong startTime) {
pid_t pid = (pid_t) jpid;
pid_t ppid = -1;
if (pid == getpid()) {
ppid = getppid();
} else {
const pid_t pid = (pid_t) jpid;
struct kinfo_proc kp;
size_t bufSize = sizeof kp;
// Read the process info for the specific pid
int mib[4] = {CTL_KERN, KERN_PROC, KERN_PROC_PID, pid};
if (sysctl(mib, 4, &kp, &bufSize, NULL, 0) < 0) {
JNU_ThrowByNameWithLastError(env,
"java/lang/RuntimeException", "sysctl failed");
return -1;
}
// If the buffer is full and for the pid requested then check the start
if (bufSize > 0 && kp.kp_proc.p_pid == pid) {
jlong start = (jlong) (kp.kp_proc.p_starttime.tv_sec * 1000
+ kp.kp_proc.p_starttime.tv_usec / 1000);
if (start == startTime || start == 0 || startTime == 0) {
ppid = kp.kp_eproc.e_ppid;
}
}
}
return (jlong) ppid;
}
void os_initNative(JNIEnv *env, jclass clazz) {}
/*
* Returns the children of the requested pid and optionally each parent.
*
* Class: java_lang_ProcessHandleImpl
* Method: getProcessPids0
* Signature: (J[J[J)I
*
* Use sysctl to accumulate any process whose parent pid is zero or matches.
* The resulting pids are stored into the array of longs.
* The number of pids is returned if they all fit.
@ -183,13 +56,8 @@ Java_java_lang_ProcessHandleImpl_parent0(JNIEnv *env,
* If the array is too short, excess pids are not stored and
* the desired length is returned.
*/
JNIEXPORT jint JNICALL
Java_java_lang_ProcessHandleImpl_getProcessPids0(JNIEnv *env,
jclass clazz,
jlong jpid,
jlongArray jarray,
jlongArray jparentArray,
jlongArray jstimesArray) {
jint os_getChildren(JNIEnv *env, jlong jpid, jlongArray jarray,
jlongArray jparentArray, jlongArray jstimesArray) {
jlong* pids = NULL;
jlong* ppids = NULL;
jlong* stimes = NULL;
@ -303,35 +171,17 @@ Java_java_lang_ProcessHandleImpl_getProcessPids0(JNIEnv *env,
return count;
}
/**************************************************************
* Implementation of ProcessHandleImpl_Info native methods.
*/
/*
* Fill in the Info object from the OS information about the process.
*
* Class: java_lang_ProcessHandleImpl
* Method: info0
* Signature: (J)I
*/
JNIEXPORT void JNICALL
Java_java_lang_ProcessHandleImpl_00024Info_info0(JNIEnv *env,
jobject jinfo,
jlong jpid) {
pid_t pid = (pid_t) jpid;
getStatInfo(env, jinfo, pid);
getCmdlineInfo(env, jinfo, pid);
}
/**
* Read /proc/<pid>/stat and fill in the fields of the Info object.
* The executable name, plus the user, system, and start times are gathered.
* Use sysctl and return the ppid, total cputime and start time.
* Return: -1 on failure; a value >= 0 is the parent pid.
* 'totalTime' will contain the running time of 'pid' in nanoseconds.
* 'startTime' will contain the start time of 'pid' in milliseconds since the epoch.
*/
static void getStatInfo(JNIEnv *env, jobject jinfo, pid_t jpid) {
jlong totalTime; // nanoseconds
unsigned long long startTime; // milliseconds
pid_t os_getParentPidAndTimings(JNIEnv *env, pid_t jpid,
jlong *totalTime, jlong *startTime) {
const pid_t pid = (pid_t) jpid;
pid_t ppid = -1;
struct kinfo_proc kp;
size_t bufSize = sizeof kp;
@ -339,92 +189,70 @@ static void getStatInfo(JNIEnv *env, jobject jinfo, pid_t jpid) {
int mib[4] = {CTL_KERN, KERN_PROC, KERN_PROC_PID, pid};
if (sysctl(mib, 4, &kp, &bufSize, NULL, 0) < 0) {
if (errno == EINVAL) {
return;
} else {
JNU_ThrowByNameWithLastError(env,
"java/lang/RuntimeException", "sysctl failed");
}
return;
JNU_ThrowByNameWithLastError(env,
"java/lang/RuntimeException", "sysctl failed");
return -1;
}
if (bufSize > 0 && kp.kp_proc.p_pid == pid) {
*startTime = (jlong) (kp.kp_proc.p_starttime.tv_sec * 1000 +
kp.kp_proc.p_starttime.tv_usec / 1000);
ppid = kp.kp_eproc.e_ppid;
}
// Convert the UID to the username
jstring name = NULL;
CHECK_NULL((name = uidToUser(env, kp.kp_eproc.e_ucred.cr_uid)));
(*env)->SetObjectField(env, jinfo, ProcessHandleImpl_Info_userID, name);
JNU_CHECK_EXCEPTION(env);
startTime = kp.kp_proc.p_starttime.tv_sec * 1000 +
kp.kp_proc.p_starttime.tv_usec / 1000;
(*env)->SetLongField(env, jinfo, ProcessHandleImpl_Info_startTimeID, startTime);
JNU_CHECK_EXCEPTION(env);
// Get cputime if for current process
if (pid == getpid()) {
struct rusage usage;
if (getrusage(RUSAGE_SELF, &usage) != 0) {
return;
if (getrusage(RUSAGE_SELF, &usage) == 0) {
jlong microsecs =
usage.ru_utime.tv_sec * 1000 * 1000 + usage.ru_utime.tv_usec +
usage.ru_stime.tv_sec * 1000 * 1000 + usage.ru_stime.tv_usec;
*totalTime = microsecs * 1000;
}
jlong microsecs =
usage.ru_utime.tv_sec * 1000 * 1000 + usage.ru_utime.tv_usec +
usage.ru_stime.tv_sec * 1000 * 1000 + usage.ru_stime.tv_usec;
totalTime = microsecs * 1000;
(*env)->SetLongField(env, jinfo, ProcessHandleImpl_Info_totalTimeID, totalTime);
JNU_CHECK_EXCEPTION(env);
}
return ppid;
}
/**
* Construct the argument array by parsing the arguments from the sequence of arguments.
* Return the uid of a process or -1 on error
*/
static int fillArgArray(JNIEnv *env, jobject jinfo, int nargs,
const char *cp, const char *argsEnd) {
jstring str = NULL;
jobject argsArray;
int i;
static uid_t getUID(pid_t pid) {
struct kinfo_proc kp;
size_t bufSize = sizeof kp;
if (nargs < 1) {
return 0;
}
// Create a String array for nargs-1 elements
CHECK_NULL_RETURN((argsArray = (*env)->NewObjectArray(env,
nargs - 1, JNU_ClassString(env), NULL)), -1);
// Read the process info for the specific pid
int mib[4] = {CTL_KERN, KERN_PROC, KERN_PROC_PID, pid};
for (i = 0; i < nargs - 1; i++) {
// skip to the next argument; omits arg[0]
cp += strnlen(cp, (argsEnd - cp)) + 1;
if (cp > argsEnd || *cp == '\0') {
return -2; // Off the end pointer or an empty argument is an error
if (sysctl(mib, 4, &kp, &bufSize, NULL, 0) == 0) {
if (bufSize > 0 && kp.kp_proc.p_pid == pid) {
return kp.kp_eproc.e_ucred.cr_uid;
}
CHECK_NULL_RETURN((str = JNU_NewStringPlatform(env, cp)), -1);
(*env)->SetObjectArrayElement(env, argsArray, i, str);
JNU_CHECK_EXCEPTION_RETURN(env, -3);
}
(*env)->SetObjectField(env, jinfo, ProcessHandleImpl_Info_argumentsID, argsArray);
JNU_CHECK_EXCEPTION_RETURN(env, -4);
return 0;
return (uid_t)-1;
}
/**
* Retrieve the command and arguments for the process and store them
* into the Info object.
*/
static void getCmdlineInfo(JNIEnv *env, jobject jinfo, pid_t pid) {
void os_getCmdlineAndUserInfo(JNIEnv *env, jobject jinfo, pid_t pid) {
int mib[3], maxargs, nargs, i;
size_t size;
char *args, *cp, *sp, *np;
// Get the UID first. This is done in this function because on other
// platforms like Linux/Solaris/AIX the uid is cheap to obtain here, as it
// comes from the same source as the command line info.
unix_getUserInfo(env, jinfo, getUID(pid));
// Get the maximum size of the arguments
mib[0] = CTL_KERN;
mib[1] = KERN_ARGMAX;
size = sizeof(maxargs);
if (sysctl(mib, 2, &maxargs, &size, NULL, 0) == -1) {
JNU_ThrowByNameWithLastError(env,
"java/lang/RuntimeException", "sysctl failed");
"java/lang/RuntimeException", "sysctl failed");
return;
}
@ -437,7 +265,7 @@ static void getCmdlineInfo(JNIEnv *env, jobject jinfo, pid_t pid) {
do { // a block to break out of on error
char *argsEnd;
jstring str = NULL;
jstring cmdexe = NULL;
mib[0] = CTL_KERN;
mib[1] = KERN_PROCARGS2;
@ -445,7 +273,7 @@ static void getCmdlineInfo(JNIEnv *env, jobject jinfo, pid_t pid) {
size = (size_t) maxargs;
if (sysctl(mib, 3, args, &size, NULL, 0) == -1) {
if (errno != EINVAL) {
JNU_ThrowByNameWithLastError(env,
JNU_ThrowByNameWithLastError(env,
"java/lang/RuntimeException", "sysctl failed");
}
break;
@ -456,11 +284,7 @@ static void getCmdlineInfo(JNIEnv *env, jobject jinfo, pid_t pid) {
argsEnd = &args[size];
// Store the command executable path
if ((str = JNU_NewStringPlatform(env, cp)) == NULL) {
break;
}
(*env)->SetObjectField(env, jinfo, ProcessHandleImpl_Info_commandID, str);
if ((*env)->ExceptionCheck(env)) {
if ((cmdexe = JNU_NewStringPlatform(env, cp)) == NULL) {
break;
}
@ -471,7 +295,7 @@ static void getCmdlineInfo(JNIEnv *env, jobject jinfo, pid_t pid) {
}
}
fillArgArray(env, jinfo, nargs, cp, argsEnd);
unix_fillArgArray(env, jinfo, nargs, cp, argsEnd, cmdexe, NULL);
} while (0);
// Free the arg buffer
free(args);

@ -224,9 +224,35 @@ public interface ProcessHandle extends Comparable<ProcessHandle> {
*/
public Optional<String> command();
/**
* Returns the command line of the process.
* <p>
* If {@link #command command()} and {@link #arguments arguments()} return
* non-empty optionals, this is simply a convenience method that concatenates
* the two values separated by spaces. Otherwise it returns a
* best-effort, platform-dependent representation of the command line.
*
* @apiNote Note that the returned executable pathname and the
* arguments may be truncated on some platforms due to system
* limitations.
* <p>
* The executable pathname may contain only the
* name of the executable without the full path information.
* It is undecidable whether white space separates different
* arguments or is part of a single argument.
*
* @return an {@code Optional<String>} of the command line
* of the process
*/
public Optional<String> commandLine();
/**
* Returns an array of Strings of the arguments of the process.
*
* @apiNote On some platforms, native applications are free to change
* the arguments array after startup and this method may only
* show the changed values.
*
* @return an {@code Optional<String[]>} of the arguments of the process
*/
public Optional<String[]> arguments();
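To make the two methods documented above concrete, a short usage sketch. One assumption not shown in this hunk: the Info snapshot is obtained through ProcessHandle.current().info().

import java.util.Optional;

public class ProcessInfoSketch {
    public static void main(String[] args) {
        // Assumption: the Info snapshot is reached via ProcessHandle.current().info().
        ProcessHandle.Info info = ProcessHandle.current().info();

        // command(): the executable path, if the platform exposes it.
        info.command().ifPresent(cmd -> System.out.println("command: " + cmd));

        // arguments(): may be absent, and on some platforms may reflect later
        // changes the native application made to its own argv[].
        info.arguments().ifPresent(argv -> System.out.println("args: " + argv.length));

        // commandLine(): command + arguments joined by spaces when both are known,
        // otherwise a best-effort, platform-dependent string.
        Optional<String> line = info.commandLine();
        System.out.println("command line: " + line.orElse("<unavailable>"));
    }
}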

@ -472,7 +472,7 @@ final class ProcessHandleImpl implements ProcessHandle {
/**
* Implementation of ProcessHandle.Info.
* Information snapshot about a process.
* The attributes of a process vary by operating system and not available
* The attributes of a process vary by operating system and are not available
* in all implementations. Additionally, information about other processes
* is limited by the operating system privileges of the process making the request.
* If a value is not available, either a {@code null} or {@code -1} is stored.
@ -496,6 +496,7 @@ final class ProcessHandleImpl implements ProcessHandle {
private native void info0(long pid);
String command;
String commandLine;
String[] arguments;
long startTime;
long totalTime;
@ -503,6 +504,7 @@ final class ProcessHandleImpl implements ProcessHandle {
Info() {
command = null;
commandLine = null;
arguments = null;
startTime = -1L;
totalTime = -1L;
@ -538,6 +540,15 @@ final class ProcessHandleImpl implements ProcessHandle {
return Optional.ofNullable(command);
}
@Override
public Optional<String> commandLine() {
if (command != null && arguments != null) {
return Optional.of(command + " " + String.join(" ", arguments));
} else {
return Optional.ofNullable(commandLine);
}
}
@Override
public Optional<String[]> arguments() {
return Optional.ofNullable(arguments);
@ -580,6 +591,11 @@ final class ProcessHandleImpl implements ProcessHandle {
sb.append("args: ");
sb.append(Arrays.toString(arguments));
}
if (commandLine != null) {
if (sb.length() != 0) sb.append(", ");
sb.append("cmdLine: ");
sb.append(commandLine);
}
if (startTime > 0) {
if (sb.length() != 0) sb.append(", ");
sb.append("startTime: ");

@ -86,10 +86,10 @@ class Shutdown {
* to be registered even if the shutdown is in progress.
* @param hook the hook to be registered
*
* @throw IllegalStateException
* if registerShutdownInProgress is false and shutdown is in progress; or
* if registerShutdownInProgress is true and the shutdown process
* already passes the given slot
* @throws IllegalStateException
* if registerShutdownInProgress is false and shutdown is in progress; or
* if registerShutdownInProgress is true and the shutdown process
* already passes the given slot
*/
static void add(int slot, boolean registerShutdownInProgress, Runnable hook) {
synchronized (lock) {

@ -65,10 +65,13 @@ public class ReferenceQueue<T> {
return false;
}
assert queue == this;
r.queue = ENQUEUED;
r.next = (head == null) ? r : head;
head = r;
queueLength++;
// Update r.queue *after* adding to list, to avoid race
// with concurrent enqueued checks and fast-path poll().
// Volatiles ensure ordering.
r.queue = ENQUEUED;
if (r instanceof FinalReference) {
sun.misc.VM.addFinalRefCount(1);
}
@ -80,10 +83,13 @@ public class ReferenceQueue<T> {
private Reference<? extends T> reallyPoll() { /* Must hold lock */
Reference<? extends T> r = head;
if (r != null) {
r.queue = NULL;
// Update r.queue *before* removing from list, to avoid
// race with concurrent enqueued checks and fast-path
// poll(). Volatiles ensure ordering.
@SuppressWarnings("unchecked")
Reference<? extends T> rn = r.next;
head = (rn == r) ? null : rn;
r.queue = NULL;
r.next = r;
queueLength--;
if (r instanceof FinalReference) {
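The comments in this ReferenceQueue change describe a publish-last / unpublish-first discipline: the volatile r.queue field is set to ENQUEUED only after the node is fully linked, and back to NULL before it is unlinked, so the lock-free enqueued check and the poll() fast path never observe a half-updated entry. Below is a hedged, single-writer sketch of that general safe-publication pattern; it is not the JDK class itself, and the real enqueue/poll paths additionally hold the queue lock.

class PublishLastSketch<T> {
    private static final class Node<T> {
        final T value;
        Node<T> next;                 // plain field: guarded by the volatile publish below
        Node(T value) { this.value = value; }
    }

    private volatile Node<T> head;    // the publication point, analogous to head/r.queue above

    // Single writer (the real code holds a lock here): link first, publish last.
    void push(T value) {
        Node<T> n = new Node<>(value);
        n.next = head;                // 1. plain writes set up the node
        head = n;                     // 2. volatile write publishes it; readers see a complete node
    }

    // Lock-free fast path, like poll() checking head without taking the lock.
    T peekValue() {
        Node<T> h = head;             // volatile read
        return (h == null) ? null : h.value;
    }
}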

@ -47,7 +47,7 @@ import java.io.IOException;
* If no content handler could be {@linkplain URLConnection#getContent() found},
* URLConnection will look for a content handler in a user-definable set of places.
* Users can define a vertical-bar delimited set of class prefixes
* to search through by defining the <i>{@value java.net.URLConnection#contentPathProp}</i>
* to search through by defining the <i>{@link java.net.URLConnection#contentPathProp}</i>
* property. The class name must be of the form:
* <blockquote>
* <i>{package-prefix}.{major}.{minor}</i>
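To make the lookup convention in that last hunk concrete, a hedged example; it assumes the property named by contentPathProp is "java.content.handler.pkgs" and uses a hypothetical com.example.content prefix, under which a handler for the "image/gif" content type would be looked up as com.example.content.image.gif.

public class ContentHandlerPathSketch {
    public static void main(String[] args) {
        // "|" separates the user-defined package prefixes described above.
        // Assumption: URLConnection#contentPathProp names "java.content.handler.pkgs".
        System.setProperty("java.content.handler.pkgs",
                "com.example.content|sun.net.www.content");
        // URLConnection.getContent() would now also try classes such as
        // com.example.content.image.gif for the "image/gif" content type.
    }
}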

Some files were not shown because too many files have changed in this diff Show More