6810672: Comment typos

I have collected some typos I have found while looking at the code.

Reviewed-by: kvn, never
commit 05d1de7727
parent 67a5668b16
Author: Christian Thalinger
Date:   2009-02-27 13:27:09 -08:00
120 changed files with 278 additions and 277 deletions

@@ -2465,7 +2465,7 @@ void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) {
 // InterpreterRuntime::post_method_entry();
 // }
 // if (DTraceMethodProbes) {
-// SharedRuntime::dtrace_method_entry(method, reciever);
+// SharedRuntime::dtrace_method_entry(method, receiver);
 // }
 void InterpreterMacroAssembler::notify_method_entry() {

@@ -243,7 +243,7 @@ class NativeInstruction VALUE_OBJ_CLASS_SPEC {
 // Regenerate the instruction sequence that performs the 64 bit
 // sethi. This only does the sethi. The disp field (bottom 10 bits)
-// must be handled seperately.
+// must be handled separately.
 static void set_data64_sethi(address instaddr, intptr_t x);
 // combine the fields of a sethi/simm13 pair (simm13 = or, add, jmpl, ld/st)

@@ -189,7 +189,7 @@ reg_def R_F31( SOC, SOC, Op_RegF, 31, F31->as_VMReg());
 // double fp register numbers. FloatRegisterImpl in register_sparc.hpp
 // wants 0-63, so we have to convert every time we want to use fp regs
 // with the macroassembler, using reg_to_DoubleFloatRegister_object().
-// 255 is a flag meaning 'dont go here'.
+// 255 is a flag meaning "don't go here".
 // I believe we can't handle callee-save doubles D32 and up until
 // the place in the sparc stack crawler that asserts on the 255 is
 // fixed up.
@@ -462,7 +462,7 @@ extern bool can_branch_register( Node *bol, Node *cmp );
 // Macros to extract hi & lo halves from a long pair.
 // G0 is not part of any long pair, so assert on that.
-// Prevents accidently using G1 instead of G0.
+// Prevents accidentally using G1 instead of G0.
 #define LONG_HI_REG(x) (x)
 #define LONG_LO_REG(x) (x)
@@ -1431,7 +1431,7 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf,
 #ifndef _LP64
 // In the LP64 build, all registers can be moved as aligned/adjacent
-// pairs, so there's never any need to move the high bits seperately.
+// pairs, so there's never any need to move the high bits separately.
 // The 32-bit builds have to deal with the 32-bit ABI which can force
 // all sorts of silly alignment problems.
@@ -1624,7 +1624,7 @@ void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
 Register temp_reg = G3;
 assert( G5_ic_reg != temp_reg, "conflicting registers" );
-// Load klass from reciever
+// Load klass from receiver
 __ load_klass(O0, temp_reg);
 // Compare against expected klass
 __ cmp(temp_reg, G5_ic_reg);
@@ -4149,7 +4149,7 @@ operand cmpOp_commute() %{
 //----------OPERAND CLASSES----------------------------------------------------
 // Operand Classes are groups of operands that are used to simplify
-// instruction definitions by not requiring the AD writer to specify seperate
+// instruction definitions by not requiring the AD writer to specify separate
 // instructions for every form of operand when the instruction accepts
 // multiple operand types with the same basic encoding and format. The classic
 // case of this is memory operands.
@@ -6847,7 +6847,7 @@ instruct mul_hi(iRegIsafe dst, iRegIsafe src1, iRegIsafe src2 ) %{
 ins_pipe(sdiv_reg_reg);
 %}
-// Magic constant, reciprical of 10
+// Magic constant, reciprocal of 10
 instruct loadConI_x66666667(iRegIsafe dst) %{
 effect( DEF dst );
@@ -6857,7 +6857,7 @@ instruct loadConI_x66666667(iRegIsafe dst) %{
 ins_pipe(ialu_hi_lo_reg);
 %}
-// Register Shift Right Arithmatic Long by 32-63
+// Register Shift Right Arithmetic Long by 32-63
 instruct sra_31( iRegI dst, iRegI src ) %{
 effect( DEF dst, USE src );
 format %{ "SRA $src,31,$dst\t! Used in div-by-10" %}
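(Editor's note: the two div-by-10 hunks above rely on the classic strength-reduction trick: 0x66666667 is a fixed-point reciprocal of 10, and the SRA-by-31 result supplies the sign correction for negative dividends. A rough illustration of the arithmetic in plain C++, not the ADL expansion itself, which emits machine instructions:

#include <cstdint>

// Illustrative only: divide a signed 32-bit value by 10 using the
// magic reciprocal 0x66666667 == ceil(2^34 / 10).
int32_t div10(int32_t n) {
  int64_t prod = static_cast<int64_t>(n) * 0x66666667LL; // widening multiply
  int32_t q = static_cast<int32_t>(prod >> 34);          // high bits ~= n * (1/10)
  q += static_cast<uint32_t>(n) >> 31;                   // sign fix-up, the SRA-by-31 term: +1 for negative n
  return q;                                              // equals n / 10, truncated toward zero
}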
@@ -9048,7 +9048,7 @@ instruct storeL_reversed(memory dst, iRegL src) %{
 // These must follow all instruction definitions as they use the names
 // defined in the instructions definitions.
 //
-// peepmatch ( root_instr_name [preceeding_instruction]* );
+// peepmatch ( root_instr_name [preceding_instruction]* );
 //
 // peepconstraint %{
 // (instruction_number.operand_name relational_op instruction_number.operand_name

@@ -1545,7 +1545,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
 // Handle all the JSR stuff here, then exit.
 // It's much shorter and cleaner than intermingling with the
-// non-JSR normal-branch stuff occuring below.
+// non-JSR normal-branch stuff occurring below.
 if( is_jsr ) {
 // compute return address as bci in Otos_i
 __ ld_ptr(Address(Lmethod, 0, in_bytes(methodOopDesc::const_offset())), G3_scratch);
@@ -3079,7 +3079,7 @@ void TemplateTable::invokeinterface(int byte_no) {
 Label ok;
 // Check that entry is non-null. Null entries are probably a bytecode
-// problem. If the interface isn't implemented by the reciever class,
+// problem. If the interface isn't implemented by the receiver class,
 // the VM should throw IncompatibleClassChangeError. linkResolver checks
 // this too but that's only if the entry isn't already resolved, so we
 // need to check again.

@@ -501,7 +501,7 @@ void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {
 LIRItem right(x->y(), this);
 left.load_item();
-// dont load constants to save register
+// don't load constants to save register
 right.load_nonconstant();
 rlock_result(x);
 arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), NULL);

@@ -523,7 +523,7 @@ void CppInterpreterGenerator::generate_compute_interpreter_state(const Register
 #ifdef _LP64
 // Make sure stack is properly aligned and sized for the abi
 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
-__ andptr(rsp, -16); // must be 16 byte boundry (see amd64 ABI)
+__ andptr(rsp, -16); // must be 16 byte boundary (see amd64 ABI)
 #endif // _LP64
@@ -970,7 +970,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
 #ifdef _LP64
 // duplicate the alignment rsp got after setting stack_base
 __ subptr(rax, frame::arg_reg_save_area_bytes); // windows
-__ andptr(rax, -16); // must be 16 byte boundry (see amd64 ABI)
+__ andptr(rax, -16); // must be 16 byte boundary (see amd64 ABI)
 #endif // _LP64
 __ cmpptr(rax, rsp);
 __ jcc(Assembler::equal, L);
@@ -1067,7 +1067,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
 #ifdef _LP64
 __ subptr(rsp, t);
 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
-__ andptr(rsp, -16); // must be 16 byte boundry (see amd64 ABI)
+__ andptr(rsp, -16); // must be 16 byte boundary (see amd64 ABI)
 #else
 __ addptr(t, 2*wordSize); // allocate two more slots for JNIEnv and possible mirror
 __ subptr(rsp, t);
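(Editor's note: the andptr(rsp, -16) lines in the hunks above enforce the 16-byte stack alignment the amd64 ABI requires at call sites; masking with -16 clears the low four bits and rounds the stack pointer down to a 16-byte boundary. A tiny illustration, hypothetical helper in plain C++:

#include <cstdint>

// Illustrative only: AND with -16 clears the low four bits, rounding an
// address down to the nearest 16-byte boundary.
uintptr_t align_down_16(uintptr_t sp) {
  return sp & static_cast<uintptr_t>(-16);   // e.g. 0x7fff1237 -> 0x7fff1230
}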

@@ -1350,7 +1350,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
 {
 Label L;
 __ mov(rax, rsp);
-__ andptr(rax, -16); // must be 16 byte boundry (see amd64 ABI)
+__ andptr(rax, -16); // must be 16 byte boundary (see amd64 ABI)
 __ cmpptr(rax, rsp);
 __ jcc(Assembler::equal, L);
 __ stop("improperly aligned stack");

@@ -826,7 +826,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
 __ subptr(rsp, t);
 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
-__ andptr(rsp, -16); // must be 16 byte boundry (see amd64 ABI)
+__ andptr(rsp, -16); // must be 16 byte boundary (see amd64 ABI)
 // get signature handler
 {

@@ -1586,7 +1586,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
 // Handle all the JSR stuff here, then exit.
 // It's much shorter and cleaner than intermingling with the
-// non-JSR normal-branch stuff occuring below.
+// non-JSR normal-branch stuff occurring below.
 if (is_jsr) {
 // Pre-load the next target bytecode into EBX
 __ load_unsigned_byte(rbx, Address(rsi, rdx, Address::times_1, 0));

@@ -1559,7 +1559,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
 // Handle all the JSR stuff here, then exit.
 // It's much shorter and cleaner than intermingling with the non-JSR
-// normal-branch stuff occuring below.
+// normal-branch stuff occurring below.
 if (is_jsr) {
 // Pre-load the next target bytecode into rbx
 __ load_unsigned_byte(rbx, Address(r13, rdx, Address::times_1, 0));

@@ -130,7 +130,7 @@ reg_def XMM7b( SOC, SOC, Op_RegF, 7, xmm7->as_VMReg()->next());
 // allocation. Highest priority is first. A useful heuristic is to
 // give registers a low priority when they are required by machine
 // instructions, like EAX and EDX. Registers which are used as
-// pairs must fall on an even boundry (witness the FPR#L's in this list).
+// pairs must fall on an even boundary (witness the FPR#L's in this list).
 // For the Intel integer registers, the equivalent Long pairs are
 // EDX:EAX, EBX:ECX, and EDI:EBP.
 alloc_class chunk0( ECX, EBX, EBP, EDI, EAX, EDX, ESI, ESP,
@@ -5857,7 +5857,7 @@ operand cmpOp_commute() %{
 //----------OPERAND CLASSES----------------------------------------------------
 // Operand Classes are groups of operands that are used as to simplify
-// instruction definitions by not requiring the AD writer to specify seperate
+// instruction definitions by not requiring the AD writer to specify separate
 // instructions for every form of operand when the instruction accepts
 // multiple operand types with the same basic encoding and format. The classic
 // case of this is memory operands.
@@ -13220,7 +13220,7 @@ instruct safePoint_poll(eFlagsReg cr) %{
 // These must follow all instruction definitions as they use the names
 // defined in the instructions definitions.
 //
-// peepmatch ( root_instr_name [preceeding_instruction]* );
+// peepmatch ( root_instr_name [preceding_instruction]* );
 //
 // peepconstraint %{
 // (instruction_number.operand_name relational_op instruction_number.operand_name

@@ -5483,7 +5483,7 @@ operand cmpOpUCF2() %{
 //----------OPERAND CLASSES----------------------------------------------------
 // Operand Classes are groups of operands that are used as to simplify
-// instruction definitions by not requiring the AD writer to specify seperate
+// instruction definitions by not requiring the AD writer to specify separate
 // instructions for every form of operand when the instruction accepts
 // multiple operand types with the same basic encoding and format. The classic
 // case of this is memory operands.
@@ -8363,7 +8363,7 @@ instruct divModL_rReg_divmod(rax_RegL rax, rdx_RegL rdx, no_rax_rdx_RegL div,
 //----------- DivL-By-Constant-Expansions--------------------------------------
 // DivI cases are handled by the compiler
-// Magic constant, reciprical of 10
+// Magic constant, reciprocal of 10
 instruct loadConL_0x6666666666666667(rRegL dst)
 %{
 effect(DEF dst);
@@ -12082,7 +12082,7 @@ instruct RethrowException()
 // These must follow all instruction definitions as they use the names
 // defined in the instructions definitions.
 //
-// peepmatch ( root_instr_name [precerding_instruction]* );
+// peepmatch ( root_instr_name [preceding_instruction]* );
 //
 // peepconstraint %{
 // (instruction_number.operand_name relational_op instruction_number.operand_name

@@ -419,7 +419,7 @@ main(int argc, char ** argv)
 goto leave;
 }
 mainClass = LoadClass(env, classname);
-if(mainClass == NULL) { /* exception occured */
+if(mainClass == NULL) { /* exception occurred */
 ReportExceptionDescription(env);
 message = "Could not find the main class. Program will exit.";
 goto leave;
@@ -441,7 +441,7 @@ main(int argc, char ** argv)
 goto leave;
 }
 mainClass = LoadClass(env, classname);
-if(mainClass == NULL) { /* exception occured */
+if(mainClass == NULL) { /* exception occurred */
 ReportExceptionDescription(env);
 message = "Could not find the main class. Program will exit.";
 goto leave;

@@ -47,7 +47,7 @@
 #ifdef JAVA_ARGS
 /*
 * ApplicationHome is prepended to each of these entries; the resulting
-* strings are concatenated (seperated by PATH_SEPARATOR) and used as the
+* strings are concatenated (separated by PATH_SEPARATOR) and used as the
 * value of -cp option to the launcher.
 */
 #ifndef APP_CLASSPATH

@@ -192,7 +192,7 @@ static pid_t filename_to_pid(const char* filename) {
 // check if the given path is considered a secure directory for
 // the backing store files. Returns true if the directory exists
 // and is considered a secure location. Returns false if the path
-// is a symbolic link or if an error occured.
+// is a symbolic link or if an error occurred.
 //
 static bool is_directory_secure(const char* path) {
 struct stat statbuf;
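(Editor's note: the is_directory_secure() check described above boils down to an lstat() on the path plus a few mode-bit tests. A minimal POSIX sketch of such a check, hypothetical and not the HotSpot code itself:

#include <sys/stat.h>

// Hypothetical sketch: a directory is treated as secure only if it exists,
// is a real directory rather than a symlink, and is not writable by group
// or others.
static bool is_directory_secure_sketch(const char* path) {
  struct stat statbuf;
  if (lstat(path, &statbuf) == -1) return false;                    // error or missing path
  if (S_ISLNK(statbuf.st_mode) || !S_ISDIR(statbuf.st_mode)) return false;
  return (statbuf.st_mode & (S_IWGRP | S_IWOTH)) == 0;              // no group/other write access
}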

@@ -419,7 +419,7 @@ main(int argc, char ** argv)
 goto leave;
 }
 mainClass = LoadClass(env, classname);
-if(mainClass == NULL) { /* exception occured */
+if(mainClass == NULL) { /* exception occurred */
 ReportExceptionDescription(env);
 message = "Could not find the main class. Program will exit.";
 goto leave;
@@ -441,7 +441,7 @@ main(int argc, char ** argv)
 goto leave;
 }
 mainClass = LoadClass(env, classname);
-if(mainClass == NULL) { /* exception occured */
+if(mainClass == NULL) { /* exception occurred */
 ReportExceptionDescription(env);
 message = "Could not find the main class. Program will exit.";
 goto leave;

@@ -47,7 +47,7 @@
 #ifdef JAVA_ARGS
 /*
 * ApplicationHome is prepended to each of these entries; the resulting
-* strings are concatenated (seperated by PATH_SEPARATOR) and used as the
+* strings are concatenated (separated by PATH_SEPARATOR) and used as the
 * value of -cp option to the launcher.
 */
 #ifndef APP_CLASSPATH

@@ -194,7 +194,7 @@ static pid_t filename_to_pid(const char* filename) {
 // check if the given path is considered a secure directory for
 // the backing store files. Returns true if the directory exists
 // and is considered a secure location. Returns false if the path
-// is a symbolic link or if an error occured.
+// is a symbolic link or if an error occurred.
 //
 static bool is_directory_secure(const char* path) {
 struct stat statbuf;

@@ -195,7 +195,7 @@ static int filename_to_pid(const char* filename) {
 // check if the given path is considered a secure directory for
 // the backing store files. Returns true if the directory exists
 // and is considered a secure location. Returns false if the path
-// is a symbolic link or if an error occured.
+// is a symbolic link or if an error occurred.
 //
 static bool is_directory_secure(const char* path) {
@@ -994,7 +994,7 @@ static bool add_allow_aces(PSECURITY_DESCRIPTOR pSD,
 return false;
 }
-// if running on windows 2000 or later, set the automatic inheritence
+// if running on windows 2000 or later, set the automatic inheritance
 // control flags.
 SetSecurityDescriptorControlFnPtr _SetSecurityDescriptorControl;
 _SetSecurityDescriptorControl = (SetSecurityDescriptorControlFnPtr)
@@ -1002,7 +1002,7 @@ static bool add_allow_aces(PSECURITY_DESCRIPTOR pSD,
 "SetSecurityDescriptorControl");
 if (_SetSecurityDescriptorControl != NULL) {
-// We do not want to further propogate inherited DACLs, so making them
+// We do not want to further propagate inherited DACLs, so making them
 // protected prevents that.
 if (!_SetSecurityDescriptorControl(pSD, SE_DACL_PROTECTED,
 SE_DACL_PROTECTED)) {

@@ -532,7 +532,7 @@ int JVM_handle_solaris_signal(int sig, siginfo_t* info, void* ucVoid, int abort_
 if (oldAct.sa_sigaction != signalHandler) {
 void* sighand = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
 : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
-warning("Unexpected Signal %d occured under user-defined signal handler " INTPTR_FORMAT, sig, (intptr_t)sighand);
+warning("Unexpected Signal %d occurred under user-defined signal handler " INTPTR_FORMAT, sig, (intptr_t)sighand);
 }
 }

@@ -694,7 +694,7 @@ int JVM_handle_solaris_signal(int sig, siginfo_t* info, void* ucVoid, int abort_
 if (oldAct.sa_sigaction != signalHandler) {
 void* sighand = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
 : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
-warning("Unexpected Signal %d occured under user-defined signal handler %#lx", sig, (long)sighand);
+warning("Unexpected Signal %d occurred under user-defined signal handler %#lx", sig, (long)sighand);
 }
 }

@@ -365,7 +365,7 @@ public class Database {
 // HACK ALERT. The compilation of ad_<arch> files is very slow.
 // We want to start compiling them as early as possible. The compilation
-// order on unix is dependant on the order we emit files here.
+// order on unix is dependent on the order we emit files here.
 // By sorting the output before emitting it, we expect
 // that ad_<arch> will be compiled early.
 boolean shouldSortObjFiles = true;

@@ -88,7 +88,7 @@ reg_class X_REG(AX, BX); // form a matcher register class of X_REG
 // these are used for constraints, etc.
 alloc_class class1(AX, BX); // form an allocation class of registers
-// used by the register allocator for seperate
+// used by the register allocator for separate
 // allocation of target register classes
 3. Pipeline Syntax for Scheduling
@@ -150,7 +150,7 @@ D. Delimiters
 b. %} (block terminator)
 c. EOF (file terminator)
-4. Each statement must start on a seperate line
+4. Each statement must start on a separate line
 5. Identifiers cannot contain: (){}%;,"/\

@@ -4555,7 +4555,7 @@ void ADLParser::parse_err(int flag, const char *fmt, ...) {
 //---------------------------ensure_start_of_line------------------------------
 // A preprocessor directive has been encountered. Be sure it has fallen at
-// the begining of a line, or else report an error.
+// the beginning of a line, or else report an error.
 void ADLParser::ensure_start_of_line(void) {
 if (_curchar == '\n') { next_line(); return; }
 assert( _ptr >= _curline && _ptr < _curline+strlen(_curline),

@@ -275,7 +275,7 @@ void Dict::print(PrintKeyOrValue print_key, PrintKeyOrValue print_value) {
 // Convert string to hash key. This algorithm implements a universal hash
 // function with the multipliers frozen (ok, so it's not universal). The
 // multipliers (and allowable characters) are all odd, so the resultant sum
-// is odd - guarenteed not divisible by any power of two, so the hash tables
+// is odd - guaranteed not divisible by any power of two, so the hash tables
 // can be any power of two with good results. Also, I choose multipliers
 // that have only 2 bits set (the low is always set to be odd) so
 // multiplication requires only shifts and adds. Characters are required to
@@ -296,7 +296,7 @@ int hashstr(const void *t) {
 }
 //------------------------------hashptr--------------------------------------
-// Slimey cheap hash function; no guarenteed performance. Better than the
+// Slimey cheap hash function; no guaranteed performance. Better than the
 // default for pointers, especially on MS-DOS machines.
 int hashptr(const void *key) {
 #ifdef __TURBOC__
@@ -306,7 +306,7 @@ int hashptr(const void *key) {
 #endif
 }
-// Slimey cheap hash function; no guarenteed performance.
+// Slimey cheap hash function; no guaranteed performance.
 int hashkey(const void *key) {
 return (int)((intptr_t)key);
 }
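(Editor's note: the hashstr() comment above describes the scheme: every character is forced odd, and each multiplier has only two bits set, so the multiply reduces to a shift and an add. A toy sketch of that idea, hypothetical and not the ADLC function:

// Toy string hash in the spirit of the comment above. Names are hypothetical.
unsigned toy_hashstr(const char* s) {
  unsigned sum = 0;
  unsigned k = 3;                                          // per-position shift amount
  for (; *s != '\0'; ++s) {
    unsigned c = static_cast<unsigned char>(*s) * 2u + 1u; // force the character odd
    sum += c + (c << k);                                   // c * (2^k + 1) via shift and add
    k = (k % 7) + 1;                                       // vary the multiplier
  }
  return sum;                          // index a power-of-two table with sum & (size - 1)
}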

@@ -89,10 +89,10 @@ class Dict { // Dictionary structure
 // Hashing functions
 int hashstr(const void *s); // Nice string hash
-// Slimey cheap hash function; no guarenteed performance. Better than the
+// Slimey cheap hash function; no guaranteed performance. Better than the
 // default for pointers, especially on MS-DOS machines.
 int hashptr(const void *key);
-// Slimey cheap hash function; no guarenteed performance.
+// Slimey cheap hash function; no guaranteed performance.
 int hashkey(const void *key);
 // Key comparators

@@ -50,10 +50,10 @@ FileBuff::FileBuff( BufferedFile *fptr, ArchDesc& archDesc) : _fp(fptr), _AD(arc
 file_error(SEMERR, 0, "Buffer allocation failed\n");
 exit(1); // Exit on allocation failure
 }
-*_bigbuf = '\n'; // Lead with a sentinal newline
-_buf = _bigbuf+1; // Skip sentinal
+*_bigbuf = '\n'; // Lead with a sentinel newline
+_buf = _bigbuf+1; // Skip sentinel
 _bufmax = _buf; // Buffer is empty
-_bufeol = _bigbuf; // _bufeol points at sentinal
+_bufeol = _bigbuf; // _bufeol points at sentinel
 _filepos = -1; // filepos is in sync with _bufeol
 _bufoff = _offset = 0L; // Offset at file start
@@ -62,8 +62,8 @@ FileBuff::FileBuff( BufferedFile *fptr, ArchDesc& archDesc) : _fp(fptr), _AD(arc
 file_error(SEMERR, 0, "File read error, no input read\n");
 exit(1); // Exit on read error
 }
-*_bufmax = '\n'; // End with a sentinal new-line
-*(_bufmax+1) = '\0'; // Then end with a sentinal NULL
+*_bufmax = '\n'; // End with a sentinel new-line
+*(_bufmax+1) = '\0'; // Then end with a sentinel NULL
 }
 //------------------------------~FileBuff--------------------------------------
@@ -81,7 +81,7 @@ char *FileBuff::get_line(void) {
 _linenum++;
 retval = ++_bufeol; // return character following end of previous line
-if (*retval == '\0') return NULL; // Check for EOF sentinal
+if (*retval == '\0') return NULL; // Check for EOF sentinel
 // Search for newline character which must end each line
 for(_filepos++; *_bufeol != '\n'; _bufeol++)
 _filepos++; // keep filepos in sync with _bufeol
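(Editor's note: the sentinel characters installed above are what let get_line() find the end of a line without bounds checking: the trailing '\n' guarantees the scan loop stops, and the trailing '\0' signals end-of-file. A rough sketch of the pattern with a hypothetical helper, not the FileBuff code:

// Buffer layout assumed: '\n' <file bytes> '\n' '\0'
// Returns the next line and advances 'cursor' to that line's terminating '\n'.
static char* next_line_sketch(char*& cursor) {
  char* line = ++cursor;                 // first character after the previous line's '\n'
  if (*line == '\0') return nullptr;     // hit the end sentinel: no more lines
  while (*cursor != '\n') {              // the trailing '\n' sentinel guarantees
    ++cursor;                            // this loop terminates inside the buffer
  }
  return line;
}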

@@ -37,7 +37,7 @@ class ArchDesc;
 //------------------------------FileBuff--------------------------------------
 // This class defines a nicely behaved buffer of text. Entire file of text
-// is read into buffer at creation, with sentinals at start and end.
+// is read into buffer at creation, with sentinels at start and end.
 class FileBuff {
 friend class FileBuffRegion;
 private:
@@ -46,8 +46,8 @@ class FileBuff {
 long _bufoff; // Start of buffer file offset
 char *_buf; // The buffer itself.
-char *_bigbuf; // The buffer plus sentinals; actual heap area
-char *_bufmax; // A pointer to the buffer end sentinal
+char *_bigbuf; // The buffer plus sentinels; actual heap area
+char *_bufmax; // A pointer to the buffer end sentinel
 char *_bufeol; // A pointer to the last complete line end
 int _err; // Error flag for file seek/read operations

@@ -1281,7 +1281,7 @@ void InstructForm::set_unique_opnds() {
 _num_uniq = num_uniq;
 }
-// Generate index values needed for determing the operand position
+// Generate index values needed for determining the operand position
 void InstructForm::index_temps(FILE *fp, FormDict &globals, const char *prefix, const char *receiver) {
 uint idx = 0; // position of operand in match rule
 int cur_num_opnds = num_opnds();
@@ -2197,7 +2197,7 @@ int OperandForm::operand_position(const char *name, int usedef) {
 // Return zero-based position in component list, only counting constants;
 // Return -1 if not in list.
 int OperandForm::constant_position(FormDict &globals, const Component *last) {
-// Iterate through components and count constants preceeding 'constant'
+// Iterate through components and count constants preceding 'constant'
 int position = 0;
 Component *comp;
 _components.reset();
@@ -2235,7 +2235,7 @@ int OperandForm::constant_position(FormDict &globals, const char *name) {
 // Return zero-based position in component list, only counting constants;
 // Return -1 if not in list.
 int OperandForm::register_position(FormDict &globals, const char *reg_name) {
-// Iterate through components and count registers preceeding 'last'
+// Iterate through components and count registers preceding 'last'
 uint position = 0;
 Component *comp;
 _components.reset();

@@ -277,7 +277,7 @@ public:
 //
 // Generate the format call for the replacement variable
 void rep_var_format(FILE *fp, const char *rep_var);
-// Generate index values needed for determing the operand position
+// Generate index values needed for determining the operand position
 void index_temps (FILE *fp, FormDict &globals, const char *prefix = "", const char *receiver = "");
 // ---------------------------
@@ -344,7 +344,7 @@ public:
 // --------------------------- Code Block
 // Add code
-void add_code(const char *string_preceeding_replacement_var);
+void add_code(const char *string_preceding_replacement_var);
 // Add a replacement variable or one of its subfields
 // Subfields are stored with a leading '$'
 void add_rep_var(char *replacement_var);

@@ -574,7 +574,7 @@ void gen_inst_format(FILE *fp, FormDict &globals, InstructForm &inst, bool for_c
 // Generate the user-defined portion of the format
 if( inst._format ) {
 // If there are replacement variables,
-// Generate index values needed for determing the operand position
+// Generate index values needed for determining the operand position
 if( inst._format->_rep_vars.count() )
 inst.index_temps(fp, globals);

@@ -31,7 +31,7 @@
 // The AbstractAssembler is generating code into a CodeBuffer. To make code generation faster,
 // the assembler keeps a copy of the code buffers boundaries & modifies them when
 // emitting bytes rather than using the code buffers accessor functions all the time.
-// The code buffer is updated via set_code_end(...) after emiting a whole instruction.
+// The code buffer is updated via set_code_end(...) after emitting a whole instruction.
 AbstractAssembler::AbstractAssembler(CodeBuffer* code) {
 if (code == NULL) return;

@@ -22,7 +22,7 @@
 *
 */
-// This file contains platform-independant assembler declarations.
+// This file contains platform-independent assembler declarations.
 class CodeBuffer;
 class MacroAssembler;

@@ -541,7 +541,7 @@ void ciTypeFlow::StateVector::do_aaload(ciBytecodeStream* str) {
 // is report a value that will meet correctly with any downstream
 // reference types on paths that will truly be executed. This null type
 // meets with any reference type to yield that same reference type.
-// (The compiler will generate an unconditonal exception here.)
+// (The compiler will generate an unconditional exception here.)
 push(null_type());
 return;
 }

@@ -156,7 +156,7 @@ symbolOop SymbolTable::basic_add(int index, u1 *name, int len,
 symbolOop test = lookup(index, (char*)name, len, hashValue);
 if (test != NULL) {
-// A race occured and another thread introduced the symbol, this one
+// A race occurred and another thread introduced the symbol, this one
 // will be dropped and collected.
 return test;
 }
@@ -193,7 +193,7 @@ bool SymbolTable::basic_add(constantPoolHandle cp, int names_count,
 int index = hash_to_index(hashValues[i]);
 symbolOop test = lookup(index, names[i], lengths[i], hashValues[i]);
 if (test != NULL) {
-// A race occured and another thread introduced the symbol, this one
+// A race occurred and another thread introduced the symbol, this one
 // will be dropped and collected. Use test instead.
 cp->symbol_at_put(cp_indices[i], test);
 } else {

@@ -380,7 +380,7 @@ address nmethod::handler_for_exception_and_pc(Handle exception, address pc) {
 void nmethod::add_handler_for_exception_and_pc(Handle exception, address pc, address handler) {
 // There are potential race conditions during exception cache updates, so we
 // must own the ExceptionCache_lock before doing ANY modifications. Because
-// we dont lock during reads, it is possible to have several threads attempt
+// we don't lock during reads, it is possible to have several threads attempt
 // to update the cache with the same data. We need to check for already inserted
 // copies of the current data before adding it.

@@ -167,7 +167,7 @@ class nmethod : public CodeBlob {
 nmFlags flags; // various flags to keep track of nmethod state
 bool _markedForDeoptimization; // Used for stack deoptimization
 enum { alive = 0,
-not_entrant = 1, // uncommon trap has happend but activations may still exist
+not_entrant = 1, // uncommon trap has happened but activations may still exist
 zombie = 2,
 unloaded = 3 };

@@ -393,7 +393,7 @@ class CMSAdaptiveSizePolicy : public AdaptiveSizePolicy {
 // Restarts the concurrent phases timer.
 void concurrent_phases_resume();
-// Time begining and end of the marking phase for
+// Time beginning and end of the marking phase for
 // a synchronous MS collection. A MS collection
 // that finishes in the foreground can have started
 // in the background. These methods capture the

@@ -69,7 +69,7 @@ class CMSGCAdaptivePolicyCounters : public GCAdaptivePolicyCounters {
 // end of the sweep of the tenured generation.
 PerfVariable* _avg_cms_free_counter;
 // Average of the free space in the tenured generation at the
-// start of the sweep of the tenured genertion.
+// start of the sweep of the tenured generation.
 PerfVariable* _avg_cms_free_at_sweep_counter;
 // Average of the free space in the tenured generation at the
 // after any resizing of the tenured generation at the end

@@ -4178,7 +4178,7 @@ bool CMSCollector::do_marking_mt(bool asynch) {
 // and is deferred for now; see CR# TBF. 07252005YSR. XXX
 assert(!CMSAbortSemantics || tsk.aborted(), "Inconsistency");
 // If _restart_addr is non-NULL, a marking stack overflow
-// occured; we need to do a fresh marking iteration from the
+// occurred; we need to do a fresh marking iteration from the
 // indicated restart address.
 if (_foregroundGCIsActive && asynch) {
 // We may be running into repeated stack overflows, having
@@ -4221,7 +4221,7 @@ bool CMSCollector::do_marking_st(bool asynch) {
 // should be incremental with periodic yields.
 _markBitMap.iterate(&markFromRootsClosure);
 // If _restart_addr is non-NULL, a marking stack overflow
-// occured; we need to do a fresh iteration from the
+// occurred; we need to do a fresh iteration from the
 // indicated restart address.
 while (_restart_addr != NULL) {
 if (_foregroundGCIsActive && asynch) {

@@ -2513,7 +2513,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(HeapRegion* popular_region) {
 }
 save_marks();
-// We must do this before any possible evacuation that should propogate
+// We must do this before any possible evacuation that should propagate
 // marks, including evacuation of popular objects in a popular pause.
 if (mark_in_progress()) {
 double start_time_sec = os::elapsedTime();

@@ -78,7 +78,7 @@ class CheckForUnmarkedObjects : public ObjectClosure {
 }
 // Card marks are not precise. The current system can leave us with
-// a mismash of precise marks and begining of object marks. This means
+// a mismash of precise marks and beginning of object marks. This means
 // we test for missing precise marks first. If any are found, we don't
 // fail unless the object head is also unmarked.
 virtual void do_object(oop obj) {
@@ -258,7 +258,7 @@ void CardTableExtension::scavenge_contents_parallel(ObjectStartArray* start_arra
 if (!start_array->object_starts_in_range(slice_start, slice_end)) {
 continue;
 }
-// Update our begining addr
+// Update our beginning addr
 HeapWord* first_object = start_array->object_start(slice_start);
 debug_only(oop* first_object_within_slice = (oop*) first_object;)
 if (first_object < slice_start) {

@@ -127,7 +127,7 @@ class ObjectStartArray : public CHeapObj {
 // Optimized for finding the first object that crosses into
 // a given block. The blocks contain the offset of the last
 // object in that block. Scroll backwards by one, and the first
-// object hit should be at the begining of the block
+// object hit should be at the beginning of the block
 HeapWord* object_start(HeapWord* addr) const {
 assert(_covered_region.contains(addr), "Must be in covered region");
 jbyte* block = block_for_addr(addr);

@@ -26,7 +26,7 @@
 // PrefetchQueue is a FIFO queue of variable length (currently 8).
 //
 // We need to examine the performance penalty of variable lengths.
-// We may also want to split this into cpu dependant bits.
+// We may also want to split this into cpu dependent bits.
 //
 const int PREFETCH_QUEUE_SIZE = 8;

@@ -74,7 +74,7 @@ void MutableNUMASpace::ensure_parsability() {
 for (int i = 0; i < lgrp_spaces()->length(); i++) {
 LGRPSpace *ls = lgrp_spaces()->at(i);
 MutableSpace *s = ls->space();
-if (s->top() < top()) { // For all spaces preceeding the one containing top()
+if (s->top() < top()) { // For all spaces preceding the one containing top()
 if (s->free_in_words() > 0) {
 size_t area_touched_words = pointer_delta(s->end(), s->top());
 CollectedHeap::fill_with_object(s->top(), area_touched_words);

@@ -22,7 +22,7 @@
 *
 */
-// This file contains the platform-independant parts
+// This file contains the platform-independent parts
 // of the abstract interpreter and the abstract interpreter generator.
 // Organization of the interpreter(s). There exists two different interpreters in hotpot

@@ -2642,7 +2642,7 @@ handle_return:
 // two interpreted frames). We need to save the current arguments in C heap so that
 // the deoptimized frame when it restarts can copy the arguments to its expression
 // stack and re-execute the call. We also have to notify deoptimization that this
-// has occured and to pick the preerved args copy them to the deoptimized frame's
+// has occurred and to pick the preserved args copy them to the deoptimized frame's
 // java expression stack. Yuck.
 //
 THREAD->popframe_preserve_args(in_ByteSize(METHOD->size_of_parameters() * wordSize),

@@ -22,7 +22,7 @@
 *
 */
-// This file holds platform-independant bodies of inline functions for the C++ based interpreter
+// This file holds platform-independent bodies of inline functions for the C++ based interpreter
 #ifdef CC_INTERP

@@ -24,7 +24,7 @@
 #ifdef CC_INTERP
-// This file contains the platform-independant parts
+// This file contains the platform-independent parts
 // of the c++ interpreter
 class CppInterpreter: public AbstractInterpreter {

@@ -22,7 +22,7 @@
 *
 */
-// This file contains the platform-independant parts
+// This file contains the platform-independent parts
 // of the template interpreter generator.
 #ifdef CC_INTERP

@@ -22,7 +22,7 @@
 *
 */
-// This file contains the platform-independant parts
+// This file contains the platform-independent parts
 // of the interpreter and the interpreter generator.
 //------------------------------------------------------------------------------------------------------------------------

@@ -22,7 +22,7 @@
 *
 */
-// This file contains the platform-independant parts
+// This file contains the platform-independent parts
 // of the interpreter generator.

@@ -22,7 +22,7 @@
 *
 */
-// This file contains the platform-independant parts
+// This file contains the platform-independent parts
 // of the template interpreter and the template interpreter generator.
 #ifndef CC_INTERP

@@ -22,7 +22,7 @@
 *
 */
-// This file contains the platform-independant parts
+// This file contains the platform-independent parts
 // of the template interpreter generator.
 #ifndef CC_INTERP

@@ -306,7 +306,7 @@ void Dict::print() {
 // Convert string to hash key. This algorithm implements a universal hash
 // function with the multipliers frozen (ok, so it's not universal). The
 // multipliers (and allowable characters) are all odd, so the resultant sum
-// is odd - guarenteed not divisible by any power of two, so the hash tables
+// is odd - guaranteed not divisible by any power of two, so the hash tables
 // can be any power of two with good results. Also, I choose multipliers
 // that have only 2 bits set (the low is always set to be odd) so
 // multiplication requires only shifts and adds. Characters are required to
@@ -326,7 +326,7 @@ int hashstr(const void *t) {
 }
 //------------------------------hashptr--------------------------------------
-// Slimey cheap hash function; no guarenteed performance. Better than the
+// Slimey cheap hash function; no guaranteed performance. Better than the
 // default for pointers, especially on MS-DOS machines.
 int hashptr(const void *key) {
 #ifdef __TURBOC__
@@ -336,7 +336,7 @@ int hashptr(const void *key) {
 #endif
 }
-// Slimey cheap hash function; no guarenteed performance.
+// Slimey cheap hash function; no guaranteed performance.
 int hashkey(const void *key) {
 return (intptr_t)key;
 }

@@ -86,10 +86,10 @@ class Dict : public ResourceObj { // Dictionary structure
 // Hashing functions
 int hashstr(const void *s); // Nice string hash
-// Slimey cheap hash function; no guarenteed performance. Better than the
+// Slimey cheap hash function; no guaranteed performance. Better than the
 // default for pointers, especially on MS-DOS machines.
 int hashptr(const void *key);
-// Slimey cheap hash function; no guarenteed performance.
+// Slimey cheap hash function; no guaranteed performance.
 int hashkey(const void *key);
 // Key comparators

@@ -35,14 +35,14 @@
 extern address JVM_FunctionAtStart();
 extern address JVM_FunctionAtEnd();
-// Complain and stop. All error conditions occuring during the writing of
+// Complain and stop. All error conditions occurring during the writing of
 // an archive file should stop the process. Unrecoverable errors during
 // the reading of the archive file should stop the process.
 static void fail(const char *msg, va_list ap) {
 // This occurs very early during initialization: tty is not initialized.
 jio_fprintf(defaultStream::error_stream(),
-"An error has occured while processing the"
+"An error has occurred while processing the"
 " shared archive file.\n");
 jio_vfprintf(defaultStream::error_stream(), msg, ap);
 jio_fprintf(defaultStream::error_stream(), "\n");

@@ -36,7 +36,7 @@ class PermGen : public CHeapObj {
 friend class VMStructs;
 protected:
 size_t _capacity_expansion_limit; // maximum expansion allowed without a
-// full gc occuring
+// full gc occurring
 HeapWord* mem_allocate_in_gen(size_t size, Generation* gen);

@@ -2003,7 +2003,7 @@ void GenerateOopMap::print_time() {
 // ============ Main Entry Point ===========
 //
 GenerateOopMap::GenerateOopMap(methodHandle method) {
-// We have to initialize all variables here, that can be queried direcly
+// We have to initialize all variables here, that can be queried directly
 _method = method;
 _max_locals=0;
 _init_vars = NULL;

@@ -292,7 +292,7 @@ class GenerateOopMap VALUE_OBJ_CLASS_SPEC {
 int _max_stack; // Cached value of max. stack depth
 int _max_monitors; // Cached value of max. monitor stack depth
 int _has_exceptions; // True, if exceptions exist for method
-bool _got_error; // True, if an error occured during interpretation.
+bool _got_error; // True, if an error occurred during interpretation.
 Handle _exception; // Exception if got_error is true.
 bool _did_rewriting; // was bytecodes rewritten
 bool _did_relocation; // was relocation neccessary
@@ -422,7 +422,7 @@ class GenerateOopMap VALUE_OBJ_CLASS_SPEC {
 void add_to_ref_init_set (int localNo);
 // Conflicts rewrite logic
-bool _conflict; // True, if a conflict occured during interpretation
+bool _conflict; // True, if a conflict occurred during interpretation
 int _nof_refval_conflicts; // No. of conflicts that require rewrites
 int * _new_var_map;


@ -1917,7 +1917,7 @@ methodOop instanceKlass::method_at_itable(klassOop holder, int index, TRAPS) {
/ itableOffsetEntry::size(); / itableOffsetEntry::size();
for (int cnt = 0 ; ; cnt ++, ioe ++) { for (int cnt = 0 ; ; cnt ++, ioe ++) {
// If the interface isn't implemented by the reciever class, // If the interface isn't implemented by the receiver class,
// the VM should throw IncompatibleClassChangeError. // the VM should throw IncompatibleClassChangeError.
if (cnt >= nof_interfaces) { if (cnt >= nof_interfaces) {
THROW_OOP_0(vmSymbols::java_lang_IncompatibleClassChangeError()); THROW_OOP_0(vmSymbols::java_lang_IncompatibleClassChangeError());


@ -71,7 +71,7 @@ Klass *Klass::up_cast_abstract() {
return r; // Return the 1 concrete class return r; // Return the 1 concrete class
} }
// Find LCA in class heirarchy // Find LCA in class hierarchy
Klass *Klass::LCA( Klass *k2 ) { Klass *Klass::LCA( Klass *k2 ) {
Klass *k1 = this; Klass *k1 = this;
while( 1 ) { while( 1 ) {


@ -471,7 +471,7 @@ class Klass : public Klass_vtbl {
} }
bool search_secondary_supers(klassOop k) const; bool search_secondary_supers(klassOop k) const;
// Find LCA in class heirarchy // Find LCA in class hierarchy
Klass *LCA( Klass *k ); Klass *LCA( Klass *k );
// Check whether reflection/jni/jvm code is allowed to instantiate this class; // Check whether reflection/jni/jvm code is allowed to instantiate this class;


@ -296,7 +296,7 @@ class methodOopDesc : public oopDesc {
void set_compiled_invocation_count(int count) { _compiled_invocation_count = count; } void set_compiled_invocation_count(int count) { _compiled_invocation_count = count; }
#endif // not PRODUCT #endif // not PRODUCT
// Clear (non-shared space) pointers which could not be relevent // Clear (non-shared space) pointers which could not be relevant
// if this (shared) method were mapped into another JVM. // if this (shared) method were mapped into another JVM.
void remove_unshareable_info(); void remove_unshareable_info();


@ -181,7 +181,7 @@ int Block::is_Empty() const {
} }
//------------------------------has_uncommon_code------------------------------ //------------------------------has_uncommon_code------------------------------
// Return true if the block's code implies that it is not likely to be // Return true if the block's code implies that it is likely to be
// executed infrequently. Check to see if the block ends in a Halt or // executed infrequently. Check to see if the block ends in a Halt or
// a low probability call. // a low probability call.
bool Block::has_uncommon_code() const { bool Block::has_uncommon_code() const {
@ -1311,7 +1311,7 @@ void PhaseBlockLayout::merge_traces(bool fall_thru_only)
} }
} else if (e->state() == CFGEdge::open) { } else if (e->state() == CFGEdge::open) {
// Append traces, even without a fall-thru connection. // Append traces, even without a fall-thru connection.
// But leave root entry at the begining of the block list. // But leave root entry at the beginning of the block list.
if (targ_trace != trace(_cfg._broot)) { if (targ_trace != trace(_cfg._broot)) {
e->set_state(CFGEdge::connected); e->set_state(CFGEdge::connected);
src_trace->append(targ_trace); src_trace->append(targ_trace);
@ -1434,7 +1434,7 @@ bool Trace::backedge(CFGEdge *e) {
} }
// Backbranch to the top of a trace // Backbranch to the top of a trace
// Scroll foward through the trace from the targ_block. If we find // Scroll forward through the trace from the targ_block. If we find
// a loop head before another loop top, use the the loop head alignment. // a loop head before another loop top, use the the loop head alignment.
for (Block *b = targ_block; b != NULL; b = next(b)) { for (Block *b = targ_block; b != NULL; b = next(b)) {
if (b->has_loop_alignment()) { if (b->has_loop_alignment()) {


@ -609,7 +609,7 @@ class Trace : public ResourceObj {
Block * next(Block *b) const { return _next_list[b->_pre_order]; } Block * next(Block *b) const { return _next_list[b->_pre_order]; }
void set_next(Block *b, Block *n) const { _next_list[b->_pre_order] = n; } void set_next(Block *b, Block *n) const { _next_list[b->_pre_order] = n; }
// Return the block that preceeds "b" in the trace. // Return the block that precedes "b" in the trace.
Block * prev(Block *b) const { return _prev_list[b->_pre_order]; } Block * prev(Block *b) const { return _prev_list[b->_pre_order]; }
void set_prev(Block *b, Block *p) const { _prev_list[b->_pre_order] = p; } void set_prev(Block *b, Block *p) const { _prev_list[b->_pre_order] = p; }


@ -55,7 +55,7 @@
// breadth-first approach but it was worse (showed O(n^2) in the // breadth-first approach but it was worse (showed O(n^2) in the
// pick-next-block code). // pick-next-block code).
// //
// The relevent data is kept in a struct of arrays (it could just as well be // The relevant data is kept in a struct of arrays (it could just as well be
// an array of structs, but the struct-of-arrays is generally a little more // an array of structs, but the struct-of-arrays is generally a little more
// efficient). The arrays are indexed by register number (including // efficient). The arrays are indexed by register number (including
// stack-slots as registers) and so is bounded by 200 to 300 elements in // stack-slots as registers) and so is bounded by 200 to 300 elements in
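
The comment above contrasts a struct-of-arrays layout with an array-of-structs for the per-register data used during liveness computation. A small sketch of the two shapes (field names invented; only the layout matters):

    enum { MAX_REGS = 300 };           // comment above: roughly 200 to 300 elements

    struct RegInfoAoS {                // array of structs: one record per register
      int   def_block;
      int   last_use;
      float freq;
    };
    // RegInfoAoS info[MAX_REGS];

    struct RegInfoSoA {                // struct of arrays: one dense array per
      int   def_block[MAX_REGS];       // field, indexed by register number
      int   last_use[MAX_REGS];
      float freq[MAX_REGS];
    };

    // Walking one field across all registers (e.g. every freq) touches a single
    // contiguous array, which is the efficiency the comment alludes to.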


@ -1350,7 +1350,7 @@ static void split_once(PhaseIterGVN *igvn, Node *phi, Node *val, Node *n, Node *
} }
// Register the new node but do not transform it. Cannot transform until the // Register the new node but do not transform it. Cannot transform until the
// entire Region/Phi conglerate has been hacked as a single huge transform. // entire Region/Phi conglomerate has been hacked as a single huge transform.
igvn->register_new_node_with_optimizer( newn ); igvn->register_new_node_with_optimizer( newn );
// Now I can point to the new node. // Now I can point to the new node.
n->add_req(newn); n->add_req(newn);
@ -1381,7 +1381,7 @@ static Node* split_flow_path(PhaseGVN *phase, PhiNode *phi) {
Node *val = phi->in(i); // Constant to split for Node *val = phi->in(i); // Constant to split for
uint hit = 0; // Number of times it occurs uint hit = 0; // Number of times it occurs
for( ; i < phi->req(); i++ ){ // Count occurances of constant for( ; i < phi->req(); i++ ){ // Count occurrences of constant
Node *n = phi->in(i); Node *n = phi->in(i);
if( !n ) return NULL; if( !n ) return NULL;
if( phase->type(n) == Type::TOP ) return NULL; if( phase->type(n) == Type::TOP ) return NULL;
@ -1423,7 +1423,7 @@ static Node* split_flow_path(PhaseGVN *phase, PhiNode *phi) {
//============================================================================= //=============================================================================
//------------------------------simple_data_loop_check------------------------- //------------------------------simple_data_loop_check-------------------------
// Try to determing if the phi node in a simple safe/unsafe data loop. // Try to determining if the phi node in a simple safe/unsafe data loop.
// Returns: // Returns:
// enum LoopSafety { Safe = 0, Unsafe, UnsafeLoop }; // enum LoopSafety { Safe = 0, Unsafe, UnsafeLoop };
// Safe - safe case when the phi and it's inputs reference only safe data // Safe - safe case when the phi and it's inputs reference only safe data
@ -1687,7 +1687,7 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) {
progress = phase->C->top(); progress = phase->C->top();
break; break;
} }
// If tranformed to a MergeMem, get the desired slice // If transformed to a MergeMem, get the desired slice
// Otherwise the returned node represents memory for every slice // Otherwise the returned node represents memory for every slice
Node *new_mem = (m->is_MergeMem()) ? Node *new_mem = (m->is_MergeMem()) ?
m->as_MergeMem()->memory_at(alias_idx) : m; m->as_MergeMem()->memory_at(alias_idx) : m;
@ -1962,7 +1962,7 @@ const Type *CatchNode::Value( PhaseTransform *phase ) const {
f[CatchProjNode::fall_through_index] = Type::TOP; f[CatchProjNode::fall_through_index] = Type::TOP;
} else if( call->req() > TypeFunc::Parms ) { } else if( call->req() > TypeFunc::Parms ) {
const Type *arg0 = phase->type( call->in(TypeFunc::Parms) ); const Type *arg0 = phase->type( call->in(TypeFunc::Parms) );
// Check for null reciever to virtual or interface calls // Check for null receiver to virtual or interface calls
if( call->is_CallDynamicJava() && if( call->is_CallDynamicJava() &&
arg0->higher_equal(TypePtr::NULL_PTR) ) { arg0->higher_equal(TypePtr::NULL_PTR) ) {
f[CatchProjNode::fall_through_index] = Type::TOP; f[CatchProjNode::fall_through_index] = Type::TOP;
@ -1995,7 +1995,7 @@ Node *CatchProjNode::Identity( PhaseTransform *phase ) {
// also remove any exception table entry. Thus we must know the call // also remove any exception table entry. Thus we must know the call
// feeding the Catch will not really throw an exception. This is ok for // feeding the Catch will not really throw an exception. This is ok for
// the main fall-thru control (happens when we know a call can never throw // the main fall-thru control (happens when we know a call can never throw
// an exception) or for "rethrow", because a further optimnization will // an exception) or for "rethrow", because a further optimization will
// yank the rethrow (happens when we inline a function that can throw an // yank the rethrow (happens when we inline a function that can throw an
// exception and the caller has no handler). Not legal, e.g., for passing // exception and the caller has no handler). Not legal, e.g., for passing
// a NULL receiver to a v-call, or passing bad types to a slow-check-cast. // a NULL receiver to a v-call, or passing bad types to a slow-check-cast.


@ -1246,7 +1246,7 @@ uint PhaseChaitin::Select( ) {
// If the live range is not bound, then we actually had some choices // If the live range is not bound, then we actually had some choices
// to make. In this case, the mask has more bits in it than the colors // to make. In this case, the mask has more bits in it than the colors
// choosen. Restrict the mask to just what was picked. // chosen. Restrict the mask to just what was picked.
if( lrg->num_regs() == 1 ) { // Size 1 live range if( lrg->num_regs() == 1 ) { // Size 1 live range
lrg->Clear(); // Clear the mask lrg->Clear(); // Clear the mask
lrg->Insert(reg); // Set regmask to match selected reg lrg->Insert(reg); // Set regmask to match selected reg


@ -327,7 +327,7 @@ class PhaseChaitin : public PhaseRegAlloc {
// True if lidx is used before any real register is def'd in the block // True if lidx is used before any real register is def'd in the block
bool prompt_use( Block *b, uint lidx ); bool prompt_use( Block *b, uint lidx );
Node *get_spillcopy_wide( Node *def, Node *use, uint uidx ); Node *get_spillcopy_wide( Node *def, Node *use, uint uidx );
// Insert the spill at chosen location. Skip over any interveneing Proj's or // Insert the spill at chosen location. Skip over any intervening Proj's or
// Phis. Skip over a CatchNode and projs, inserting in the fall-through block // Phis. Skip over a CatchNode and projs, inserting in the fall-through block
// instead. Update high-pressure indices. Create a new live range. // instead. Update high-pressure indices. Create a new live range.
void insert_proj( Block *b, uint i, Node *spill, uint maxlrg ); void insert_proj( Block *b, uint i, Node *spill, uint maxlrg );
@ -431,7 +431,7 @@ private:
void Simplify(); void Simplify();
// Select colors by re-inserting edges into the IFG. // Select colors by re-inserting edges into the IFG.
// Return TRUE if any spills occured. // Return TRUE if any spills occurred.
uint Select( ); uint Select( );
// Helper function for select which allows biased coloring // Helper function for select which allows biased coloring
OptoReg::Name choose_color( LRG &lrg, int chunk ); OptoReg::Name choose_color( LRG &lrg, int chunk );


@ -123,7 +123,7 @@ void PhaseChaitin::new_lrg( const Node *x, uint lrg ) {
} }
//------------------------------clone_projs------------------------------------ //------------------------------clone_projs------------------------------------
// After cloning some rematierialized instruction, clone any MachProj's that // After cloning some rematerialized instruction, clone any MachProj's that
// follow it. Example: Intel zero is XOR, kills flags. Sparc FP constants // follow it. Example: Intel zero is XOR, kills flags. Sparc FP constants
// use G3 as an address temp. // use G3 as an address temp.
int PhaseChaitin::clone_projs( Block *b, uint idx, Node *con, Node *copy, uint &maxlrg ) { int PhaseChaitin::clone_projs( Block *b, uint idx, Node *con, Node *copy, uint &maxlrg ) {
@ -694,8 +694,8 @@ uint PhaseConservativeCoalesce::compute_separating_interferences(Node *dst_copy,
} // End of if not infinite-stack neighbor } // End of if not infinite-stack neighbor
} // End of if actually inserted } // End of if actually inserted
} // End of if live range overlaps } // End of if live range overlaps
} // End of else collect intereferences for 1 node } // End of else collect interferences for 1 node
} // End of while forever, scan back for intereferences } // End of while forever, scan back for interferences
return reg_degree; return reg_degree;
} }
@ -786,7 +786,7 @@ bool PhaseConservativeCoalesce::copy_copy( Node *dst_copy, Node *src_copy, Block
if( rm_size == 0 ) return false; if( rm_size == 0 ) return false;
// Another early bail-out test is when we are double-coalescing and the // Another early bail-out test is when we are double-coalescing and the
// 2 copies are seperated by some control flow. // 2 copies are separated by some control flow.
if( dst_copy != src_copy ) { if( dst_copy != src_copy ) {
Block *src_b = _phc._cfg._bbs[src_copy->_idx]; Block *src_b = _phc._cfg._bbs[src_copy->_idx];
Block *b2 = b; Block *b2 = b;


@ -337,7 +337,7 @@ void Compile::print_compile_messages() {
tty->print_cr("*********************************************************"); tty->print_cr("*********************************************************");
} }
if (env()->break_at_compile()) { if (env()->break_at_compile()) {
// Open the debugger when compiing this method. // Open the debugger when compiling this method.
tty->print("### Breaking when compiling: "); tty->print("### Breaking when compiling: ");
method()->print_short_name(); method()->print_short_name();
tty->cr(); tty->cr();
@ -1191,8 +1191,8 @@ const TypePtr *Compile::flatten_alias_type( const TypePtr *tj ) const {
default: ShouldNotReachHere(); default: ShouldNotReachHere();
} }
break; break;
case 2: // No collasping at level 2; keep all splits case 2: // No collapsing at level 2; keep all splits
case 3: // No collasping at level 3; keep all splits case 3: // No collapsing at level 3; keep all splits
break; break;
default: default:
Unimplemented(); Unimplemented();
@ -2102,7 +2102,7 @@ static void final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &fpu ) {
// [base_reg + offset] // [base_reg + offset]
// NullCheck base_reg // NullCheck base_reg
// //
// Pin the new DecodeN node to non-null path on these patforms (Sparc) // Pin the new DecodeN node to non-null path on these platform (Sparc)
// to keep the information to which NULL check the new DecodeN node // to keep the information to which NULL check the new DecodeN node
// corresponds to use it as value in implicit_null_check(). // corresponds to use it as value in implicit_null_check().
// //


@ -71,7 +71,7 @@ testing.
to figure out which test post-dominates. The real problem is that it doesn't to figure out which test post-dominates. The real problem is that it doesn't
matter which one you pick. After you pick up, the dominating-test elider in matter which one you pick. After you pick up, the dominating-test elider in
IGVN can remove the test and allow you to hoist up to the dominating test on IGVN can remove the test and allow you to hoist up to the dominating test on
the choosen oop bypassing the test on the not-choosen oop. Seen in testing. the chosen oop bypassing the test on the not-chosen oop. Seen in testing.
Oops. Oops.
(3) Leave the CastPP's in. This makes the graph more accurate in some sense; (3) Leave the CastPP's in. This makes the graph more accurate in some sense;


@ -35,7 +35,7 @@
// by constant into a multiply/shift/add series. Return false if calculations // by constant into a multiply/shift/add series. Return false if calculations
// fail. // fail.
// //
// Borrowed almost verbatum from Hacker's Delight by Henry S. Warren, Jr. with // Borrowed almost verbatim from Hacker's Delight by Henry S. Warren, Jr. with
// minor type name and parameter changes. // minor type name and parameter changes.
static bool magic_int_divide_constants(jint d, jint &M, jint &s) { static bool magic_int_divide_constants(jint d, jint &M, jint &s) {
int32_t p; int32_t p;
@ -202,7 +202,7 @@ static Node *transform_int_divide( PhaseGVN *phase, Node *dividend, jint divisor
// by constant into a multiply/shift/add series. Return false if calculations // by constant into a multiply/shift/add series. Return false if calculations
// fail. // fail.
// //
// Borrowed almost verbatum from Hacker's Delight by Henry S. Warren, Jr. with // Borrowed almost verbatim from Hacker's Delight by Henry S. Warren, Jr. with
// minor type name and parameter changes. Adjusted to 64 bit word width. // minor type name and parameter changes. Adjusted to 64 bit word width.
static bool magic_long_divide_constants(jlong d, jlong &M, jint &s) { static bool magic_long_divide_constants(jlong d, jlong &M, jint &s) {
int64_t p; int64_t p;
@ -1069,7 +1069,7 @@ Node *ModLNode::Ideal(PhaseGVN *phase, bool can_reshape) {
int log2_con = -1; int log2_con = -1;
// If this is a power of two, they maybe we can mask it // If this is a power of two, then maybe we can mask it
if( is_power_of_2_long(pos_con) ) { if( is_power_of_2_long(pos_con) ) {
log2_con = log2_long(pos_con); log2_con = log2_long(pos_con);
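
The magic_int_divide_constants / magic_long_divide_constants hunks above refer to the Hacker's Delight scheme that turns a divide by a constant into a multiply and shift. A self-contained sketch (not the compiler transform itself) of signed 32-bit division by 7, using the published constants M = 0x92492493 and s = 2; it assumes the usual arithmetic right shift of negative values:

    #include <cstdint>
    #include <cassert>

    static int32_t div7(int32_t n) {
      const int32_t M  = (int32_t)0x92492493;          // magic multiplier for d = 7
      int32_t hi = (int32_t)(((int64_t)M * n) >> 32);  // high half of the product
      hi += n;                                         // M is "negative", so add the dividend back
      int32_t q = hi >> 2;                             // shift by s = 2
      q += (int32_t)((uint32_t)n >> 31);               // +1 for negative n (round toward zero)
      return q;
    }

    int main() {
      for (int32_t n = -100000; n <= 100000; ++n)
        assert(div7(n) == n / 7);
      return 0;
    }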


@ -183,7 +183,7 @@ class Block_Stack {
if (pre_order == 1) if (pre_order == 1)
t->_parent = NULL; // first block doesn't have parent t->_parent = NULL; // first block doesn't have parent
else { else {
// Save parent (currernt top block on stack) in DFS // Save parent (current top block on stack) in DFS
t->_parent = &_tarjan[_stack_top->block->_pre_order]; t->_parent = &_tarjan[_stack_top->block->_pre_order];
} }
// Now put this block on stack // Now put this block on stack


@ -515,7 +515,7 @@ bool ConnectionGraph::split_AddP(Node *addp, Node *base, PhaseGVN *igvn) {
// cause the failure in add_offset() with narrow oops since TypeOopPtr() // cause the failure in add_offset() with narrow oops since TypeOopPtr()
// constructor verifies correctness of the offset. // constructor verifies correctness of the offset.
// //
// It could happend on subclass's branch (from the type profiling // It could happened on subclass's branch (from the type profiling
// inlining) which was not eliminated during parsing since the exactness // inlining) which was not eliminated during parsing since the exactness
// of the allocation type was not propagated to the subclass type check. // of the allocation type was not propagated to the subclass type check.
// //
@ -703,7 +703,7 @@ Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArra
while (prev != result) { while (prev != result) {
prev = result; prev = result;
if (result == start_mem) if (result == start_mem)
break; // hit one of our sentinals break; // hit one of our sentinels
if (result->is_Mem()) { if (result->is_Mem()) {
const Type *at = phase->type(result->in(MemNode::Address)); const Type *at = phase->type(result->in(MemNode::Address));
if (at != Type::TOP) { if (at != Type::TOP) {
@ -720,7 +720,7 @@ Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArra
if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) { if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) {
Node *proj_in = result->in(0); Node *proj_in = result->in(0);
if (proj_in->is_Allocate() && proj_in->_idx == (uint)tinst->instance_id()) { if (proj_in->is_Allocate() && proj_in->_idx == (uint)tinst->instance_id()) {
break; // hit one of our sentinals break; // hit one of our sentinels
} else if (proj_in->is_Call()) { } else if (proj_in->is_Call()) {
CallNode *call = proj_in->as_Call(); CallNode *call = proj_in->as_Call();
if (!call->may_modify(tinst, phase)) { if (!call->may_modify(tinst, phase)) {
@ -804,7 +804,7 @@ Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArra
// Phase 2: Process MemNode's from memnode_worklist. compute new address type and // Phase 2: Process MemNode's from memnode_worklist. compute new address type and
// search the Memory chain for a store with the appropriate type // search the Memory chain for a store with the appropriate type
// address type. If a Phi is found, create a new version with // address type. If a Phi is found, create a new version with
// the approriate memory slices from each of the Phi inputs. // the appropriate memory slices from each of the Phi inputs.
// For stores, process the users as follows: // For stores, process the users as follows:
// MemNode: push on memnode_worklist // MemNode: push on memnode_worklist
// MergeMem: push on mergemem_worklist // MergeMem: push on mergemem_worklist
@ -1558,7 +1558,7 @@ bool ConnectionGraph::compute_escape() {
has_non_escaping_obj = true; // Non GlobalEscape has_non_escaping_obj = true; // Non GlobalEscape
Node* n = ptn->_node; Node* n = ptn->_node;
if (n->is_Allocate() && ptn->_scalar_replaceable ) { if (n->is_Allocate() && ptn->_scalar_replaceable ) {
// Push scalar replaceable alocations on alloc_worklist // Push scalar replaceable allocations on alloc_worklist
// for processing in split_unique_types(). // for processing in split_unique_types().
alloc_worklist.append(n); alloc_worklist.append(n);
} }


@ -606,7 +606,7 @@ Block* PhaseCFG::insert_anti_dependences(Block* LCA, Node* load, bool verify) {
if (pred_block != early) { if (pred_block != early) {
// If any predecessor of the Phi matches the load's "early block", // If any predecessor of the Phi matches the load's "early block",
// we do not need a precedence edge between the Phi and 'load' // we do not need a precedence edge between the Phi and 'load'
// since the load will be forced into a block preceeding the Phi. // since the load will be forced into a block preceding the Phi.
pred_block->set_raise_LCA_mark(load_index); pred_block->set_raise_LCA_mark(load_index);
assert(!LCA_orig->dominates(pred_block) || assert(!LCA_orig->dominates(pred_block) ||
early->dominates(pred_block), "early is high enough"); early->dominates(pred_block), "early is high enough");
@ -1399,7 +1399,7 @@ void PhaseCFG::Estimate_Block_Frequency() {
#ifdef ASSERT #ifdef ASSERT
for (uint i = 0; i < _num_blocks; i++ ) { for (uint i = 0; i < _num_blocks; i++ ) {
Block *b = _blocks[i]; Block *b = _blocks[i];
assert(b->_freq >= MIN_BLOCK_FREQUENCY, "Register Allocator requiers meaningful block frequency"); assert(b->_freq >= MIN_BLOCK_FREQUENCY, "Register Allocator requires meaningful block frequency");
} }
#endif #endif
@ -1652,7 +1652,7 @@ float Block::succ_prob(uint i) {
// successor blocks. // successor blocks.
assert(_num_succs == 2, "expecting 2 successors of a null check"); assert(_num_succs == 2, "expecting 2 successors of a null check");
// If either successor has only one predecessor, then the // If either successor has only one predecessor, then the
// probabiltity estimate can be derived using the // probability estimate can be derived using the
// relative frequency of the successor and this block. // relative frequency of the successor and this block.
if (_succs[i]->num_preds() == 2) { if (_succs[i]->num_preds() == 2) {
return _succs[i]->_freq / _freq; return _succs[i]->_freq / _freq;
@ -1854,7 +1854,7 @@ void Block::update_uncommon_branch(Block* ub) {
} }
//------------------------------update_succ_freq------------------------------- //------------------------------update_succ_freq-------------------------------
// Update the appropriate frequency associated with block 'b', a succesor of // Update the appropriate frequency associated with block 'b', a successor of
// a block in this loop. // a block in this loop.
void CFGLoop::update_succ_freq(Block* b, float freq) { void CFGLoop::update_succ_freq(Block* b, float freq) {
if (b->_loop == this) { if (b->_loop == this) {


@ -1148,7 +1148,7 @@ Node* GraphKit::null_check_common(Node* value, BasicType type,
Node *tst = _gvn.transform( btst ); Node *tst = _gvn.transform( btst );
//----------- //-----------
// if peephole optimizations occured, a prior test existed. // if peephole optimizations occurred, a prior test existed.
// If a prior test existed, maybe it dominates as we can avoid this test. // If a prior test existed, maybe it dominates as we can avoid this test.
if (tst != btst && type == T_OBJECT) { if (tst != btst && type == T_OBJECT) {
// At this point we want to scan up the CFG to see if we can // At this point we want to scan up the CFG to see if we can
@ -1196,7 +1196,7 @@ Node* GraphKit::null_check_common(Node* value, BasicType type,
// Consider using 'Reason_class_check' instead? // Consider using 'Reason_class_check' instead?
// To cause an implicit null check, we set the not-null probability // To cause an implicit null check, we set the not-null probability
// to the maximum (PROB_MAX). For an explicit check the probablity // to the maximum (PROB_MAX). For an explicit check the probability
// is set to a smaller value. // is set to a smaller value.
if (null_control != NULL || too_many_traps(reason)) { if (null_control != NULL || too_many_traps(reason)) {
// probability is less likely // probability is less likely


@ -292,7 +292,7 @@ void PhaseIFG::verify( const PhaseChaitin *pc ) const {
//------------------------------interfere_with_live---------------------------- //------------------------------interfere_with_live----------------------------
// Interfere this register with everything currently live. Use the RegMasks // Interfere this register with everything currently live. Use the RegMasks
// to trim the set of possible interferences. Return a count of register-only // to trim the set of possible interferences. Return a count of register-only
// inteferences as an estimate of register pressure. // interferences as an estimate of register pressure.
void PhaseChaitin::interfere_with_live( uint r, IndexSet *liveout ) { void PhaseChaitin::interfere_with_live( uint r, IndexSet *liveout ) {
uint retval = 0; uint retval = 0;
// Interfere with everything live. // Interfere with everything live.


@ -81,7 +81,7 @@ static Node* split_if(IfNode *iff, PhaseIterGVN *igvn) {
uint i4; uint i4;
for( i4 = 1; i4 < phi->req(); i4++ ) { for( i4 = 1; i4 < phi->req(); i4++ ) {
con1 = phi->in(i4); con1 = phi->in(i4);
if( !con1 ) return NULL; // Do not optimize partially collaped merges if( !con1 ) return NULL; // Do not optimize partially collapsed merges
if( con1->is_Con() ) break; // Found a constant if( con1->is_Con() ) break; // Found a constant
// Also allow null-vs-not-null checks // Also allow null-vs-not-null checks
const TypePtr *tp = igvn->type(con1)->isa_ptr(); const TypePtr *tp = igvn->type(con1)->isa_ptr();
@ -204,7 +204,7 @@ static Node* split_if(IfNode *iff, PhaseIterGVN *igvn) {
// T F T F T F // T F T F T F
// ..s.. ..t .. ..s.. ..t.. ..s.. ..t.. // ..s.. ..t .. ..s.. ..t.. ..s.. ..t..
// //
// Split the paths coming into the merge point into 2 seperate groups of // Split the paths coming into the merge point into 2 separate groups of
// merges. On the left will be all the paths feeding constants into the // merges. On the left will be all the paths feeding constants into the
// Cmp's Phi. On the right will be the remaining paths. The Cmp's Phi // Cmp's Phi. On the right will be the remaining paths. The Cmp's Phi
// will fold up into a constant; this will let the Cmp fold up as well as // will fold up into a constant; this will let the Cmp fold up as well as
@ -236,7 +236,7 @@ static Node* split_if(IfNode *iff, PhaseIterGVN *igvn) {
} }
// Register the new RegionNodes but do not transform them. Cannot // Register the new RegionNodes but do not transform them. Cannot
// transform until the entire Region/Phi conglerate has been hacked // transform until the entire Region/Phi conglomerate has been hacked
// as a single huge transform. // as a single huge transform.
igvn->register_new_node_with_optimizer( region_c ); igvn->register_new_node_with_optimizer( region_c );
igvn->register_new_node_with_optimizer( region_x ); igvn->register_new_node_with_optimizer( region_x );
@ -599,7 +599,7 @@ const TypeInt* IfNode::filtered_int_type(PhaseGVN* gvn, Node *val, Node* if_proj
//------------------------------fold_compares---------------------------- //------------------------------fold_compares----------------------------
// See if a pair of CmpIs can be converted into a CmpU. In some cases // See if a pair of CmpIs can be converted into a CmpU. In some cases
// the direction of this if is determined by the preciding if so it // the direction of this if is determined by the preceding if so it
// can be eliminate entirely. Given an if testing (CmpI n c) check // can be eliminate entirely. Given an if testing (CmpI n c) check
// for an immediately control dependent if that is testing (CmpI n c2) // for an immediately control dependent if that is testing (CmpI n c2)
// and has one projection leading to this if and the other projection // and has one projection leading to this if and the other projection
@ -811,7 +811,7 @@ Node *IfNode::Ideal(PhaseGVN *phase, bool can_reshape) {
// Try to remove extra range checks. All 'up_one_dom' gives up at merges // Try to remove extra range checks. All 'up_one_dom' gives up at merges
// so all checks we inspect post-dominate the top-most check we find. // so all checks we inspect post-dominate the top-most check we find.
// If we are going to fail the current check and we reach the top check // If we are going to fail the current check and we reach the top check
// then we are guarenteed to fail, so just start interpreting there. // then we are guaranteed to fail, so just start interpreting there.
// We 'expand' the top 2 range checks to include all post-dominating // We 'expand' the top 2 range checks to include all post-dominating
// checks. // checks.
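
The fold_compares comment above describes converting a pair of signed compares into one unsigned compare. A standalone sketch of that identity (valid when the upper bound n is non-negative, as an array length always is):

    #include <cassert>

    // 0 <= i && i < n folded into a single unsigned compare:
    // a negative i wraps to a huge unsigned value and fails the one test.
    static bool in_range_folded(int i, int n) {
      return (unsigned)i < (unsigned)n;
    }

    int main() {
      assert( in_range_folded(3, 10));
      assert(!in_range_folded(-1, 10));   // negative index rejected
      assert(!in_range_folded(10, 10));   // upper bound rejected
      return 0;
    }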


@ -992,7 +992,7 @@ bool LibraryCallKit::inline_string_indexOf() {
Node *argument = pop(); // pop non-receiver first: it was pushed second Node *argument = pop(); // pop non-receiver first: it was pushed second
Node *receiver = pop(); Node *receiver = pop();
// don't intrinsify is argument isn't a constant string. // don't intrinsify if argument isn't a constant string.
if (!argument->is_Con()) { if (!argument->is_Con()) {
return false; return false;
} }
@ -1267,7 +1267,7 @@ bool LibraryCallKit::inline_pow(vmIntrinsics::ID id) {
// result = DPow(x,y); // result = DPow(x,y);
// } // }
// if (result != result)? { // if (result != result)? {
// ucommon_trap(); // uncommon_trap();
// } // }
// return result; // return result;
@ -1324,7 +1324,7 @@ bool LibraryCallKit::inline_pow(vmIntrinsics::ID id) {
// Check if (y isn't int) then go to slow path // Check if (y isn't int) then go to slow path
Node *bol2 = _gvn.transform( new (C, 2) BoolNode( cmpinty, BoolTest::ne ) ); Node *bol2 = _gvn.transform( new (C, 2) BoolNode( cmpinty, BoolTest::ne ) );
// Branch eith way // Branch either way
IfNode *if2 = create_and_xform_if(complex_path,bol2, PROB_STATIC_INFREQUENT, COUNT_UNKNOWN); IfNode *if2 = create_and_xform_if(complex_path,bol2, PROB_STATIC_INFREQUENT, COUNT_UNKNOWN);
Node *slow_path = opt_iff(r,if2); // Set region path 2 Node *slow_path = opt_iff(r,if2); // Set region path 2
@ -1715,8 +1715,8 @@ inline Node* LibraryCallKit::make_unsafe_address(Node* base, Node* offset) {
} }
//----------------------------inline_reverseBytes_int/long------------------- //----------------------------inline_reverseBytes_int/long-------------------
// inline Int.reverseBytes(int) // inline Integer.reverseBytes(int)
// inline Long.reverseByes(long) // inline Long.reverseBytes(long)
bool LibraryCallKit::inline_reverseBytes(vmIntrinsics::ID id) { bool LibraryCallKit::inline_reverseBytes(vmIntrinsics::ID id) {
assert(id == vmIntrinsics::_reverseBytes_i || id == vmIntrinsics::_reverseBytes_l, "not reverse Bytes"); assert(id == vmIntrinsics::_reverseBytes_i || id == vmIntrinsics::_reverseBytes_l, "not reverse Bytes");
if (id == vmIntrinsics::_reverseBytes_i && !Matcher::has_match_rule(Op_ReverseBytesI)) return false; if (id == vmIntrinsics::_reverseBytes_i && !Matcher::has_match_rule(Op_ReverseBytesI)) return false;
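
For reference, the 32-bit semantics of the reverseBytes intrinsic written out with shifts and masks; where the matcher has a rule for Op_ReverseBytesI the JIT can instead emit a single byte-swap instruction, so this sketch is only an illustration of the operation, not the intrinsic's implementation:

    #include <cstdint>
    #include <cassert>

    static uint32_t reverse_bytes_i(uint32_t x) {
      return  (x >> 24)
           | ((x >>  8) & 0x0000FF00u)
           | ((x <<  8) & 0x00FF0000u)
           |  (x << 24);
    }

    int main() {
      assert(reverse_bytes_i(0x12345678u) == 0x78563412u);
      return 0;
    }
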
@ -1915,7 +1915,7 @@ bool LibraryCallKit::inline_unsafe_access(bool is_native_ptr, bool is_store, Bas
// addition to memory membars when is_volatile. This is a little // addition to memory membars when is_volatile. This is a little
// too strong, but avoids the need to insert per-alias-type // too strong, but avoids the need to insert per-alias-type
// volatile membars (for stores; compare Parse::do_put_xxx), which // volatile membars (for stores; compare Parse::do_put_xxx), which
// we cannot do effctively here because we probably only have a // we cannot do effectively here because we probably only have a
// rough approximation of type. // rough approximation of type.
need_mem_bar = true; need_mem_bar = true;
// For Stores, place a memory ordering barrier now. // For Stores, place a memory ordering barrier now.
@ -2099,7 +2099,7 @@ bool LibraryCallKit::inline_unsafe_CAS(BasicType type) {
// overly confusing. (This is a true fact! I originally combined // overly confusing. (This is a true fact! I originally combined
// them, but even I was confused by it!) As much code/comments as // them, but even I was confused by it!) As much code/comments as
// possible are retained from inline_unsafe_access though to make // possible are retained from inline_unsafe_access though to make
// the correspondances clearer. - dl // the correspondences clearer. - dl
if (callee()->is_static()) return false; // caller must have the capability! if (callee()->is_static()) return false; // caller must have the capability!
@ -2166,7 +2166,7 @@ bool LibraryCallKit::inline_unsafe_CAS(BasicType type) {
int alias_idx = C->get_alias_index(adr_type); int alias_idx = C->get_alias_index(adr_type);
// Memory-model-wise, a CAS acts like a little synchronized block, // Memory-model-wise, a CAS acts like a little synchronized block,
// so needs barriers on each side. These don't't translate into // so needs barriers on each side. These don't translate into
// actual barriers on most machines, but we still need rest of // actual barriers on most machines, but we still need rest of
// compiler to respect ordering. // compiler to respect ordering.
@ -3208,7 +3208,7 @@ bool LibraryCallKit::inline_native_hashcode(bool is_virtual, bool is_static) {
Node *hash_shift = _gvn.intcon(markOopDesc::hash_shift); Node *hash_shift = _gvn.intcon(markOopDesc::hash_shift);
Node *hshifted_header= _gvn.transform( new (C, 3) URShiftXNode(header, hash_shift) ); Node *hshifted_header= _gvn.transform( new (C, 3) URShiftXNode(header, hash_shift) );
// This hack lets the hash bits live anywhere in the mark object now, as long // This hack lets the hash bits live anywhere in the mark object now, as long
// as the shift drops the relevent bits into the low 32 bits. Note that // as the shift drops the relevant bits into the low 32 bits. Note that
// Java spec says that HashCode is an int so there's no point in capturing // Java spec says that HashCode is an int so there's no point in capturing
// an 'X'-sized hashcode (32 in 32-bit build or 64 in 64-bit build). // an 'X'-sized hashcode (32 in 32-bit build or 64 in 64-bit build).
hshifted_header = ConvX2I(hshifted_header); hshifted_header = ConvX2I(hshifted_header);
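
A rough sketch of the shift-and-truncate step in this hunk; the shift amount and header layout below are placeholders for illustration, not markOopDesc's real constants:

    #include <cstdint>

    typedef uintptr_t markWord;              // "X"-sized: 32 or 64 bits
    static const int  kHashShiftExample = 8; // placeholder for markOopDesc::hash_shift

    static int32_t hash_from_header(markWord header) {
      markWord shifted = header >> kHashShiftExample;  // URShiftXNode in the IR
      return (int32_t)shifted;                         // ConvX2I: keep the low 32 bits
    }
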
@ -3255,7 +3255,7 @@ bool LibraryCallKit::inline_native_hashcode(bool is_virtual, bool is_static) {
} }
//---------------------------inline_native_getClass---------------------------- //---------------------------inline_native_getClass----------------------------
// Build special case code for calls to hashCode on an object. // Build special case code for calls to getClass on an object.
bool LibraryCallKit::inline_native_getClass() { bool LibraryCallKit::inline_native_getClass() {
Node* obj = null_check_receiver(callee()); Node* obj = null_check_receiver(callee());
if (stopped()) return true; if (stopped()) return true;
@ -4594,7 +4594,7 @@ LibraryCallKit::generate_arraycopy(const TypePtr* adr_type,
} }
// The memory edges above are precise in order to model effects around // The memory edges above are precise in order to model effects around
// array copyies accurately to allow value numbering of field loads around // array copies accurately to allow value numbering of field loads around
// arraycopy. Such field loads, both before and after, are common in Java // arraycopy. Such field loads, both before and after, are common in Java
// collections and similar classes involving header/array data structures. // collections and similar classes involving header/array data structures.
// //


@ -39,7 +39,7 @@
// Leftover bits become the new live-in for the predecessor block, and the pred // Leftover bits become the new live-in for the predecessor block, and the pred
// block is put on the worklist. // block is put on the worklist.
// The locally live-in stuff is computed once and added to predecessor // The locally live-in stuff is computed once and added to predecessor
// live-out sets. This seperate compilation is done in the outer loop below. // live-out sets. This separate compilation is done in the outer loop below.
PhaseLive::PhaseLive( const PhaseCFG &cfg, LRG_List &names, Arena *arena ) : Phase(LIVE), _cfg(cfg), _names(names), _arena(arena), _live(0) { PhaseLive::PhaseLive( const PhaseCFG &cfg, LRG_List &names, Arena *arena ) : Phase(LIVE), _cfg(cfg), _names(names), _arena(arena), _live(0) {
} }


@ -121,7 +121,7 @@ void Parse::do_monitor_exit() {
kill_dead_locals(); kill_dead_locals();
pop(); // Pop oop to unlock pop(); // Pop oop to unlock
// Because monitors are guarenteed paired (else we bail out), we know // Because monitors are guaranteed paired (else we bail out), we know
// the matching Lock for this Unlock. Hence we know there is no need // the matching Lock for this Unlock. Hence we know there is no need
// for a null check on Unlock. // for a null check on Unlock.
shared_unlock(map()->peek_monitor_box(), map()->peek_monitor_obj()); shared_unlock(map()->peek_monitor_box(), map()->peek_monitor_obj());


@ -119,7 +119,7 @@ void IdealLoopTree::compute_profile_trip_cnt( PhaseIdealLoop *phase ) {
//---------------------is_invariant_addition----------------------------- //---------------------is_invariant_addition-----------------------------
// Return nonzero index of invariant operand for an Add or Sub // Return nonzero index of invariant operand for an Add or Sub
// of (nonconstant) invariant and variant values. Helper for reassoicate_invariants. // of (nonconstant) invariant and variant values. Helper for reassociate_invariants.
int IdealLoopTree::is_invariant_addition(Node* n, PhaseIdealLoop *phase) { int IdealLoopTree::is_invariant_addition(Node* n, PhaseIdealLoop *phase) {
int op = n->Opcode(); int op = n->Opcode();
if (op == Op_AddI || op == Op_SubI) { if (op == Op_AddI || op == Op_SubI) {
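
A small example (made-up variables, ignoring signed-overflow corner cases) of the reassociation this helper feeds: regrouping an Add of invariant and variant operands lets the invariant part be hoisted out of the loop.

    #include <vector>

    long sum_before(const std::vector<int>& x, int inv1, int inv2) {
      long sum = 0;
      for (size_t i = 0; i < x.size(); ++i)
        sum += inv1 + (x[i] + inv2);   // invariant addition redone every iteration
      return sum;
    }

    long sum_after(const std::vector<int>& x, int inv1, int inv2) {
      const int t = inv1 + inv2;       // reassociated and hoisted once
      long sum = 0;
      for (size_t i = 0; i < x.size(); ++i)
        sum += x[i] + t;
      return sum;
    }
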
@ -520,7 +520,7 @@ bool IdealLoopTree::policy_unroll( PhaseIdealLoop *phase ) const {
//------------------------------policy_align----------------------------------- //------------------------------policy_align-----------------------------------
// Return TRUE or FALSE if the loop should be cache-line aligned. Gather the // Return TRUE or FALSE if the loop should be cache-line aligned. Gather the
// expression that does the alignment. Note that only one array base can be // expression that does the alignment. Note that only one array base can be
// aligned in a loop (unless the VM guarentees mutual alignment). Note that // aligned in a loop (unless the VM guarantees mutual alignment). Note that
// if we vectorize short memory ops into longer memory ops, we may want to // if we vectorize short memory ops into longer memory ops, we may want to
// increase alignment. // increase alignment.
bool IdealLoopTree::policy_align( PhaseIdealLoop *phase ) const { bool IdealLoopTree::policy_align( PhaseIdealLoop *phase ) const {


@ -131,7 +131,7 @@ void PhaseIdealLoop::do_unswitching (IdealLoopTree *loop, Node_List &old_new) {
ProjNode* proj_false = invar_iff->proj_out(0)->as_Proj(); ProjNode* proj_false = invar_iff->proj_out(0)->as_Proj();
// Hoist invariant casts out of each loop to the appropiate // Hoist invariant casts out of each loop to the appropriate
// control projection. // control projection.
Node_List worklist; Node_List worklist;


@ -274,7 +274,7 @@ Node *PhaseIdealLoop::is_counted_loop( Node *x, IdealLoopTree *loop ) {
// //
// Canonicalize the condition on the test. If we can exactly determine // Canonicalize the condition on the test. If we can exactly determine
// the trip-counter exit value, then set limit to that value and use // the trip-counter exit value, then set limit to that value and use
// a '!=' test. Otherwise use conditon '<' for count-up loops and // a '!=' test. Otherwise use condition '<' for count-up loops and
// '>' for count-down loops. If the condition is inverted and we will // '>' for count-down loops. If the condition is inverted and we will
// be rolling through MININT to MAXINT, then bail out. // be rolling through MININT to MAXINT, then bail out.
@ -290,7 +290,7 @@ Node *PhaseIdealLoop::is_counted_loop( Node *x, IdealLoopTree *loop ) {
// If compare points to incr, we are ok. Otherwise the compare // If compare points to incr, we are ok. Otherwise the compare
// can directly point to the phi; in this case adjust the compare so that // can directly point to the phi; in this case adjust the compare so that
// it points to the incr by adusting the limit. // it points to the incr by adjusting the limit.
if( cmp->in(1) == phi || cmp->in(2) == phi ) if( cmp->in(1) == phi || cmp->in(2) == phi )
limit = gvn->transform(new (C, 3) AddINode(limit,stride)); limit = gvn->transform(new (C, 3) AddINode(limit,stride));
@ -471,7 +471,7 @@ Node *PhaseIdealLoop::is_counted_loop( Node *x, IdealLoopTree *loop ) {
lazy_replace( x, l ); lazy_replace( x, l );
set_idom(l, init_control, dom_depth(x)); set_idom(l, init_control, dom_depth(x));
// Check for immediately preceeding SafePoint and remove // Check for immediately preceding SafePoint and remove
Node *sfpt2 = le->in(0); Node *sfpt2 = le->in(0);
if( sfpt2->Opcode() == Op_SafePoint && is_deleteable_safept(sfpt2)) if( sfpt2->Opcode() == Op_SafePoint && is_deleteable_safept(sfpt2))
lazy_replace( sfpt2, sfpt2->in(TypeFunc::Control)); lazy_replace( sfpt2, sfpt2->in(TypeFunc::Control));
@ -1506,7 +1506,7 @@ PhaseIdealLoop::PhaseIdealLoop( PhaseIterGVN &igvn, const PhaseIdealLoop *verify
// Build Dominators for elision of NULL checks & loop finding. // Build Dominators for elision of NULL checks & loop finding.
// Since nodes do not have a slot for immediate dominator, make // Since nodes do not have a slot for immediate dominator, make
// a persistant side array for that info indexed on node->_idx. // a persistent side array for that info indexed on node->_idx.
_idom_size = C->unique(); _idom_size = C->unique();
_idom = NEW_RESOURCE_ARRAY( Node*, _idom_size ); _idom = NEW_RESOURCE_ARRAY( Node*, _idom_size );
_dom_depth = NEW_RESOURCE_ARRAY( uint, _idom_size ); _dom_depth = NEW_RESOURCE_ARRAY( uint, _idom_size );
@ -1529,7 +1529,7 @@ PhaseIdealLoop::PhaseIdealLoop( PhaseIterGVN &igvn, const PhaseIdealLoop *verify
// Given dominators, try to find inner loops with calls that must // Given dominators, try to find inner loops with calls that must
// always be executed (call dominates loop tail). These loops do // always be executed (call dominates loop tail). These loops do
// not need a seperate safepoint. // not need a separate safepoint.
Node_List cisstack(a); Node_List cisstack(a);
_ltree_root->check_safepts(visited, cisstack); _ltree_root->check_safepts(visited, cisstack);
@ -2332,7 +2332,7 @@ void PhaseIdealLoop::build_loop_early( VectorSet &visited, Node_List &worklist,
if (done) { if (done) {
// All of n's inputs have been processed, complete post-processing. // All of n's inputs have been processed, complete post-processing.
// Compute earilest point this Node can go. // Compute earliest point this Node can go.
// CFG, Phi, pinned nodes already know their controlling input. // CFG, Phi, pinned nodes already know their controlling input.
if (!has_node(n)) { if (!has_node(n)) {
// Record earliest legal location // Record earliest legal location
@ -2672,9 +2672,9 @@ void PhaseIdealLoop::build_loop_late_post( Node *n, const PhaseIdealLoop *verify
pinned = false; pinned = false;
} }
if( pinned ) { if( pinned ) {
IdealLoopTree *choosen_loop = get_loop(n->is_CFG() ? n : get_ctrl(n)); IdealLoopTree *chosen_loop = get_loop(n->is_CFG() ? n : get_ctrl(n));
if( !choosen_loop->_child ) // Inner loop? if( !chosen_loop->_child ) // Inner loop?
choosen_loop->_body.push(n); // Collect inner loops chosen_loop->_body.push(n); // Collect inner loops
return; return;
} }
} else { // No slot zero } else { // No slot zero
@ -2746,9 +2746,9 @@ void PhaseIdealLoop::build_loop_late_post( Node *n, const PhaseIdealLoop *verify
set_ctrl(n, least); set_ctrl(n, least);
// Collect inner loop bodies // Collect inner loop bodies
IdealLoopTree *choosen_loop = get_loop(least); IdealLoopTree *chosen_loop = get_loop(least);
if( !choosen_loop->_child ) // Inner loop? if( !chosen_loop->_child ) // Inner loop?
choosen_loop->_body.push(n);// Collect inner loops chosen_loop->_body.push(n);// Collect inner loops
} }
#ifndef PRODUCT #ifndef PRODUCT


@ -390,7 +390,7 @@ public:
// Return TRUE or FALSE if the loop should be cache-line aligned. // Return TRUE or FALSE if the loop should be cache-line aligned.
// Gather the expression that does the alignment. Note that only // Gather the expression that does the alignment. Note that only
// one array base can be aligned in a loop (unless the VM guarentees // one array base can be aligned in a loop (unless the VM guarantees
// mutual alignment). Note that if we vectorize short memory ops // mutual alignment). Note that if we vectorize short memory ops
// into longer memory ops, we may want to increase alignment. // into longer memory ops, we may want to increase alignment.
bool policy_align( PhaseIdealLoop *phase ) const; bool policy_align( PhaseIdealLoop *phase ) const;
@ -403,7 +403,7 @@ public:
// Reassociate invariant add and subtract expressions. // Reassociate invariant add and subtract expressions.
Node* reassociate_add_sub(Node* n1, PhaseIdealLoop *phase); Node* reassociate_add_sub(Node* n1, PhaseIdealLoop *phase);
// Return nonzero index of invariant operand if invariant and variant // Return nonzero index of invariant operand if invariant and variant
// are combined with an Add or Sub. Helper for reassoicate_invariants. // are combined with an Add or Sub. Helper for reassociate_invariants.
int is_invariant_addition(Node* n, PhaseIdealLoop *phase); int is_invariant_addition(Node* n, PhaseIdealLoop *phase);
// Return true if n is invariant // Return true if n is invariant


@ -97,7 +97,7 @@ Node *PhaseIdealLoop::split_thru_phi( Node *n, Node *region, int policy ) {
// (Note: This tweaking with igvn only works because x is a new node.) // (Note: This tweaking with igvn only works because x is a new node.)
_igvn.set_type(x, t); _igvn.set_type(x, t);
// If x is a TypeNode, capture any more-precise type permanently into Node // If x is a TypeNode, capture any more-precise type permanently into Node
// othewise it will be not updated during igvn->transform since // otherwise it will be not updated during igvn->transform since
// igvn->type(x) is set to x->Value() already. // igvn->type(x) is set to x->Value() already.
x->raise_bottom_type(t); x->raise_bottom_type(t);
Node *y = x->Identity(&_igvn); Node *y = x->Identity(&_igvn);
@ -879,7 +879,7 @@ void PhaseIdealLoop::split_if_with_blocks_post( Node *n ) {
Node *x_ctrl = NULL; Node *x_ctrl = NULL;
if( u->is_Phi() ) { if( u->is_Phi() ) {
// Replace all uses of normal nodes. Replace Phi uses // Replace all uses of normal nodes. Replace Phi uses
// individually, so the seperate Nodes can sink down // individually, so the separate Nodes can sink down
// different paths. // different paths.
uint k = 1; uint k = 1;
while( u->in(k) != n ) k++; while( u->in(k) != n ) k++;


@ -136,7 +136,7 @@ void MachNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
// Size of instruction in bytes // Size of instruction in bytes
uint MachNode::size(PhaseRegAlloc *ra_) const { uint MachNode::size(PhaseRegAlloc *ra_) const {
// If a virtual was not defined for this specific instruction, // If a virtual was not defined for this specific instruction,
// Call the helper which finds the size by emiting the bits. // Call the helper which finds the size by emitting the bits.
return MachNode::emit_size(ra_); return MachNode::emit_size(ra_);
} }


@ -216,7 +216,7 @@ static Node *scan_mem_chain(Node *mem, int alias_idx, int offset, Node *start_me
const TypeOopPtr *tinst = phase->C->get_adr_type(alias_idx)->isa_oopptr(); const TypeOopPtr *tinst = phase->C->get_adr_type(alias_idx)->isa_oopptr();
while (true) { while (true) {
if (mem == alloc_mem || mem == start_mem ) { if (mem == alloc_mem || mem == start_mem ) {
return mem; // hit one of our sentinals return mem; // hit one of our sentinels
} else if (mem->is_MergeMem()) { } else if (mem->is_MergeMem()) {
mem = mem->as_MergeMem()->memory_at(alias_idx); mem = mem->as_MergeMem()->memory_at(alias_idx);
} else if (mem->is_Proj() && mem->as_Proj()->_con == TypeFunc::Memory) { } else if (mem->is_Proj() && mem->as_Proj()->_con == TypeFunc::Memory) {
@ -1668,7 +1668,7 @@ void PhaseMacroExpand::expand_lock_node(LockNode *lock) {
if (UseOptoBiasInlining) { if (UseOptoBiasInlining) {
/* /*
* See the full descrition in MacroAssembler::biased_locking_enter(). * See the full description in MacroAssembler::biased_locking_enter().
* *
* if( (mark_word & biased_lock_mask) == biased_lock_pattern ) { * if( (mark_word & biased_lock_mask) == biased_lock_pattern ) {
* // The object is biased. * // The object is biased.
@ -1904,7 +1904,7 @@ void PhaseMacroExpand::expand_unlock_node(UnlockNode *unlock) {
if (UseOptoBiasInlining) { if (UseOptoBiasInlining) {
// Check for biased locking unlock case, which is a no-op. // Check for biased locking unlock case, which is a no-op.
// See the full descrition in MacroAssembler::biased_locking_exit(). // See the full description in MacroAssembler::biased_locking_exit().
region = new (C, 4) RegionNode(4); region = new (C, 4) RegionNode(4);
// create a Phi for the memory state // create a Phi for the memory state
mem_phi = new (C, 4) PhiNode( region, Type::MEMORY, TypeRawPtr::BOTTOM); mem_phi = new (C, 4) PhiNode( region, Type::MEMORY, TypeRawPtr::BOTTOM);


@ -897,7 +897,7 @@ Node *Matcher::xform( Node *n, int max_stack ) {
#ifdef ASSERT #ifdef ASSERT
_new2old_map.map(m->_idx, n); _new2old_map.map(m->_idx, n);
#endif #endif
mstack.push(m, Post_Visit, n, i); // Don't neet to visit mstack.push(m, Post_Visit, n, i); // Don't need to visit
mstack.push(m->in(0), Visit, m, 0); mstack.push(m->in(0), Visit, m, 0);
} else { } else {
mstack.push(m, Visit, n, i); mstack.push(m, Visit, n, i);
@ -1267,7 +1267,7 @@ static bool match_into_reg( const Node *n, Node *m, Node *control, int i, bool s
} }
} }
// Not forceably cloning. If shared, put it into a register. // Not forceable cloning. If shared, put it into a register.
return shared; return shared;
} }
@ -1542,7 +1542,7 @@ void Matcher::ReduceInst_Chain_Rule( State *s, int rule, Node *&mem, MachNode *m
// This is what my child will give me. // This is what my child will give me.
int opnd_class_instance = s->_rule[op]; int opnd_class_instance = s->_rule[op];
// Choose between operand class or not. // Choose between operand class or not.
// This is what I will recieve. // This is what I will receive.
int catch_op = (FIRST_OPERAND_CLASS <= op && op < NUM_OPERANDS) ? opnd_class_instance : op; int catch_op = (FIRST_OPERAND_CLASS <= op && op < NUM_OPERANDS) ? opnd_class_instance : op;
// New rule for child. Chase operand classes to get the actual rule. // New rule for child. Chase operand classes to get the actual rule.
int newrule = s->_rule[catch_op]; int newrule = s->_rule[catch_op];
@ -1966,7 +1966,7 @@ void Matcher::find_shared( Node *n ) {
// BoolNode::match_edge always returns a zero. // BoolNode::match_edge always returns a zero.
// We reorder the Op_If in a pre-order manner, so we can visit without // We reorder the Op_If in a pre-order manner, so we can visit without
// accidently sharing the Cmp (the Bool and the If make 2 users). // accidentally sharing the Cmp (the Bool and the If make 2 users).
n->add_req( n->in(1)->in(1) ); // Add the Cmp next to the Bool n->add_req( n->in(1)->in(1) ); // Add the Cmp next to the Bool
} }
else if (nstate == Post_Visit) { else if (nstate == Post_Visit) {


@ -100,12 +100,12 @@ Node *MemNode::optimize_simple_memory_chain(Node *mchain, const TypePtr *t_adr,
while (prev != result) { while (prev != result) {
prev = result; prev = result;
if (result == start_mem) if (result == start_mem)
break; // hit one of our sentinals break; // hit one of our sentinels
// skip over a call which does not affect this memory slice // skip over a call which does not affect this memory slice
if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) { if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) {
Node *proj_in = result->in(0); Node *proj_in = result->in(0);
if (proj_in->is_Allocate() && proj_in->_idx == instance_id) { if (proj_in->is_Allocate() && proj_in->_idx == instance_id) {
break; // hit one of our sentinals break; // hit one of our sentinels
} else if (proj_in->is_Call()) { } else if (proj_in->is_Call()) {
CallNode *call = proj_in->as_Call(); CallNode *call = proj_in->as_Call();
if (!call->may_modify(t_adr, phase)) { if (!call->may_modify(t_adr, phase)) {
@ -198,7 +198,7 @@ static Node *step_through_mergemem(PhaseGVN *phase, MergeMemNode *mmem, const T
// If not, we can update the input infinitely along a MergeMem cycle // If not, we can update the input infinitely along a MergeMem cycle
// Equivalent code in PhiNode::Ideal // Equivalent code in PhiNode::Ideal
Node* m = phase->transform(mmem); Node* m = phase->transform(mmem);
// If tranformed to a MergeMem, get the desired slice // If transformed to a MergeMem, get the desired slice
// Otherwise the returned node represents memory for every slice // Otherwise the returned node represents memory for every slice
mem = (m->is_MergeMem())? m->as_MergeMem()->memory_at(alias_idx) : m; mem = (m->is_MergeMem())? m->as_MergeMem()->memory_at(alias_idx) : m;
// Update input if it is progress over what we have now // Update input if it is progress over what we have now
@ -970,7 +970,7 @@ Node *LoadNode::Identity( PhaseTransform *phase ) {
} }
// Search for an existing data phi which was generated before for the same // Search for an existing data phi which was generated before for the same
// instance's field to avoid infinite genertion of phis in a loop. // instance's field to avoid infinite generation of phis in a loop.
Node *region = mem->in(0); Node *region = mem->in(0);
if (is_instance_field_load_with_local_phi(region)) { if (is_instance_field_load_with_local_phi(region)) {
const TypePtr *addr_t = in(MemNode::Address)->bottom_type()->isa_ptr(); const TypePtr *addr_t = in(MemNode::Address)->bottom_type()->isa_ptr();
@ -1254,7 +1254,7 @@ Node *LoadNode::split_through_phi(PhaseGVN *phase) {
// (This tweaking with igvn only works because x is a new node.) // (This tweaking with igvn only works because x is a new node.)
igvn->set_type(x, t); igvn->set_type(x, t);
// If x is a TypeNode, capture any more-precise type permanently into Node // If x is a TypeNode, capture any more-precise type permanently into Node
// othewise it will be not updated during igvn->transform since // otherwise it will be not updated during igvn->transform since
// igvn->type(x) is set to x->Value() already. // igvn->type(x) is set to x->Value() already.
x->raise_bottom_type(t); x->raise_bottom_type(t);
Node *y = x->Identity(igvn); Node *y = x->Identity(igvn);
@ -2591,7 +2591,7 @@ Node *MemBarNode::match( const ProjNode *proj, const Matcher *m ) {
// capturing of nearby memory operations. // capturing of nearby memory operations.
// //
// During macro-expansion, all captured initializations which store // During macro-expansion, all captured initializations which store
// constant values of 32 bits or smaller are coalesced (if advantagous) // constant values of 32 bits or smaller are coalesced (if advantageous)
// into larger 'tiles' 32 or 64 bits. This allows an object to be // into larger 'tiles' 32 or 64 bits. This allows an object to be
// initialized in fewer memory operations. Memory words which are // initialized in fewer memory operations. Memory words which are
// covered by neither tiles nor non-constant stores are pre-zeroed // covered by neither tiles nor non-constant stores are pre-zeroed
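
The comment describes how captured constant initializations are coalesced into 32- or 64-bit "tiles". A toy illustration of the idea, assuming little-endian layout and plain byte buffers rather than InitializeNode's capture machinery:

#include <cstdint>
#include <cstring>

void store_bytes(uint8_t* base, const uint8_t v[4]) {
  for (int i = 0; i < 4; i++) base[i] = v[i];     // four narrow constant stores
}

void store_tile(uint8_t* base, const uint8_t v[4]) {
  uint32_t tile = (uint32_t)v[0]       |
                  (uint32_t)v[1] <<  8 |
                  (uint32_t)v[2] << 16 |
                  (uint32_t)v[3] << 24;           // constants coalesced into one tile
  std::memcpy(base, &tile, sizeof(tile));         // a single wide store
}
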
@ -3678,7 +3678,7 @@ Node *MergeMemNode::Ideal(PhaseGVN *phase, bool can_reshape) {
else if (old_mmem != NULL) { else if (old_mmem != NULL) {
new_mem = old_mmem->memory_at(i); new_mem = old_mmem->memory_at(i);
} }
// else preceeding memory was not a MergeMem // else preceding memory was not a MergeMem
// replace equivalent phis (unfortunately, they do not GVN together) // replace equivalent phis (unfortunately, they do not GVN together)
if (new_mem != NULL && new_mem != new_base && if (new_mem != NULL && new_mem != new_base &&


@ -757,10 +757,10 @@ public:
// Model. Monitor-enter and volatile-load act as Aquires: no following ref // Model. Monitor-enter and volatile-load act as Aquires: no following ref
// can be moved to before them. We insert a MemBar-Acquire after a FastLock or // can be moved to before them. We insert a MemBar-Acquire after a FastLock or
// volatile-load. Monitor-exit and volatile-store act as Release: no // volatile-load. Monitor-exit and volatile-store act as Release: no
// preceeding ref can be moved to after them. We insert a MemBar-Release // preceding ref can be moved to after them. We insert a MemBar-Release
// before a FastUnlock or volatile-store. All volatiles need to be // before a FastUnlock or volatile-store. All volatiles need to be
// serialized, so we follow all volatile-stores with a MemBar-Volatile to // serialized, so we follow all volatile-stores with a MemBar-Volatile to
// seperate it from any following volatile-load. // separate it from any following volatile-load.
class MemBarNode: public MultiNode { class MemBarNode: public MultiNode {
virtual uint hash() const ; // { return NO_HASH; } virtual uint hash() const ; // { return NO_HASH; }
virtual uint cmp( const Node &n ) const ; // Always fail, except on self virtual uint cmp( const Node &n ) const ; // Always fail, except on self
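
The MemBarNode comment spells out acquire/release placement for monitors and volatiles. As an illustration only, the same ordering rules expressed with std::atomic (not HotSpot's MemBar nodes); the trailing fence plays the role of the MemBar-Volatile that separates a volatile-store from a later volatile-load:

#include <atomic>

std::atomic<int> volatile_field{0};
int plain_data = 0;

int reader() {
  int v = volatile_field.load(std::memory_order_acquire); // acquire: no following ref moves above this
  return v + plain_data;                                   // sees the writer's plain_data if v was observed
}

void writer(int x) {
  plain_data = x;                                          // release: no preceding ref moves below the store
  volatile_field.store(x, std::memory_order_release);
  std::atomic_thread_fence(std::memory_order_seq_cst);     // keep it apart from any following volatile-load
}
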


@ -968,22 +968,23 @@ const Type *Node::Value( PhaseTransform * ) const {
// Example: when reshape "(X+3)+4" into "X+7" you must leave the Node for // Example: when reshape "(X+3)+4" into "X+7" you must leave the Node for
// "X+3" unchanged in case it is shared. // "X+3" unchanged in case it is shared.
// //
// If you modify the 'this' pointer's inputs, you must use 'set_req' with // If you modify the 'this' pointer's inputs, you should use
// def-use info. If you are making a new Node (either as the new root or // 'set_req'. If you are making a new Node (either as the new root or
// some new internal piece) you must NOT use set_req with def-use info. // some new internal piece) you may use 'init_req' to set the initial
// You can make a new Node with either 'new' or 'clone'. In either case, // value. You can make a new Node with either 'new' or 'clone'. In
// def-use info is (correctly) not generated. // either case, def-use info is correctly maintained.
//
// Example: reshape "(X+3)+4" into "X+7": // Example: reshape "(X+3)+4" into "X+7":
// set_req(1,in(1)->in(1) /* grab X */, du /* must use DU on 'this' */); // set_req(1, in(1)->in(1));
// set_req(2,phase->intcon(7),du); // set_req(2, phase->intcon(7));
// return this; // return this;
// Example: reshape "X*4" into "X<<1" // Example: reshape "X*4" into "X<<2"
// return new (C,3) LShiftINode( in(1), phase->intcon(1) ); // return new (C,3) LShiftINode(in(1), phase->intcon(2));
// //
// You must call 'phase->transform(X)' on any new Nodes X you make, except // You must call 'phase->transform(X)' on any new Nodes X you make, except
// for the returned root node. Example: reshape "X*31" with "(X<<5)-1". // for the returned root node. Example: reshape "X*31" with "(X<<5)-X".
// Node *shift=phase->transform(new(C,3)LShiftINode(in(1),phase->intcon(5))); // Node *shift=phase->transform(new(C,3)LShiftINode(in(1),phase->intcon(5)));
// return new (C,3) AddINode(shift, phase->intcon(-1)); // return new (C,3) AddINode(shift, in(1));
// //
// When making a Node for a constant use 'phase->makecon' or 'phase->intcon'. // When making a Node for a constant use 'phase->makecon' or 'phase->intcon'.
// These forms are faster than 'phase->transform(new (C,1) ConNode())' and Do // These forms are faster than 'phase->transform(new (C,1) ConNode())' and Do
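
The rewritten comment documents the Ideal() conventions and its strength-reduction examples (X*4 into X<<2, X*31 into (X<<5)-X). A self-contained sketch of those two rewrites over a toy expression type; Expr, con and ideal_mul are made-up names, not HotSpot's Node or PhaseGVN API:

#include <memory>

struct Expr {
  enum Kind { Var, Con, Mul, Shl, Sub } kind;
  int value;                    // constant value, used when kind == Con
  std::shared_ptr<Expr> a, b;   // operands
};
using ExprP = std::shared_ptr<Expr>;

static ExprP con(int v)                             { return ExprP(new Expr{Expr::Con, v, nullptr, nullptr}); }
static ExprP node(Expr::Kind k, ExprP a, ExprP b)   { return ExprP(new Expr{k, 0, a, b}); }

// "Ideal"-style rewrite of X*C for C == 4 or C == 31; returns NULL when
// no better form is known, mirroring the convention described above.
ExprP ideal_mul(const ExprP& mul) {
  if (mul->kind != Expr::Mul || mul->b->kind != Expr::Con) return nullptr;
  int c = mul->b->value;
  if (c == 4)  return node(Expr::Shl, mul->a, con(2));                          // X*4  ==> X<<2
  if (c == 31) return node(Expr::Sub, node(Expr::Shl, mul->a, con(5)), mul->a); // X*31 ==> (X<<5)-X
  return nullptr;
}
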
@ -1679,7 +1680,7 @@ void Node::verify_edges(Unique_Node_List &visited) {
if (visited.member(this)) return; if (visited.member(this)) return;
visited.push(this); visited.push(this);
// Walk over all input edges, checking for correspondance // Walk over all input edges, checking for correspondence
for( i = 0; i < len(); i++ ) { for( i = 0; i < len(); i++ ) {
n = in(i); n = in(i);
if (n != NULL && !n->is_top()) { if (n != NULL && !n->is_top()) {
@ -1723,7 +1724,7 @@ void Node::verify_recur(const Node *n, int verify_depth,
// Contained in new_space or old_space? // Contained in new_space or old_space?
VectorSet *v = C->node_arena()->contains(n) ? &new_space : &old_space; VectorSet *v = C->node_arena()->contains(n) ? &new_space : &old_space;
// Check for visited in the proper space. Numberings are not unique // Check for visited in the proper space. Numberings are not unique
// across spaces so we need a seperate VectorSet for each space. // across spaces so we need a separate VectorSet for each space.
if( v->test_set(n->_idx) ) return; if( v->test_set(n->_idx) ) return;
if (n->is_Con() && n->bottom_type() == Type::TOP) { if (n->is_Con() && n->bottom_type() == Type::TOP) {


@ -257,7 +257,7 @@ protected:
Node **_in; // Array of use-def references to Nodes Node **_in; // Array of use-def references to Nodes
Node **_out; // Array of def-use references to Nodes Node **_out; // Array of def-use references to Nodes
// Input edges are split into two catagories. Required edges are required // Input edges are split into two categories. Required edges are required
// for semantic correctness; order is important and NULLs are allowed. // for semantic correctness; order is important and NULLs are allowed.
// Precedence edges are used to help determine execution order and are // Precedence edges are used to help determine execution order and are
// added, e.g., for scheduling purposes. They are unordered and not // added, e.g., for scheduling purposes. They are unordered and not
@ -854,7 +854,7 @@ public:
// If the hash function returns the special sentinel value NO_HASH, // If the hash function returns the special sentinel value NO_HASH,
// the node is guaranteed never to compare equal to any other node. // the node is guaranteed never to compare equal to any other node.
// If we accidently generate a hash with value NO_HASH the node // If we accidentally generate a hash with value NO_HASH the node
// won't go into the table and we'll lose a little optimization. // won't go into the table and we'll lose a little optimization.
enum { NO_HASH = 0 }; enum { NO_HASH = 0 };
virtual uint hash() const; virtual uint hash() const;
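
The comment explains the NO_HASH sentinel: a node whose hash equals the sentinel never enters the value-numbering table, so it can never be commoned with another node, and an accidental collision with the sentinel only costs a missed optimization. A toy sketch of that convention, with std::unordered_map standing in for the real NodeHash table:

#include <unordered_map>
#include <string>

enum { NO_HASH = 0 };

unsigned toy_hash(const std::string& key) {
  unsigned h = 5381;
  for (char c : key) h = h * 33 + (unsigned char)c;
  return h;                                    // may collide with NO_HASH; that's only a lost optimization
}

bool try_common(std::unordered_map<unsigned, std::string>& table, const std::string& key) {
  unsigned h = toy_hash(key);
  if (h == NO_HASH) return false;              // never goes into the table
  if (table.count(h)) return true;             // an equivalent entry already exists
  table.emplace(h, key);
  return false;
}
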


@ -1171,7 +1171,7 @@ void Compile::Fill_buffer() {
cb->flush_bundle(false); cb->flush_bundle(false);
// The following logic is duplicated in the code ifdeffed for // The following logic is duplicated in the code ifdeffed for
// ENABLE_ZAP_DEAD_LOCALS which apppears above in this file. It // ENABLE_ZAP_DEAD_LOCALS which appears above in this file. It
// should be factored out. Or maybe dispersed to the nodes? // should be factored out. Or maybe dispersed to the nodes?
// Special handling for SafePoint/Call Nodes // Special handling for SafePoint/Call Nodes
@ -1275,7 +1275,7 @@ void Compile::Fill_buffer() {
} }
#ifdef ASSERT #ifdef ASSERT
// Check that oop-store preceeds the card-mark // Check that oop-store precedes the card-mark
else if( mach->ideal_Opcode() == Op_StoreCM ) { else if( mach->ideal_Opcode() == Op_StoreCM ) {
uint storeCM_idx = j; uint storeCM_idx = j;
Node *oop_store = mach->in(mach->_cnt); // First precedence edge Node *oop_store = mach->in(mach->_cnt); // First precedence edge
@ -1291,7 +1291,7 @@ void Compile::Fill_buffer() {
#endif #endif
else if( !n->is_Proj() ) { else if( !n->is_Proj() ) {
// Remember the begining of the previous instruction, in case // Remember the beginning of the previous instruction, in case
// it's followed by a flag-kill and a null-check. Happens on // it's followed by a flag-kill and a null-check. Happens on
// Intel all the time, with add-to-memory kind of opcodes. // Intel all the time, with add-to-memory kind of opcodes.
previous_offset = current_offset; previous_offset = current_offset;
@ -1567,7 +1567,7 @@ Scheduling::Scheduling(Arena *arena, Compile &compile)
compile.set_node_bundling_limit(_node_bundling_limit); compile.set_node_bundling_limit(_node_bundling_limit);
// This one is persistant within the Compile class // This one is persistent within the Compile class
_node_bundling_base = NEW_ARENA_ARRAY(compile.comp_arena(), Bundle, node_max); _node_bundling_base = NEW_ARENA_ARRAY(compile.comp_arena(), Bundle, node_max);
// Allocate space for fixed-size arrays // Allocate space for fixed-size arrays
@ -1666,7 +1666,7 @@ void Compile::ScheduleAndBundle() {
// Compute the latency of all the instructions. This is fairly simple, // Compute the latency of all the instructions. This is fairly simple,
// because we already have a legal ordering. Walk over the instructions // because we already have a legal ordering. Walk over the instructions
// from first to last, and compute the latency of the instruction based // from first to last, and compute the latency of the instruction based
// on the latency of the preceeding instruction(s). // on the latency of the preceding instruction(s).
void Scheduling::ComputeLocalLatenciesForward(const Block *bb) { void Scheduling::ComputeLocalLatenciesForward(const Block *bb) {
#ifndef PRODUCT #ifndef PRODUCT
if (_cfg->C->trace_opto_output()) if (_cfg->C->trace_opto_output())
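
ComputeLocalLatenciesForward is described as a single forward pass that derives each instruction's latency from the instructions before it. A hedged sketch of that pass with a toy instruction record; the inputs and latency fields are assumptions, not Scheduling's internal arrays:

#include <vector>
#include <algorithm>

struct Instr {
  std::vector<int> inputs;   // indices of earlier instructions this one uses
  int latency;               // cycles before the result is available
};

std::vector<int> compute_start_cycles(const std::vector<Instr>& block) {
  std::vector<int> start(block.size(), 0);
  for (size_t i = 0; i < block.size(); i++) {   // first to last: the ordering is already legal
    for (int in : block[i].inputs)
      start[i] = std::max(start[i], start[in] + block[in].latency);
  }
  return start;
}
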
@ -1931,7 +1931,7 @@ void Scheduling::AddNodeToBundle(Node *n, const Block *bb) {
uint siz = _available.size(); uint siz = _available.size();
// Conditional branches can support an instruction that // Conditional branches can support an instruction that
// is unconditionally executed and not dependant by the // is unconditionally executed and not dependent by the
// branch, OR a conditionally executed instruction if // branch, OR a conditionally executed instruction if
// the branch is taken. In practice, this means that // the branch is taken. In practice, this means that
// the first instruction at the branch target is // the first instruction at the branch target is
@ -1947,7 +1947,7 @@ void Scheduling::AddNodeToBundle(Node *n, const Block *bb) {
#endif #endif
// At least 1 instruction is on the available list // At least 1 instruction is on the available list
// that is not dependant on the branch // that is not dependent on the branch
for (uint i = 0; i < siz; i++) { for (uint i = 0; i < siz; i++) {
Node *d = _available[i]; Node *d = _available[i];
const Pipeline *avail_pipeline = d->pipeline(); const Pipeline *avail_pipeline = d->pipeline();
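
The comment describes bundling a branch with an available instruction that does not depend on it. A small sketch of that selection rule with hypothetical fields; the real Scheduling::AddNodeToBundle also consults the pipeline description:

#include <vector>
#include <cstddef>

struct Avail {
  bool depends_on_branch;   // true if the branch must resolve before this runs
  bool fits_delay_slot;     // the pipeline allows it to issue next to the branch
};

// Returns the index of a usable companion instruction, or -1 if none qualifies.
int pick_branch_companion(const std::vector<Avail>& available) {
  for (size_t i = 0; i < available.size(); i++) {
    if (!available[i].depends_on_branch && available[i].fits_delay_slot)
      return (int)i;
  }
  return -1;
}
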


@ -78,7 +78,7 @@ public:
}; };
// See if it is OK to inline. // See if it is OK to inline.
// The reciever is the inline tree for the caller. // The receiver is the inline tree for the caller.
// //
// The result is a temperature indication. If it is hot or cold, // The result is a temperature indication. If it is hot or cold,
// inlining is immediate or undesirable. Otherwise, the info block // inlining is immediate or undesirable. Otherwise, the info block
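
The InlineTree comment describes the result of the inlining check as a "temperature". A sketch of that idea with invented names; Temperature and InlineDecision are not the actual HotSpot types:

#include <string>

enum class Temperature { Hot, Warm, Cold };

struct InlineDecision {
  Temperature temp;
  std::string reason;    // explanatory message, meaningful when the call is not plainly hot
};

bool should_inline_now(const InlineDecision& d) {
  return d.temp == Temperature::Hot;   // hot: inline immediately; cold: undesirable
}
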


@ -607,7 +607,7 @@ void Parse::do_all_blocks() {
if (control()->is_Region() && !block->is_loop_head() && !has_irreducible && !block->is_handler()) { if (control()->is_Region() && !block->is_loop_head() && !has_irreducible && !block->is_handler()) {
// In the absence of irreducible loops, the Region and Phis // In the absence of irreducible loops, the Region and Phis
// associated with a merge that doesn't involve a backedge can // associated with a merge that doesn't involve a backedge can
// be simplfied now since the RPO parsing order guarantees // be simplified now since the RPO parsing order guarantees
// that any path which was supposed to reach here has already // that any path which was supposed to reach here has already
// been parsed or must be dead. // been parsed or must be dead.
Node* c = control(); Node* c = control();


@ -32,7 +32,7 @@ extern int explicit_null_checks_inserted,
void Parse::array_load(BasicType elem_type) { void Parse::array_load(BasicType elem_type) {
const Type* elem = Type::TOP; const Type* elem = Type::TOP;
Node* adr = array_addressing(elem_type, 0, &elem); Node* adr = array_addressing(elem_type, 0, &elem);
if (stopped()) return; // guarenteed null or range check if (stopped()) return; // guaranteed null or range check
_sp -= 2; // Pop array and index _sp -= 2; // Pop array and index
const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(elem_type); const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(elem_type);
Node* ld = make_load(control(), adr, elem, elem_type, adr_type); Node* ld = make_load(control(), adr, elem, elem_type, adr_type);
@ -43,7 +43,7 @@ void Parse::array_load(BasicType elem_type) {
//--------------------------------array_store---------------------------------- //--------------------------------array_store----------------------------------
void Parse::array_store(BasicType elem_type) { void Parse::array_store(BasicType elem_type) {
Node* adr = array_addressing(elem_type, 1); Node* adr = array_addressing(elem_type, 1);
if (stopped()) return; // guarenteed null or range check if (stopped()) return; // guaranteed null or range check
Node* val = pop(); Node* val = pop();
_sp -= 2; // Pop array and index _sp -= 2; // Pop array and index
const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(elem_type); const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(elem_type);
@ -1541,14 +1541,14 @@ void Parse::do_one_bytecode() {
case Bytecodes::_aaload: array_load(T_OBJECT); break; case Bytecodes::_aaload: array_load(T_OBJECT); break;
case Bytecodes::_laload: { case Bytecodes::_laload: {
a = array_addressing(T_LONG, 0); a = array_addressing(T_LONG, 0);
if (stopped()) return; // guarenteed null or range check if (stopped()) return; // guaranteed null or range check
_sp -= 2; // Pop array and index _sp -= 2; // Pop array and index
push_pair( make_load(control(), a, TypeLong::LONG, T_LONG, TypeAryPtr::LONGS)); push_pair( make_load(control(), a, TypeLong::LONG, T_LONG, TypeAryPtr::LONGS));
break; break;
} }
case Bytecodes::_daload: { case Bytecodes::_daload: {
a = array_addressing(T_DOUBLE, 0); a = array_addressing(T_DOUBLE, 0);
if (stopped()) return; // guarenteed null or range check if (stopped()) return; // guaranteed null or range check
_sp -= 2; // Pop array and index _sp -= 2; // Pop array and index
push_pair( make_load(control(), a, Type::DOUBLE, T_DOUBLE, TypeAryPtr::DOUBLES)); push_pair( make_load(control(), a, Type::DOUBLE, T_DOUBLE, TypeAryPtr::DOUBLES));
break; break;
@ -1560,7 +1560,7 @@ void Parse::do_one_bytecode() {
case Bytecodes::_fastore: array_store(T_FLOAT); break; case Bytecodes::_fastore: array_store(T_FLOAT); break;
case Bytecodes::_aastore: { case Bytecodes::_aastore: {
d = array_addressing(T_OBJECT, 1); d = array_addressing(T_OBJECT, 1);
if (stopped()) return; // guarenteed null or range check if (stopped()) return; // guaranteed null or range check
array_store_check(); array_store_check();
c = pop(); // Oop to store c = pop(); // Oop to store
b = pop(); // index (already used) b = pop(); // index (already used)
@ -1572,7 +1572,7 @@ void Parse::do_one_bytecode() {
} }
case Bytecodes::_lastore: { case Bytecodes::_lastore: {
a = array_addressing(T_LONG, 2); a = array_addressing(T_LONG, 2);
if (stopped()) return; // guarenteed null or range check if (stopped()) return; // guaranteed null or range check
c = pop_pair(); c = pop_pair();
_sp -= 2; // Pop array and index _sp -= 2; // Pop array and index
store_to_memory(control(), a, c, T_LONG, TypeAryPtr::LONGS); store_to_memory(control(), a, c, T_LONG, TypeAryPtr::LONGS);
@ -1580,7 +1580,7 @@ void Parse::do_one_bytecode() {
} }
case Bytecodes::_dastore: { case Bytecodes::_dastore: {
a = array_addressing(T_DOUBLE, 2); a = array_addressing(T_DOUBLE, 2);
if (stopped()) return; // guarenteed null or range check if (stopped()) return; // guaranteed null or range check
c = pop_pair(); c = pop_pair();
_sp -= 2; // Pop array and index _sp -= 2; // Pop array and index
c = dstore_rounding(c); c = dstore_rounding(c);


@ -73,7 +73,7 @@ elapsedTimer Phase::_t_buildOopMaps;
//------------------------------Phase------------------------------------------ //------------------------------Phase------------------------------------------
Phase::Phase( PhaseNumber pnum ) : _pnum(pnum), C( pnum == Compiler ? NULL : Compile::current()) { Phase::Phase( PhaseNumber pnum ) : _pnum(pnum), C( pnum == Compiler ? NULL : Compile::current()) {
// Poll for requests from shutdown mechanism to quiesce comiler (4448539, 4448544). // Poll for requests from shutdown mechanism to quiesce compiler (4448539, 4448544).
// This is an effective place to poll, since the compiler is full of phases. // This is an effective place to poll, since the compiler is full of phases.
// In particular, every inlining site uses a recursively created Parse phase. // In particular, every inlining site uses a recursively created Parse phase.
CompileBroker::maybe_block(); CompileBroker::maybe_block();

Some files were not shown because too many files have changed in this diff.