author     trims <none@none>  2009-03-12 18:16:36 -0700
committer  trims <none@none>  2009-03-12 18:16:36 -0700
commit     be223741e196a9674309e0ad6f7a6ee80d4a708c (patch)
tree       7d1460f138a00c594cb7dd4097b6971a2c686eb4 /src/share/vm/opto
parent     2ccc9bd9f66f568de9d973a2bb916c40058c63fa (diff)
parent     31afc3d8e2d634535d9a254cc30e88724e99cb07 (diff)
download   jdk8u_hotspot-be223741e196a9674309e0ad6f7a6ee80d4a708c.tar.gz
Merge
Diffstat (limited to 'src/share/vm/opto')
-rw-r--r--  src/share/vm/opto/block.cpp           10
-rw-r--r--  src/share/vm/opto/block.hpp            4
-rw-r--r--  src/share/vm/opto/buildOopMap.cpp      2
-rw-r--r--  src/share/vm/opto/callnode.cpp         1
-rw-r--r--  src/share/vm/opto/callnode.hpp         4
-rw-r--r--  src/share/vm/opto/cfgnode.cpp         12
-rw-r--r--  src/share/vm/opto/chaitin.cpp          2
-rw-r--r--  src/share/vm/opto/chaitin.hpp          4
-rw-r--r--  src/share/vm/opto/classes.hpp          2
-rw-r--r--  src/share/vm/opto/coalesce.cpp         8
-rw-r--r--  src/share/vm/opto/compile.cpp         10
-rw-r--r--  src/share/vm/opto/connode.cpp          2
-rw-r--r--  src/share/vm/opto/divnode.cpp          6
-rw-r--r--  src/share/vm/opto/domgraph.cpp         2
-rw-r--r--  src/share/vm/opto/escape.cpp          20
-rw-r--r--  src/share/vm/opto/gcm.cpp             78
-rw-r--r--  src/share/vm/opto/graphKit.cpp         4
-rw-r--r--  src/share/vm/opto/ifg.cpp              2
-rw-r--r--  src/share/vm/opto/ifnode.cpp          10
-rw-r--r--  src/share/vm/opto/library_call.cpp    22
-rw-r--r--  src/share/vm/opto/live.cpp             2
-rw-r--r--  src/share/vm/opto/locknode.cpp         2
-rw-r--r--  src/share/vm/opto/loopTransform.cpp    4
-rw-r--r--  src/share/vm/opto/loopUnswitch.cpp     2
-rw-r--r--  src/share/vm/opto/loopnode.cpp        24
-rw-r--r--  src/share/vm/opto/loopnode.hpp         4
-rw-r--r--  src/share/vm/opto/loopopts.cpp         4
-rw-r--r--  src/share/vm/opto/machnode.cpp         2
-rw-r--r--  src/share/vm/opto/macro.cpp           23
-rw-r--r--  src/share/vm/opto/matcher.cpp         56
-rw-r--r--  src/share/vm/opto/memnode.cpp         53
-rw-r--r--  src/share/vm/opto/memnode.hpp         29
-rw-r--r--  src/share/vm/opto/mulnode.cpp         50
-rw-r--r--  src/share/vm/opto/node.cpp            27
-rw-r--r--  src/share/vm/opto/node.hpp             4
-rw-r--r--  src/share/vm/opto/output.cpp          14
-rw-r--r--  src/share/vm/opto/parse.hpp            2
-rw-r--r--  src/share/vm/opto/parse1.cpp           2
-rw-r--r--  src/share/vm/opto/parse2.cpp          14
-rw-r--r--  src/share/vm/opto/phase.cpp            2
-rw-r--r--  src/share/vm/opto/phaseX.cpp           8
-rw-r--r--  src/share/vm/opto/postaloc.cpp         4
-rw-r--r--  src/share/vm/opto/reg_split.cpp       10
-rw-r--r--  src/share/vm/opto/runtime.cpp          2
-rw-r--r--  src/share/vm/opto/split_if.cpp         2
-rw-r--r--  src/share/vm/opto/superword.cpp        4
-rw-r--r--  src/share/vm/opto/superword.hpp        2
-rw-r--r--  src/share/vm/opto/type.cpp            12
-rw-r--r--  src/share/vm/opto/type.hpp             1
49 files changed, 358 insertions, 212 deletions
diff --git a/src/share/vm/opto/block.cpp b/src/share/vm/opto/block.cpp
index 357953085..10ceec302 100644
--- a/src/share/vm/opto/block.cpp
+++ b/src/share/vm/opto/block.cpp
@@ -181,7 +181,7 @@ int Block::is_Empty() const {
}
//------------------------------has_uncommon_code------------------------------
-// Return true if the block's code implies that it is not likely to be
+// Return true if the block's code implies that it is likely to be
// executed infrequently. Check to see if the block ends in a Halt or
// a low probability call.
bool Block::has_uncommon_code() const {
@@ -909,6 +909,10 @@ void PhaseCFG::verify( ) const {
!(n->jvms() != NULL && n->jvms()->is_monitor_use(k)) ) {
assert( b->find_node(def) < j, "uses must follow definitions" );
}
+ if( def->is_SafePointScalarObject() ) {
+ assert(_bbs[def->_idx] == b, "SafePointScalarObject Node should be at the same block as its SafePoint node");
+ assert(_bbs[def->_idx] == _bbs[def->in(0)->_idx], "SafePointScalarObject Node should be at the same block as its control edge");
+ }
}
}
}
@@ -1307,7 +1311,7 @@ void PhaseBlockLayout::merge_traces(bool fall_thru_only)
}
} else if (e->state() == CFGEdge::open) {
// Append traces, even without a fall-thru connection.
- // But leave root entry at the begining of the block list.
+ // But leave root entry at the beginning of the block list.
if (targ_trace != trace(_cfg._broot)) {
e->set_state(CFGEdge::connected);
src_trace->append(targ_trace);
@@ -1430,7 +1434,7 @@ bool Trace::backedge(CFGEdge *e) {
}
// Backbranch to the top of a trace
- // Scroll foward through the trace from the targ_block. If we find
+ // Scroll forward through the trace from the targ_block. If we find
// a loop head before another loop top, use the loop head alignment.
for (Block *b = targ_block; b != NULL; b = next(b)) {
if (b->has_loop_alignment()) {
diff --git a/src/share/vm/opto/block.hpp b/src/share/vm/opto/block.hpp
index 43ce09fe9..f4c46ba2a 100644
--- a/src/share/vm/opto/block.hpp
+++ b/src/share/vm/opto/block.hpp
@@ -347,6 +347,8 @@ class PhaseCFG : public Phase {
// Helper function to insert a node into a block
void schedule_node_into_block( Node *n, Block *b );
+ void replace_block_proj_ctrl( Node *n );
+
// Set the basic block for pinned Nodes
void schedule_pinned_nodes( VectorSet &visited );
@@ -607,7 +609,7 @@ class Trace : public ResourceObj {
Block * next(Block *b) const { return _next_list[b->_pre_order]; }
void set_next(Block *b, Block *n) const { _next_list[b->_pre_order] = n; }
- // Return the block that preceeds "b" in the trace.
+ // Return the block that precedes "b" in the trace.
Block * prev(Block *b) const { return _prev_list[b->_pre_order]; }
void set_prev(Block *b, Block *p) const { _prev_list[b->_pre_order] = p; }
diff --git a/src/share/vm/opto/buildOopMap.cpp b/src/share/vm/opto/buildOopMap.cpp
index 30a9d2684..4a8612687 100644
--- a/src/share/vm/opto/buildOopMap.cpp
+++ b/src/share/vm/opto/buildOopMap.cpp
@@ -55,7 +55,7 @@
// breadth-first approach but it was worse (showed O(n^2) in the
// pick-next-block code).
//
-// The relevent data is kept in a struct of arrays (it could just as well be
+// The relevant data is kept in a struct of arrays (it could just as well be
// an array of structs, but the struct-of-arrays is generally a little more
// efficient). The arrays are indexed by register number (including
// stack-slots as registers) and so is bounded by 200 to 300 elements in
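
An aside on the struct-of-arrays remark above: the point is cache behavior when a pass sweeps a single field across all registers. A minimal C++ sketch of the two layouts (type and field names are hypothetical, not HotSpot's):

    #include <cstdint>
    #include <vector>

    // Array-of-structs: each element interleaves every field, so scanning
    // just one field strides through memory and wastes cache-line space.
    struct RegInfoAoS { int32_t def_idx; int16_t flags; };

    // Struct-of-arrays: one dense array per field, indexed by register
    // number; a sweep over 'flags' alone touches contiguous memory.
    struct RegInfoSoA {
      std::vector<int32_t> def_idx;
      std::vector<int16_t> flags;
      explicit RegInfoSoA(size_t nregs) : def_idx(nregs), flags(nregs) {}
    };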
diff --git a/src/share/vm/opto/callnode.cpp b/src/share/vm/opto/callnode.cpp
index 7dff291fc..811693cce 100644
--- a/src/share/vm/opto/callnode.cpp
+++ b/src/share/vm/opto/callnode.cpp
@@ -975,6 +975,7 @@ SafePointScalarObjectNode::SafePointScalarObjectNode(const TypeOopPtr* tp,
}
bool SafePointScalarObjectNode::pinned() const { return true; }
+bool SafePointScalarObjectNode::depends_only_on_test() const { return false; }
uint SafePointScalarObjectNode::ideal_reg() const {
return 0; // No matching to machine instruction
diff --git a/src/share/vm/opto/callnode.hpp b/src/share/vm/opto/callnode.hpp
index 06c783364..40a4abeae 100644
--- a/src/share/vm/opto/callnode.hpp
+++ b/src/share/vm/opto/callnode.hpp
@@ -437,6 +437,10 @@ public:
// of the SafePoint node for which it was generated.
virtual bool pinned() const; // { return true; }
+ // SafePointScalarObject depends on the SafePoint node
+ // for which it was generated.
+ virtual bool depends_only_on_test() const; // { return false; }
+
virtual uint size_of() const { return sizeof(*this); }
// Assumes that "this" is an argument to a safepoint node "s", and that
diff --git a/src/share/vm/opto/cfgnode.cpp b/src/share/vm/opto/cfgnode.cpp
index fdc4bb059..e48c15e79 100644
--- a/src/share/vm/opto/cfgnode.cpp
+++ b/src/share/vm/opto/cfgnode.cpp
@@ -1350,7 +1350,7 @@ static void split_once(PhaseIterGVN *igvn, Node *phi, Node *val, Node *n, Node *
}
// Register the new node but do not transform it. Cannot transform until the
- // entire Region/Phi conglerate has been hacked as a single huge transform.
+ // entire Region/Phi conglomerate has been hacked as a single huge transform.
igvn->register_new_node_with_optimizer( newn );
// Now I can point to the new node.
n->add_req(newn);
@@ -1381,7 +1381,7 @@ static Node* split_flow_path(PhaseGVN *phase, PhiNode *phi) {
Node *val = phi->in(i); // Constant to split for
uint hit = 0; // Number of times it occurs
- for( ; i < phi->req(); i++ ){ // Count occurances of constant
+ for( ; i < phi->req(); i++ ){ // Count occurrences of constant
Node *n = phi->in(i);
if( !n ) return NULL;
if( phase->type(n) == Type::TOP ) return NULL;
@@ -1423,7 +1423,7 @@ static Node* split_flow_path(PhaseGVN *phase, PhiNode *phi) {
//=============================================================================
//------------------------------simple_data_loop_check-------------------------
-// Try to determing if the phi node in a simple safe/unsafe data loop.
+// Try to determine if the phi node is in a simple safe/unsafe data loop.
// Returns:
// enum LoopSafety { Safe = 0, Unsafe, UnsafeLoop };
// Safe - safe case when the phi and its inputs reference only safe data
@@ -1687,7 +1687,7 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) {
progress = phase->C->top();
break;
}
- // If tranformed to a MergeMem, get the desired slice
+ // If transformed to a MergeMem, get the desired slice
// Otherwise the returned node represents memory for every slice
Node *new_mem = (m->is_MergeMem()) ?
m->as_MergeMem()->memory_at(alias_idx) : m;
@@ -1962,7 +1962,7 @@ const Type *CatchNode::Value( PhaseTransform *phase ) const {
f[CatchProjNode::fall_through_index] = Type::TOP;
} else if( call->req() > TypeFunc::Parms ) {
const Type *arg0 = phase->type( call->in(TypeFunc::Parms) );
- // Check for null reciever to virtual or interface calls
+ // Check for null receiver to virtual or interface calls
if( call->is_CallDynamicJava() &&
arg0->higher_equal(TypePtr::NULL_PTR) ) {
f[CatchProjNode::fall_through_index] = Type::TOP;
@@ -1995,7 +1995,7 @@ Node *CatchProjNode::Identity( PhaseTransform *phase ) {
// also remove any exception table entry. Thus we must know the call
// feeding the Catch will not really throw an exception. This is ok for
// the main fall-thru control (happens when we know a call can never throw
- // an exception) or for "rethrow", because a further optimnization will
+ // an exception) or for "rethrow", because a further optimization will
// yank the rethrow (happens when we inline a function that can throw an
// exception and the caller has no handler). Not legal, e.g., for passing
// a NULL receiver to a v-call, or passing bad types to a slow-check-cast.
diff --git a/src/share/vm/opto/chaitin.cpp b/src/share/vm/opto/chaitin.cpp
index d551519c0..e890c4ae3 100644
--- a/src/share/vm/opto/chaitin.cpp
+++ b/src/share/vm/opto/chaitin.cpp
@@ -1246,7 +1246,7 @@ uint PhaseChaitin::Select( ) {
// If the live range is not bound, then we actually had some choices
// to make. In this case, the mask has more bits in it than the colors
- // choosen. Restrict the mask to just what was picked.
+ // chosen. Restrict the mask to just what was picked.
if( lrg->num_regs() == 1 ) { // Size 1 live range
lrg->Clear(); // Clear the mask
lrg->Insert(reg); // Set regmask to match selected reg
diff --git a/src/share/vm/opto/chaitin.hpp b/src/share/vm/opto/chaitin.hpp
index 32f3eb0c1..0de7dd41e 100644
--- a/src/share/vm/opto/chaitin.hpp
+++ b/src/share/vm/opto/chaitin.hpp
@@ -327,7 +327,7 @@ class PhaseChaitin : public PhaseRegAlloc {
// True if lidx is used before any real register is def'd in the block
bool prompt_use( Block *b, uint lidx );
Node *get_spillcopy_wide( Node *def, Node *use, uint uidx );
- // Insert the spill at chosen location. Skip over any interveneing Proj's or
+ // Insert the spill at chosen location. Skip over any intervening Proj's or
// Phis. Skip over a CatchNode and projs, inserting in the fall-through block
// instead. Update high-pressure indices. Create a new live range.
void insert_proj( Block *b, uint i, Node *spill, uint maxlrg );
@@ -431,7 +431,7 @@ private:
void Simplify();
// Select colors by re-inserting edges into the IFG.
- // Return TRUE if any spills occured.
+ // Return TRUE if any spills occurred.
uint Select( );
// Helper function for select which allows biased coloring
OptoReg::Name choose_color( LRG &lrg, int chunk );
diff --git a/src/share/vm/opto/classes.hpp b/src/share/vm/opto/classes.hpp
index 94aaa6c7f..d527f5ea4 100644
--- a/src/share/vm/opto/classes.hpp
+++ b/src/share/vm/opto/classes.hpp
@@ -129,11 +129,13 @@ macro(JumpProj)
macro(LShiftI)
macro(LShiftL)
macro(LoadB)
+macro(LoadUB)
macro(LoadUS)
macro(LoadD)
macro(LoadD_unaligned)
macro(LoadF)
macro(LoadI)
+macro(LoadUI2L)
macro(LoadKlass)
macro(LoadNKlass)
macro(LoadL)
diff --git a/src/share/vm/opto/coalesce.cpp b/src/share/vm/opto/coalesce.cpp
index 7d9ab0083..52c009927 100644
--- a/src/share/vm/opto/coalesce.cpp
+++ b/src/share/vm/opto/coalesce.cpp
@@ -123,7 +123,7 @@ void PhaseChaitin::new_lrg( const Node *x, uint lrg ) {
}
//------------------------------clone_projs------------------------------------
-// After cloning some rematierialized instruction, clone any MachProj's that
+// After cloning some rematerialized instruction, clone any MachProj's that
// follow it. Example: Intel zero is XOR, kills flags. Sparc FP constants
// use G3 as an address temp.
int PhaseChaitin::clone_projs( Block *b, uint idx, Node *con, Node *copy, uint &maxlrg ) {
@@ -694,8 +694,8 @@ uint PhaseConservativeCoalesce::compute_separating_interferences(Node *dst_copy,
} // End of if not infinite-stack neighbor
} // End of if actually inserted
} // End of if live range overlaps
- } // End of else collect intereferences for 1 node
- } // End of while forever, scan back for intereferences
+ } // End of else collect interferences for 1 node
+ } // End of while forever, scan back for interferences
return reg_degree;
}
@@ -786,7 +786,7 @@ bool PhaseConservativeCoalesce::copy_copy( Node *dst_copy, Node *src_copy, Block
if( rm_size == 0 ) return false;
// Another early bail-out test is when we are double-coalescing and the
- // 2 copies are seperated by some control flow.
+ // 2 copies are separated by some control flow.
if( dst_copy != src_copy ) {
Block *src_b = _phc._cfg._bbs[src_copy->_idx];
Block *b2 = b;
diff --git a/src/share/vm/opto/compile.cpp b/src/share/vm/opto/compile.cpp
index e7b215c38..d1b9332a9 100644
--- a/src/share/vm/opto/compile.cpp
+++ b/src/share/vm/opto/compile.cpp
@@ -337,7 +337,7 @@ void Compile::print_compile_messages() {
tty->print_cr("*********************************************************");
}
if (env()->break_at_compile()) {
- // Open the debugger when compiing this method.
+ // Open the debugger when compiling this method.
tty->print("### Breaking when compiling: ");
method()->print_short_name();
tty->cr();
@@ -1191,8 +1191,8 @@ const TypePtr *Compile::flatten_alias_type( const TypePtr *tj ) const {
default: ShouldNotReachHere();
}
break;
- case 2: // No collasping at level 2; keep all splits
- case 3: // No collasping at level 3; keep all splits
+ case 2: // No collapsing at level 2; keep all splits
+ case 3: // No collapsing at level 3; keep all splits
break;
default:
Unimplemented();
@@ -2005,8 +2005,10 @@ static void final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &fpu ) {
case Op_StoreP:
case Op_StoreN:
case Op_LoadB:
+ case Op_LoadUB:
case Op_LoadUS:
case Op_LoadI:
+ case Op_LoadUI2L:
case Op_LoadKlass:
case Op_LoadNKlass:
case Op_LoadL:
@@ -2102,7 +2104,7 @@ static void final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &fpu ) {
// [base_reg + offset]
// NullCheck base_reg
//
- // Pin the new DecodeN node to non-null path on these patforms (Sparc)
+ // Pin the new DecodeN node to non-null path on these platforms (Sparc)
// to keep the information to which NULL check the new DecodeN node
// corresponds to use it as value in implicit_null_check().
//
diff --git a/src/share/vm/opto/connode.cpp b/src/share/vm/opto/connode.cpp
index 7e1cafefa..d46b6d4e9 100644
--- a/src/share/vm/opto/connode.cpp
+++ b/src/share/vm/opto/connode.cpp
@@ -71,7 +71,7 @@ testing.
to figure out which test post-dominates. The real problem is that it doesn't
matter which one you pick. After you pick one, the dominating-test elider in
IGVN can remove the test and allow you to hoist up to the dominating test on
-the choosen oop bypassing the test on the not-choosen oop. Seen in testing.
+the chosen oop bypassing the test on the not-chosen oop. Seen in testing.
Oops.
(3) Leave the CastPP's in. This makes the graph more accurate in some sense;
diff --git a/src/share/vm/opto/divnode.cpp b/src/share/vm/opto/divnode.cpp
index 67cad0679..55350e11f 100644
--- a/src/share/vm/opto/divnode.cpp
+++ b/src/share/vm/opto/divnode.cpp
@@ -35,7 +35,7 @@
// by constant into a multiply/shift/add series. Return false if calculations
// fail.
//
-// Borrowed almost verbatum from Hacker's Delight by Henry S. Warren, Jr. with
+// Borrowed almost verbatim from Hacker's Delight by Henry S. Warren, Jr. with
// minor type name and parameter changes.
static bool magic_int_divide_constants(jint d, jint &M, jint &s) {
int32_t p;
@@ -202,7 +202,7 @@ static Node *transform_int_divide( PhaseGVN *phase, Node *dividend, jint divisor
// by constant into a multiply/shift/add series. Return false if calculations
// fail.
//
-// Borrowed almost verbatum from Hacker's Delight by Henry S. Warren, Jr. with
+// Borrowed almost verbatim from Hacker's Delight by Henry S. Warren, Jr. with
// minor type name and parameter changes. Adjusted to 64 bit word width.
static bool magic_long_divide_constants(jlong d, jlong &M, jint &s) {
int64_t p;
@@ -1069,7 +1069,7 @@ Node *ModLNode::Ideal(PhaseGVN *phase, bool can_reshape) {
int log2_con = -1;
- // If this is a power of two, they maybe we can mask it
+ // If this is a power of two, then maybe we can mask it
if( is_power_of_2_long(pos_con) ) {
log2_con = log2_long(pos_con);
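
For readers without Hacker's Delight at hand, the transform named above replaces division by a constant with a multiply-high, shift, and add. A self-contained C++ check using the book's published magic pair for d = 7 (M = 0x92492493, s = 2); this is an illustration, not the HotSpot routine:

    #include <cstdint>
    #include <cstdio>

    // Signed 32-bit division by 7 without a divide instruction.
    static int32_t div7(int32_t n) {
      int64_t prod = (int64_t)(int32_t)0x92492493 * n; // multiply by magic M
      int32_t q = (int32_t)(prod >> 32);               // take the high word
      q += n;                 // correction term because M is negative
      q >>= 2;                // shift by s
      q += (uint32_t)n >> 31; // round toward zero for negative dividends
      return q;
    }

    int main() {
      for (int32_t n = -1000; n <= 1000; n++)
        if (div7(n) != n / 7) { printf("mismatch at %d\n", n); return 1; }
      printf("all quotients match\n");
      return 0;
    }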
diff --git a/src/share/vm/opto/domgraph.cpp b/src/share/vm/opto/domgraph.cpp
index 2ef02fd0c..af198e3c7 100644
--- a/src/share/vm/opto/domgraph.cpp
+++ b/src/share/vm/opto/domgraph.cpp
@@ -183,7 +183,7 @@ class Block_Stack {
if (pre_order == 1)
t->_parent = NULL; // first block doesn't have parent
else {
- // Save parent (currernt top block on stack) in DFS
+ // Save parent (current top block on stack) in DFS
t->_parent = &_tarjan[_stack_top->block->_pre_order];
}
// Now put this block on stack
diff --git a/src/share/vm/opto/escape.cpp b/src/share/vm/opto/escape.cpp
index bccfb9c64..c9ddc95dc 100644
--- a/src/share/vm/opto/escape.cpp
+++ b/src/share/vm/opto/escape.cpp
@@ -515,7 +515,7 @@ bool ConnectionGraph::split_AddP(Node *addp, Node *base, PhaseGVN *igvn) {
// cause the failure in add_offset() with narrow oops since TypeOopPtr()
// constructor verifies correctness of the offset.
//
- // It could happend on subclass's branch (from the type profiling
+ // It could happen on a subclass's branch (from the type profiling
// inlining) which was not eliminated during parsing since the exactness
// of the allocation type was not propagated to the subclass type check.
//
@@ -703,7 +703,7 @@ Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArra
while (prev != result) {
prev = result;
if (result == start_mem)
- break; // hit one of our sentinals
+ break; // hit one of our sentinels
if (result->is_Mem()) {
const Type *at = phase->type(result->in(MemNode::Address));
if (at != Type::TOP) {
@@ -720,7 +720,7 @@ Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArra
if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) {
Node *proj_in = result->in(0);
if (proj_in->is_Allocate() && proj_in->_idx == (uint)tinst->instance_id()) {
- break; // hit one of our sentinals
+ break; // hit one of our sentinels
} else if (proj_in->is_Call()) {
CallNode *call = proj_in->as_Call();
if (!call->may_modify(tinst, phase)) {
@@ -756,6 +756,16 @@ Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArra
} else {
break;
}
+ } else if (result->Opcode() == Op_SCMemProj) {
+ assert(result->in(0)->is_LoadStore(), "sanity");
+ const Type *at = phase->type(result->in(0)->in(MemNode::Address));
+ if (at != Type::TOP) {
+ assert (at->isa_ptr() != NULL, "pointer type required.");
+ int idx = C->get_alias_index(at->is_ptr());
+ assert(idx != alias_idx, "Object is not scalar replaceable if a LoadStore node accesses its field");
+ break;
+ }
+ result = result->in(0)->in(MemNode::Memory);
}
}
if (result->is_Phi()) {
@@ -794,7 +804,7 @@ Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArra
// Phase 2: Process MemNode's from memnode_worklist. compute new address type and
// search the Memory chain for a store with the appropriate type
// address type. If a Phi is found, create a new version with
-// the approriate memory slices from each of the Phi inputs.
+// the appropriate memory slices from each of the Phi inputs.
// For stores, process the users as follows:
// MemNode: push on memnode_worklist
// MergeMem: push on mergemem_worklist
@@ -1548,7 +1558,7 @@ bool ConnectionGraph::compute_escape() {
has_non_escaping_obj = true; // Non GlobalEscape
Node* n = ptn->_node;
if (n->is_Allocate() && ptn->_scalar_replaceable ) {
- // Push scalar replaceable alocations on alloc_worklist
+ // Push scalar replaceable allocations on alloc_worklist
// for processing in split_unique_types().
alloc_worklist.append(n);
}
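
On the new SCMemProj handling in find_inst_mem above: SCMemProj is the memory projection of a LoadStore (atomic compare-and-swap) node, and the assert records that an object whose field is touched by a CAS cannot be scalar replaced. A loose C++ analogy, not HotSpot code:

    #include <atomic>
    #include <cstdio>

    struct Box { std::atomic<int> x{0}; };  // a candidate allocation

    int main() {
      Box b;
      int expected = 0;
      // A CAS needs a real address for b.x; this is the situation the
      // LoadStore/SCMemProj pairing models, and it pins the object in
      // memory instead of letting its field live purely in registers.
      b.x.compare_exchange_strong(expected, 42);
      printf("%d\n", b.x.load());
      return 0;
    }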
diff --git a/src/share/vm/opto/gcm.cpp b/src/share/vm/opto/gcm.cpp
index c68fc0863..85263fcb3 100644
--- a/src/share/vm/opto/gcm.cpp
+++ b/src/share/vm/opto/gcm.cpp
@@ -57,6 +57,37 @@ void PhaseCFG::schedule_node_into_block( Node *n, Block *b ) {
}
}
+//----------------------------replace_block_proj_ctrl-------------------------
+// Nodes that have is_block_proj() nodes as their control need to use
+// the appropriate Region for their actual block as their control since
+// the projection will be in a predecessor block.
+void PhaseCFG::replace_block_proj_ctrl( Node *n ) {
+ const Node *in0 = n->in(0);
+ assert(in0 != NULL, "Only control-dependent");
+ const Node *p = in0->is_block_proj();
+ if (p != NULL && p != n) { // Control from a block projection?
+ assert(!n->pinned() || n->is_SafePointScalarObject(), "only SafePointScalarObject pinned node is expected here");
+ // Find trailing Region
+ Block *pb = _bbs[in0->_idx]; // Block-projection already has basic block
+ uint j = 0;
+ if (pb->_num_succs != 1) { // More than 1 successor?
+ // Search for successor
+ uint max = pb->_nodes.size();
+ assert( max > 1, "" );
+ uint start = max - pb->_num_succs;
+ // Find which output path belongs to projection
+ for (j = start; j < max; j++) {
+ if( pb->_nodes[j] == in0 )
+ break;
+ }
+ assert( j < max, "must find" );
+ // Change control to match head of successor basic block
+ j -= start;
+ }
+ n->set_req(0, pb->_succs[j]->head());
+ }
+}
+
//------------------------------schedule_pinned_nodes--------------------------
// Set the basic block for Nodes pinned into blocks
@@ -68,8 +99,10 @@ void PhaseCFG::schedule_pinned_nodes( VectorSet &visited ) {
Node *n = spstack.pop();
if( !visited.test_set(n->_idx) ) { // Test node and flag it as visited
if( n->pinned() && !_bbs.lookup(n->_idx) ) { // Pinned? Nail it down!
+ assert( n->in(0), "pinned Node must have Control" );
+ // Before setting block replace block_proj control edge
+ replace_block_proj_ctrl(n);
Node *input = n->in(0);
- assert( input, "pinned Node must have Control" );
while( !input->is_block_start() )
input = input->in(0);
Block *b = _bbs[input->_idx]; // Basic block of controlling input
@@ -158,34 +191,12 @@ bool PhaseCFG::schedule_early(VectorSet &visited, Node_List &roots) {
uint i = nstack_top_i;
if (i == 0) {
- // Special control input processing.
- // While I am here, go ahead and look for Nodes which are taking control
- // from a is_block_proj Node. After I inserted RegionNodes to make proper
- // blocks, the control at a is_block_proj more properly comes from the
- // Region being controlled by the block_proj Node.
+ // Fixup some control. Constants without control get attached
+ // to root and nodes that use is_block_proj() nodes should be attached
+ // to the region that starts their block.
const Node *in0 = n->in(0);
if (in0 != NULL) { // Control-dependent?
- const Node *p = in0->is_block_proj();
- if (p != NULL && p != n) { // Control from a block projection?
- // Find trailing Region
- Block *pb = _bbs[in0->_idx]; // Block-projection already has basic block
- uint j = 0;
- if (pb->_num_succs != 1) { // More then 1 successor?
- // Search for successor
- uint max = pb->_nodes.size();
- assert( max > 1, "" );
- uint start = max - pb->_num_succs;
- // Find which output path belongs to projection
- for (j = start; j < max; j++) {
- if( pb->_nodes[j] == in0 )
- break;
- }
- assert( j < max, "must find" );
- // Change control to match head of successor basic block
- j -= start;
- }
- n->set_req(0, pb->_succs[j]->head());
- }
+ replace_block_proj_ctrl(n);
} else { // n->in(0) == NULL
if (n->req() == 1) { // This guy is a constant with NO inputs?
n->set_req(0, _root);
@@ -226,6 +237,8 @@ bool PhaseCFG::schedule_early(VectorSet &visited, Node_List &roots) {
if (!n->pinned()) {
// Set earliest legal block.
_bbs.map(n->_idx, find_deepest_input(n, _bbs));
+ } else {
+ assert(_bbs[n->_idx] == _bbs[n->in(0)->_idx], "Pinned Node should be at the same block as its control edge");
}
if (nstack.is_empty()) {
@@ -593,7 +606,7 @@ Block* PhaseCFG::insert_anti_dependences(Block* LCA, Node* load, bool verify) {
if (pred_block != early) {
// If any predecessor of the Phi matches the load's "early block",
// we do not need a precedence edge between the Phi and 'load'
- // since the load will be forced into a block preceeding the Phi.
+ // since the load will be forced into a block preceding the Phi.
pred_block->set_raise_LCA_mark(load_index);
assert(!LCA_orig->dominates(pred_block) ||
early->dominates(pred_block), "early is high enough");
@@ -1386,7 +1399,7 @@ void PhaseCFG::Estimate_Block_Frequency() {
#ifdef ASSERT
for (uint i = 0; i < _num_blocks; i++ ) {
Block *b = _blocks[i];
- assert(b->_freq >= MIN_BLOCK_FREQUENCY, "Register Allocator requiers meaningful block frequency");
+ assert(b->_freq >= MIN_BLOCK_FREQUENCY, "Register Allocator requires meaningful block frequency");
}
#endif
@@ -1639,7 +1652,7 @@ float Block::succ_prob(uint i) {
// successor blocks.
assert(_num_succs == 2, "expecting 2 successors of a null check");
// If either successor has only one predecessor, then the
- // probabiltity estimate can be derived using the
+ // probability estimate can be derived using the
// relative frequency of the successor and this block.
if (_succs[i]->num_preds() == 2) {
return _succs[i]->_freq / _freq;
@@ -1841,7 +1854,7 @@ void Block::update_uncommon_branch(Block* ub) {
}
//------------------------------update_succ_freq-------------------------------
-// Update the appropriate frequency associated with block 'b', a succesor of
+// Update the appropriate frequency associated with block 'b', a successor of
// a block in this loop.
void CFGLoop::update_succ_freq(Block* b, float freq) {
if (b->_loop == this) {
@@ -1888,7 +1901,8 @@ void CFGLoop::scale_freq() {
for (int i = 0; i < _members.length(); i++) {
CFGElement* s = _members.at(i);
float block_freq = s->_freq * loop_freq;
- if (block_freq < MIN_BLOCK_FREQUENCY) block_freq = MIN_BLOCK_FREQUENCY;
+ if (g_isnan(block_freq) || block_freq < MIN_BLOCK_FREQUENCY)
+ block_freq = MIN_BLOCK_FREQUENCY;
s->_freq = block_freq;
}
CFGLoop* ch = _child;
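
The g_isnan guard added to scale_freq above matters because every ordered comparison with NaN is false, so the old clamp let NaN frequencies through untouched. A minimal C++ illustration, with std::isnan standing in for g_isnan and an invented constant for MIN_BLOCK_FREQUENCY:

    #include <cmath>
    #include <cstdio>

    int main() {
      float freq = NAN;
      const float min_freq = 1e-6f;               // stand-in value
      bool old_clamp = (freq < min_freq);         // false: NaN < x never holds
      bool new_clamp = std::isnan(freq) || freq < min_freq;  // true
      printf("old=%d new=%d\n", old_clamp, new_clamp);
      return 0;
    }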
diff --git a/src/share/vm/opto/graphKit.cpp b/src/share/vm/opto/graphKit.cpp
index 1fc77ca3f..9313ae4f8 100644
--- a/src/share/vm/opto/graphKit.cpp
+++ b/src/share/vm/opto/graphKit.cpp
@@ -1148,7 +1148,7 @@ Node* GraphKit::null_check_common(Node* value, BasicType type,
Node *tst = _gvn.transform( btst );
//-----------
- // if peephole optimizations occured, a prior test existed.
+ // if peephole optimizations occurred, a prior test existed.
// If a prior test existed, maybe it dominates as we can avoid this test.
if (tst != btst && type == T_OBJECT) {
// At this point we want to scan up the CFG to see if we can
@@ -1196,7 +1196,7 @@ Node* GraphKit::null_check_common(Node* value, BasicType type,
// Consider using 'Reason_class_check' instead?
// To cause an implicit null check, we set the not-null probability
- // to the maximum (PROB_MAX). For an explicit check the probablity
+ // to the maximum (PROB_MAX). For an explicit check the probability
// is set to a smaller value.
if (null_control != NULL || too_many_traps(reason)) {
// probability is less likely
diff --git a/src/share/vm/opto/ifg.cpp b/src/share/vm/opto/ifg.cpp
index 892a11483..9d260cbec 100644
--- a/src/share/vm/opto/ifg.cpp
+++ b/src/share/vm/opto/ifg.cpp
@@ -292,7 +292,7 @@ void PhaseIFG::verify( const PhaseChaitin *pc ) const {
//------------------------------interfere_with_live----------------------------
// Interfere this register with everything currently live. Use the RegMasks
// to trim the set of possible interferences. Return a count of register-only
-// inteferences as an estimate of register pressure.
+// interferences as an estimate of register pressure.
void PhaseChaitin::interfere_with_live( uint r, IndexSet *liveout ) {
uint retval = 0;
// Interfere with everything live.
diff --git a/src/share/vm/opto/ifnode.cpp b/src/share/vm/opto/ifnode.cpp
index 4f230765a..38fab34a5 100644
--- a/src/share/vm/opto/ifnode.cpp
+++ b/src/share/vm/opto/ifnode.cpp
@@ -81,7 +81,7 @@ static Node* split_if(IfNode *iff, PhaseIterGVN *igvn) {
uint i4;
for( i4 = 1; i4 < phi->req(); i4++ ) {
con1 = phi->in(i4);
- if( !con1 ) return NULL; // Do not optimize partially collaped merges
+ if( !con1 ) return NULL; // Do not optimize partially collapsed merges
if( con1->is_Con() ) break; // Found a constant
// Also allow null-vs-not-null checks
const TypePtr *tp = igvn->type(con1)->isa_ptr();
@@ -204,7 +204,7 @@ static Node* split_if(IfNode *iff, PhaseIterGVN *igvn) {
// T F T F T F
// ..s.. ..t .. ..s.. ..t.. ..s.. ..t..
//
- // Split the paths coming into the merge point into 2 seperate groups of
+ // Split the paths coming into the merge point into 2 separate groups of
// merges. On the left will be all the paths feeding constants into the
// Cmp's Phi. On the right will be the remaining paths. The Cmp's Phi
// will fold up into a constant; this will let the Cmp fold up as well as
@@ -236,7 +236,7 @@ static Node* split_if(IfNode *iff, PhaseIterGVN *igvn) {
}
// Register the new RegionNodes but do not transform them. Cannot
- // transform until the entire Region/Phi conglerate has been hacked
+ // transform until the entire Region/Phi conglomerate has been hacked
// as a single huge transform.
igvn->register_new_node_with_optimizer( region_c );
igvn->register_new_node_with_optimizer( region_x );
@@ -599,7 +599,7 @@ const TypeInt* IfNode::filtered_int_type(PhaseGVN* gvn, Node *val, Node* if_proj
//------------------------------fold_compares----------------------------
// See if a pair of CmpIs can be converted into a CmpU. In some cases
-// the direction of this if is determined by the preciding if so it
+// the direction of this if is determined by the preceding if so it
// can be eliminated entirely. Given an if testing (CmpI n c) check
// for an immediately control dependent if that is testing (CmpI n c2)
// and has one projection leading to this if and the other projection
@@ -811,7 +811,7 @@ Node *IfNode::Ideal(PhaseGVN *phase, bool can_reshape) {
// Try to remove extra range checks. All 'up_one_dom' gives up at merges
// so all checks we inspect post-dominate the top-most check we find.
// If we are going to fail the current check and we reach the top check
- // then we are guarenteed to fail, so just start interpreting there.
+ // then we are guaranteed to fail, so just start interpreting there.
// We 'expand' the top 2 range checks to include all post-dominating
// checks.
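
The CmpI-pair-to-CmpU conversion mentioned in fold_compares above is the classic unsigned range-check fold: for a non-negative limit, a sign check and an upper-bound check collapse into one unsigned compare. A self-contained C++ check of the equivalence:

    #include <cstdio>

    static bool two_cmps(int n, int limit) { return 0 <= n && n < limit; }
    // Valid whenever limit >= 0: a negative n wraps to a huge unsigned
    // value and fails the single compare.
    static bool one_cmpu(int n, int limit) {
      return (unsigned)n < (unsigned)limit;
    }

    int main() {
      for (int n = -5; n <= 15; n++)
        if (two_cmps(n, 10) != one_cmpu(n, 10)) { printf("mismatch\n"); return 1; }
      printf("equivalent\n");
      return 0;
    }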
diff --git a/src/share/vm/opto/library_call.cpp b/src/share/vm/opto/library_call.cpp
index 17a5c1f79..6cbcee84b 100644
--- a/src/share/vm/opto/library_call.cpp
+++ b/src/share/vm/opto/library_call.cpp
@@ -992,7 +992,7 @@ bool LibraryCallKit::inline_string_indexOf() {
Node *argument = pop(); // pop non-receiver first: it was pushed second
Node *receiver = pop();
- // don't intrinsify is argument isn't a constant string.
+ // don't intrinsify if argument isn't a constant string.
if (!argument->is_Con()) {
return false;
}
@@ -1267,7 +1267,7 @@ bool LibraryCallKit::inline_pow(vmIntrinsics::ID id) {
// result = DPow(x,y);
// }
// if (result != result)? {
- // ucommon_trap();
+ // uncommon_trap();
// }
// return result;
@@ -1324,7 +1324,7 @@ bool LibraryCallKit::inline_pow(vmIntrinsics::ID id) {
// Check if (y isn't int) then go to slow path
Node *bol2 = _gvn.transform( new (C, 2) BoolNode( cmpinty, BoolTest::ne ) );
- // Branch eith way
+ // Branch either way
IfNode *if2 = create_and_xform_if(complex_path,bol2, PROB_STATIC_INFREQUENT, COUNT_UNKNOWN);
Node *slow_path = opt_iff(r,if2); // Set region path 2
@@ -1715,8 +1715,8 @@ inline Node* LibraryCallKit::make_unsafe_address(Node* base, Node* offset) {
}
//----------------------------inline_reverseBytes_int/long-------------------
-// inline Int.reverseBytes(int)
-// inline Long.reverseByes(long)
+// inline Integer.reverseBytes(int)
+// inline Long.reverseBytes(long)
bool LibraryCallKit::inline_reverseBytes(vmIntrinsics::ID id) {
assert(id == vmIntrinsics::_reverseBytes_i || id == vmIntrinsics::_reverseBytes_l, "not reverse Bytes");
if (id == vmIntrinsics::_reverseBytes_i && !Matcher::has_match_rule(Op_ReverseBytesI)) return false;
@@ -1915,7 +1915,7 @@ bool LibraryCallKit::inline_unsafe_access(bool is_native_ptr, bool is_store, Bas
// addition to memory membars when is_volatile. This is a little
// too strong, but avoids the need to insert per-alias-type
// volatile membars (for stores; compare Parse::do_put_xxx), which
- // we cannot do effctively here because we probably only have a
+ // we cannot do effectively here because we probably only have a
// rough approximation of type.
need_mem_bar = true;
// For Stores, place a memory ordering barrier now.
@@ -2099,7 +2099,7 @@ bool LibraryCallKit::inline_unsafe_CAS(BasicType type) {
// overly confusing. (This is a true fact! I originally combined
// them, but even I was confused by it!) As much code/comments as
// possible are retained from inline_unsafe_access though to make
- // the correspondances clearer. - dl
+ // the correspondences clearer. - dl
if (callee()->is_static()) return false; // caller must have the capability!
@@ -2166,7 +2166,7 @@ bool LibraryCallKit::inline_unsafe_CAS(BasicType type) {
int alias_idx = C->get_alias_index(adr_type);
// Memory-model-wise, a CAS acts like a little synchronized block,
- // so needs barriers on each side. These don't't translate into
+ // so needs barriers on each side. These don't translate into
// actual barriers on most machines, but we still need rest of
// compiler to respect ordering.
@@ -3208,7 +3208,7 @@ bool LibraryCallKit::inline_native_hashcode(bool is_virtual, bool is_static) {
Node *hash_shift = _gvn.intcon(markOopDesc::hash_shift);
Node *hshifted_header= _gvn.transform( new (C, 3) URShiftXNode(header, hash_shift) );
// This hack lets the hash bits live anywhere in the mark object now, as long
- // as the shift drops the relevent bits into the low 32 bits. Note that
+ // as the shift drops the relevant bits into the low 32 bits. Note that
// Java spec says that HashCode is an int so there's no point in capturing
// an 'X'-sized hashcode (32 in 32-bit build or 64 in 64-bit build).
hshifted_header = ConvX2I(hshifted_header);
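
A hedged sketch of the shift-then-narrow pattern described above; the shift amount and mark-word layout here are illustrative, not the real markOop encoding:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const unsigned hash_shift = 8;               // illustrative position
      uint64_t mark = 0xABCD1234ull << hash_shift; // hash stored in a wide word
      // The shift drops the hash into the low 32 bits, then we narrow to
      // int (the ConvX2I step in the code above).
      int32_t hash = (int32_t)(mark >> hash_shift);
      printf("0x%x\n", (unsigned)hash);            // 0xabcd1234
      return 0;
    }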
@@ -3255,7 +3255,7 @@ bool LibraryCallKit::inline_native_hashcode(bool is_virtual, bool is_static) {
}
//---------------------------inline_native_getClass----------------------------
-// Build special case code for calls to hashCode on an object.
+// Build special case code for calls to getClass on an object.
bool LibraryCallKit::inline_native_getClass() {
Node* obj = null_check_receiver(callee());
if (stopped()) return true;
@@ -4594,7 +4594,7 @@ LibraryCallKit::generate_arraycopy(const TypePtr* adr_type,
}
// The memory edges above are precise in order to model effects around
- // array copyies accurately to allow value numbering of field loads around
+ // array copies accurately to allow value numbering of field loads around
// arraycopy. Such field loads, both before and after, are common in Java
// collections and similar classes involving header/array data structures.
//
diff --git a/src/share/vm/opto/live.cpp b/src/share/vm/opto/live.cpp
index 394d1314f..6f034faa8 100644
--- a/src/share/vm/opto/live.cpp
+++ b/src/share/vm/opto/live.cpp
@@ -39,7 +39,7 @@
// Leftover bits become the new live-in for the predecessor block, and the pred
// block is put on the worklist.
// The locally live-in stuff is computed once and added to predecessor
-// live-out sets. This seperate compilation is done in the outer loop below.
+// live-out sets. This separate compilation is done in the outer loop below.
PhaseLive::PhaseLive( const PhaseCFG &cfg, LRG_List &names, Arena *arena ) : Phase(LIVE), _cfg(cfg), _names(names), _arena(arena), _live(0) {
}
diff --git a/src/share/vm/opto/locknode.cpp b/src/share/vm/opto/locknode.cpp
index 0099284a7..f6a012221 100644
--- a/src/share/vm/opto/locknode.cpp
+++ b/src/share/vm/opto/locknode.cpp
@@ -121,7 +121,7 @@ void Parse::do_monitor_exit() {
kill_dead_locals();
pop(); // Pop oop to unlock
- // Because monitors are guarenteed paired (else we bail out), we know
+ // Because monitors are guaranteed paired (else we bail out), we know
// the matching Lock for this Unlock. Hence we know there is no need
// for a null check on Unlock.
shared_unlock(map()->peek_monitor_box(), map()->peek_monitor_obj());
diff --git a/src/share/vm/opto/loopTransform.cpp b/src/share/vm/opto/loopTransform.cpp
index caa676f6f..4ff59d9d4 100644
--- a/src/share/vm/opto/loopTransform.cpp
+++ b/src/share/vm/opto/loopTransform.cpp
@@ -119,7 +119,7 @@ void IdealLoopTree::compute_profile_trip_cnt( PhaseIdealLoop *phase ) {
//---------------------is_invariant_addition-----------------------------
// Return nonzero index of invariant operand for an Add or Sub
-// of (nonconstant) invariant and variant values. Helper for reassoicate_invariants.
+// of (nonconstant) invariant and variant values. Helper for reassociate_invariants.
int IdealLoopTree::is_invariant_addition(Node* n, PhaseIdealLoop *phase) {
int op = n->Opcode();
if (op == Op_AddI || op == Op_SubI) {
@@ -520,7 +520,7 @@ bool IdealLoopTree::policy_unroll( PhaseIdealLoop *phase ) const {
//------------------------------policy_align-----------------------------------
// Return TRUE or FALSE if the loop should be cache-line aligned. Gather the
// expression that does the alignment. Note that only one array base can be
-// aligned in a loop (unless the VM guarentees mutual alignment). Note that
+// aligned in a loop (unless the VM guarantees mutual alignment). Note that
// if we vectorize short memory ops into longer memory ops, we may want to
// increase alignment.
bool IdealLoopTree::policy_align( PhaseIdealLoop *phase ) const {
diff --git a/src/share/vm/opto/loopUnswitch.cpp b/src/share/vm/opto/loopUnswitch.cpp
index 2a385e768..4bb67ad0f 100644
--- a/src/share/vm/opto/loopUnswitch.cpp
+++ b/src/share/vm/opto/loopUnswitch.cpp
@@ -131,7 +131,7 @@ void PhaseIdealLoop::do_unswitching (IdealLoopTree *loop, Node_List &old_new) {
ProjNode* proj_false = invar_iff->proj_out(0)->as_Proj();
- // Hoist invariant casts out of each loop to the appropiate
+ // Hoist invariant casts out of each loop to the appropriate
// control projection.
Node_List worklist;
diff --git a/src/share/vm/opto/loopnode.cpp b/src/share/vm/opto/loopnode.cpp
index b07d25cfc..bb372e0d3 100644
--- a/src/share/vm/opto/loopnode.cpp
+++ b/src/share/vm/opto/loopnode.cpp
@@ -274,7 +274,7 @@ Node *PhaseIdealLoop::is_counted_loop( Node *x, IdealLoopTree *loop ) {
//
// Canonicalize the condition on the test. If we can exactly determine
// the trip-counter exit value, then set limit to that value and use
- // a '!=' test. Otherwise use conditon '<' for count-up loops and
+ // a '!=' test. Otherwise use condition '<' for count-up loops and
// '>' for count-down loops. If the condition is inverted and we will
// be rolling through MININT to MAXINT, then bail out.
@@ -290,7 +290,7 @@ Node *PhaseIdealLoop::is_counted_loop( Node *x, IdealLoopTree *loop ) {
// If compare points to incr, we are ok. Otherwise the compare
// can directly point to the phi; in this case adjust the compare so that
- // it points to the incr by adusting the limit.
+ // it points to the incr by adjusting the limit.
if( cmp->in(1) == phi || cmp->in(2) == phi )
limit = gvn->transform(new (C, 3) AddINode(limit,stride));
@@ -471,7 +471,7 @@ Node *PhaseIdealLoop::is_counted_loop( Node *x, IdealLoopTree *loop ) {
lazy_replace( x, l );
set_idom(l, init_control, dom_depth(x));
- // Check for immediately preceeding SafePoint and remove
+ // Check for immediately preceding SafePoint and remove
Node *sfpt2 = le->in(0);
if( sfpt2->Opcode() == Op_SafePoint && is_deleteable_safept(sfpt2))
lazy_replace( sfpt2, sfpt2->in(TypeFunc::Control));
@@ -1506,7 +1506,7 @@ PhaseIdealLoop::PhaseIdealLoop( PhaseIterGVN &igvn, const PhaseIdealLoop *verify
// Build Dominators for elision of NULL checks & loop finding.
// Since nodes do not have a slot for immediate dominator, make
- // a persistant side array for that info indexed on node->_idx.
+ // a persistent side array for that info indexed on node->_idx.
_idom_size = C->unique();
_idom = NEW_RESOURCE_ARRAY( Node*, _idom_size );
_dom_depth = NEW_RESOURCE_ARRAY( uint, _idom_size );
@@ -1529,7 +1529,7 @@ PhaseIdealLoop::PhaseIdealLoop( PhaseIterGVN &igvn, const PhaseIdealLoop *verify
// Given dominators, try to find inner loops with calls that must
// always be executed (call dominates loop tail). These loops do
- // not need a seperate safepoint.
+ // not need a separate safepoint.
Node_List cisstack(a);
_ltree_root->check_safepts(visited, cisstack);
@@ -2332,7 +2332,7 @@ void PhaseIdealLoop::build_loop_early( VectorSet &visited, Node_List &worklist,
if (done) {
// All of n's inputs have been processed, complete post-processing.
- // Compute earilest point this Node can go.
+ // Compute earliest point this Node can go.
// CFG, Phi, pinned nodes already know their controlling input.
if (!has_node(n)) {
// Record earliest legal location
@@ -2672,9 +2672,9 @@ void PhaseIdealLoop::build_loop_late_post( Node *n, const PhaseIdealLoop *verify
pinned = false;
}
if( pinned ) {
- IdealLoopTree *choosen_loop = get_loop(n->is_CFG() ? n : get_ctrl(n));
- if( !choosen_loop->_child ) // Inner loop?
- choosen_loop->_body.push(n); // Collect inner loops
+ IdealLoopTree *chosen_loop = get_loop(n->is_CFG() ? n : get_ctrl(n));
+ if( !chosen_loop->_child ) // Inner loop?
+ chosen_loop->_body.push(n); // Collect inner loops
return;
}
} else { // No slot zero
@@ -2746,9 +2746,9 @@ void PhaseIdealLoop::build_loop_late_post( Node *n, const PhaseIdealLoop *verify
set_ctrl(n, least);
// Collect inner loop bodies
- IdealLoopTree *choosen_loop = get_loop(least);
- if( !choosen_loop->_child ) // Inner loop?
- choosen_loop->_body.push(n);// Collect inner loops
+ IdealLoopTree *chosen_loop = get_loop(least);
+ if( !chosen_loop->_child ) // Inner loop?
+ chosen_loop->_body.push(n);// Collect inner loops
}
#ifndef PRODUCT
diff --git a/src/share/vm/opto/loopnode.hpp b/src/share/vm/opto/loopnode.hpp
index 537756466..ab172a3eb 100644
--- a/src/share/vm/opto/loopnode.hpp
+++ b/src/share/vm/opto/loopnode.hpp
@@ -390,7 +390,7 @@ public:
// Return TRUE or FALSE if the loop should be cache-line aligned.
// Gather the expression that does the alignment. Note that only
- // one array base can be aligned in a loop (unless the VM guarentees
+ // one array base can be aligned in a loop (unless the VM guarantees
// mutual alignment). Note that if we vectorize short memory ops
// into longer memory ops, we may want to increase alignment.
bool policy_align( PhaseIdealLoop *phase ) const;
@@ -403,7 +403,7 @@ public:
// Reassociate invariant add and subtract expressions.
Node* reassociate_add_sub(Node* n1, PhaseIdealLoop *phase);
// Return nonzero index of invariant operand if invariant and variant
- // are combined with an Add or Sub. Helper for reassoicate_invariants.
+ // are combined with an Add or Sub. Helper for reassociate_invariants.
int is_invariant_addition(Node* n, PhaseIdealLoop *phase);
// Return true if n is invariant
diff --git a/src/share/vm/opto/loopopts.cpp b/src/share/vm/opto/loopopts.cpp
index 41048cbcb..454c207fd 100644
--- a/src/share/vm/opto/loopopts.cpp
+++ b/src/share/vm/opto/loopopts.cpp
@@ -97,7 +97,7 @@ Node *PhaseIdealLoop::split_thru_phi( Node *n, Node *region, int policy ) {
// (Note: This tweaking with igvn only works because x is a new node.)
_igvn.set_type(x, t);
// If x is a TypeNode, capture any more-precise type permanently into Node
- // othewise it will be not updated during igvn->transform since
+ // otherwise it will not be updated during igvn->transform since
// igvn->type(x) is set to x->Value() already.
x->raise_bottom_type(t);
Node *y = x->Identity(&_igvn);
@@ -879,7 +879,7 @@ void PhaseIdealLoop::split_if_with_blocks_post( Node *n ) {
Node *x_ctrl = NULL;
if( u->is_Phi() ) {
// Replace all uses of normal nodes. Replace Phi uses
- // individually, so the seperate Nodes can sink down
+ // individually, so the separate Nodes can sink down
// different paths.
uint k = 1;
while( u->in(k) != n ) k++;
diff --git a/src/share/vm/opto/machnode.cpp b/src/share/vm/opto/machnode.cpp
index eadd0da5f..adb7ecb98 100644
--- a/src/share/vm/opto/machnode.cpp
+++ b/src/share/vm/opto/machnode.cpp
@@ -136,7 +136,7 @@ void MachNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
// Size of instruction in bytes
uint MachNode::size(PhaseRegAlloc *ra_) const {
// If a virtual was not defined for this specific instruction,
- // Call the helper which finds the size by emiting the bits.
+ // Call the helper which finds the size by emitting the bits.
return MachNode::emit_size(ra_);
}
diff --git a/src/share/vm/opto/macro.cpp b/src/share/vm/opto/macro.cpp
index 38ed4d593..239bc05af 100644
--- a/src/share/vm/opto/macro.cpp
+++ b/src/share/vm/opto/macro.cpp
@@ -64,6 +64,7 @@ void PhaseMacroExpand::copy_call_debug_info(CallNode *oldcall, CallNode * newcal
uint old_unique = C->unique();
Node* new_in = old_sosn->clone(jvms_adj, sosn_map);
if (old_unique != C->unique()) {
+ new_in->set_req(0, newcall->in(0)); // reset control edge
new_in = transform_later(new_in); // Register new node.
}
old_in = new_in;
@@ -215,7 +216,7 @@ static Node *scan_mem_chain(Node *mem, int alias_idx, int offset, Node *start_me
const TypeOopPtr *tinst = phase->C->get_adr_type(alias_idx)->isa_oopptr();
while (true) {
if (mem == alloc_mem || mem == start_mem ) {
- return mem; // hit one of our sentinals
+ return mem; // hit one of our sentinels
} else if (mem->is_MergeMem()) {
mem = mem->as_MergeMem()->memory_at(alias_idx);
} else if (mem->is_Proj() && mem->as_Proj()->_con == TypeFunc::Memory) {
@@ -250,6 +251,15 @@ static Node *scan_mem_chain(Node *mem, int alias_idx, int offset, Node *start_me
assert(adr_idx == Compile::AliasIdxRaw, "address must match or be raw");
}
mem = mem->in(MemNode::Memory);
+ } else if (mem->Opcode() == Op_SCMemProj) {
+ assert(mem->in(0)->is_LoadStore(), "sanity");
+ const TypePtr* atype = mem->in(0)->in(MemNode::Address)->bottom_type()->is_ptr();
+ int adr_idx = Compile::current()->get_alias_index(atype);
+ if (adr_idx == alias_idx) {
+ assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field");
+ return NULL;
+ }
+ mem = mem->in(0)->in(MemNode::Memory);
} else {
return mem;
}
@@ -329,8 +339,15 @@ Node *PhaseMacroExpand::value_from_mem_phi(Node *mem, BasicType ft, const Type *
return NULL;
}
values.at_put(j, val);
+ } else if (val->Opcode() == Op_SCMemProj) {
+ assert(val->in(0)->is_LoadStore(), "sanity");
+ assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field");
+ return NULL;
} else {
+#ifdef ASSERT
+ val->dump();
assert(false, "unknown node on this path");
+#endif
return NULL; // unknown node on this path
}
}
@@ -1651,7 +1668,7 @@ void PhaseMacroExpand::expand_lock_node(LockNode *lock) {
if (UseOptoBiasInlining) {
/*
- * See the full descrition in MacroAssembler::biased_locking_enter().
+ * See the full description in MacroAssembler::biased_locking_enter().
*
* if( (mark_word & biased_lock_mask) == biased_lock_pattern ) {
* // The object is biased.
@@ -1887,7 +1904,7 @@ void PhaseMacroExpand::expand_unlock_node(UnlockNode *unlock) {
if (UseOptoBiasInlining) {
// Check for biased locking unlock case, which is a no-op.
- // See the full descrition in MacroAssembler::biased_locking_exit().
+ // See the full description in MacroAssembler::biased_locking_exit().
region = new (C, 4) RegionNode(4);
// create a Phi for the memory state
mem_phi = new (C, 4) PhiNode( region, Type::MEMORY, TypeRawPtr::BOTTOM);
diff --git a/src/share/vm/opto/matcher.cpp b/src/share/vm/opto/matcher.cpp
index 8df615efc..613153556 100644
--- a/src/share/vm/opto/matcher.cpp
+++ b/src/share/vm/opto/matcher.cpp
@@ -897,7 +897,7 @@ Node *Matcher::xform( Node *n, int max_stack ) {
#ifdef ASSERT
_new2old_map.map(m->_idx, n);
#endif
- mstack.push(m, Post_Visit, n, i); // Don't neet to visit
+ mstack.push(m, Post_Visit, n, i); // Don't need to visit
mstack.push(m->in(0), Visit, m, 0);
} else {
mstack.push(m, Visit, n, i);
@@ -1267,7 +1267,7 @@ static bool match_into_reg( const Node *n, Node *m, Node *control, int i, bool s
}
}
- // Not forceably cloning. If shared, put it into a register.
+ // Not forcibly cloning. If shared, put it into a register.
return shared;
}
@@ -1542,7 +1542,7 @@ void Matcher::ReduceInst_Chain_Rule( State *s, int rule, Node *&mem, MachNode *m
// This is what my child will give me.
int opnd_class_instance = s->_rule[op];
// Choose between operand class or not.
- // This is what I will recieve.
+ // This is what I will receive.
int catch_op = (FIRST_OPERAND_CLASS <= op && op < NUM_OPERANDS) ? opnd_class_instance : op;
// New rule for child. Chase operand classes to get the actual rule.
int newrule = s->_rule[catch_op];
@@ -1707,11 +1707,18 @@ OptoReg::Name Matcher::find_receiver( bool is_outgoing ) {
void Matcher::find_shared( Node *n ) {
// Allocate stack of size C->unique() * 2 to avoid frequent realloc
MStack mstack(C->unique() * 2);
+ // Mark nodes as address_visited if they are inputs to an address expression
+ VectorSet address_visited(Thread::current()->resource_area());
mstack.push(n, Visit); // Don't need to pre-visit root node
while (mstack.is_nonempty()) {
n = mstack.node(); // Leave node on stack
Node_State nstate = mstack.state();
+ uint nop = n->Opcode();
if (nstate == Pre_Visit) {
+ if (address_visited.test(n->_idx)) { // Visited in address already?
+ // Flag as visited and shared now.
+ set_visited(n);
+ }
if (is_visited(n)) { // Visited already?
// Node is shared and has no reason to clone. Flag it as shared.
// This causes it to match into a register for the sharing.
@@ -1726,7 +1733,7 @@ void Matcher::find_shared( Node *n ) {
set_visited(n); // Flag as visited now
bool mem_op = false;
- switch( n->Opcode() ) { // Handle some opcodes special
+ switch( nop ) { // Handle some opcodes special
case Op_Phi: // Treat Phis as shared roots
case Op_Parm:
case Op_Proj: // All handled specially during matching
@@ -1887,34 +1894,51 @@ void Matcher::find_shared( Node *n ) {
// to have a single use so force sharing here.
set_shared(m->in(AddPNode::Base)->in(1));
}
+
+ // Some inputs for address expression are not put on stack
+ // to avoid marking them as shared and forcing them into register
+ // if they are used only in address expressions.
+ // But they should be marked as shared if there are other uses
+ // besides address expressions.
+
Node *off = m->in(AddPNode::Offset);
- if( off->is_Con() ) {
- set_visited(m); // Flag as visited now
+ if( off->is_Con() &&
+ // When there are other uses besides address expressions
+ // put it on stack and mark as shared.
+ !is_visited(m) ) {
+ address_visited.test_set(m->_idx); // Flag as address_visited
Node *adr = m->in(AddPNode::Address);
// Intel, ARM and friends can handle 2 adds in addressing mode
if( clone_shift_expressions && adr->is_AddP() &&
// AtomicAdd is not an addressing expression.
// Cheap to find it by looking for screwy base.
- !adr->in(AddPNode::Base)->is_top() ) {
- set_visited(adr); // Flag as visited now
+ !adr->in(AddPNode::Base)->is_top() &&
+ // Are there other uses besides address expressions?
+ !is_visited(adr) ) {
+ address_visited.set(adr->_idx); // Flag as address_visited
Node *shift = adr->in(AddPNode::Offset);
// Check for shift by small constant as well
if( shift->Opcode() == Op_LShiftX && shift->in(2)->is_Con() &&
- shift->in(2)->get_int() <= 3 ) {
- set_visited(shift); // Flag as visited now
+ shift->in(2)->get_int() <= 3 &&
+ // Are there other uses besides address expressions?
+ !is_visited(shift) ) {
+ address_visited.set(shift->_idx); // Flag as address_visited
mstack.push(shift->in(2), Visit);
+ Node *conv = shift->in(1);
#ifdef _LP64
// Allow Matcher to match the rule which bypasses
// ConvI2L operation for an array index on LP64
// if the index value is positive.
- if( shift->in(1)->Opcode() == Op_ConvI2L &&
- shift->in(1)->as_Type()->type()->is_long()->_lo >= 0 ) {
- set_visited(shift->in(1)); // Flag as visited now
- mstack.push(shift->in(1)->in(1), Pre_Visit);
+ if( conv->Opcode() == Op_ConvI2L &&
+ conv->as_Type()->type()->is_long()->_lo >= 0 &&
+ // Are there other uses besides address expressions?
+ !is_visited(conv) ) {
+ address_visited.set(conv->_idx); // Flag as address_visited
+ mstack.push(conv->in(1), Pre_Visit);
} else
#endif
- mstack.push(shift->in(1), Pre_Visit);
+ mstack.push(conv, Pre_Visit);
} else {
mstack.push(shift, Pre_Visit);
}
@@ -1942,7 +1966,7 @@ void Matcher::find_shared( Node *n ) {
// BoolNode::match_edge always returns a zero.
// We reorder the Op_If in a pre-order manner, so we can visit without
- // accidently sharing the Cmp (the Bool and the If make 2 users).
+ // accidentally sharing the Cmp (the Bool and the If make 2 users).
n->add_req( n->in(1)->in(1) ); // Add the Cmp next to the Bool
}
else if (nstate == Post_Visit) {
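
The address_visited set introduced in find_shared above keeps nodes that feed only an address expression from being marked shared, so the matcher can fold them into a single addressing mode. A hedged C++ illustration of the shape being matched, base + (index << scale) + offset, written out by hand:

    #include <cstdio>

    int main() {
      int a[16] = {0};
      long i = 3;  // the ConvI2L'd index from the LP64 case above
      // One AddP chain: when the shift and inner add have no uses outside
      // the address, they all fold into a single [base + index*4 + disp]
      // operand instead of occupying registers of their own.
      int v = *(int*)((char*)a + (i << 2) + 16);   // reads a[7]
      printf("%d\n", v);
      return 0;
    }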
diff --git a/src/share/vm/opto/memnode.cpp b/src/share/vm/opto/memnode.cpp
index 7038a731c..570e813e2 100644
--- a/src/share/vm/opto/memnode.cpp
+++ b/src/share/vm/opto/memnode.cpp
@@ -100,12 +100,12 @@ Node *MemNode::optimize_simple_memory_chain(Node *mchain, const TypePtr *t_adr,
while (prev != result) {
prev = result;
if (result == start_mem)
- break; // hit one of our sentinals
+ break; // hit one of our sentinels
// skip over a call which does not affect this memory slice
if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) {
Node *proj_in = result->in(0);
if (proj_in->is_Allocate() && proj_in->_idx == instance_id) {
- break; // hit one of our sentinals
+ break; // hit one of our sentinels
} else if (proj_in->is_Call()) {
CallNode *call = proj_in->as_Call();
if (!call->may_modify(t_adr, phase)) {
@@ -198,7 +198,7 @@ static Node *step_through_mergemem(PhaseGVN *phase, MergeMemNode *mmem, const T
// If not, we can update the input infinitely along a MergeMem cycle
// Equivalent code in PhiNode::Ideal
Node* m = phase->transform(mmem);
- // If tranformed to a MergeMem, get the desired slice
+ // If transformed to a MergeMem, get the desired slice
// Otherwise the returned node represents memory for every slice
mem = (m->is_MergeMem())? m->as_MergeMem()->memory_at(alias_idx) : m;
// Update input if it is progress over what we have now
@@ -778,7 +778,7 @@ Node *LoadNode::make( PhaseGVN& gvn, Node *ctl, Node *mem, Node *adr, const Type
adr_type->offset() == arrayOopDesc::length_offset_in_bytes()),
"use LoadRangeNode instead");
switch (bt) {
- case T_BOOLEAN:
+ case T_BOOLEAN: return new (C, 3) LoadUBNode(ctl, mem, adr, adr_type, rt->is_int() );
case T_BYTE: return new (C, 3) LoadBNode (ctl, mem, adr, adr_type, rt->is_int() );
case T_INT: return new (C, 3) LoadINode (ctl, mem, adr, adr_type, rt->is_int() );
case T_CHAR: return new (C, 3) LoadUSNode(ctl, mem, adr, adr_type, rt->is_int() );
@@ -970,7 +970,7 @@ Node *LoadNode::Identity( PhaseTransform *phase ) {
}
// Search for an existing data phi which was generated before for the same
- // instance's field to avoid infinite genertion of phis in a loop.
+ // instance's field to avoid infinite generation of phis in a loop.
Node *region = mem->in(0);
if (is_instance_field_load_with_local_phi(region)) {
const TypePtr *addr_t = in(MemNode::Address)->bottom_type()->isa_ptr();
@@ -1066,11 +1066,11 @@ Node* LoadNode::eliminate_autobox(PhaseGVN* phase) {
break;
}
}
- LoadNode* load = NULL;
- if (allocation != NULL && base->in(load_index)->is_Load()) {
- load = base->in(load_index)->as_Load();
- }
- if (load != NULL && in(Memory)->is_Phi() && in(Memory)->in(0) == base->in(0)) {
+ bool has_load = ( allocation != NULL &&
+ (base->in(load_index)->is_Load() ||
+ base->in(load_index)->is_DecodeN() &&
+ base->in(load_index)->in(1)->is_Load()) );
+ if (has_load && in(Memory)->is_Phi() && in(Memory)->in(0) == base->in(0)) {
// Push the loads from the phi that comes from valueOf up
// through it to allow elimination of the loads and the recovery
// of the original value.
@@ -1106,11 +1106,20 @@ Node* LoadNode::eliminate_autobox(PhaseGVN* phase) {
result->set_req(load_index, in2);
return result;
}
- } else if (base->is_Load()) {
+ } else if (base->is_Load() ||
+ base->is_DecodeN() && base->in(1)->is_Load()) {
+ if (base->is_DecodeN()) {
+ // Get LoadN node which loads cached Integer object
+ base = base->in(1);
+ }
// Eliminate the load of Integer.value for integers from the cache
// array by deriving the value from the index into the array.
// Capture the offset of the load and then reverse the computation.
Node* load_base = base->in(Address)->in(AddPNode::Base);
+ if (load_base->is_DecodeN()) {
+ // Get LoadN node which loads IntegerCache.cache field
+ load_base = load_base->in(1);
+ }
if (load_base != NULL) {
Compile::AliasType* atp = phase->C->alias_type(load_base->adr_type());
intptr_t cache_offset;
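The cache_offset captured above lets the compiler recover the boxed value arithmetically: the Integer cache holds consecutive values, so loading cache[idx].value is the same as computing idx + low. A toy model of that identity (illustrative only; the -128..127 bounds are Java's default IntegerCache range, assumed here):

    #include <cassert>

    struct Box { int value; };            // stand-in for a boxed Integer

    int main() {
      const int low = -128, high = 127;   // assumed default cache range
      Box cache[high - low + 1];
      for (int v = low; v <= high; ++v)
        cache[v - low].value = v;         // cache[k] boxes the value k + low
      // The load of cache[idx].value is redundant: it can be rebuilt
      // from the array index, which is what eliminate_autobox does.
      for (int idx = 0; idx <= high - low; ++idx)
        assert(cache[idx].value == idx + low);
      return 0;
    }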
@@ -1245,7 +1254,7 @@ Node *LoadNode::split_through_phi(PhaseGVN *phase) {
// (This tweaking with igvn only works because x is a new node.)
igvn->set_type(x, t);
// If x is a TypeNode, capture any more-precise type permanently into Node
- // othewise it will be not updated during igvn->transform since
+ // otherwise it will not be updated during igvn->transform since
// igvn->type(x) is set to x->Value() already.
x->raise_bottom_type(t);
Node *y = x->Identity(igvn);
@@ -1607,6 +1616,22 @@ Node *LoadBNode::Ideal(PhaseGVN *phase, bool can_reshape) {
return LoadNode::Ideal(phase, can_reshape);
}
+//--------------------------LoadUBNode::Ideal-------------------------------------
+//
+// If the previous store is to the same address as this load,
+// and the value stored was larger than a byte, replace this load
+// with the value stored truncated to a byte. If no truncation is
+// needed, the replacement is done in LoadNode::Identity().
+//
+Node* LoadUBNode::Ideal(PhaseGVN* phase, bool can_reshape) {
+ Node* mem = in(MemNode::Memory);
+ Node* value = can_see_stored_value(mem, phase);
+ if (value && !phase->type(value)->higher_equal(_type))
+ return new (phase->C, 3) AndINode(value, phase->intcon(0xFF));
+ // Identity call will handle the case where truncation is not needed.
+ return LoadNode::Ideal(phase, can_reshape);
+}
+
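The new Ideal rule is sound because the byte store already truncated the value, so an unsigned-byte load that can see that (wider) stored value is just the value masked to its low eight bits. A standalone sketch of the equivalence (illustrative, not HotSpot code):

    #include <cassert>

    int main() {
      for (int stored : {-1, 0, 127, 128, 255, 0x1234}) {
        unsigned char mem = (unsigned char)stored;  // StoreB truncates
        int loaded = mem;                           // LoadUB zero-extends
        assert(loaded == (stored & 0xFF));          // the AndI built above
      }
      return 0;
    }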
//--------------------------LoadUSNode::Ideal-------------------------------------
//
// If the previous store is to the same address as this load,
@@ -2582,7 +2607,7 @@ Node *MemBarNode::match( const ProjNode *proj, const Matcher *m ) {
// capturing of nearby memory operations.
//
// During macro-expansion, all captured initializations which store
-// constant values of 32 bits or smaller are coalesced (if advantagous)
+// constant values of 32 bits or smaller are coalesced (if advantageous)
// into larger 'tiles' of 32 or 64 bits. This allows an object to be
// initialized in fewer memory operations. Memory words which are
// covered by neither tiles nor non-constant stores are pre-zeroed
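As a concrete example of tiling, four constant byte initializations can be coalesced into one 32-bit store of the combined constant. The sketch below (illustrative, not HotSpot code) assumes a little-endian machine, which is where the 0x44332211 tile value comes from:

    #include <cstdint>
    #include <cstring>
    #include <cassert>

    int main() {
      unsigned char a[4];
      a[0] = 0x11; a[1] = 0x22; a[2] = 0x33; a[3] = 0x44;  // four byte stores

      uint32_t tile = 0x44332211;         // one coalesced 32-bit store
      unsigned char b[4];
      std::memcpy(b, &tile, sizeof tile);

      assert(std::memcmp(a, b, 4) == 0);  // same initialized memory image
      return 0;
    }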
@@ -3669,7 +3694,7 @@ Node *MergeMemNode::Ideal(PhaseGVN *phase, bool can_reshape) {
else if (old_mmem != NULL) {
new_mem = old_mmem->memory_at(i);
}
- // else preceeding memory was not a MergeMem
+ // else preceding memory was not a MergeMem
// replace equivalent phis (unfortunately, they do not GVN together)
if (new_mem != NULL && new_mem != new_base &&
diff --git a/src/share/vm/opto/memnode.hpp b/src/share/vm/opto/memnode.hpp
index d64067261..e318f3079 100644
--- a/src/share/vm/opto/memnode.hpp
+++ b/src/share/vm/opto/memnode.hpp
@@ -207,6 +207,19 @@ public:
virtual BasicType memory_type() const { return T_BYTE; }
};
+//------------------------------LoadUBNode-------------------------------------
+// Load an unsigned byte (8bits unsigned) from memory
+class LoadUBNode : public LoadNode {
+public:
+ LoadUBNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeInt* ti = TypeInt::UBYTE )
+ : LoadNode(c, mem, adr, at, ti) {}
+ virtual int Opcode() const;
+ virtual uint ideal_reg() const { return Op_RegI; }
+ virtual Node* Ideal(PhaseGVN *phase, bool can_reshape);
+ virtual int store_Opcode() const { return Op_StoreB; }
+ virtual BasicType memory_type() const { return T_BYTE; }
+};
+
//------------------------------LoadUSNode-------------------------------------
// Load an unsigned short/char (16bits unsigned) from memory
class LoadUSNode : public LoadNode {
@@ -232,6 +245,18 @@ public:
virtual BasicType memory_type() const { return T_INT; }
};
+//------------------------------LoadUI2LNode-----------------------------------
+// Load an unsigned integer into a long from memory
+class LoadUI2LNode : public LoadNode {
+public:
+ LoadUI2LNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeLong* t = TypeLong::UINT)
+ : LoadNode(c, mem, adr, at, t) {}
+ virtual int Opcode() const;
+ virtual uint ideal_reg() const { return Op_RegL; }
+ virtual int store_Opcode() const { return Op_StoreL; }
+ virtual BasicType memory_type() const { return T_LONG; }
+};
+
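LoadUI2LNode exists because ConvI2L(LoadI) masked with 0xFFFFFFFF is exactly a zero-extending 32-bit load (see the matching AndLNode::Ideal change in mulnode.cpp below). A standalone check of that identity (illustrative, not HotSpot code):

    #include <cstdint>
    #include <cassert>

    int64_t conv_then_mask(int32_t x)      { return (int64_t)x & 0xFFFFFFFFLL; }
    int64_t zero_extending_load(int32_t x) { return (uint32_t)x; }

    int main() {
      const int32_t vals[] = {INT32_MIN, -1, 0, 1, INT32_MAX};
      for (int32_t v : vals)
        assert(conv_then_mask(v) == zero_extending_load(v));
      return 0;
    }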
//------------------------------LoadRangeNode----------------------------------
// Load an array length from the array
class LoadRangeNode : public LoadINode {
@@ -757,10 +782,10 @@ public:
// Model. Monitor-enter and volatile-load act as Acquires: no following ref
// can be moved to before them. We insert a MemBar-Acquire after a FastLock or
// volatile-load. Monitor-exit and volatile-store act as Release: no
-// preceeding ref can be moved to after them. We insert a MemBar-Release
+// preceding ref can be moved to after them. We insert a MemBar-Release
// before a FastUnlock or volatile-store. All volatiles need to be
// serialized, so we follow all volatile-stores with a MemBar-Volatile to
-// seperate it from any following volatile-load.
+// separate them from any following volatile-load.
class MemBarNode: public MultiNode {
virtual uint hash() const ; // { return NO_HASH; }
virtual uint cmp( const Node &n ) const ; // Always fail, except on self
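A rough analogue of the barrier placement described above, written with C++11 atomics rather than ideal-graph MemBar nodes (an illustration of the ordering rules, not the HotSpot implementation): the volatile-load acts as an acquire, the volatile-store as a release, and a full fence keeps a volatile store from reordering with a later volatile load.

    #include <atomic>

    std::atomic<int> v{0};

    int volatile_load() {
      return v.load(std::memory_order_acquire);            // MemBar-Acquire after the load
    }

    void volatile_store(int x) {
      v.store(x, std::memory_order_release);               // MemBar-Release before the store
      std::atomic_thread_fence(std::memory_order_seq_cst); // MemBar-Volatile after the store
    }

    int main() { volatile_store(42); return volatile_load() == 42 ? 0 : 1; }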
diff --git a/src/share/vm/opto/mulnode.cpp b/src/share/vm/opto/mulnode.cpp
index 081dce647..7700272f6 100644
--- a/src/share/vm/opto/mulnode.cpp
+++ b/src/share/vm/opto/mulnode.cpp
@@ -486,20 +486,23 @@ Node *AndINode::Ideal(PhaseGVN *phase, bool can_reshape) {
return new (phase->C, 3) AndINode(ldus, phase->intcon(mask&0xFFFF));
}
- // Masking sign bits off of a Byte? Let the matcher use an unsigned load
- if( lop == Op_LoadB &&
- (!in(0) && load->in(0)) &&
- (mask == 0x000000FF) ) {
- // Associate this node with the LoadB, so the matcher can see them together.
- // If we don't do this, it is common for the LoadB to have one control
- // edge, and the store or call containing this AndI to have a different
- // control edge. This will cause Label_Root to group the AndI with
- // the encoding store or call, so the matcher has no chance to match
- // this AndI together with the LoadB. Setting the control edge here
- // prevents Label_Root from grouping the AndI with the store or call,
- // if it has a control edge that is inconsistent with the LoadB.
- set_req(0, load->in(0));
- return this;
+ // Masking sign bits off of a Byte? Do an unsigned byte load.
+ if (lop == Op_LoadB && mask == 0x000000FF) {
+ return new (phase->C, 3) LoadUBNode(load->in(MemNode::Control),
+ load->in(MemNode::Memory),
+ load->in(MemNode::Address),
+ load->adr_type());
+ }
+
+ // Masking sign bits off of a Byte plus additional lower bits? Do
+ // an unsigned byte load plus an and.
+ if (lop == Op_LoadB && (mask & 0xFFFFFF00) == 0) {
+ Node* ldub = new (phase->C, 3) LoadUBNode(load->in(MemNode::Control),
+ load->in(MemNode::Memory),
+ load->in(MemNode::Address),
+ load->adr_type());
+ ldub = phase->transform(ldub);
+ return new (phase->C, 3) AndINode(ldub, phase->intcon(mask));
}
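Both replacements rely on the same fact: once the mask keeps only bits 0..7, it cannot matter whether the loaded byte was sign-extended or zero-extended. A standalone sketch (illustrative, not HotSpot code):

    #include <cassert>

    int main() {
      for (int m = 0; m < 256; ++m) {            // any mask with (m & ~0xFF) == 0
        unsigned char byte = 0xC3;               // a byte with its sign bit set
        int via_loadb  = (signed char)byte & m;  // LoadB sign-extends, then AndI
        int via_loadub = byte & m;               // LoadUB zero-extends, then AndI
        assert(via_loadb == via_loadub);
      }
      return 0;
    }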
// Masking off sign bits? Don't make them!
@@ -599,12 +602,21 @@ Node *AndLNode::Ideal(PhaseGVN *phase, bool can_reshape) {
if( !t2 || !t2->is_con() ) return MulNode::Ideal(phase, can_reshape);
const jlong mask = t2->get_con();
- Node *rsh = in(1);
- uint rop = rsh->Opcode();
+ Node* in1 = in(1);
+ uint op = in1->Opcode();
+
+ // Masking sign bits off of an integer? Do an unsigned integer to long load.
+ if (op == Op_ConvI2L && in1->in(1)->Opcode() == Op_LoadI && mask == 0x00000000FFFFFFFFL) {
+ Node* load = in1->in(1);
+ return new (phase->C, 3) LoadUI2LNode(load->in(MemNode::Control),
+ load->in(MemNode::Memory),
+ load->in(MemNode::Address),
+ load->adr_type());
+ }
// Masking off sign bits? Don't make them!
- if( rop == Op_RShiftL ) {
- const TypeInt *t12 = phase->type(rsh->in(2))->isa_int();
+ if (op == Op_RShiftL) {
+ const TypeInt *t12 = phase->type(in1->in(2))->isa_int();
if( t12 && t12->is_con() ) { // Shift is by a constant
int shift = t12->get_con();
shift &= BitsPerJavaLong - 1; // semantics of Java shifts
@@ -613,7 +625,7 @@ Node *AndLNode::Ideal(PhaseGVN *phase, bool can_reshape) {
// bits survive. NO sign-extension bits survive the maskings.
if( (sign_bits_mask & mask) == 0 ) {
// Use zero-fill shift instead
- Node *zshift = phase->transform(new (phase->C, 3) URShiftLNode(rsh->in(1),rsh->in(2)));
+ Node *zshift = phase->transform(new (phase->C, 3) URShiftLNode(in1->in(1), in1->in(2)));
return new (phase->C, 3) AndLNode( zshift, in(2) );
}
}
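The zero-fill substitution is legal whenever the mask clears every bit the arithmetic shift could have sign-filled, namely the top 'shift' bits. A standalone check (illustrative, not HotSpot code):

    #include <cstdint>
    #include <cassert>

    int main() {
      const int shift = 8;
      const int64_t mask = 0x00FFFFFFFFFFFFLL;    // top 'shift' bits are clear
      const int64_t xs[] = {INT64_MIN, -1, 12345};
      for (int64_t x : xs) {
        int64_t arith = (x >> shift) & mask;                    // RShiftL, then AndL
        int64_t logic = (int64_t)((uint64_t)x >> shift) & mask; // URShiftL, then AndL
        assert(arith == logic);
      }
      return 0;
    }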
diff --git a/src/share/vm/opto/node.cpp b/src/share/vm/opto/node.cpp
index 9130403ed..f7d71d172 100644
--- a/src/share/vm/opto/node.cpp
+++ b/src/share/vm/opto/node.cpp
@@ -968,22 +968,23 @@ const Type *Node::Value( PhaseTransform * ) const {
// Example: when reshaping "(X+3)+4" into "X+7" you must leave the Node for
// "X+3" unchanged in case it is shared.
//
-// If you modify the 'this' pointer's inputs, you must use 'set_req' with
-// def-use info. If you are making a new Node (either as the new root or
-// some new internal piece) you must NOT use set_req with def-use info.
-// You can make a new Node with either 'new' or 'clone'. In either case,
-// def-use info is (correctly) not generated.
+// If you modify the 'this' pointer's inputs, you should use
+// 'set_req'. If you are making a new Node (either as the new root or
+// some new internal piece) you may use 'init_req' to set the initial
+// value. You can make a new Node with either 'new' or 'clone'. In
+// either case, def-use info is correctly maintained.
+//
// Example: reshape "(X+3)+4" into "X+7":
-// set_req(1,in(1)->in(1) /* grab X */, du /* must use DU on 'this' */);
-// set_req(2,phase->intcon(7),du);
+// set_req(1, in(1)->in(1));
+// set_req(2, phase->intcon(7));
// return this;
-// Example: reshape "X*4" into "X<<1"
-// return new (C,3) LShiftINode( in(1), phase->intcon(1) );
+// Example: reshape "X*4" into "X<<2"
+// return new (C,3) LShiftINode(in(1), phase->intcon(2));
//
// You must call 'phase->transform(X)' on any new Nodes X you make, except
-// for the returned root node. Example: reshape "X*31" with "(X<<5)-1".
+// for the returned root node. Example: reshape "X*31" into "(X<<5)-X".
// Node *shift=phase->transform(new(C,3)LShiftINode(in(1),phase->intcon(5)));
-// return new (C,3) AddINode(shift, phase->intcon(-1));
+// return new (C,3) AddINode(shift, in(1));
//
// When making a Node for a constant use 'phase->makecon' or 'phase->intcon'.
// These forms are faster than 'phase->transform(new (C,1) ConNode())' and Do
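The corrected strength-reduction examples are easy to sanity-check; a tiny standalone program (illustrative only) verifies both identities:

    #include <cassert>

    int main() {
      for (int x : {0, 1, 7, 12345}) {
        assert(x * 4  == (x << 2));      // "X*4"  into "X<<2"
        assert(x * 31 == (x << 5) - x);  // "X*31" into "(X<<5)-X"
      }
      return 0;
    }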
@@ -1679,7 +1680,7 @@ void Node::verify_edges(Unique_Node_List &visited) {
if (visited.member(this)) return;
visited.push(this);
- // Walk over all input edges, checking for correspondance
+ // Walk over all input edges, checking for correspondence
for( i = 0; i < len(); i++ ) {
n = in(i);
if (n != NULL && !n->is_top()) {
@@ -1723,7 +1724,7 @@ void Node::verify_recur(const Node *n, int verify_depth,
// Contained in new_space or old_space?
VectorSet *v = C->node_arena()->contains(n) ? &new_space : &old_space;
// Check for visited in the proper space. Numberings are not unique
- // across spaces so we need a seperate VectorSet for each space.
+ // across spaces so we need a separate VectorSet for each space.
if( v->test_set(n->_idx) ) return;
if (n->is_Con() && n->bottom_type() == Type::TOP) {
diff --git a/src/share/vm/opto/node.hpp b/src/share/vm/opto/node.hpp
index f55a40309..bad160705 100644
--- a/src/share/vm/opto/node.hpp
+++ b/src/share/vm/opto/node.hpp
@@ -257,7 +257,7 @@ protected:
Node **_in; // Array of use-def references to Nodes
Node **_out; // Array of def-use references to Nodes
- // Input edges are split into two catagories. Required edges are required
+ // Input edges are split into two categories. Required edges are required
// for semantic correctness; order is important and NULLs are allowed.
// Precedence edges are used to help determine execution order and are
// added, e.g., for scheduling purposes. They are unordered and not
@@ -854,7 +854,7 @@ public:
// If the hash function returns the special sentinel value NO_HASH,
// the node is guaranteed never to compare equal to any other node.
- // If we accidently generate a hash with value NO_HASH the node
+ // If we accidentally generate a hash with value NO_HASH the node
// won't go into the table and we'll lose a little optimization.
enum { NO_HASH = 0 };
virtual uint hash() const;
diff --git a/src/share/vm/opto/output.cpp b/src/share/vm/opto/output.cpp
index 7d6482ccd..c29274174 100644
--- a/src/share/vm/opto/output.cpp
+++ b/src/share/vm/opto/output.cpp
@@ -1171,7 +1171,7 @@ void Compile::Fill_buffer() {
cb->flush_bundle(false);
// The following logic is duplicated in the code ifdeffed for
- // ENABLE_ZAP_DEAD_LOCALS which apppears above in this file. It
+ // ENABLE_ZAP_DEAD_LOCALS which appears above in this file. It
// should be factored out. Or maybe dispersed to the nodes?
// Special handling for SafePoint/Call Nodes
@@ -1275,7 +1275,7 @@ void Compile::Fill_buffer() {
}
#ifdef ASSERT
- // Check that oop-store preceeds the card-mark
+ // Check that oop-store precedes the card-mark
else if( mach->ideal_Opcode() == Op_StoreCM ) {
uint storeCM_idx = j;
Node *oop_store = mach->in(mach->_cnt); // First precedence edge
@@ -1291,7 +1291,7 @@ void Compile::Fill_buffer() {
#endif
else if( !n->is_Proj() ) {
- // Remember the begining of the previous instruction, in case
+ // Remember the beginning of the previous instruction, in case
// it's followed by a flag-kill and a null-check. Happens on
// Intel all the time, with add-to-memory kind of opcodes.
previous_offset = current_offset;
@@ -1567,7 +1567,7 @@ Scheduling::Scheduling(Arena *arena, Compile &compile)
compile.set_node_bundling_limit(_node_bundling_limit);
- // This one is persistant within the Compile class
+ // This one is persistent within the Compile class
_node_bundling_base = NEW_ARENA_ARRAY(compile.comp_arena(), Bundle, node_max);
// Allocate space for fixed-size arrays
@@ -1666,7 +1666,7 @@ void Compile::ScheduleAndBundle() {
// Compute the latency of all the instructions. This is fairly simple,
// because we already have a legal ordering. Walk over the instructions
// from first to last, and compute the latency of the instruction based
-// on the latency of the preceeding instruction(s).
+// on the latency of the preceding instruction(s).
void Scheduling::ComputeLocalLatenciesForward(const Block *bb) {
#ifndef PRODUCT
if (_cfg->C->trace_opto_output())
@@ -1931,7 +1931,7 @@ void Scheduling::AddNodeToBundle(Node *n, const Block *bb) {
uint siz = _available.size();
// Conditional branches can support an instruction that
- // is unconditionally executed and not dependant by the
+ // is unconditionally executed and not dependent on the
// branch, OR a conditionally executed instruction if
// the branch is taken. In practice, this means that
// the first instruction at the branch target is
@@ -1947,7 +1947,7 @@ void Scheduling::AddNodeToBundle(Node *n, const Block *bb) {
#endif
// At least 1 instruction is on the available list
- // that is not dependant on the branch
+ // that is not dependent on the branch
for (uint i = 0; i < siz; i++) {
Node *d = _available[i];
const Pipeline *avail_pipeline = d->pipeline();
diff --git a/src/share/vm/opto/parse.hpp b/src/share/vm/opto/parse.hpp
index d33acba3e..e00dfca6a 100644
--- a/src/share/vm/opto/parse.hpp
+++ b/src/share/vm/opto/parse.hpp
@@ -78,7 +78,7 @@ public:
};
// See if it is OK to inline.
- // The reciever is the inline tree for the caller.
+ // The receiver is the inline tree for the caller.
//
// The result is a temperature indication. If it is hot or cold,
// inlining is immediate or undesirable. Otherwise, the info block
diff --git a/src/share/vm/opto/parse1.cpp b/src/share/vm/opto/parse1.cpp
index b896faca4..12b75fb32 100644
--- a/src/share/vm/opto/parse1.cpp
+++ b/src/share/vm/opto/parse1.cpp
@@ -607,7 +607,7 @@ void Parse::do_all_blocks() {
if (control()->is_Region() && !block->is_loop_head() && !has_irreducible && !block->is_handler()) {
// In the absence of irreducible loops, the Region and Phis
// associated with a merge that doesn't involve a backedge can
- // be simplfied now since the RPO parsing order guarantees
+ // be simplified now since the RPO parsing order guarantees
// that any path which was supposed to reach here has already
// been parsed or must be dead.
Node* c = control();
diff --git a/src/share/vm/opto/parse2.cpp b/src/share/vm/opto/parse2.cpp
index 0f40fdd96..201ffad99 100644
--- a/src/share/vm/opto/parse2.cpp
+++ b/src/share/vm/opto/parse2.cpp
@@ -32,7 +32,7 @@ extern int explicit_null_checks_inserted,
void Parse::array_load(BasicType elem_type) {
const Type* elem = Type::TOP;
Node* adr = array_addressing(elem_type, 0, &elem);
- if (stopped()) return; // guarenteed null or range check
+ if (stopped()) return; // guaranteed null or range check
_sp -= 2; // Pop array and index
const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(elem_type);
Node* ld = make_load(control(), adr, elem, elem_type, adr_type);
@@ -43,7 +43,7 @@ void Parse::array_load(BasicType elem_type) {
//--------------------------------array_store----------------------------------
void Parse::array_store(BasicType elem_type) {
Node* adr = array_addressing(elem_type, 1);
- if (stopped()) return; // guarenteed null or range check
+ if (stopped()) return; // guaranteed null or range check
Node* val = pop();
_sp -= 2; // Pop array and index
const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(elem_type);
@@ -1541,14 +1541,14 @@ void Parse::do_one_bytecode() {
case Bytecodes::_aaload: array_load(T_OBJECT); break;
case Bytecodes::_laload: {
a = array_addressing(T_LONG, 0);
- if (stopped()) return; // guarenteed null or range check
+ if (stopped()) return; // guaranteed null or range check
_sp -= 2; // Pop array and index
push_pair( make_load(control(), a, TypeLong::LONG, T_LONG, TypeAryPtr::LONGS));
break;
}
case Bytecodes::_daload: {
a = array_addressing(T_DOUBLE, 0);
- if (stopped()) return; // guarenteed null or range check
+ if (stopped()) return; // guaranteed null or range check
_sp -= 2; // Pop array and index
push_pair( make_load(control(), a, Type::DOUBLE, T_DOUBLE, TypeAryPtr::DOUBLES));
break;
@@ -1560,7 +1560,7 @@ void Parse::do_one_bytecode() {
case Bytecodes::_fastore: array_store(T_FLOAT); break;
case Bytecodes::_aastore: {
d = array_addressing(T_OBJECT, 1);
- if (stopped()) return; // guarenteed null or range check
+ if (stopped()) return; // guaranteed null or range check
array_store_check();
c = pop(); // Oop to store
b = pop(); // index (already used)
@@ -1572,7 +1572,7 @@ void Parse::do_one_bytecode() {
}
case Bytecodes::_lastore: {
a = array_addressing(T_LONG, 2);
- if (stopped()) return; // guarenteed null or range check
+ if (stopped()) return; // guaranteed null or range check
c = pop_pair();
_sp -= 2; // Pop array and index
store_to_memory(control(), a, c, T_LONG, TypeAryPtr::LONGS);
@@ -1580,7 +1580,7 @@ void Parse::do_one_bytecode() {
}
case Bytecodes::_dastore: {
a = array_addressing(T_DOUBLE, 2);
- if (stopped()) return; // guarenteed null or range check
+ if (stopped()) return; // guaranteed null or range check
c = pop_pair();
_sp -= 2; // Pop array and index
c = dstore_rounding(c);
diff --git a/src/share/vm/opto/phase.cpp b/src/share/vm/opto/phase.cpp
index 904214a33..b0ea80a82 100644
--- a/src/share/vm/opto/phase.cpp
+++ b/src/share/vm/opto/phase.cpp
@@ -73,7 +73,7 @@ elapsedTimer Phase::_t_buildOopMaps;
//------------------------------Phase------------------------------------------
Phase::Phase( PhaseNumber pnum ) : _pnum(pnum), C( pnum == Compiler ? NULL : Compile::current()) {
- // Poll for requests from shutdown mechanism to quiesce comiler (4448539, 4448544).
+ // Poll for requests from shutdown mechanism to quiesce compiler (4448539, 4448544).
// This is an effective place to poll, since the compiler is full of phases.
// In particular, every inlining site uses a recursively created Parse phase.
CompileBroker::maybe_block();
diff --git a/src/share/vm/opto/phaseX.cpp b/src/share/vm/opto/phaseX.cpp
index 484629a90..78c484522 100644
--- a/src/share/vm/opto/phaseX.cpp
+++ b/src/share/vm/opto/phaseX.cpp
@@ -196,7 +196,7 @@ void NodeHash::hash_insert( Node *n ) {
}
//------------------------------hash_delete------------------------------------
-// Replace in hash table with sentinal
+// Replace in hash table with sentinel
bool NodeHash::hash_delete( const Node *n ) {
Node *k;
uint hash = n->hash();
@@ -207,7 +207,7 @@ bool NodeHash::hash_delete( const Node *n ) {
uint key = hash & (_max-1);
uint stride = key | 0x01;
debug_only( uint counter = 0; );
- for( ; /* (k != NULL) && (k != _sentinal) */; ) {
+ for( ; /* (k != NULL) && (k != _sentinel) */; ) {
debug_only( counter++ );
debug_only( _delete_probes++ );
k = _table[key]; // Get hashed value
@@ -715,7 +715,7 @@ Node *PhaseGVN::transform_no_reclaim( Node *n ) {
#ifdef ASSERT
//------------------------------dead_loop_check--------------------------------
-// Check for a simple dead loop when a data node references itself direcly
+// Check for a simple dead loop when a data node references itself directly
// or through another data node excluding cons and phis.
void PhaseGVN::dead_loop_check( Node *n ) {
// Phi may reference itself in a loop
@@ -1359,7 +1359,7 @@ void PhaseCCP::analyze() {
worklist.push(p); // Propagate change to user
}
}
- // If we changed the reciever type to a call, we need to revisit
+ // If we changed the receiver type of a call, we need to revisit
// the Catch following the call. It's looking for a non-NULL
// receiver to know when to enable the regular fall-through path
// in addition to the NullPtrException path
diff --git a/src/share/vm/opto/postaloc.cpp b/src/share/vm/opto/postaloc.cpp
index cd881065f..fa28c6aa1 100644
--- a/src/share/vm/opto/postaloc.cpp
+++ b/src/share/vm/opto/postaloc.cpp
@@ -46,7 +46,7 @@ bool PhaseChaitin::may_be_copy_of_callee( Node *def ) const {
// be splitting live ranges for callee save registers to such
// an extent that in large methods the chains can be very long
// (50+). The conservative answer is to return true if we don't
- // know as this prevents optimizations from occuring.
+ // know as this prevents optimizations from occurring.
const int limit = 60;
int i;
@@ -286,7 +286,7 @@ bool PhaseChaitin::eliminate_copy_of_constant(Node* val, Node* n,
//
// n will be replaced with the old value but n might have
// kills projections associated with it so remove them now so that
- // yank_if_dead will be able to elminate the copy once the uses
+ // yank_if_dead will be able to eliminate the copy once the uses
// have been transferred to the old[value].
for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
Node* use = n->fast_out(i);
diff --git a/src/share/vm/opto/reg_split.cpp b/src/share/vm/opto/reg_split.cpp
index 0efbe04b9..62636023f 100644
--- a/src/share/vm/opto/reg_split.cpp
+++ b/src/share/vm/opto/reg_split.cpp
@@ -26,8 +26,8 @@
#include "incls/_reg_split.cpp.incl"
//------------------------------Split--------------------------------------
-// Walk the graph in RPO and for each lrg which spills, propogate reaching
-// definitions. During propogation, split the live range around regions of
+// Walk the graph in RPO and for each lrg which spills, propagate reaching
+// definitions. During propagation, split the live range around regions of
// High Register Pressure (HRP). If a Def is in a region of Low Register
// Pressure (LRP), it will not get spilled until we encounter a region of
// HRP between it and one of its uses. We will spill at the transition
@@ -88,7 +88,7 @@ Node *PhaseChaitin::get_spillcopy_wide( Node *def, Node *use, uint uidx ) {
}
//------------------------------insert_proj------------------------------------
-// Insert the spill at chosen location. Skip over any interveneing Proj's or
+// Insert the spill at chosen location. Skip over any intervening Proj's or
// Phis. Skip over a CatchNode and projs, inserting in the fall-through block
// instead. Update high-pressure indices. Create a new live range.
void PhaseChaitin::insert_proj( Block *b, uint i, Node *spill, uint maxlrg ) {
@@ -125,7 +125,7 @@ void PhaseChaitin::insert_proj( Block *b, uint i, Node *spill, uint maxlrg ) {
}
//------------------------------split_DEF--------------------------------------
-// There are four catagories of Split; UP/DOWN x DEF/USE
+// There are four categories of Split; UP/DOWN x DEF/USE
// Only three of these really occur as DOWN/USE will always color
// Any Split with a DEF cannot CISC-Spill now. Thus we need
// two helper routines, one for Split DEFS (insert after instruction),
@@ -726,7 +726,7 @@ uint PhaseChaitin::Split( uint maxlrg ) {
// ********** Handle Crossing HRP Boundary **********
if( (insidx == b->_ihrp_index) || (insidx == b->_fhrp_index) ) {
for( slidx = 0; slidx < spill_cnt; slidx++ ) {
- // Check for need to split at HRP boundry - split if UP
+ // Check for need to split at HRP boundary - split if UP
n1 = Reachblock[slidx];
// bail out if no reaching DEF
if( n1 == NULL ) continue;
diff --git a/src/share/vm/opto/runtime.cpp b/src/share/vm/opto/runtime.cpp
index 7b5effc81..cbbbfacfc 100644
--- a/src/share/vm/opto/runtime.cpp
+++ b/src/share/vm/opto/runtime.cpp
@@ -1196,7 +1196,7 @@ JRT_END
// The following does not work because for one thing, the
// thread state is wrong; it expects java, but it is native.
-// Also, the invarients in a native stub are different and
+// Also, the invariants in a native stub are different and
// I'm not sure it is safe to have a MachCalRuntimeDirectNode
// in there.
// So for now, we do not zap in native stubs.
diff --git a/src/share/vm/opto/split_if.cpp b/src/share/vm/opto/split_if.cpp
index 130b26675..a7a6baaa9 100644
--- a/src/share/vm/opto/split_if.cpp
+++ b/src/share/vm/opto/split_if.cpp
@@ -318,7 +318,7 @@ Node *PhaseIdealLoop::find_use_block( Node *use, Node *def, Node *old_false, Nod
if( use->is_Phi() ) { // Phi uses in prior block
// Grab the first Phi use; there may be many.
- // Each will be handled as a seperate iteration of
+ // Each will be handled as a separate iteration of
// the "while( phi->outcnt() )" loop.
uint j;
for( j = 1; j < use->req(); j++ )
diff --git a/src/share/vm/opto/superword.cpp b/src/share/vm/opto/superword.cpp
index 0b125a922..d64d2e5ec 100644
--- a/src/share/vm/opto/superword.cpp
+++ b/src/share/vm/opto/superword.cpp
@@ -470,7 +470,7 @@ void SuperWord::mem_slice_preds(Node* start, Node* stop, GrowableArray<Node*> &p
}
//------------------------------stmts_can_pack---------------------------
-// Can s1 and s2 be in a pack with s1 immediately preceeding s2 and
+// Can s1 and s2 be in a pack with s1 immediately preceding s2 and
// s1 aligned at "align"
bool SuperWord::stmts_can_pack(Node* s1, Node* s2, int align) {
if (isomorphic(s1, s2)) {
@@ -869,7 +869,7 @@ bool SuperWord::profitable(Node_List* p) {
for (uint i = start; i < end; i++) {
if (!is_vector_use(p0, i)) {
// For now, return false if not scalar promotion case (inputs are the same.)
- // Later, implement PackNode and allow differring, non-vector inputs
+ // Later, implement PackNode and allow differing, non-vector inputs
// (maybe just the ones from outside the block.)
Node* p0_def = p0->in(i);
for (uint j = 1; j < p->size(); j++) {
diff --git a/src/share/vm/opto/superword.hpp b/src/share/vm/opto/superword.hpp
index b60cc83c1..1c09607ed 100644
--- a/src/share/vm/opto/superword.hpp
+++ b/src/share/vm/opto/superword.hpp
@@ -308,7 +308,7 @@ class SuperWord : public ResourceObj {
void dependence_graph();
// Return a memory slice (node list) in predecessor order starting at "start"
void mem_slice_preds(Node* start, Node* stop, GrowableArray<Node*> &preds);
- // Can s1 and s2 be in a pack with s1 immediately preceeding s2 and s1 aligned at "align"
+ // Can s1 and s2 be in a pack with s1 immediately preceding s2 and s1 aligned at "align"
bool stmts_can_pack(Node* s1, Node* s2, int align);
// Does s exist in a pack at position pos?
bool exists_at(Node* s, uint pos);
diff --git a/src/share/vm/opto/type.cpp b/src/share/vm/opto/type.cpp
index dff39c8e5..e831a2ad6 100644
--- a/src/share/vm/opto/type.cpp
+++ b/src/share/vm/opto/type.cpp
@@ -226,6 +226,7 @@ void Type::Initialize_shared(Compile* current) {
TypeInt::CC_LE = TypeInt::make(-1, 0, WidenMin);
TypeInt::CC_GE = TypeInt::make( 0, 1, WidenMin); // == TypeInt::BOOL
TypeInt::BYTE = TypeInt::make(-128,127, WidenMin); // Bytes
+ TypeInt::UBYTE = TypeInt::make(0, 255, WidenMin); // Unsigned Bytes
TypeInt::CHAR = TypeInt::make(0,65535, WidenMin); // Java chars
TypeInt::SHORT = TypeInt::make(-32768,32767, WidenMin); // Java shorts
TypeInt::POS = TypeInt::make(0,max_jint, WidenMin); // Non-neg values
@@ -1022,6 +1023,7 @@ const TypeInt *TypeInt::CC_EQ; // [0] == ZERO
const TypeInt *TypeInt::CC_LE; // [-1,0]
const TypeInt *TypeInt::CC_GE; // [0,1] == BOOL (!)
const TypeInt *TypeInt::BYTE; // Bytes, -128 to 127
+const TypeInt *TypeInt::UBYTE; // Unsigned Bytes, 0 to 255
const TypeInt *TypeInt::CHAR; // Java chars, 0-65535
const TypeInt *TypeInt::SHORT; // Java shorts, -32768-32767
const TypeInt *TypeInt::POS; // Positive 32-bit integers or zero
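With the new UBYTE range the type system can prove that an unsigned byte load yields a value in [0,255], so for example a mask that keeps only higher bits folds to zero. A small illustration (not HotSpot code):

    #include <cassert>

    int main() {
      for (int stored = -300; stored <= 300; ++stored) {
        unsigned char mem = (unsigned char)stored;
        int x = mem;                 // LoadUB result: typed [0,255], i.e. TypeInt::UBYTE
        assert(0 <= x && x <= 255);
        assert((x & ~0xFF) == 0);    // bits above bit 7 are provably zero
      }
      return 0;
    }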
@@ -2455,7 +2457,7 @@ intptr_t TypeOopPtr::get_con() const {
// code and dereferenced at the time the nmethod is made. Until that time,
// it is not reasonable to do arithmetic with the addresses of oops (we don't
// have access to the addresses!). This does not seem to currently happen,
- // but this assertion here is to help prevent its occurrance.
+ // but this assertion here is to help prevent its occurrence.
tty->print_cr("Found oop constant with non-zero offset");
ShouldNotReachHere();
}
@@ -2761,7 +2763,7 @@ const Type *TypeInstPtr::xmeet( const Type *t ) const {
// LCA is object_klass, but if we subclass from the top we can do better
if( above_centerline(_ptr) ) { // if( _ptr == TopPTR || _ptr == AnyNull )
// If 'this' (InstPtr) is above the centerline and it is Object class
- // then we can subclass in the Java class heirarchy.
+ // then we can subclass in the Java class hierarchy.
if (klass()->equals(ciEnv::current()->Object_klass())) {
// that is, tp's array type is a subtype of my klass
return TypeAryPtr::make(ptr, tp->ary(), tp->klass(), tp->klass_is_exact(), offset, instance_id);
@@ -3022,7 +3024,7 @@ ciType* TypeInstPtr::java_mirror_type() const {
//------------------------------xdual------------------------------------------
// Dual: do NOT dual on klasses. This means I do NOT understand the Java
-// inheritence mechanism.
+// inheritance mechanism.
const Type *TypeInstPtr::xdual() const {
return new TypeInstPtr( dual_ptr(), klass(), klass_is_exact(), const_oop(), dual_offset(), dual_instance_id() );
}
@@ -3176,7 +3178,7 @@ const TypeInt* TypeAryPtr::narrow_size_type(const TypeInt* size) const {
bool chg = false;
if (lo < min_lo) { lo = min_lo; chg = true; }
if (hi > max_hi) { hi = max_hi; chg = true; }
- // Negative length arrays will produce weird intermediate dead fath-path code
+ // Negative length arrays will produce weird intermediate dead fast-path code
if (lo > hi)
return TypeInt::ZERO;
if (!chg)
@@ -3358,7 +3360,7 @@ const Type *TypeAryPtr::xmeet( const Type *t ) const {
// LCA is object_klass, but if we subclass from the top we can do better
if (above_centerline(tp->ptr())) {
// If 'tp' is above the centerline and it is Object class
- // then we can subclass in the Java class heirarchy.
+ // then we can subclass in the Java class hierarchy.
if( tp->klass()->equals(ciEnv::current()->Object_klass()) ) {
// that is, my array type is a subtype of 'tp' klass
return make( ptr, _ary, _klass, _klass_is_exact, offset, instance_id );
diff --git a/src/share/vm/opto/type.hpp b/src/share/vm/opto/type.hpp
index 0b14763b0..917c271cc 100644
--- a/src/share/vm/opto/type.hpp
+++ b/src/share/vm/opto/type.hpp
@@ -415,6 +415,7 @@ public:
static const TypeInt *CC_LE; // [-1,0]
static const TypeInt *CC_GE; // [0,1] == BOOL (!)
static const TypeInt *BYTE;
+ static const TypeInt *UBYTE;
static const TypeInt *CHAR;
static const TypeInt *SHORT;
static const TypeInt *POS;