/*
 * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/block.hpp"
#include "opto/c2compiler.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/machnode.hpp"
#include "opto/runtime.hpp"
#if defined AD_MD_HPP
# include AD_MD_HPP
#elif defined TARGET_ARCH_MODEL_x86_32
# include "adfiles/ad_x86_32.hpp"
#elif defined TARGET_ARCH_MODEL_x86_64
# include "adfiles/ad_x86_64.hpp"
#elif defined TARGET_ARCH_MODEL_sparc
# include "adfiles/ad_sparc.hpp"
#elif defined TARGET_ARCH_MODEL_zero
# include "adfiles/ad_zero.hpp"
#elif defined TARGET_ARCH_MODEL_ppc_64
# include "adfiles/ad_ppc_64.hpp"
#endif

// Optimization - Graph Style

// Check whether val is a not-null-decoded compressed oop,
// i.e. one that will point to the base of the heap if it represents NULL.
static bool accesses_heap_base_zone(Node *val) {
  if (Universe::narrow_oop_base() > 0) { // Implies UseCompressedOops.
    if (val && val->is_Mach()) {
      if (val->as_Mach()->ideal_Opcode() == Op_DecodeN) {
        // This assumes all Decodes with TypePtr::NotNull are matched to nodes that
        // decode NULL to point to the heap base (Decode_NN).
        if (val->bottom_type()->is_oopptr()->ptr() == TypePtr::NotNull) {
          return true;
        }
      }
      // Must recognize load operation with Decode matched in memory operand.
      // We should not reach here except for PPC/AIX, as os::zero_page_read_protected()
      // returns true everywhere else. On PPC, no such memory operands
      // exist, therefore we did not yet implement a check for such operands.
      NOT_AIX(Unimplemented());
    }
  }
  return false;
}

static bool needs_explicit_null_check_for_read(Node *val) {
  // On some OSes (AIX) the page at address 0 is only write protected.
  // If so, only Store operations will trap.
  if (os::zero_page_read_protected()) {
    return false;  // Implicit null check will work.
  }
  // Also a read accessing the base of a heap-based compressed heap will trap.
  if (accesses_heap_base_zone(val) &&                    // Hits the base zone page.
      Universe::narrow_oop_use_implicit_null_checks()) { // Base zone page is protected.
    return false;
  }

  return true;
}
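
// A sketch of why the read case above works (assuming the usual
// compressed-oops decode; not the actual decode implementation):
//
//   oop decode_not_null(narrowOop v) {
//     return (oop)(narrow_oop_base() + ((uintptr_t)v << narrow_oop_shift()));
//   }
//
// decode_not_null(0) == heap base, so a memory access through it lands in
// the protected base-zone page and traps -- unless the OS maps the zero
// page write-protected only, in which case reads still need an explicit
// check, as decided above.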

//------------------------------implicit_null_check----------------------------
// Detect implicit-null-check opportunities.  Basically, find NULL checks
// with suitable memory ops nearby.  Use the memory op to do the NULL check.
// I can generate a memory op if there is not one nearby.
// The proj is the control projection for the not-null case.
// The val is the pointer being checked for nullness or
// a decodeHeapOop_not_null node if it did not fold into the address.
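//
// Roughly, the transformation looks like this (a sketch, assuming a simple
// field load; field_offset is hypothetical):
//
//   before:  if (ptr == NULL) goto uncommon_trap;  // explicit test + branch
//            val = ptr->field;                     // load on not-null path
//
//   after:   val = [ptr + field_offset];           // MachNullCheck: the load
//            // itself faults when ptr is NULL and the signal handler
//            // dispatches to the uncommon trap.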
void PhaseCFG::implicit_null_check(Block* block, Node *proj, Node *val, int allowed_reasons) {
  // Assume that if a null check is needed for offset 0 it is always needed.
  // Intel Solaris doesn't support any null checks yet, and no
  // mechanism exists (yet) to set the switches at an os_cpu level
  if( !ImplicitNullChecks || MacroAssembler::needs_explicit_null_check(0)) return;

  // Make sure the ptr-is-null path appears to be uncommon!
  float f = block->end()->as_MachIf()->_prob;
  if( proj->Opcode() == Op_IfTrue ) f = 1.0f - f;
  if( f > PROB_UNLIKELY_MAG(4) ) return;

  uint bidx = 0;                // Capture index of value into memop
  bool was_store;               // Memory op is a store op

  // Get the successor block for if the test ptr is non-null
  Block* not_null_block;  // this one goes with the proj
  Block* null_block;
  if (block->get_node(block->number_of_nodes()-1) == proj) {
    null_block     = block->_succs[0];
    not_null_block = block->_succs[1];
  } else {
    assert(block->get_node(block->number_of_nodes()-2) == proj, "proj is one or the other");
    not_null_block = block->_succs[0];
    null_block     = block->_succs[1];
  }
  while (null_block->is_Empty() == Block::empty_with_goto) {
    null_block     = null_block->_succs[0];
  }

  // Search the exception block for an uncommon trap.
  // (See Parse::do_if and Parse::do_ifnull for the reason
  // we need an uncommon trap.  Briefly, we need a way to
  // detect failure of this optimization, as in 6366351.)
  {
    bool found_trap = false;
    for (uint i1 = 0; i1 < null_block->number_of_nodes(); i1++) {
      Node* nn = null_block->get_node(i1);
      if (nn->is_MachCall() &&
          nn->as_MachCall()->entry_point() == SharedRuntime::uncommon_trap_blob()->entry_point()) {
        const Type* trtype = nn->in(TypeFunc::Parms)->bottom_type();
        if (trtype->isa_int() && trtype->is_int()->is_con()) {
          jint tr_con = trtype->is_int()->get_con();
          Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(tr_con);
          Deoptimization::DeoptAction action = Deoptimization::trap_request_action(tr_con);
          assert((int)reason < (int)BitsPerInt, "recode bit map");
          if (is_set_nth_bit(allowed_reasons, (int) reason)
              && action != Deoptimization::Action_none) {
            // This uncommon trap is sure to recompile, eventually.
            // When that happens, C->too_many_traps will prevent
            // this transformation from happening again.
            found_trap = true;
          }
        }
        break;
      }
    }
    if (!found_trap) {
      // We did not find an uncommon trap.
      return;
    }
  }

  // Check for decodeHeapOop_not_null node which did not fold into address
  bool is_decoden = ((intptr_t)val) & 1;
  val = (Node*)(((intptr_t)val) & ~1);

  assert(!is_decoden || (val->in(0) == NULL) && val->is_Mach() &&
         (val->as_Mach()->ideal_Opcode() == Op_DecodeN), "sanity");
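  // (The low-bit tag is the caller's encoding for "val is a DecodeN that
  //  did not fold into the address", roughly val = (Node*)((intptr_t)n | 1);
  //  the two lines above read and strip that tag.)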

  // Search the successor block for a load or store whose base value is also
  // the tested value.  There may be several.
  Node_List *out = new Node_List(Thread::current()->resource_area());
  MachNode *best = NULL;        // Best found so far
  for (DUIterator i = val->outs(); val->has_out(i); i++) {
    Node *m = val->out(i);
    if( !m->is_Mach() ) continue;
    MachNode *mach = m->as_Mach();
    was_store = false;
    int iop = mach->ideal_Opcode();
    switch( iop ) {
    case Op_LoadB:
    case Op_LoadUB:
    case Op_LoadUS:
    case Op_LoadD:
    case Op_LoadF:
    case Op_LoadI:
    case Op_LoadL:
    case Op_LoadP:
    case Op_LoadN:
    case Op_LoadS:
    case Op_LoadKlass:
    case Op_LoadNKlass:
    case Op_LoadRange:
    case Op_LoadD_unaligned:
    case Op_LoadL_unaligned:
      assert(mach->in(2) == val, "should be address");
      break;
    case Op_StoreB:
    case Op_StoreC:
    case Op_StoreCM:
    case Op_StoreD:
    case Op_StoreF:
    case Op_StoreI:
    case Op_StoreL:
    case Op_StoreP:
    case Op_StoreN:
    case Op_StoreNKlass:
      was_store = true;         // Memory op is a store op
      // Stores will have their address in slot 2 (memory in slot 1).
      // If the value being null-checked is in another slot, it means we
      // are storing the checked value, which does NOT check the value!
      if( mach->in(2) != val ) continue;
      break;                    // Found a memory op?
    case Op_StrComp:
    case Op_StrEquals:
    case Op_StrIndexOf:
    case Op_AryEq:
    case Op_EncodeISOArray:
      // Not a legit memory op for implicit null check regardless of
      // embedded loads
      continue;
    default:                    // Also check for embedded loads
      if( !mach->needs_anti_dependence_check() )
        continue;               // Not a memory op; skip it
      if( must_clone[iop] ) {
        // Do not move nodes which produce flags because
        // RA will try to clone it to place near branch and
        // it will cause recompilation, see clone_node().
        continue;
      }
      {
        // Check that value is used in memory address in
        // instructions with embedded load (CmpP val1,(val2+off)).
        Node* base;
        Node* index;
        const MachOper* oper = mach->memory_inputs(base, index);
        if (oper == NULL || oper == (MachOper*)-1) {
          continue;             // Not a memory op; skip it
        }
        if (val == base ||
            (val == index && val->bottom_type()->isa_narrowoop())) {
          break;                // Found it
        } else {
          continue;             // Skip it
        }
      }
      break;
    }

    // On some OSes (AIX) the page at address 0 is only write protected.
    // If so, only Store operations will trap.
    // But a read accessing the base of a heap-based compressed heap will trap.
    if (!was_store && needs_explicit_null_check_for_read(val)) {
      continue;
    }

    // check if the offset is not too high for implicit exception
    {
      intptr_t offset = 0;
      const TypePtr *adr_type = NULL;  // Do not need this return value here
      const Node* base = mach->get_base_and_disp(offset, adr_type);
      if (base == NULL || base == NodeSentinel) {
        // Narrow oop address doesn't have base, only index
        if( val->bottom_type()->isa_narrowoop() &&
            MacroAssembler::needs_explicit_null_check(offset) )
          continue;             // Give up if offset is beyond page size
        // cannot reason about it; is probably not implicit null exception
      } else {
        const TypePtr* tptr;
        if (UseCompressedOops && (Universe::narrow_oop_shift() == 0 ||
                                  Universe::narrow_klass_shift() == 0)) {
          // A 32-bit narrow oop can be the base of an address expression
          tptr = base->get_ptr_type();
        } else {
          // only regular oops are expected here
          tptr = base->bottom_type()->is_ptr();
        }
        // Give up if offset is not a compile-time constant
        if( offset == Type::OffsetBot || tptr->_offset == Type::OffsetBot )
          continue;
        offset += tptr->_offset; // correct if base is offsetted
        if( MacroAssembler::needs_explicit_null_check(offset) )
          continue;             // Give up if reference is beyond 4K page size
      }
    }

    // Check ctrl input to see if the null-check dominates the memory op
    Block *cb = get_block_for_node(mach);
    cb = cb->_idom;             // Always hoist at least 1 block
    if( !was_store ) {          // Stores can be hoisted only one block
      while( cb->_dom_depth > (block->_dom_depth + 1))
        cb = cb->_idom;         // Hoist loads as far as we want
      // The non-null-block should dominate the memory op, too. Live
      // range spilling will insert a spill in the non-null-block if it
      // needs to spill the memory op for an implicit null check.
      if (cb->_dom_depth == (block->_dom_depth + 1)) {
        if (cb != not_null_block) continue;
        cb = cb->_idom;
      }
    }
    if( cb != block ) continue;

    // Found a memory user; see if it can be hoisted to check-block
    uint vidx = 0;              // Capture index of value into memop
    uint j;
    for( j = mach->req()-1; j > 0; j-- ) {
      if( mach->in(j) == val ) {
        vidx = j;
        // Ignore DecodeN val which could be hoisted to where needed.
        if( is_decoden ) continue;
      }
      // Block of memory-op input
      Block *inb = get_block_for_node(mach->in(j));
      Block *b = block;          // Start from nul check
      while( b != inb && b->_dom_depth > inb->_dom_depth )
        b = b->_idom;           // search upwards for input
      // See if input dominates null check
      if( b != inb )
        break;
    }
    if( j > 0 )
      continue;
    Block *mb = get_block_for_node(mach);
    // Hoisting stores requires more checks for the anti-dependence case.
    // Give up hoisting if we have to move the store past any load.
    if( was_store ) {
      Block *b = mb;            // Start searching here for a local load
      // mach use (faulting) trying to hoist
      // n might be blocker to hoisting
      while( b != block ) {
        uint k;
        for( k = 1; k < b->number_of_nodes(); k++ ) {
          Node *n = b->get_node(k);
          if( n->needs_anti_dependence_check() &&
              n->in(LoadNode::Memory) == mach->in(StoreNode::Memory) )
            break;              // Found anti-dependent load
        }
        if( k < b->number_of_nodes() )
          break;                // Found anti-dependent load
        // Make sure control does not do a merge (would have to check allpaths)
        if( b->num_preds() != 2 ) break;
        b = get_block_for_node(b->pred(1)); // Move up to predecessor block
      }
      if( b != block ) continue;
    }

    // Make sure this memory op is not already being used for a NullCheck
    Node *e = mb->end();
    if( e->is_MachNullCheck() && e->in(1) == mach )
      continue;                 // Already being used as a NULL check

    // Found a candidate!  Pick one with least dom depth - the highest
    // in the dom tree should be closest to the null check.
    if (best == NULL || get_block_for_node(mach)->_dom_depth < get_block_for_node(best)->_dom_depth) {
      best = mach;
      bidx = vidx;
    }
  }
  // No candidate!
  if (best == NULL) {
    return;
  }

  // ---- Found an implicit null check
  extern int implicit_null_checks;
  implicit_null_checks++;

  if( is_decoden ) {
    // Check if we need to hoist decodeHeapOop_not_null first.
    Block *valb = get_block_for_node(val);
    if( block != valb && block->_dom_depth < valb->_dom_depth ) {
      // Hoist it up to the end of the test block.
      valb->find_remove(val);
      block->add_inst(val);
      map_node_to_block(val, block);
      // DecodeN on x86 may kill flags. Check for flag-killing projections
      // that also need to be hoisted.
      for (DUIterator_Fast jmax, j = val->fast_outs(jmax); j < jmax; j++) {
        Node* n = val->fast_out(j);
        if( n->is_MachProj() ) {
          get_block_for_node(n)->find_remove(n);
          block->add_inst(n);
          map_node_to_block(n, block);
        }
      }
    }
  }
  // Hoist the memory candidate up to the end of the test block.
  Block *old_block = get_block_for_node(best);
  old_block->find_remove(best);
  block->add_inst(best);
  map_node_to_block(best, block);

  // Move the control dependence
  if (best->in(0) && best->in(0) == old_block->head())
    best->set_req(0, block->head());

  // Check for flag-killing projections that also need to be hoisted
  // Should be DU safe because no edge updates.
  for (DUIterator_Fast jmax, j = best->fast_outs(jmax); j < jmax; j++) {
    Node* n = best->fast_out(j);
    if( n->is_MachProj() ) {
      get_block_for_node(n)->find_remove(n);
      block->add_inst(n);
      map_node_to_block(n, block);
    }
  }

  // proj==Op_True --> ne test; proj==Op_False --> eq test.
  // One of two graph shapes got matched:
  //   (IfTrue  (If (Bool NE (CmpP ptr NULL))))
  //   (IfFalse (If (Bool EQ (CmpP ptr NULL))))
  // NULL checks are always branch-if-eq.  If we see an IfTrue projection
  // then we are replacing a 'ne' test with an 'eq' NULL check test.
  // We need to flip the projections to keep the same semantics.
  if( proj->Opcode() == Op_IfTrue ) {
    // Swap order of projections in basic block to swap branch targets
    Node *tmp1 = block->get_node(block->end_idx()+1);
    Node *tmp2 = block->get_node(block->end_idx()+2);
    block->map_node(tmp2, block->end_idx()+1);
    block->map_node(tmp1, block->end_idx()+2);
    Node *tmp = new (C) Node(C->top()); // Use not NULL input
    tmp1->replace_by(tmp);
    tmp2->replace_by(tmp1);
    tmp->replace_by(tmp2);
    tmp->destruct();
  }

  // Remove the existing null check; use a new implicit null check instead.
  // Since schedule-local needs precise def-use info, we need to correct
  // it as well.
  Node *old_tst = proj->in(0);
  MachNode *nul_chk = new (C) MachNullCheckNode(old_tst->in(0),best,bidx);
  block->map_node(nul_chk, block->end_idx());
  map_node_to_block(nul_chk, block);
  // Redirect users of old_tst to nul_chk
  for (DUIterator_Last i2min, i2 = old_tst->last_outs(i2min); i2 >= i2min; --i2)
    old_tst->last_out(i2)->set_req(0, nul_chk);
  // Clean-up any dead code
  for (uint i3 = 0; i3 < old_tst->req(); i3++) {
    Node* in = old_tst->in(i3);
    old_tst->set_req(i3, NULL);
    if (in->outcnt() == 0) {
      // Remove dead input node
      in->disconnect_inputs(NULL, C);
      block->find_remove(in);
    }
  }

  latency_from_uses(nul_chk);
  latency_from_uses(best);
}


//------------------------------select-----------------------------------------
// Select a nice fellow from the worklist to schedule next. If there is only
// one choice, then use it. Projections take top priority for correctness
// reasons - if I see a projection, then it is next.  There are a number of
// other special cases, for instructions that consume condition codes, et al.
// These are chosen immediately. Some instructions are required to immediately
// precede the last instruction in the block, and these are taken last. Of the
// remaining cases (most), choose the instruction with the greatest latency
// (that is, the greatest number of pseudo-cycles required to the end of the
// routine). If there is a tie, choose the instruction with the most inputs.
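//
// A rough summary of the tiers below (a paraphrase, not a specification):
//   - projections, constant Top, CreateEx and CheckCastPP return at once;
//   - a flag producer (must_clone) consumed by this block's branch is
//     deferred so it ends up adjacent to the branch;
//   - MachTemps, and flag producers whose users still wait on other
//     inputs, get n_choice == 1; a node fed by a flag producer gets
//     n_choice == 3; everything else gets n_choice == 2;
//   - remaining ties break on latency, then input count, with StressLCM
//     randomizing the tie-break.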
Node* PhaseCFG::select(Block* block, Node_List &worklist, GrowableArray<int> &ready_cnt, VectorSet &next_call, uint sched_slot) {

  // If only a single entry on the stack, use it
  uint cnt = worklist.size();
  if (cnt == 1) {
    Node *n = worklist[0];
    worklist.map(0,worklist.pop());
    return n;
  }

  uint choice  = 0; // Bigger is most important
  uint latency = 0; // Bigger is scheduled first
  uint score   = 0; // Bigger is better
  int idx = -1;     // Index in worklist
  int cand_cnt = 0; // Candidate count

  for( uint i=0; i<cnt; i++ ) { // Inspect entire worklist
    // Order in worklist is used to break ties.
    // See caller for how this is used to delay scheduling
    // of induction variable increments to after the other
    // uses of the phi are scheduled.
    Node *n = worklist[i];      // Get Node on worklist

    int iop = n->is_Mach() ? n->as_Mach()->ideal_Opcode() : 0;
    if( n->is_Proj() ||         // Projections always win
        n->Opcode()== Op_Con || // So does constant 'Top'
        iop == Op_CreateEx ||   // Create-exception must start block
        iop == Op_CheckCastPP
        ) {
      worklist.map(i,worklist.pop());
      return n;
    }

    // Final call in a block must be adjacent to 'catch'
    Node *e = block->end();
    if( e->is_Catch() && e->in(0)->in(0) == n )
      continue;

    // Memory op for an implicit null check has to be at the end of the block
    if( e->is_MachNullCheck() && e->in(1) == n )
      continue;

    // Schedule IV increment last.
    if (e->is_Mach() && e->as_Mach()->ideal_Opcode() == Op_CountedLoopEnd &&
        e->in(1)->in(1) == n && n->is_iteratively_computed())
      continue;

    uint n_choice  = 2;

    // See if this instruction is consumed by a branch. If so, then (as the
    // branch is the last instruction in the basic block) force it to the
    // end of the basic block
    if ( must_clone[iop] ) {
      // See if any use is a branch
      bool found_machif = false;

      for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
        Node* use = n->fast_out(j);

        // The use is a conditional branch, make them adjacent
        if (use->is_MachIf() && get_block_for_node(use) == block) {
          found_machif = true;
          break;
        }

        // More than this instruction pending for successor to be ready,
        // don't choose this if other opportunities are ready
        if (ready_cnt.at(use->_idx) > 1)
          n_choice = 1;
      }

      // loop terminated, prefer not to use this instruction
      if (found_machif)
        continue;
    }

    // See if this has a predecessor that is "must_clone", i.e. sets the
    // condition code. If so, choose this first
    for (uint j = 0; j < n->req() ; j++) {
      Node *inn = n->in(j);
      if (inn) {
        if (inn->is_Mach() && must_clone[inn->as_Mach()->ideal_Opcode()] ) {
          n_choice = 3;
          break;
        }
      }
    }

    // MachTemps should be scheduled last so they are near their uses
    if (n->is_MachTemp()) {
      n_choice = 1;
    }

    uint n_latency = get_latency_for_node(n);
    uint n_score   = n->req();   // Many inputs get high score to break ties

    // Keep best latency found
    cand_cnt++;
    if (choice < n_choice ||
        (choice == n_choice &&
         ((StressLCM && Compile::randomized_select(cand_cnt)) ||
          (!StressLCM &&
           (latency < n_latency ||
            (latency == n_latency &&
             (score < n_score))))))) {
      choice  = n_choice;
      latency = n_latency;
      score   = n_score;
      idx     = i;               // Also keep index in worklist
    }
  } // End of for all ready nodes in worklist

  assert(idx >= 0, "index should be set");
  Node *n = worklist[(uint)idx];      // Get the winner

  worklist.map((uint)idx, worklist.pop());     // Compress worklist
  return n;
}


//------------------------------set_next_call----------------------------------
void PhaseCFG::set_next_call(Block* block, Node* n, VectorSet& next_call) {
  if( next_call.test_set(n->_idx) ) return;
  for( uint i=0; i<n->len(); i++ ) {
    Node *m = n->in(i);
    if( !m ) continue;  // must see all nodes in block that precede call
    if (get_block_for_node(m) == block) {
      set_next_call(block, m, next_call);
    }
  }
}

//------------------------------needed_for_next_call---------------------------
// Set the flag 'next_call' for each Node that is needed for the next call to
// be scheduled.  This flag lets me bias scheduling so Nodes needed for the
// next subroutine call get priority - basically it moves things NOT needed
// for the next call till after the call.  This prevents me from trying to
// carry lots of stuff live across a call.
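//
// For example (a sketch): given
//   a = x + y;  call f();  use(a);
// if 'a' is not needed to set up the call to f, computing it is biased to
// after the call, so 'a' need not be kept live across the call in a
// callee-saved register or spill slot.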
void PhaseCFG::needed_for_next_call(Block* block, Node* this_call, VectorSet& next_call) {
  // Find the next control-defining Node in this block
  Node* call = NULL;
  for (DUIterator_Fast imax, i = this_call->fast_outs(imax); i < imax; i++) {
    Node* m = this_call->fast_out(i);
    if (get_block_for_node(m) == block && // Local-block user
        m != this_call &&       // Not self-start node
        m->is_MachCall()) {
      call = m;
      break;
    }
  }
  if (call == NULL)  return;    // No next call (e.g., block end is near)
  // Set next-call for all inputs to this call
  set_next_call(block, call, next_call);
}

//------------------------------add_call_kills-------------------------------------
// Helper function that adds caller-save registers to a MachProjNode's kill mask
static void add_call_kills(MachProjNode *proj, RegMask& regs, const char* save_policy, bool exclude_soe) {
  // Fill in the kill mask for the call
  for( OptoReg::Name r = OptoReg::Name(0); r < _last_Mach_Reg; r=OptoReg::add(r,1) ) {
    if( !regs.Member(r) ) {     // Not already defined by the call
      // Save-on-call register?
      if ((save_policy[r] == 'C') ||
          (save_policy[r] == 'A') ||
          ((save_policy[r] == 'E') && exclude_soe)) {
        proj->_rout.Insert(r);
      }
    }
  }
}
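
// A note on the save-policy characters tested above, following the ADLC's
// register save policies (stated here as an aid; treat as an assumption):
//   'N' no-save, 'C' save-on-call (caller-save),
//   'E' save-on-entry (callee-save, SOE), 'A' always-save.
// So the loop kills every caller-save and always-save register the call
// does not itself define, and kills SOE registers only when exclude_soe
// is set.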


//------------------------------sched_call-------------------------------------
uint PhaseCFG::sched_call(Block* block, uint node_cnt, Node_List& worklist, GrowableArray<int>& ready_cnt, MachCallNode* mcall, VectorSet& next_call) {
  RegMask regs;

  // Schedule all the users of the call right now.  All the users are
  // projection Nodes, so they must be scheduled next to the call.
  // Collect all the defined registers.
  for (DUIterator_Fast imax, i = mcall->fast_outs(imax); i < imax; i++) {
    Node* n = mcall->fast_out(i);
    assert( n->is_MachProj(), "" );
    int n_cnt = ready_cnt.at(n->_idx)-1;
    ready_cnt.at_put(n->_idx, n_cnt);
    assert( n_cnt == 0, "" );
    // Schedule next to call
    block->map_node(n, node_cnt++);
    // Collect defined registers
    regs.OR(n->out_RegMask());
    // Check for scheduling the next control-definer
    if( n->bottom_type() == Type::CONTROL )
      // Warm up next pile of heuristic bits
      needed_for_next_call(block, n, next_call);

    // Children of projections are now all ready
    for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
      Node* m = n->fast_out(j); // Get user
      if(get_block_for_node(m) != block) {
        continue;
      }
      if( m->is_Phi() ) continue;
      int m_cnt = ready_cnt.at(m->_idx)-1;
      ready_cnt.at_put(m->_idx, m_cnt);
      if( m_cnt == 0 )
        worklist.push(m);
    }

  }

  // Act as if the call defines the Frame Pointer.
  // Certainly the FP is alive and well after the call.
  regs.Insert(_matcher.c_frame_pointer());

  // Set all registers killed and not already defined by the call.
  uint r_cnt = mcall->tf()->range()->cnt();
  int op = mcall->ideal_Opcode();
  MachProjNode *proj = new (C) MachProjNode( mcall, r_cnt+1, RegMask::Empty, MachProjNode::fat_proj );
  map_node_to_block(proj, block);
  block->insert_node(proj, node_cnt++);

  // Select the right register save policy.
  const char *save_policy = NULL;
  switch (op) {
    case Op_CallRuntime:
    case Op_CallLeaf:
    case Op_CallLeafNoFP:
      // Calling C code so use C calling convention
      save_policy = _matcher._c_reg_save_policy;
      break;

    case Op_CallStaticJava:
    case Op_CallDynamicJava:
      // Calling Java code so use Java calling convention
      save_policy = _matcher._register_save_policy;
      break;

    default:
      ShouldNotReachHere();
  }

  // When using CallRuntime mark SOE registers as killed by the call
  // so values that could show up in the RegisterMap aren't live in a
  // callee saved register since the register wouldn't know where to
  // find them.  CallLeaf and CallLeafNoFP are ok because they can't
  // have debug info on them.  Strictly speaking this only needs to be
  // done for oops since idealreg2debugmask takes care of debug info
  // references but there is no way to handle oops differently than other
  // pointers as far as the kill mask goes.
  bool exclude_soe = op == Op_CallRuntime;

  // If the call is a MethodHandle invoke, we need to exclude the
  // register which is used to save the SP value over MH invokes from
  // the mask.  Otherwise this register could be used for
  // deoptimization information.
  if (op == Op_CallStaticJava) {
    MachCallStaticJavaNode* mcallstaticjava = (MachCallStaticJavaNode*) mcall;
    if (mcallstaticjava->_method_handle_invoke)
      proj->_rout.OR(Matcher::method_handle_invoke_SP_save_mask());
  }

  add_call_kills(proj, regs, save_policy, exclude_soe);

  return node_cnt;
}


//------------------------------schedule_local---------------------------------
// Topological sort within a block.  Someday become a real scheduler.
bool PhaseCFG::schedule_local(Block* block, GrowableArray<int>& ready_cnt, VectorSet& next_call) {
  // Already "sorted" are the block start Node (as the first entry), and
  // the block-ending Node and any trailing control projections.  We leave
  // these alone.  PhiNodes and ParmNodes are made to follow the block start
  // Node.  Everything else gets topo-sorted.

#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print_cr("# --- schedule_local B%d, before: ---", block->_pre_order);
      for (uint i = 0; i < block->number_of_nodes(); i++) {
        tty->print("# ");
        block->get_node(i)->fast_dump();
      }
      tty->print_cr("#");
    }
#endif

  // RootNode is already sorted
  if (block->number_of_nodes() == 1) {
    return true;
  }

  // Move PhiNodes and ParmNodes from 1 to cnt up to the start
  uint node_cnt = block->end_idx();
  uint phi_cnt = 1;
  uint i;
  for( i = 1; i<node_cnt; i++ ) { // Scan for Phi
    Node *n = block->get_node(i);
    if( n->is_Phi() ||          // Found a PhiNode or ParmNode
        (n->is_Proj()  && n->in(0) == block->head()) ) {
      // Move guy at 'phi_cnt' to the end; makes a hole at phi_cnt
      block->map_node(block->get_node(phi_cnt), i);
      block->map_node(n, phi_cnt++);  // swap Phi/Parm up front
    } else {                    // All others
      // Count block-local inputs to 'n'
      uint cnt = n->len();      // Input count
      uint local = 0;
      for( uint j=0; j<cnt; j++ ) {
        Node *m = n->in(j);
        if( m && get_block_for_node(m) == block && !m->is_top() )
          local++;              // One more block-local input
      }
      ready_cnt.at_put(n->_idx, local); // Count em up

#ifdef ASSERT
      if( UseConcMarkSweepGC || UseG1GC ) {
        if( n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_StoreCM ) {
          // Check the precedence edges
          for (uint prec = n->req(); prec < n->len(); prec++) {
            Node* oop_store = n->in(prec);
            if (oop_store != NULL) {
              assert(get_block_for_node(oop_store)->_dom_depth <= block->_dom_depth, "oop_store must dominate card-mark");
            }
          }
        }
      }
#endif

      // A few node types require changing a required edge to a precedence edge
      // before allocation.
      if( n->is_Mach() && n->req() > TypeFunc::Parms &&
          (n->as_Mach()->ideal_Opcode() == Op_MemBarAcquire ||
           n->as_Mach()->ideal_Opcode() == Op_MemBarVolatile) ) {
        // MemBarAcquire could be created without Precedent edge.
        // del_req() replaces the specified edge with the last input edge
        // and then removes the last edge. If the specified edge > number of
        // edges the last edge will be moved outside of the input edges array
        // and the edge will be lost. This is why this code should be
        // executed only when Precedent (== TypeFunc::Parms) edge is present.
        Node *x = n->in(TypeFunc::Parms);
        n->del_req(TypeFunc::Parms);
        n->add_prec(x);
      }
    }
  }
  for(uint i2=i; i2< block->number_of_nodes(); i2++ ) // Trailing guys get zapped count
    ready_cnt.at_put(block->get_node(i2)->_idx, 0);

  // All the prescheduled guys do not hold back internal nodes
  uint i3;
  for(i3 = 0; i3<phi_cnt; i3++ ) {  // For all pre-scheduled
    Node *n = block->get_node(i3);       // Get pre-scheduled
    for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
      Node* m = n->fast_out(j);
      if (get_block_for_node(m) == block) { // Local-block user
        int m_cnt = ready_cnt.at(m->_idx)-1;
        ready_cnt.at_put(m->_idx, m_cnt);   // Fix ready count
      }
    }
  }

  Node_List delay;
  // Make a worklist
  Node_List worklist;
  for(uint i4=i3; i4<node_cnt; i4++ ) {    // Put ready guys on worklist
    Node *m = block->get_node(i4);
    if( !ready_cnt.at(m->_idx) ) {   // Zero ready count?
      if (m->is_iteratively_computed()) {
        // Push induction variable increments last to allow other uses
        // of the phi to be scheduled first. The select() method breaks
        // ties in scheduling by worklist order.
        delay.push(m);
      } else if (m->is_Mach() && m->as_Mach()->ideal_Opcode() == Op_CreateEx) {
        // Force the CreateEx to the top of the list so it's processed
        // first and ends up at the start of the block.
        worklist.insert(0, m);
      } else {
        worklist.push(m);         // Then on to worklist!
      }
    }
  }
  while (delay.size()) {
    Node* d = delay.pop();
    worklist.push(d);
  }

  // Warm up the 'next_call' heuristic bits
  needed_for_next_call(block, block->head(), next_call);

#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      for (uint j=0; j< block->number_of_nodes(); j++) {
        Node     *n = block->get_node(j);
        int     idx = n->_idx;
        tty->print("#   ready cnt:%3d  ", ready_cnt.at(idx));
        tty->print("latency:%3d  ", get_latency_for_node(n));
        tty->print("%4d: %s\n", idx, n->Name());
      }
    }
#endif

  uint max_idx = (uint)ready_cnt.length();
  // Pull from worklist and schedule
  while( worklist.size() ) {    // Worklist is not ready

#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print("#   ready list:");
      for( uint i=0; i<worklist.size(); i++ ) { // Inspect entire worklist
        Node *n = worklist[i];      // Get Node on worklist
        tty->print(" %d", n->_idx);
      }
      tty->cr();
    }
#endif

    // Select and pop a ready guy from worklist
    Node* n = select(block, worklist, ready_cnt, next_call, phi_cnt);
    block->map_node(n, phi_cnt++);    // Schedule him next

#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print("#    select %d: %s", n->_idx, n->Name());
      tty->print(", latency:%d", get_latency_for_node(n));
      n->dump();
      if (Verbose) {
        tty->print("#   ready list:");
        for( uint i=0; i<worklist.size(); i++ ) { // Inspect entire worklist
          Node *n = worklist[i];      // Get Node on worklist
          tty->print(" %d", n->_idx);
        }
        tty->cr();
      }
    }

#endif
    if( n->is_MachCall() ) {
      MachCallNode *mcall = n->as_MachCall();
      phi_cnt = sched_call(block, phi_cnt, worklist, ready_cnt, mcall, next_call);
      continue;
    }

    if (n->is_Mach() && n->as_Mach()->has_call()) {
      RegMask regs;
      regs.Insert(_matcher.c_frame_pointer());
      regs.OR(n->out_RegMask());

      MachProjNode *proj = new (C) MachProjNode( n, 1, RegMask::Empty, MachProjNode::fat_proj );
      map_node_to_block(proj, block);
      block->insert_node(proj, phi_cnt++);

      add_call_kills(proj, regs, _matcher._c_reg_save_policy, false);
    }

    // Children are now all ready
    for (DUIterator_Fast i5max, i5 = n->fast_outs(i5max); i5 < i5max; i5++) {
      Node* m = n->fast_out(i5); // Get user
      if (get_block_for_node(m) != block) {
        continue;
      }
      if( m->is_Phi() ) continue;
      if (m->_idx >= max_idx) { // new node, skip it
        assert(m->is_MachProj() && n->is_Mach() && n->as_Mach()->has_call(), "unexpected node types");
        continue;
      }
      int m_cnt = ready_cnt.at(m->_idx)-1;
      ready_cnt.at_put(m->_idx, m_cnt);
      if( m_cnt == 0 )
        worklist.push(m);
    }
  }

  if( phi_cnt != block->end_idx() ) {
    // did not schedule all.  Retry, Bailout, or Die
    if (C->subsume_loads() == true && !C->failing()) {
      // Retry with subsume_loads == false
      // If this is the first failure, the sentinel string will "stick"
      // to the Compile object, and the C2Compiler will see it and retry.
      C->record_failure(C2Compiler::retry_no_subsuming_loads());
    }
    // assert( phi_cnt == end_idx(), "did not schedule all" );
    return false;
  }

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print_cr("#");
    tty->print_cr("# after schedule_local");
    for (uint i = 0; i < block->number_of_nodes(); i++) {
      tty->print("# ");
      block->get_node(i)->fast_dump();
    }
    tty->cr();
  }
#endif


  return true;
}

//--------------------------catch_cleanup_fix_all_inputs-----------------------
static void catch_cleanup_fix_all_inputs(Node *use, Node *old_def, Node *new_def) {
  for (uint l = 0; l < use->len(); l++) {
    if (use->in(l) == old_def) {
      if (l < use->req()) {
        use->set_req(l, new_def);
      } else {
        use->rm_prec(l);
        use->add_prec(new_def);
        l--;
      }
    }
  }
}

//------------------------------catch_cleanup_find_cloned_def------------------
Node* PhaseCFG::catch_cleanup_find_cloned_def(Block *use_blk, Node *def, Block *def_blk, int n_clone_idx) {
  assert( use_blk != def_blk, "Inter-block cleanup only");

  // The use is some block below the Catch.  Find and return the clone of the def
  // that dominates the use. If there is no clone in a dominating block, then
  // create a phi for the def in a dominating block.

  // Find which successor block dominates this use.  The successor
  // blocks must all be single-entry (from the Catch only; I will have
  // split blocks to make this so), hence they all dominate.
  while( use_blk->_dom_depth > def_blk->_dom_depth+1 )
    use_blk = use_blk->_idom;

  // Find the successor
  Node *fixup = NULL;

  uint j;
  for( j = 0; j < def_blk->_num_succs; j++ )
    if( use_blk == def_blk->_succs[j] )
      break;

  if( j == def_blk->_num_succs ) {
    // Block at same level in dom-tree is not a successor.  It needs a
    // PhiNode, the PhiNode uses from the def and IT's uses need fixup.
    Node_Array inputs = new Node_List(Thread::current()->resource_area());
    for(uint k = 1; k < use_blk->num_preds(); k++) {
      Block* block = get_block_for_node(use_blk->pred(k));
      inputs.map(k, catch_cleanup_find_cloned_def(block, def, def_blk, n_clone_idx));
    }

    // Check to see if the use_blk already has an identical phi inserted.
    // If it exists, it will be at the first position since all uses of a
    // def are processed together.
    Node *phi = use_blk->get_node(1);
    if( phi->is_Phi() ) {
      fixup = phi;
      for (uint k = 1; k < use_blk->num_preds(); k++) {
        if (phi->in(k) != inputs[k]) {
          // Not a match
          fixup = NULL;
          break;
        }
      }
    }

    // If an existing PhiNode was not found, make a new one.
    if (fixup == NULL) {
      Node *new_phi = PhiNode::make(use_blk->head(), def);
      use_blk->insert_node(new_phi, 1);
      map_node_to_block(new_phi, use_blk);
      for (uint k = 1; k < use_blk->num_preds(); k++) {
        new_phi->set_req(k, inputs[k]);
      }
      fixup = new_phi;
    }

  } else {
    // Found the use just below the Catch.  Make it use the clone.
    fixup = use_blk->get_node(n_clone_idx);
  }

  return fixup;
}

//--------------------------catch_cleanup_intra_block--------------------------
// Fix all input edges in use that reference "def".  The use is in the same
// block as the def and both have been cloned in each successor block.
static void catch_cleanup_intra_block(Node *use, Node *def, Block *blk, int beg, int n_clone_idx) {

  // Both the use and def have been cloned. For each successor block,
  // get the clone of the use, and make its input the clone of the def
  // found in that block.

  uint use_idx = blk->find_node(use);
  uint offset_idx = use_idx - beg;
  for( uint k = 0; k < blk->_num_succs; k++ ) {
    // Get clone in each successor block
    Block *sb = blk->_succs[k];
    Node *clone = sb->get_node(offset_idx+1);
    assert( clone->Opcode() == use->Opcode(), "" );

    // Make use-clone reference the def-clone
    catch_cleanup_fix_all_inputs(clone, def, sb->get_node(n_clone_idx));
  }
}

//------------------------------catch_cleanup_inter_block---------------------
// Fix all input edges in use that reference "def".  The use is in a different
// block than the def.
void PhaseCFG::catch_cleanup_inter_block(Node *use, Block *use_blk, Node *def, Block *def_blk, int n_clone_idx) {
  if( !use_blk ) return;        // Can happen if the use is a precedence edge

  Node *new_def = catch_cleanup_find_cloned_def(use_blk, def, def_blk, n_clone_idx);
  catch_cleanup_fix_all_inputs(use, def, new_def);
}

//------------------------------call_catch_cleanup-----------------------------
// If we inserted any instructions between a Call and its CatchNode,
// clone the instructions on all paths below the Catch.
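//
// Shape, roughly (a sketch): the block ends
//   ...  MachCall  kill-projs  <inserted instructions>  Catch
// Each instruction in [beg,end) is cloned into every successor (catch
// path), uses below the Catch are rewired to the clones (with Phis where
// paths merge), and the originals are then removed.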
void PhaseCFG::call_catch_cleanup(Block* block) {

  // End of region to clone
  uint end = block->end_idx();
  if( !block->get_node(end)->is_Catch() ) return;
  // Start of region to clone
  uint beg = end;
  while(!block->get_node(beg-1)->is_MachProj() ||
        !block->get_node(beg-1)->in(0)->is_MachCall() ) {
    beg--;
    assert(beg > 0,"Catch cleanup walking beyond block boundary");
  }
  // Range of inserted instructions is [beg, end)
  if( beg == end ) return;

  // Clone along all Catch output paths.  Clone area between the 'beg' and
  // 'end' indices.
  for( uint i = 0; i < block->_num_succs; i++ ) {
    Block *sb = block->_succs[i];
    // Clone the entire area; ignoring the edge fixup for now.
    for( uint j = end; j > beg; j-- ) {
      Node *clone = block->get_node(j-1)->clone();
      sb->insert_node(clone, 1);
      map_node_to_block(clone, sb);
      if (clone->needs_anti_dependence_check()) {
        insert_anti_dependences(sb, clone);
      }
    }
  }


  // Fixup edges.  Check the def-use info per cloned Node
  for(uint i2 = beg; i2 < end; i2++ ) {
    uint n_clone_idx = i2-beg+1; // Index of clone of n in each successor block
    Node *n = block->get_node(i2);        // Node that got cloned
    // Need DU safe iterator because of edge manipulation in calls.
    Unique_Node_List *out = new Unique_Node_List(Thread::current()->resource_area());
    for (DUIterator_Fast j1max, j1 = n->fast_outs(j1max); j1 < j1max; j1++) {
      out->push(n->fast_out(j1));
    }
    uint max = out->size();
    for (uint j = 0; j < max; j++) {// For all users
      Node *use = out->pop();
      Block *buse = get_block_for_node(use);
      if( use->is_Phi() ) {
        for( uint k = 1; k < use->req(); k++ )
          if( use->in(k) == n ) {
            Block* b = get_block_for_node(buse->pred(k));
            Node *fixup = catch_cleanup_find_cloned_def(b, n, block, n_clone_idx);
            use->set_req(k, fixup);
          }
      } else {
        if (block == buse) {
          catch_cleanup_intra_block(use, n, block, beg, n_clone_idx);
        } else {
          catch_cleanup_inter_block(use, buse, n, block, n_clone_idx);
        }
      }
    } // End for all users

  } // End of for all Nodes in cloned area

  // Remove the now-dead cloned ops
  for(uint i3 = beg; i3 < end; i3++ ) {
    block->get_node(beg)->disconnect_inputs(NULL, C);
    block->remove_node(beg);
  }

  // If the successor blocks have a CreateEx node, move it back to the top
  for(uint i4 = 0; i4 < block->_num_succs; i4++ ) {
    Block *sb = block->_succs[i4];
    uint new_cnt = end - beg;
    // Remove any newly created, but dead, nodes.
    for( uint j = new_cnt; j > 0; j-- ) {
      Node *n = sb->get_node(j);
      if (n->outcnt() == 0 &&
          (!n->is_Proj() || n->as_Proj()->in(0)->outcnt() == 1) ){
        n->disconnect_inputs(NULL, C);
        sb->remove_node(j);
        new_cnt--;
      }
    }
    // If any newly created nodes remain, move the CreateEx node to the top
    if (new_cnt > 0) {
      Node *cex = sb->get_node(1+new_cnt);
      if( cex->is_Mach() && cex->as_Mach()->ideal_Opcode() == Op_CreateEx ) {
        sb->remove_node(1+new_cnt);
        sb->insert_node(cex, 1);
      }
    }
  }
}