/*
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "ci/ciReplay.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/exceptionHandlerTable.hpp"
#include "code/nmethod.hpp"
#include "compiler/compileLog.hpp"
#include "compiler/disassembler.hpp"
#include "compiler/oopMap.hpp"
#include "opto/addnode.hpp"
#include "opto/block.hpp"
#include "opto/c2compiler.hpp"
#include "opto/callGenerator.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/chaitin.hpp"
#include "opto/compile.hpp"
#include "opto/connode.hpp"
#include "opto/divnode.hpp"
#include "opto/escape.hpp"
#include "opto/idealGraphPrinter.hpp"
#include "opto/loopnode.hpp"
#include "opto/machnode.hpp"
#include "opto/macro.hpp"
#include "opto/matcher.hpp"
#include "opto/mathexactnode.hpp"
#include "opto/memnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/node.hpp"
#include "opto/opcodes.hpp"
#include "opto/output.hpp"
#include "opto/parse.hpp"
#include "opto/phaseX.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/stringopts.hpp"
#include "opto/type.hpp"
#include "opto/vectornode.hpp"
#include "runtime/arguments.hpp"
#include "runtime/signature.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/timer.hpp"
#include "trace/tracing.hpp"
#include "utilities/copy.hpp"
#if defined AD_MD_HPP
# include AD_MD_HPP
#elif defined TARGET_ARCH_MODEL_x86_32
# include "adfiles/ad_x86_32.hpp"
#elif defined TARGET_ARCH_MODEL_x86_64
# include "adfiles/ad_x86_64.hpp"
#elif defined TARGET_ARCH_MODEL_sparc
# include "adfiles/ad_sparc.hpp"
#elif defined TARGET_ARCH_MODEL_zero
# include "adfiles/ad_zero.hpp"
#elif defined TARGET_ARCH_MODEL_ppc_64
# include "adfiles/ad_ppc_64.hpp"
#endif


// -------------------- Compile::mach_constant_base_node -----------------------
// Constant table base node singleton.
MachConstantBaseNode* Compile::mach_constant_base_node() {
  if (_mach_constant_base_node == NULL) {
    _mach_constant_base_node = new (C) MachConstantBaseNode();
    _mach_constant_base_node->add_req(C->root());
  }
  return _mach_constant_base_node;
}


/// Support for intrinsics.

// Return the index at which m must be inserted (or already exists).
// The sort order is by the address of the ciMethod, with is_virtual as minor key.
int Compile::intrinsic_insertion_index(ciMethod* m, bool is_virtual) {
#ifdef ASSERT
  for (int i = 1; i < _intrinsics->length(); i++) {
    CallGenerator* cg1 = _intrinsics->at(i-1);
    CallGenerator* cg2 = _intrinsics->at(i);
    assert(cg1->method() != cg2->method()
           ? cg1->method()     < cg2->method()
           : cg1->is_virtual() < cg2->is_virtual(),
           "compiler intrinsics list must stay sorted");
  }
#endif
  // Binary search sorted list, in decreasing intervals [lo, hi].
  int lo = 0, hi = _intrinsics->length()-1;
  while (lo <= hi) {
    int mid = (uint)(hi + lo) / 2;
    ciMethod* mid_m = _intrinsics->at(mid)->method();
    if (m < mid_m) {
      hi = mid-1;
    } else if (m > mid_m) {
      lo = mid+1;
    } else {
      // look at minor sort key
      bool mid_virt = _intrinsics->at(mid)->is_virtual();
      if (is_virtual < mid_virt) {
        hi = mid-1;
      } else if (is_virtual > mid_virt) {
        lo = mid+1;
      } else {
        return mid;  // exact match
      }
    }
  }
  return lo;  // inexact match
}

void Compile::register_intrinsic(CallGenerator* cg) {
  if (_intrinsics == NULL) {
    _intrinsics = new (comp_arena())GrowableArray<CallGenerator*>(comp_arena(), 60, 0, NULL);
  }
  // This code is stolen from ciObjectFactory::insert.
  // Really, GrowableArray should have methods for
  // insert_at, remove_at, and binary_search.
  int len = _intrinsics->length();
  int index = intrinsic_insertion_index(cg->method(), cg->is_virtual());
  if (index == len) {
    _intrinsics->append(cg);
  } else {
#ifdef ASSERT
    CallGenerator* oldcg = _intrinsics->at(index);
    assert(oldcg->method() != cg->method() || oldcg->is_virtual() != cg->is_virtual(), "don't register twice");
#endif
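    // Shift the tail of the array up by one slot to make room at 'index'
    // (same insertion scheme as ciObjectFactory::insert).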
    _intrinsics->append(_intrinsics->at(len-1));
    int pos;
    for (pos = len-2; pos >= index; pos--) {
      _intrinsics->at_put(pos+1,_intrinsics->at(pos));
    }
    _intrinsics->at_put(index, cg);
  }
  assert(find_intrinsic(cg->method(), cg->is_virtual()) == cg, "registration worked");
}

CallGenerator* Compile::find_intrinsic(ciMethod* m, bool is_virtual) {
  assert(m->is_loaded(), "don't try this on unloaded methods");
  if (_intrinsics != NULL) {
    int index = intrinsic_insertion_index(m, is_virtual);
    if (index < _intrinsics->length()
        && _intrinsics->at(index)->method() == m
        && _intrinsics->at(index)->is_virtual() == is_virtual) {
      return _intrinsics->at(index);
    }
  }
  // Lazily create intrinsics for intrinsic IDs well-known in the runtime.
  if (m->intrinsic_id() != vmIntrinsics::_none &&
      m->intrinsic_id() <= vmIntrinsics::LAST_COMPILER_INLINE) {
    CallGenerator* cg = make_vm_intrinsic(m, is_virtual);
    if (cg != NULL) {
      // Save it for next time:
      register_intrinsic(cg);
      return cg;
    } else {
      gather_intrinsic_statistics(m->intrinsic_id(), is_virtual, _intrinsic_disabled);
    }
  }
  return NULL;
}

// Compile:: register_library_intrinsics and make_vm_intrinsic are defined
// in library_call.cpp.


#ifndef PRODUCT
// statistics gathering...

juint  Compile::_intrinsic_hist_count[vmIntrinsics::ID_LIMIT] = {0};
jubyte Compile::_intrinsic_hist_flags[vmIntrinsics::ID_LIMIT] = {0};

bool Compile::gather_intrinsic_statistics(vmIntrinsics::ID id, bool is_virtual, int flags) {
  assert(id > vmIntrinsics::_none && id < vmIntrinsics::ID_LIMIT, "oob");
  int oflags = _intrinsic_hist_flags[id];
  assert(flags != 0, "what happened?");
  if (is_virtual) {
    flags |= _intrinsic_virtual;
  }
  bool changed = (flags != oflags);
  if ((flags & _intrinsic_worked) != 0) {
    juint count = (_intrinsic_hist_count[id] += 1);
    if (count == 1) {
      changed = true;           // first time
    }
    // increment the overall count also:
    _intrinsic_hist_count[vmIntrinsics::_none] += 1;
  }
  if (changed) {
    if (((oflags ^ flags) & _intrinsic_virtual) != 0) {
      // Something changed about the intrinsic's virtuality.
      if ((flags & _intrinsic_virtual) != 0) {
        // This is the first use of this intrinsic as a virtual call.
        if (oflags != 0) {
          // We already saw it as a non-virtual, so note both cases.
          flags |= _intrinsic_both;
        }
      } else if ((oflags & _intrinsic_both) == 0) {
        // This is the first use of this intrinsic as a non-virtual
        flags |= _intrinsic_both;
      }
    }
    _intrinsic_hist_flags[id] = (jubyte) (oflags | flags);
  }
  // update the overall flags also:
  _intrinsic_hist_flags[vmIntrinsics::_none] |= (jubyte) flags;
  return changed;
}

static char* format_flags(int flags, char* buf) {
  buf[0] = 0;
  if ((flags & Compile::_intrinsic_worked) != 0)    strcat(buf, ",worked");
  if ((flags & Compile::_intrinsic_failed) != 0)    strcat(buf, ",failed");
  if ((flags & Compile::_intrinsic_disabled) != 0)  strcat(buf, ",disabled");
  if ((flags & Compile::_intrinsic_virtual) != 0)   strcat(buf, ",virtual");
  if ((flags & Compile::_intrinsic_both) != 0)      strcat(buf, ",nonvirtual");
  if (buf[0] == 0)  strcat(buf, ",");
  assert(buf[0] == ',', "must be");
  return &buf[1];
}

void Compile::print_intrinsic_statistics() {
  char flagsbuf[100];
  ttyLocker ttyl;
  if (xtty != NULL)  xtty->head("statistics type='intrinsic'");
  tty->print_cr("Compiler intrinsic usage:");
  juint total = _intrinsic_hist_count[vmIntrinsics::_none];
  if (total == 0)  total = 1;  // avoid div0 in case of no successes
  #define PRINT_STAT_LINE(name, c, f) \
    tty->print_cr("  %4d (%4.1f%%) %s (%s)", (int)(c), ((c) * 100.0) / total, name, f);
  for (int index = 1 + (int)vmIntrinsics::_none; index < (int)vmIntrinsics::ID_LIMIT; index++) {
    vmIntrinsics::ID id = (vmIntrinsics::ID) index;
    int   flags = _intrinsic_hist_flags[id];
    juint count = _intrinsic_hist_count[id];
    if ((flags | count) != 0) {
      PRINT_STAT_LINE(vmIntrinsics::name_at(id), count, format_flags(flags, flagsbuf));
    }
  }
  PRINT_STAT_LINE("total", total, format_flags(_intrinsic_hist_flags[vmIntrinsics::_none], flagsbuf));
  if (xtty != NULL)  xtty->tail("statistics");
}

void Compile::print_statistics() {
  { ttyLocker ttyl;
    if (xtty != NULL)  xtty->head("statistics type='opto'");
    Parse::print_statistics();
    PhaseCCP::print_statistics();
    PhaseRegAlloc::print_statistics();
    Scheduling::print_statistics();
    PhasePeephole::print_statistics();
    PhaseIdealLoop::print_statistics();
    if (xtty != NULL)  xtty->tail("statistics");
  }
  if (_intrinsic_hist_flags[vmIntrinsics::_none] != 0) {
    // put this under its own <statistics> element.
    print_intrinsic_statistics();
  }
}
#endif //PRODUCT

// Support for bundling info
Bundle* Compile::node_bundling(const Node *n) {
  assert(valid_bundle_info(n), "oob");
  return &_node_bundling_base[n->_idx];
}

bool Compile::valid_bundle_info(const Node *n) {
  return (_node_bundling_limit > n->_idx);
}


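// Replace all uses of node n by nn. Each user is removed from the GVN hash
// table before its input edges are rewritten, re-inserted afterwards, and
// recorded for IGVN so the change is revisited.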
void Compile::gvn_replace_by(Node* n, Node* nn) {
  for (DUIterator_Last imin, i = n->last_outs(imin); i >= imin; ) {
    Node* use = n->last_out(i);
    bool is_in_table = initial_gvn()->hash_delete(use);
    uint uses_found = 0;
    for (uint j = 0; j < use->len(); j++) {
      if (use->in(j) == n) {
        if (j < use->req())
          use->set_req(j, nn);
        else
          use->set_prec(j, nn);
        uses_found++;
      }
    }
    if (is_in_table) {
      // reinsert into table
      initial_gvn()->hash_find_insert(use);
    }
    record_for_igvn(use);
    i -= uses_found;    // we deleted 1 or more copies of this edge
  }
}


static inline bool not_a_node(const Node* n) {
  if (n == NULL)                   return true;
  if (((intptr_t)n & 1) != 0)      return true;  // uninitialized, etc.
  if (*(address*)n == badAddress)  return true;  // kill by Node::destruct
  return false;
}

// Identify all nodes that are reachable from below, useful.
// Use breadth-first pass that records state in a Unique_Node_List,
// recursive traversal is slower.
void Compile::identify_useful_nodes(Unique_Node_List &useful) {
  int estimated_worklist_size = live_nodes();
  useful.map( estimated_worklist_size, NULL );  // preallocate space

  // Initialize worklist
  if (root() != NULL)     { useful.push(root()); }
  // If 'top' is cached, declare it useful to preserve cached node
  if( cached_top_node() ) { useful.push(cached_top_node()); }

  // Push all useful nodes onto the list, breadth-first
  for( uint next = 0; next < useful.size(); ++next ) {
    assert( next < unique(), "Unique useful nodes < total nodes");
    Node *n  = useful.at(next);
    uint max = n->len();
    for( uint i = 0; i < max; ++i ) {
      Node *m = n->in(i);
      if (not_a_node(m))  continue;
      useful.push(m);
    }
  }
}

// Update dead_node_list with any missing dead nodes using useful
// list. Consider all non-useful nodes to be useless i.e., dead nodes.
void Compile::update_dead_node_list(Unique_Node_List &useful) {
  uint max_idx = unique();
  VectorSet& useful_node_set = useful.member_set();

  for (uint node_idx = 0; node_idx < max_idx; node_idx++) {
    // If node with index node_idx is not in useful set,
    // mark it as dead in dead node list.
    if (! useful_node_set.test(node_idx) ) {
      record_dead_node(node_idx);
    }
  }
}

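// Compact a late-inline list in place, dropping entries whose call nodes are
// no longer in the useful set.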
void Compile::remove_useless_late_inlines(GrowableArray<CallGenerator*>* inlines, Unique_Node_List &useful) {
  int shift = 0;
  for (int i = 0; i < inlines->length(); i++) {
    CallGenerator* cg = inlines->at(i);
    CallNode* call = cg->call_node();
    if (shift > 0) {
      inlines->at_put(i-shift, cg);
    }
    if (!useful.member(call)) {
      shift++;
    }
  }
  inlines->trunc_to(inlines->length()-shift);
}

// Disconnect all useless nodes by disconnecting those at the boundary.
void Compile::remove_useless_nodes(Unique_Node_List &useful) {
  uint next = 0;
  while (next < useful.size()) {
    Node *n = useful.at(next++);
    if (n->is_SafePoint()) {
      // We're done with a parsing phase. Replaced nodes are not valid
      // beyond that point.
      n->as_SafePoint()->delete_replaced_nodes();
    }
    // Use raw traversal of out edges since this code removes out edges
    int max = n->outcnt();
    for (int j = 0; j < max; ++j) {
      Node* child = n->raw_out(j);
      if (! useful.member(child)) {
        assert(!child->is_top() || child != top(),
               "If top is cached in Compile object it is in useful list");
        // Only need to remove this out-edge to the useless node
        n->raw_del_out(j);
        --j;
        --max;
      }
    }
    if (n->outcnt() == 1 && n->has_special_unique_user()) {
      record_for_igvn(n->unique_out());
    }
  }
  // Remove useless macro and predicate opaq nodes
  for (int i = C->macro_count()-1; i >= 0; i--) {
    Node* n = C->macro_node(i);
    if (!useful.member(n)) {
      remove_macro_node(n);
D
duke 已提交
413 414
    }
  }
415 416 417 418 419 420 421
  // Remove useless expensive node
  for (int i = C->expensive_count()-1; i >= 0; i--) {
    Node* n = C->expensive_node(i);
    if (!useful.member(n)) {
      remove_expensive_node(n);
    }
  }
  // clean up the late inline lists
  remove_useless_late_inlines(&_string_late_inlines, useful);
  remove_useless_late_inlines(&_boxing_late_inlines, useful);
  remove_useless_late_inlines(&_late_inlines, useful);
  debug_only(verify_graph_edges(true/*check for no_dead_code*/);)
}

//------------------------------frame_size_in_words-----------------------------
// frame_slots in units of words
int Compile::frame_size_in_words() const {
  // shift is 0 in LP32 and 1 in LP64
  const int shift = (LogBytesPerWord - LogBytesPerInt);
  int words = _frame_slots >> shift;
  assert( words << shift == _frame_slots, "frame size must be properly aligned in LP64" );
  return words;
}

// To bang the stack of this compiled method we use the stack size
// that the interpreter would need in case of a deoptimization. This
// removes the need to bang the stack in the deoptimization blob which
// in turn simplifies stack overflow handling.
int Compile::bang_size_in_bytes() const {
  return MAX2(_interpreter_frame_size, frame_size_in_bytes());
}

// ============================================================================
//------------------------------CompileWrapper---------------------------------
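// RAII helper: installs this Compile as the active compilation in the current
// ciEnv for the duration of the compile, and on destruction frees the scratch
// buffer blob and clears the ciEnv's compiler data.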
class CompileWrapper : public StackObj {
  Compile *const _compile;
 public:
  CompileWrapper(Compile* compile);

  ~CompileWrapper();
};

CompileWrapper::CompileWrapper(Compile* compile) : _compile(compile) {
  // the Compile* pointer is stored in the current ciEnv:
  ciEnv* env = compile->env();
  assert(env == ciEnv::current(), "must already be a ciEnv active");
  assert(env->compiler_data() == NULL, "compile already active?");
  env->set_compiler_data(compile);
  assert(compile == Compile::current(), "sanity");

  compile->set_type_dict(NULL);
  compile->set_type_hwm(NULL);
  compile->set_type_last_size(0);
  compile->set_last_tf(NULL, NULL);
  compile->set_indexSet_arena(NULL);
  compile->set_indexSet_free_block_list(NULL);
  compile->init_type_arena();
  Type::Initialize(compile);
  _compile->set_scratch_buffer_blob(NULL);
  _compile->begin_method();
}
CompileWrapper::~CompileWrapper() {
  _compile->end_method();
  if (_compile->scratch_buffer_blob() != NULL)
    BufferBlob::free(_compile->scratch_buffer_blob());
  _compile->env()->set_compiler_data(NULL);
}


//----------------------------print_compile_messages---------------------------
void Compile::print_compile_messages() {
#ifndef PRODUCT
  // Check if recompiling
  if (_subsume_loads == false && PrintOpto) {
    // Recompiling without allowing machine instructions to subsume loads
    tty->print_cr("*********************************************************");
    tty->print_cr("** Bailout: Recompile without subsuming loads          **");
    tty->print_cr("*********************************************************");
  }
  if (_do_escape_analysis != DoEscapeAnalysis && PrintOpto) {
    // Recompiling without escape analysis
    tty->print_cr("*********************************************************");
    tty->print_cr("** Bailout: Recompile without escape analysis          **");
    tty->print_cr("*********************************************************");
  }
  if (_eliminate_boxing != EliminateAutoBox && PrintOpto) {
    // Recompiling without boxing elimination
    tty->print_cr("*********************************************************");
    tty->print_cr("** Bailout: Recompile without boxing elimination       **");
    tty->print_cr("*********************************************************");
  }
  if (env()->break_at_compile()) {
    // Open the debugger when compiling this method.
    tty->print("### Breaking when compiling: ");
    method()->print_short_name();
    tty->cr();
    BREAKPOINT;
  }

  if( PrintOpto ) {
    if (is_osr_compilation()) {
      tty->print("[OSR]%3d", _compile_id);
    } else {
      tty->print("%3d", _compile_id);
    }
  }
#endif
}


//-----------------------init_scratch_buffer_blob------------------------------
// Construct a temporary BufferBlob and cache it for this compile.
void Compile::init_scratch_buffer_blob(int const_size) {
  // If there is already a scratch buffer blob allocated and the
  // constant section is big enough, use it.  Otherwise free the
  // current and allocate a new one.
  BufferBlob* blob = scratch_buffer_blob();
  if ((blob != NULL) && (const_size <= _scratch_const_size)) {
    // Use the current blob.
  } else {
    if (blob != NULL) {
      BufferBlob::free(blob);
    }

    ResourceMark rm;
    _scratch_const_size = const_size;
    int size = (MAX_inst_size + MAX_stubs_size + _scratch_const_size);
    blob = BufferBlob::create("Compile::scratch_buffer", size);
    // Record the buffer blob for next time.
    set_scratch_buffer_blob(blob);
    // Have we run out of code space?
    if (scratch_buffer_blob() == NULL) {
      // Let CompileBroker disable further compilations.
      record_failure("Not enough space for scratch buffer in CodeCache");
      return;
    }
  }

  // Initialize the relocation buffers
  relocInfo* locs_buf = (relocInfo*) blob->content_end() - MAX_locs_size;
  set_scratch_locs_memory(locs_buf);
}


//-----------------------scratch_emit_size-------------------------------------
// Helper function that computes size by emitting code
uint Compile::scratch_emit_size(const Node* n) {
  // Start scratch_emit_size section.
  set_in_scratch_emit_size(true);

  // Emit into a trash buffer and count bytes emitted.
  // This is a pretty expensive way to compute a size,
  // but it works well enough if seldom used.
  // All common fixed-size instructions are given a size
  // method by the AD file.
  // Note that the scratch buffer blob and locs memory are
  // allocated at the beginning of the compile task, and
  // may be shared by several calls to scratch_emit_size.
  // The allocation of the scratch buffer blob is particularly
  // expensive, since it has to grab the code cache lock.
  BufferBlob* blob = this->scratch_buffer_blob();
  assert(blob != NULL, "Initialize BufferBlob at start");
  assert(blob->size() > MAX_inst_size, "sanity");
  relocInfo* locs_buf = scratch_locs_memory();
  address blob_begin = blob->content_begin();
  address blob_end   = (address)locs_buf;
  assert(blob->content_contains(blob_end), "sanity");
  CodeBuffer buf(blob_begin, blob_end - blob_begin);
  buf.initialize_consts_size(_scratch_const_size);
  buf.initialize_stubs_size(MAX_stubs_size);
  assert(locs_buf != NULL, "sanity");
  int lsize = MAX_locs_size / 3;
  buf.consts()->initialize_shared_locs(&locs_buf[lsize * 0], lsize);
  buf.insts()->initialize_shared_locs( &locs_buf[lsize * 1], lsize);
  buf.stubs()->initialize_shared_locs( &locs_buf[lsize * 2], lsize);

  // Do the emission.

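  // A MachBranch cannot be emitted without a bound label, so temporarily
  // point it at a dummy label bound in this buffer and restore the original
  // label after emission.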
  Label fakeL; // Fake label for branch instructions.
  Label*   saveL = NULL;
  uint save_bnum = 0;
  bool is_branch = n->is_MachBranch();
  if (is_branch) {
    MacroAssembler masm(&buf);
    masm.bind(fakeL);
    n->as_MachBranch()->save_label(&saveL, &save_bnum);
    n->as_MachBranch()->label_set(&fakeL, 0);
  }
  n->emit(buf, this->regalloc());
  if (is_branch) // Restore label.
    n->as_MachBranch()->label_set(saveL, save_bnum);

  // End scratch_emit_size section.
  set_in_scratch_emit_size(false);

  return buf.insts_size();
}


// ============================================================================
//------------------------------Compile standard-------------------------------
debug_only( int Compile::_debug_idx = 100000; )

// Compile a method.  entry_bci is -1 for normal compilations and indicates
// the continuation bci for on stack replacement.


Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr_bci,
                  bool subsume_loads, bool do_escape_analysis, bool eliminate_boxing )
                : Phase(Compiler),
                  _env(ci_env),
                  _log(ci_env->log()),
                  _compile_id(ci_env->compile_id()),
                  _save_argument_registers(false),
                  _stub_name(NULL),
                  _stub_function(NULL),
                  _stub_entry_point(NULL),
                  _method(target),
                  _entry_bci(osr_bci),
                  _initial_gvn(NULL),
                  _for_igvn(NULL),
                  _warm_calls(NULL),
                  _subsume_loads(subsume_loads),
                  _do_escape_analysis(do_escape_analysis),
                  _eliminate_boxing(eliminate_boxing),
                  _failure_reason(NULL),
                  _code_buffer("Compile::Fill_buffer"),
                  _orig_pc_slot(0),
                  _orig_pc_slot_offset_in_bytes(0),
                  _has_method_handle_invokes(false),
                  _mach_constant_base_node(NULL),
                  _node_bundling_limit(0),
                  _node_bundling_base(NULL),
                  _java_calls(0),
                  _inner_loops(0),
                  _scratch_const_size(-1),
                  _in_scratch_emit_size(false),
                  _dead_node_list(comp_arena()),
                  _dead_node_count(0),
#ifndef PRODUCT
                  _trace_opto_output(TraceOptoOutput || method()->has_option("TraceOptoOutput")),
                  _in_dump_cnt(0),
                  _printer(IdealGraphPrinter::printer()),
#endif
                  _congraph(NULL),
                  _comp_arena(mtCompiler),
                  _node_arena(mtCompiler),
                  _old_arena(mtCompiler),
                  _Compile_types(mtCompiler),
                  _replay_inline_data(NULL),
                  _late_inlines(comp_arena(), 2, 0, NULL),
                  _string_late_inlines(comp_arena(), 2, 0, NULL),
                  _boxing_late_inlines(comp_arena(), 2, 0, NULL),
                  _late_inlines_pos(0),
                  _number_of_mh_late_inlines(0),
                  _inlining_progress(false),
                  _inlining_incrementally(false),
                  _print_inlining_list(NULL),
                  _print_inlining_idx(0),
                  _interpreter_frame_size(0),
                  _max_node_limit(MaxNodeLimit) {
  C = this;

  CompileWrapper cw(this);
#ifndef PRODUCT
  if (TimeCompiler2) {
    tty->print(" ");
    target->holder()->name()->print();
    tty->print(".");
    target->print_short_name();
    tty->print("  ");
  }
  TraceTime t1("Total compilation time", &_t_totalCompilation, TimeCompiler, TimeCompiler2);
  TraceTime t2(NULL, &_t_methodCompilation, TimeCompiler, false);
  bool print_opto_assembly = PrintOptoAssembly || _method->has_option("PrintOptoAssembly");
  if (!print_opto_assembly) {
    bool print_assembly = (PrintAssembly || _method->should_print_assembly());
    if (print_assembly && !Disassembler::can_decode()) {
      tty->print_cr("PrintAssembly request changed to PrintOptoAssembly");
      print_opto_assembly = true;
    }
  }
  set_print_assembly(print_opto_assembly);
  set_parsed_irreducible_loop(false);

  if (method()->has_option("ReplayInline")) {
    _replay_inline_data = ciReplay::load_inline_data(method(), entry_bci(), ci_env->comp_level());
  }
#endif
  set_print_inlining(PrintInlining || method()->has_option("PrintInlining") NOT_PRODUCT( || PrintOptoInlining));
  set_print_intrinsics(PrintIntrinsics || method()->has_option("PrintIntrinsics"));
  set_has_irreducible_loop(true); // conservative until build_loop_tree() resets it

  if (ProfileTraps RTM_OPT_ONLY( || UseRTMLocking )) {
    // Make sure the method being compiled gets its own MDO,
    // so we can at least track the decompile_count().
    // Need MDO to record RTM code generation state.
    method()->ensure_method_data();
  }

  Init(::AliasLevel);


  print_compile_messages();

  _ilt = InlineTree::build_inline_tree_root();

  // Even if NO memory addresses are used, MergeMem nodes must have at least 1 slice
  assert(num_alias_types() >= AliasIdxRaw, "");

#define MINIMUM_NODE_HASH  1023
  // Node list that Iterative GVN will start with
  Unique_Node_List for_igvn(comp_arena());
  set_for_igvn(&for_igvn);

  // GVN that will be run immediately on new nodes
  uint estimated_size = method()->code_size()*4+64;
  estimated_size = (estimated_size < MINIMUM_NODE_HASH ? MINIMUM_NODE_HASH : estimated_size);
  PhaseGVN gvn(node_arena(), estimated_size);
  set_initial_gvn(&gvn);

  if (print_inlining() || print_intrinsics()) {
    _print_inlining_list = new (comp_arena())GrowableArray<PrintInliningBuffer>(comp_arena(), 1, 1, PrintInliningBuffer());
  }
  { // Scope for timing the parser
    TracePhase t3("parse", &_t_parser, true);

    // Put top into the hash table ASAP.
    initial_gvn()->transform_no_reclaim(top());

    // Set up tf(), start(), and find a CallGenerator.
    CallGenerator* cg = NULL;
    if (is_osr_compilation()) {
      const TypeTuple *domain = StartOSRNode::osr_domain();
      const TypeTuple *range = TypeTuple::make_range(method()->signature());
      init_tf(TypeFunc::make(domain, range));
      StartNode* s = new (this) StartOSRNode(root(), domain);
      initial_gvn()->set_type_bottom(s);
      init_start(s);
      cg = CallGenerator::for_osr(method(), entry_bci());
    } else {
      // Normal case.
      init_tf(TypeFunc::make(method()));
      StartNode* s = new (this) StartNode(root(), tf()->domain());
      initial_gvn()->set_type_bottom(s);
      init_start(s);
      if (method()->intrinsic_id() == vmIntrinsics::_Reference_get && UseG1GC) {
        // With java.lang.ref.Reference.get() we must go through the
        // intrinsic when G1 is enabled - even when get() is the root
        // method of the compile - so that, if necessary, the value in
        // the referent field of the reference object gets recorded by
        // the pre-barrier code.
        // Specifically, if G1 is enabled, the value in the referent
        // field is recorded by the G1 SATB pre barrier. This will
        // result in the referent being marked live and the reference
        // object removed from the list of discovered references during
        // reference processing.
        cg = find_intrinsic(method(), false);
      }
      if (cg == NULL) {
        float past_uses = method()->interpreter_invocation_count();
        float expected_uses = past_uses;
        cg = CallGenerator::for_inline(method(), expected_uses);
      }
    }
    if (failing())  return;
    if (cg == NULL) {
      record_method_not_compilable_all_tiers("cannot parse method");
      return;
    }
    JVMState* jvms = build_start_state(start(), tf());
    if ((jvms = cg->generate(jvms)) == NULL) {
      record_method_not_compilable("method parse failed");
      return;
    }
    GraphKit kit(jvms);

    if (!kit.stopped()) {
      // Accept return values, and transfer control we know not where.
      // This is done by a special, unique ReturnNode bound to root.
      return_values(kit.jvms());
    }

    if (kit.has_exceptions()) {
      // Any exceptions that escape from this call must be rethrown
      // to whatever caller is dynamically above us on the stack.
      // This is done by a special, unique RethrowNode bound to root.
      rethrow_exceptions(kit.transfer_exceptions_into_jvms());
    }

    assert(IncrementalInline || (_late_inlines.length() == 0 && !has_mh_late_inlines()), "incremental inlining is off");

    if (_late_inlines.length() == 0 && !has_mh_late_inlines() && !failing() && has_stringbuilder()) {
      inline_string_calls(true);
    }

    if (failing())  return;

    print_method(PHASE_BEFORE_REMOVEUSELESS, 3);

    // Remove clutter produced by parsing.
    if (!failing()) {
      ResourceMark rm;
      PhaseRemoveUseless pru(initial_gvn(), &for_igvn);
    }
  }

  // Note:  Large methods are capped off in do_one_bytecode().
  if (failing())  return;

  // After parsing, node notes are no longer automagic.
  // They must be propagated by register_new_node_with_optimizer(),
  // clone(), or the like.
  set_default_node_notes(NULL);

  for (;;) {
    int successes = Inline_Warm();
    if (failing())  return;
    if (successes == 0)  break;
  }

  // Drain the list.
  Finish_Warm();
#ifndef PRODUCT
  if (_printer) {
    _printer->print_inlining(this);
  }
#endif

  if (failing())  return;
  NOT_PRODUCT( verify_graph_edges(); )

  // Now optimize
  Optimize();
  if (failing())  return;
  NOT_PRODUCT( verify_graph_edges(); )

#ifndef PRODUCT
  if (PrintIdeal) {
    ttyLocker ttyl;  // keep the following output all in one block
    // This output goes directly to the tty, not the compiler log.
    // To enable tools to match it up with the compilation activity,
    // be sure to tag this tty output with the compile ID.
    if (xtty != NULL) {
      xtty->head("ideal compile_id='%d'%s", compile_id(),
                 is_osr_compilation()    ? " compile_kind='osr'" :
                 "");
    }
    root()->dump(9999);
    if (xtty != NULL) {
      xtty->tail("ideal");
    }
  }
#endif

  NOT_PRODUCT( verify_barriers(); )

  // Dump compilation data to replay it.
  if (method()->has_option("DumpReplay")) {
    env()->dump_replay_data(_compile_id);
  }
  if (method()->has_option("DumpInline") && (ilt() != NULL)) {
    env()->dump_inline_data(_compile_id);
  }

  // Now that we know the size of all the monitors we can add a fixed slot
  // for the original deopt pc.

  _orig_pc_slot =  fixed_slots();
  int next_slot = _orig_pc_slot + (sizeof(address) / VMRegImpl::stack_slot_size);
  set_fixed_slots(next_slot);

  // Compute when to use implicit null checks. Used by matching trap based
  // nodes and NullCheck optimization.
  set_allowed_deopt_reasons();

  // Now generate code
  Code_Gen();
  if (failing())  return;

  // Check if we want to skip execution of all compiled code.
  {
#ifndef PRODUCT
    if (OptoNoExecute) {
      record_method_not_compilable("+OptoNoExecute");  // Flag as failed
      return;
    }
    TracePhase t2("install_code", &_t_registerMethod, TimeCompiler);
#endif

    if (is_osr_compilation()) {
      _code_offsets.set_value(CodeOffsets::Verified_Entry, 0);
      _code_offsets.set_value(CodeOffsets::OSR_Entry, _first_block_size);
    } else {
      _code_offsets.set_value(CodeOffsets::Verified_Entry, _first_block_size);
      _code_offsets.set_value(CodeOffsets::OSR_Entry, 0);
    }

    env()->register_method(_method, _entry_bci,
                           &_code_offsets,
                           _orig_pc_slot_offset_in_bytes,
                           code_buffer(),
                           frame_size_in_words(), _oop_map_set,
                           &_handler_table, &_inc_table,
                           compiler,
                           env()->comp_level(),
                           has_unsafe_access(),
                           SharedRuntime::is_wide_vector(max_vector_size()),
                           rtm_state()
                           );

    if (log() != NULL) // Print code cache state into compiler log
      log()->code_cache_state();
  }
}

//------------------------------Compile----------------------------------------
// Compile a runtime stub
Compile::Compile( ciEnv* ci_env,
                  TypeFunc_generator generator,
                  address stub_function,
                  const char *stub_name,
                  int is_fancy_jump,
                  bool pass_tls,
                  bool save_arg_registers,
                  bool return_pc )
  : Phase(Compiler),
    _env(ci_env),
    _log(ci_env->log()),
    _compile_id(0),
    _save_argument_registers(save_arg_registers),
    _method(NULL),
    _stub_name(stub_name),
    _stub_function(stub_function),
    _stub_entry_point(NULL),
    _entry_bci(InvocationEntryBci),
    _initial_gvn(NULL),
    _for_igvn(NULL),
    _warm_calls(NULL),
    _orig_pc_slot(0),
    _orig_pc_slot_offset_in_bytes(0),
    _subsume_loads(true),
    _do_escape_analysis(false),
    _eliminate_boxing(false),
    _failure_reason(NULL),
    _code_buffer("Compile::Fill_buffer"),
    _has_method_handle_invokes(false),
    _mach_constant_base_node(NULL),
    _node_bundling_limit(0),
    _node_bundling_base(NULL),
    _java_calls(0),
    _inner_loops(0),
#ifndef PRODUCT
    _trace_opto_output(TraceOptoOutput),
    _in_dump_cnt(0),
    _printer(NULL),
#endif
    _comp_arena(mtCompiler),
    _node_arena(mtCompiler),
    _old_arena(mtCompiler),
    _Compile_types(mtCompiler),
    _dead_node_list(comp_arena()),
    _dead_node_count(0),
    _congraph(NULL),
    _replay_inline_data(NULL),
    _number_of_mh_late_inlines(0),
    _inlining_progress(false),
    _inlining_incrementally(false),
    _print_inlining_list(NULL),
    _print_inlining_idx(0),
    _allowed_reasons(0),
    _interpreter_frame_size(0),
    _max_node_limit(MaxNodeLimit) {
  C = this;

#ifndef PRODUCT
  TraceTime t1(NULL, &_t_totalCompilation, TimeCompiler, false);
  TraceTime t2(NULL, &_t_stubCompilation, TimeCompiler, false);
  set_print_assembly(PrintFrameConverterAssembly);
  set_parsed_irreducible_loop(false);
#endif
  set_has_irreducible_loop(false); // no loops

  CompileWrapper cw(this);
  Init(/*AliasLevel=*/ 0);
  init_tf((*generator)());

  {
    // The following is a dummy for the sake of GraphKit::gen_stub
    Unique_Node_List for_igvn(comp_arena());
    set_for_igvn(&for_igvn);  // not used, but some GraphKit guys push on this
    PhaseGVN gvn(Thread::current()->resource_area(),255);
    set_initial_gvn(&gvn);    // not significant, but GraphKit guys use it pervasively
    gvn.transform_no_reclaim(top());

    GraphKit kit;
    kit.gen_stub(stub_function, stub_name, is_fancy_jump, pass_tls, return_pc);
  }

  NOT_PRODUCT( verify_graph_edges(); )
  Code_Gen();
  if (failing())  return;


  // Entry point will be accessed using compile->stub_entry_point();
  if (code_buffer() == NULL) {
    Matcher::soft_match_failure();
  } else {
    if (PrintAssembly && (WizardMode || Verbose))
      tty->print_cr("### Stub::%s", stub_name);

    if (!failing()) {
      assert(_fixed_slots == 0, "no fixed slots used for runtime stubs");

      // Make the NMethod
      // For now we mark the frame as never safe for profile stackwalking
      RuntimeStub *rs = RuntimeStub::new_runtime_stub(stub_name,
                                                      code_buffer(),
                                                      CodeOffsets::frame_never_safe,
                                                      // _code_offsets.value(CodeOffsets::Frame_Complete),
                                                      frame_size_in_words(),
                                                      _oop_map_set,
                                                      save_arg_registers);
      assert(rs != NULL && rs->is_runtime_stub(), "sanity check");

      _stub_entry_point = rs->entry_point();
    }
  }
}

//------------------------------Init-------------------------------------------
// Prepare for a single compilation
void Compile::Init(int aliaslevel) {
  _unique  = 0;
  _regalloc = NULL;

  _tf      = NULL;  // filled in later
  _top     = NULL;  // cached later
  _matcher = NULL;  // filled in later
  _cfg     = NULL;  // filled in later

  set_24_bit_selection_and_mode(Use24BitFP, false);

  _node_note_array = NULL;
  _default_node_notes = NULL;

  _immutable_memory = NULL; // filled in at first inquiry

  // Globally visible Nodes
  // First set TOP to NULL to give safe behavior during creation of RootNode
  set_cached_top_node(NULL);
  set_root(new (this) RootNode());
  // Now that you have a Root to point to, create the real TOP
  set_cached_top_node( new (this) ConNode(Type::TOP) );
  set_recent_alloc(NULL, NULL);

  // Create Debug Information Recorder to record scopes, oopmaps, etc.
  env()->set_oop_recorder(new OopRecorder(env()->arena()));
  env()->set_debug_info(new DebugInformationRecorder(env()->oop_recorder()));
  env()->set_dependencies(new Dependencies(env()));

  _fixed_slots = 0;
  set_has_split_ifs(false);
  set_has_loops(has_method() && method()->has_loops()); // first approximation
  set_has_stringbuilder(false);
  set_has_boxed_value(false);
  _trap_can_recompile = false;  // no traps emitted yet
  _major_progress = true; // start out assuming good things will happen
  set_has_unsafe_access(false);
  set_max_vector_size(0);
  Copy::zero_to_bytes(_trap_hist, sizeof(_trap_hist));
  set_decompile_count(0);

  set_do_freq_based_layout(BlockLayoutByFrequency || method_has_option("BlockLayoutByFrequency"));
  set_num_loop_opts(LoopOptsCount);
  set_do_inlining(Inline);
  set_max_inline_size(MaxInlineSize);
  set_freq_inline_size(FreqInlineSize);
  set_do_scheduling(OptoScheduling);
  set_do_count_invocations(false);
  set_do_method_data_update(false);
  set_rtm_state(NoRTM); // No RTM lock eliding by default
  method_has_option_value("MaxNodeLimit", _max_node_limit);
#if INCLUDE_RTM_OPT
  if (UseRTMLocking && has_method() && (method()->method_data_or_null() != NULL)) {
    int rtm_state = method()->method_data()->rtm_state();
    if (method_has_option("NoRTMLockEliding") || ((rtm_state & NoRTM) != 0)) {
      // Don't generate RTM lock eliding code.
      set_rtm_state(NoRTM);
    } else if (method_has_option("UseRTMLockEliding") || ((rtm_state & UseRTM) != 0) || !UseRTMDeopt) {
      // Generate RTM lock eliding code without abort ratio calculation code.
      set_rtm_state(UseRTM);
    } else if (UseRTMDeopt) {
      // Generate RTM lock eliding code and include abort ratio calculation
      // code if UseRTMDeopt is on.
      set_rtm_state(ProfileRTM);
    }
  }
#endif
  if (debug_info()->recording_non_safepoints()) {
    set_node_note_array(new(comp_arena()) GrowableArray<Node_Notes*>
                        (comp_arena(), 8, 0, NULL));
    set_default_node_notes(Node_Notes::make(this));
  }

  // // -- Initialize types before each compile --
  // // Update cached type information
  // if( _method && _method->constants() )
  //   Type::update_loaded_types(_method, _method->constants());

  // Init alias_type map.
1126
  if (!_do_escape_analysis && aliaslevel == 3)
D
duke 已提交
1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147
    aliaslevel = 2;  // No unique types without escape analysis
  _AliasLevel = aliaslevel;
  const int grow_ats = 16;
  _max_alias_types = grow_ats;
  _alias_types   = NEW_ARENA_ARRAY(comp_arena(), AliasType*, grow_ats);
  AliasType* ats = NEW_ARENA_ARRAY(comp_arena(), AliasType,  grow_ats);
  Copy::zero_to_bytes(ats, sizeof(AliasType)*grow_ats);
  {
    for (int i = 0; i < grow_ats; i++)  _alias_types[i] = &ats[i];
  }
  // Initialize the first few types.
  _alias_types[AliasIdxTop]->Init(AliasIdxTop, NULL);
  _alias_types[AliasIdxBot]->Init(AliasIdxBot, TypePtr::BOTTOM);
  _alias_types[AliasIdxRaw]->Init(AliasIdxRaw, TypeRawPtr::BOTTOM);
  _num_alias_types = AliasIdxRaw+1;
  // Zero out the alias type cache.
  Copy::zero_to_bytes(_alias_cache, sizeof(_alias_cache));
  // A NULL adr_type hits in the cache right away.  Preload the right answer.
  probe_alias_cache(NULL)->_index = AliasIdxTop;

  _intrinsics = NULL;
  _macro_nodes = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8,  0, NULL);
  _predicate_opaqs = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8,  0, NULL);
  _expensive_nodes = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8,  0, NULL);
  register_library_intrinsics();
}

//---------------------------init_start----------------------------------------
// Install the StartNode on this compile object.
void Compile::init_start(StartNode* s) {
  if (failing())
    return; // already failing
  assert(s == start(), "");
}

StartNode* Compile::start() const {
  assert(!failing(), "");
  for (DUIterator_Fast imax, i = root()->fast_outs(imax); i < imax; i++) {
    Node* start = root()->fast_out(i);
    if( start->is_Start() )
      return start->as_Start();
  }
  fatal("Did not find Start node!");
  return NULL;
}

//-------------------------------immutable_memory-------------------------------------
// Access immutable memory
Node* Compile::immutable_memory() {
  if (_immutable_memory != NULL) {
    return _immutable_memory;
  }
  StartNode* s = start();
  for (DUIterator_Fast imax, i = s->fast_outs(imax); true; i++) {
    Node *p = s->fast_out(i);
    if (p != s && p->as_Proj()->_con == TypeFunc::Memory) {
      _immutable_memory = p;
      return _immutable_memory;
    }
  }
  ShouldNotReachHere();
  return NULL;
}

//----------------------set_cached_top_node------------------------------------
// Install the cached top node, and make sure Node::is_top works correctly.
void Compile::set_cached_top_node(Node* tn) {
  if (tn != NULL)  verify_top(tn);
  Node* old_top = _top;
  _top = tn;
  // Calling Node::setup_is_top allows the nodes the chance to adjust
  // their _out arrays.
  if (_top != NULL)     _top->setup_is_top();
  if (old_top != NULL)  old_top->setup_is_top();
  assert(_top == NULL || top()->is_top(), "");
}

#ifdef ASSERT
uint Compile::count_live_nodes_by_graph_walk() {
  Unique_Node_List useful(comp_arena());
  // Get useful node list by walking the graph.
  identify_useful_nodes(useful);
  return useful.size();
}

void Compile::print_missing_nodes() {

  // Return if CompileLog is NULL and PrintIdealNodeCount is false.
  if ((_log == NULL) && (! PrintIdealNodeCount)) {
    return;
  }

  // This is an expensive function. It is executed only when the user
  // specifies VerifyIdealNodeCount option or otherwise knows the
  // additional work that needs to be done to identify reachable nodes
  // by walking the flow graph and find the missing ones using
  // _dead_node_list.

  Unique_Node_List useful(comp_arena());
  // Get useful node list by walking the graph.
  identify_useful_nodes(useful);

  uint l_nodes = C->live_nodes();
  uint l_nodes_by_walk = useful.size();

  if (l_nodes != l_nodes_by_walk) {
    if (_log != NULL) {
      _log->begin_head("mismatched_nodes count='%d'", abs((int) (l_nodes - l_nodes_by_walk)));
      _log->stamp();
      _log->end_head();
    }
    VectorSet& useful_member_set = useful.member_set();
    int last_idx = l_nodes_by_walk;
    for (int i = 0; i < last_idx; i++) {
      if (useful_member_set.test(i)) {
        if (_dead_node_list.test(i)) {
          if (_log != NULL) {
            _log->elem("mismatched_node_info node_idx='%d' type='both live and dead'", i);
          }
          if (PrintIdealNodeCount) {
            // Print the log message to tty
              tty->print_cr("mismatched_node idx='%d' both live and dead'", i);
              useful.at(i)->dump();
          }
        }
      }
      else if (! _dead_node_list.test(i)) {
        if (_log != NULL) {
          _log->elem("mismatched_node_info node_idx='%d' type='neither live nor dead'", i);
        }
        if (PrintIdealNodeCount) {
          // Print the log message to tty
          tty->print_cr("mismatched_node idx='%d' type='neither live nor dead'", i);
        }
      }
    }
    if (_log != NULL) {
      _log->tail("mismatched_nodes");
    }
  }
}
#endif

#ifndef PRODUCT
void Compile::verify_top(Node* tn) const {
  if (tn != NULL) {
    assert(tn->is_Con(), "top node must be a constant");
    assert(((ConNode*)tn)->type() == Type::TOP, "top node must have correct type");
    assert(tn->in(0) != NULL, "must have live top node");
  }
}
#endif


///-------------------Managing Per-Node Debug & Profile Info-------------------

void Compile::grow_node_notes(GrowableArray<Node_Notes*>* arr, int grow_by) {
  guarantee(arr != NULL, "");
  int num_blocks = arr->length();
  if (grow_by < num_blocks)  grow_by = num_blocks;
  int num_notes = grow_by * _node_notes_block_size;
  Node_Notes* notes = NEW_ARENA_ARRAY(node_arena(), Node_Notes, num_notes);
  Copy::zero_to_bytes(notes, num_notes * sizeof(Node_Notes));
  while (num_notes > 0) {
    arr->append(notes);
    notes     += _node_notes_block_size;
    num_notes -= _node_notes_block_size;
  }
  assert(num_notes == 0, "exact multiple, please");
}

bool Compile::copy_node_notes_to(Node* dest, Node* source) {
  if (source == NULL || dest == NULL)  return false;

  if (dest->is_Con())
    return false;               // Do not push debug info onto constants.

#ifdef ASSERT
  // Leave a bread crumb trail pointing to the original node:
  if (dest != NULL && dest != source && dest->debug_orig() == NULL) {
    dest->set_debug_orig(source);
  }
#endif

  if (node_note_array() == NULL)
    return false;               // Not collecting any notes now.

  // This is a copy onto a pre-existing node, which may already have notes.
  // If both nodes have notes, do not overwrite any pre-existing notes.
  Node_Notes* source_notes = node_notes_at(source->_idx);
  if (source_notes == NULL || source_notes->is_clear())  return false;
  Node_Notes* dest_notes   = node_notes_at(dest->_idx);
  if (dest_notes == NULL || dest_notes->is_clear()) {
    return set_node_notes_at(dest->_idx, source_notes);
  }

  Node_Notes merged_notes = (*source_notes);
  // The order of operations here ensures that dest notes will win...
  merged_notes.update_from(dest_notes);
  return set_node_notes_at(dest->_idx, &merged_notes);
}


//--------------------------allow_range_check_smearing-------------------------
// Gating condition for coalescing similar range checks.
// Sometimes we try 'speculatively' replacing a series of a range checks by a
// single covering check that is at least as strong as any of them.
// If the optimization succeeds, the simplified (strengthened) range check
// will always succeed.  If it fails, we will deopt, and then give up
// on the optimization.
bool Compile::allow_range_check_smearing() const {
  // If this method has already thrown a range-check,
  // assume it was because we already tried range smearing
  // and it failed.
  uint already_trapped = trap_count(Deoptimization::Reason_range_check);
  return !already_trapped;
}


//------------------------------flatten_alias_type-----------------------------
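// Flatten a pointer type into the canonical form used to assign alias classes:
// constant offsets into array bodies are widened to OffsetBot, and
// NotNull/exactness distinctions are dropped, so accesses that may refer to
// the same memory end up with the same alias index.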
const TypePtr *Compile::flatten_alias_type( const TypePtr *tj ) const {
  int offset = tj->offset();
  TypePtr::PTR ptr = tj->ptr();

  // Known instance (scalarizable allocation) alias only with itself.
  bool is_known_inst = tj->isa_oopptr() != NULL &&
                       tj->is_oopptr()->is_known_instance();

  // Process weird unsafe references.
  if (offset == Type::OffsetBot && (tj->isa_instptr() /*|| tj->isa_klassptr()*/)) {
    assert(InlineUnsafeOps, "indeterminate pointers come only from unsafe ops");
    assert(!is_known_inst, "scalarizable allocation should not have unsafe references");
    tj = TypeOopPtr::BOTTOM;
    ptr = tj->ptr();
    offset = tj->offset();
  }

  // Array pointers need some flattening
  const TypeAryPtr *ta = tj->isa_aryptr();
  if (ta && ta->is_stable()) {
    // Erase stability property for alias analysis.
    tj = ta = ta->cast_to_stable(false);
  }
  if( ta && is_known_inst ) {
    if ( offset != Type::OffsetBot &&
         offset > arrayOopDesc::length_offset_in_bytes() ) {
      offset = Type::OffsetBot; // Flatten constant access into array body only
      tj = ta = TypeAryPtr::make(ptr, ta->ary(), ta->klass(), true, offset, ta->instance_id());
    }
  } else if( ta && _AliasLevel >= 2 ) {
    // For arrays indexed by constant indices, we flatten the alias
    // space to include all of the array body.  Only the header, klass
    // and array length can be accessed un-aliased.
    if( offset != Type::OffsetBot ) {
      if( ta->const_oop() ) { // MethodData* or Method*
        offset = Type::OffsetBot;   // Flatten constant access into array body
        tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),ta->ary(),ta->klass(),false,offset);
      } else if( offset == arrayOopDesc::length_offset_in_bytes() ) {
        // range is OK as-is.
        tj = ta = TypeAryPtr::RANGE;
      } else if( offset == oopDesc::klass_offset_in_bytes() ) {
        tj = TypeInstPtr::KLASS; // all klass loads look alike
        ta = TypeAryPtr::RANGE; // generic ignored junk
        ptr = TypePtr::BotPTR;
      } else if( offset == oopDesc::mark_offset_in_bytes() ) {
        tj = TypeInstPtr::MARK;
        ta = TypeAryPtr::RANGE; // generic ignored junk
        ptr = TypePtr::BotPTR;
      } else {                  // Random constant offset into array body
        offset = Type::OffsetBot;   // Flatten constant access into array body
        tj = ta = TypeAryPtr::make(ptr,ta->ary(),ta->klass(),false,offset);
      }
    }
    // Arrays of fixed size alias with arrays of unknown size.
    if (ta->size() != TypeInt::POS) {
      const TypeAry *tary = TypeAry::make(ta->elem(), TypeInt::POS);
      tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,ta->klass(),false,offset);
    }
    // Arrays of known objects become arrays of unknown objects.
    if (ta->elem()->isa_narrowoop() && ta->elem() != TypeNarrowOop::BOTTOM) {
      const TypeAry *tary = TypeAry::make(TypeNarrowOop::BOTTOM, ta->size());
      tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,offset);
    }
    if (ta->elem()->isa_oopptr() && ta->elem() != TypeInstPtr::BOTTOM) {
      const TypeAry *tary = TypeAry::make(TypeInstPtr::BOTTOM, ta->size());
      tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,offset);
    }
    // Arrays of bytes and of booleans both use 'bastore' and 'baload' so
    // cannot be distinguished by bytecode alone.
    if (ta->elem() == TypeInt::BOOL) {
      const TypeAry *tary = TypeAry::make(TypeInt::BYTE, ta->size());
      ciKlass* aklass = ciTypeArrayKlass::make(T_BYTE);
      tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,aklass,false,offset);
    }
    // During the 2nd round of IterGVN, NotNull castings are removed.
    // Make sure the Bottom and NotNull variants alias the same.
    // Also, make sure exact and non-exact variants alias the same.
    if (ptr == TypePtr::NotNull || ta->klass_is_exact() || ta->speculative() != NULL) {
      tj = ta = TypeAryPtr::make(TypePtr::BotPTR,ta->ary(),ta->klass(),false,offset);
    }
  }

  // Oop pointers need some flattening
  const TypeInstPtr *to = tj->isa_instptr();
  if( to && _AliasLevel >= 2 && to != TypeOopPtr::BOTTOM ) {
    ciInstanceKlass *k = to->klass()->as_instance_klass();
    if( ptr == TypePtr::Constant ) {
      if (to->klass() != ciEnv::current()->Class_klass() ||
          offset < k->size_helper() * wordSize) {
        // No constant oop pointers (such as Strings); they alias with
        // unknown strings.
        assert(!is_known_inst, "not scalarizable allocation");
        tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,offset);
      }
    } else if( is_known_inst ) {
      tj = to; // Keep NotNull and klass_is_exact for instance type
    } else if( ptr == TypePtr::NotNull || to->klass_is_exact() ) {
      // During the 2nd round of IterGVN, NotNull castings are removed.
      // Make sure the Bottom and NotNull variants alias the same.
      // Also, make sure exact and non-exact variants alias the same.
      tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,offset);
    }
    if (to->speculative() != NULL) {
      tj = to = TypeInstPtr::make(to->ptr(),to->klass(),to->klass_is_exact(),to->const_oop(),to->offset(), to->instance_id());
    }
    // Canonicalize the holder of this field
    if (offset >= 0 && offset < instanceOopDesc::base_offset_in_bytes()) {
      // First handle header references such as a LoadKlassNode, even if the
      // object's klass is unloaded at compile time (4965979).
      if (!is_known_inst) { // Do it only for non-instance types
        tj = to = TypeInstPtr::make(TypePtr::BotPTR, env()->Object_klass(), false, NULL, offset);
      }
    } else if (offset < 0 || offset >= k->size_helper() * wordSize) {
      // Static fields are in the space above the normal instance
      // fields in the java.lang.Class instance.
      if (to->klass() != ciEnv::current()->Class_klass()) {
        to = NULL;
        tj = TypeOopPtr::BOTTOM;
        offset = tj->offset();
      }
    } else {
      ciInstanceKlass *canonical_holder = k->get_canonical_holder(offset);
      if (!k->equals(canonical_holder) || tj->offset() != offset) {
        if( is_known_inst ) {
          tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, true, NULL, offset, to->instance_id());
        } else {
          tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, false, NULL, offset);
        }
      }
    }
  }

  // Klass pointers to object array klasses need some flattening
  const TypeKlassPtr *tk = tj->isa_klassptr();
  if( tk ) {
    // If we are referencing a field within a Klass, we need
    // to assume the worst case of an Object.  Both exact and
    // inexact types must flatten to the same alias class so
    // use NotNull as the PTR.
    if ( offset == Type::OffsetBot || (offset >= 0 && (size_t)offset < sizeof(Klass)) ) {

      tj = tk = TypeKlassPtr::make(TypePtr::NotNull,
                                   TypeKlassPtr::OBJECT->klass(),
                                   offset);
    }

    ciKlass* klass = tk->klass();
    if( klass->is_obj_array_klass() ) {
      ciKlass* k = TypeAryPtr::OOPS->klass();
      if( !k || !k->is_loaded() )                  // Only fails for some -Xcomp runs
        k = TypeInstPtr::BOTTOM->klass();
      tj = tk = TypeKlassPtr::make( TypePtr::NotNull, k, offset );
    }

    // Check for precise loads from the primary supertype array and force them
    // to the supertype cache alias index.  Check for generic array loads from
    // the primary supertype array and also force them to the supertype cache
    // alias index.  Since the same load can reach both, we need to merge
    // these 2 disparate memories into the same alias class.  Since the
    // primary supertype array is read-only, there's no chance of confusion
    // where we bypass an array load and an array store.
    int primary_supers_offset = in_bytes(Klass::primary_supers_offset());
    if (offset == Type::OffsetBot ||
        (offset >= primary_supers_offset &&
         offset < (int)(primary_supers_offset + Klass::primary_super_limit() * wordSize)) ||
        offset == (int)in_bytes(Klass::secondary_super_cache_offset())) {
      offset = in_bytes(Klass::secondary_super_cache_offset());
      tj = tk = TypeKlassPtr::make( TypePtr::NotNull, tk->klass(), offset );
    }
  }

  // Flatten all Raw pointers together.
  if (tj->base() == Type::RawPtr)
    tj = TypeRawPtr::BOTTOM;

  if (tj->base() == Type::AnyPtr)
    tj = TypePtr::BOTTOM;      // An error, which the caller must check for.

  // Flatten all to bottom for now
  switch( _AliasLevel ) {
  case 0:
    tj = TypePtr::BOTTOM;
    break;
  case 1:                       // Flatten to: oop, static, field or array
    switch (tj->base()) {
    //case Type::AryPtr: tj = TypeAryPtr::RANGE;    break;
    case Type::RawPtr:   tj = TypeRawPtr::BOTTOM;   break;
    case Type::AryPtr:   // do not distinguish arrays at all
    case Type::InstPtr:  tj = TypeInstPtr::BOTTOM;  break;
    case Type::KlassPtr: tj = TypeKlassPtr::OBJECT; break;
    case Type::AnyPtr:   tj = TypePtr::BOTTOM;      break;  // caller checks it
    default: ShouldNotReachHere();
    }
    break;
  case 2:                       // No collapsing at level 2; keep all splits
  case 3:                       // No collapsing at level 3; keep all splits
    break;
  default:
    Unimplemented();
  }

  offset = tj->offset();
  assert( offset != Type::OffsetTop, "Offset has fallen from constant" );

  assert( (offset != Type::OffsetBot && tj->base() != Type::AryPtr) ||
          (offset == Type::OffsetBot && tj->base() == Type::AryPtr) ||
          (offset == Type::OffsetBot && tj == TypeOopPtr::BOTTOM) ||
          (offset == Type::OffsetBot && tj == TypePtr::BOTTOM) ||
          (offset == oopDesc::mark_offset_in_bytes() && tj->base() == Type::AryPtr) ||
          (offset == oopDesc::klass_offset_in_bytes() && tj->base() == Type::AryPtr) ||
          (offset == arrayOopDesc::length_offset_in_bytes() && tj->base() == Type::AryPtr)  ,
          "For oops, klasses, raw offset must be constant; for arrays the offset is never known" );
  assert( tj->ptr() != TypePtr::TopPTR &&
          tj->ptr() != TypePtr::AnyNull &&
          tj->ptr() != TypePtr::Null, "No imprecise addresses" );
//    assert( tj->ptr() != TypePtr::Constant ||
//            tj->base() == Type::RawPtr ||
//            tj->base() == Type::KlassPtr, "No constant oop addresses" );

  return tj;
}

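// Initialize a freshly allocated alias class entry.  For known-instance
// (scalarizable allocation) types, also record the alias index of the
// generic, non-instance variant so the two slices can be related later.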
void Compile::AliasType::Init(int i, const TypePtr* at) {
  _index = i;
  _adr_type = at;
  _field = NULL;
  _element = NULL;
  _is_rewritable = true; // default
  const TypeOopPtr *atoop = (at != NULL) ? at->isa_oopptr() : NULL;
  if (atoop != NULL && atoop->is_known_instance()) {
    const TypeOopPtr *gt = atoop->cast_to_instance_id(TypeOopPtr::InstanceBot);
    _general_index = Compile::current()->get_alias_index(gt);
  } else {
    _general_index = 0;
  }
}

//---------------------------------print_on------------------------------------
#ifndef PRODUCT
void Compile::AliasType::print_on(outputStream* st) {
  if (index() < 10)
        st->print("@ <%d> ", index());
  else  st->print("@ <%d>",  index());
  st->print(is_rewritable() ? "   " : " RO");
  int offset = adr_type()->offset();
  if (offset == Type::OffsetBot)
        st->print(" +any");
  else  st->print(" +%-3d", offset);
  st->print(" in ");
  adr_type()->dump_on(st);
  const TypeOopPtr* tjp = adr_type()->isa_oopptr();
  if (field() != NULL && tjp) {
    if (tjp->klass()  != field()->holder() ||
        tjp->offset() != field()->offset_in_bytes()) {
      st->print(" != ");
      field()->print();
      st->print(" ***");
    }
  }
}

void print_alias_types() {
  Compile* C = Compile::current();
  tty->print_cr("--- Alias types, AliasIdxBot .. %d", C->num_alias_types()-1);
  for (int idx = Compile::AliasIdxBot; idx < C->num_alias_types(); idx++) {
    C->alias_type(idx)->print_on(tty);
    tty->cr();
  }
}
#endif


//----------------------------probe_alias_cache--------------------------------
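// The alias cache is a small direct-mapped table keyed on the address-type
// pointer itself; a miss simply falls through to the slow path in
// find_alias_type().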
Compile::AliasCacheEntry* Compile::probe_alias_cache(const TypePtr* adr_type) {
  intptr_t key = (intptr_t) adr_type;
  key ^= key >> logAliasCacheSize;
  return &_alias_cache[key & right_n_bits(logAliasCacheSize)];
}


//-----------------------------grow_alias_types--------------------------------
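// Double the capacity of the alias type table: reallocate the pointer array
// in the compile arena and zero-fill the newly added entries.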
void Compile::grow_alias_types() {
  const int old_ats  = _max_alias_types; // how many before?
  const int new_ats  = old_ats;          // how many more?
  const int grow_ats = old_ats+new_ats;  // how many now?
  _max_alias_types = grow_ats;
  _alias_types =  REALLOC_ARENA_ARRAY(comp_arena(), AliasType*, _alias_types, old_ats, grow_ats);
  AliasType* ats =    NEW_ARENA_ARRAY(comp_arena(), AliasType, new_ats);
  Copy::zero_to_bytes(ats, sizeof(AliasType)*new_ats);
  for (int i = 0; i < new_ats; i++)  _alias_types[old_ats+i] = &ats[i];
}


//--------------------------------find_alias_type------------------------------
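// Map an address type to its AliasType: consult the probe cache, flatten the
// type, search the existing entries, and (unless no_create is set) allocate
// a new entry, marking read-only slices such as klass loads, array ranges
// and final/stable fields as non-rewritable.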
Compile::AliasType* Compile::find_alias_type(const TypePtr* adr_type, bool no_create, ciField* original_field) {
  if (_AliasLevel == 0)
    return alias_type(AliasIdxBot);

  AliasCacheEntry* ace = probe_alias_cache(adr_type);
  if (ace->_adr_type == adr_type) {
    return alias_type(ace->_index);
  }

  // Handle special cases.
  if (adr_type == NULL)             return alias_type(AliasIdxTop);
  if (adr_type == TypePtr::BOTTOM)  return alias_type(AliasIdxBot);

  // Do it the slow way.
  const TypePtr* flat = flatten_alias_type(adr_type);

#ifdef ASSERT
  assert(flat == flatten_alias_type(flat), "idempotent");
  assert(flat != TypePtr::BOTTOM,     "cannot alias-analyze an untyped ptr");
  if (flat->isa_oopptr() && !flat->isa_klassptr()) {
    const TypeOopPtr* foop = flat->is_oopptr();
    // Scalarizable allocations have exact klass always.
    bool exact = !foop->klass_is_exact() || foop->is_known_instance();
    const TypePtr* xoop = foop->cast_to_exactness(exact)->is_ptr();
    assert(foop == flatten_alias_type(xoop), "exactness must not affect alias type");
  }
  assert(flat == flatten_alias_type(flat), "exact bit doesn't matter");
#endif

  int idx = AliasIdxTop;
  for (int i = 0; i < num_alias_types(); i++) {
    if (alias_type(i)->adr_type() == flat) {
      idx = i;
      break;
    }
  }

  if (idx == AliasIdxTop) {
    if (no_create)  return NULL;
    // Grow the array if necessary.
    if (_num_alias_types == _max_alias_types)  grow_alias_types();
    // Add a new alias type.
    idx = _num_alias_types++;
    _alias_types[idx]->Init(idx, flat);
    if (flat == TypeInstPtr::KLASS)  alias_type(idx)->set_rewritable(false);
    if (flat == TypeAryPtr::RANGE)   alias_type(idx)->set_rewritable(false);
    if (flat->isa_instptr()) {
      if (flat->offset() == java_lang_Class::klass_offset_in_bytes()
          && flat->is_instptr()->klass() == env()->Class_klass())
        alias_type(idx)->set_rewritable(false);
    }
    if (flat->isa_aryptr()) {
#ifdef ASSERT
      const int header_size_min  = arrayOopDesc::base_offset_in_bytes(T_BYTE);
      // (T_BYTE has the weakest alignment and size restrictions...)
      assert(flat->offset() < header_size_min, "array body reference must be OffsetBot");
#endif
      if (flat->offset() == TypePtr::OffsetBot) {
        alias_type(idx)->set_element(flat->is_aryptr()->elem());
      }
    }
    if (flat->isa_klassptr()) {
      if (flat->offset() == in_bytes(Klass::super_check_offset_offset()))
        alias_type(idx)->set_rewritable(false);
      if (flat->offset() == in_bytes(Klass::modifier_flags_offset()))
        alias_type(idx)->set_rewritable(false);
      if (flat->offset() == in_bytes(Klass::access_flags_offset()))
        alias_type(idx)->set_rewritable(false);
      if (flat->offset() == in_bytes(Klass::java_mirror_offset()))
        alias_type(idx)->set_rewritable(false);
    }
    // %%% (We would like to finalize JavaThread::threadObj_offset(),
    // but the base pointer type is not distinctive enough to identify
    // references into JavaThread.)

    // Check for final fields.
    const TypeInstPtr* tinst = flat->isa_instptr();
    if (tinst && tinst->offset() >= instanceOopDesc::base_offset_in_bytes()) {
      ciField* field;
      if (tinst->const_oop() != NULL &&
          tinst->klass() == ciEnv::current()->Class_klass() &&
          tinst->offset() >= (tinst->klass()->as_instance_klass()->size_helper() * wordSize)) {
        // static field
        ciInstanceKlass* k = tinst->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass();
        field = k->get_field_by_offset(tinst->offset(), true);
      } else {
        ciInstanceKlass *k = tinst->klass()->as_instance_klass();
        field = k->get_field_by_offset(tinst->offset(), false);
      }
      assert(field == NULL ||
             original_field == NULL ||
             (field->holder() == original_field->holder() &&
              field->offset() == original_field->offset() &&
              field->is_static() == original_field->is_static()), "wrong field?");
      // Set field() and is_rewritable() attributes.
      if (field != NULL)  alias_type(idx)->set_field(field);
    }
  }

  // Fill the cache for next time.
  ace->_adr_type = adr_type;
  ace->_index    = idx;
  assert(alias_type(adr_type) == alias_type(idx),  "type must be installed");

  // Might as well try to fill the cache for the flattened version, too.
  AliasCacheEntry* face = probe_alias_cache(flat);
  if (face->_adr_type == NULL) {
    face->_adr_type = flat;
    face->_index    = idx;
    assert(alias_type(flat) == alias_type(idx), "flat type must work too");
  }

  return alias_type(idx);
}


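// Build the alias class for a ciField: static fields live in the holder's
// java.lang.Class mirror, instance fields in the holder's oop layout.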
Compile::AliasType* Compile::alias_type(ciField* field) {
  const TypeOopPtr* t;
  if (field->is_static())
    t = TypeInstPtr::make(field->holder()->java_mirror());
  else
    t = TypeOopPtr::make_from_klass_raw(field->holder());
  AliasType* atp = alias_type(t->add_offset(field->offset_in_bytes()), field);
  assert((field->is_final() || field->is_stable()) == !atp->is_rewritable(), "must get the rewritable bits correct");
  return atp;
}


//------------------------------have_alias_type--------------------------------
bool Compile::have_alias_type(const TypePtr* adr_type) {
  AliasCacheEntry* ace = probe_alias_cache(adr_type);
  if (ace->_adr_type == adr_type) {
    return true;
  }

  // Handle special cases.
  if (adr_type == NULL)             return true;
  if (adr_type == TypePtr::BOTTOM)  return true;

  return find_alias_type(adr_type, true, NULL) != NULL;
}

//-----------------------------must_alias--------------------------------------
// True if all values of the given address type are in the given alias category.
bool Compile::must_alias(const TypePtr* adr_type, int alias_idx) {
  if (alias_idx == AliasIdxBot)         return true;  // the universal category
  if (adr_type == NULL)                 return true;  // NULL serves as TypePtr::TOP
  if (alias_idx == AliasIdxTop)         return false; // the empty category
  if (adr_type->base() == Type::AnyPtr) return false; // TypePtr::BOTTOM or its twins

  // the only remaining possible overlap is identity
  int adr_idx = get_alias_index(adr_type);
  assert(adr_idx != AliasIdxBot && adr_idx != AliasIdxTop, "");
  assert(adr_idx == alias_idx ||
         (alias_type(alias_idx)->adr_type() != TypeOopPtr::BOTTOM
          && adr_type                       != TypeOopPtr::BOTTOM),
         "should not be testing for overlap with an unsafe pointer");
  return adr_idx == alias_idx;
}

//------------------------------can_alias--------------------------------------
// True if any values of the given address type are in the given alias category.
bool Compile::can_alias(const TypePtr* adr_type, int alias_idx) {
  if (alias_idx == AliasIdxTop)         return false; // the empty category
  if (adr_type == NULL)                 return false; // NULL serves as TypePtr::TOP
  if (alias_idx == AliasIdxBot)         return true;  // the universal category
  if (adr_type->base() == Type::AnyPtr) return true;  // TypePtr::BOTTOM or its twins

  // the only remaining possible overlap is identity
  int adr_idx = get_alias_index(adr_type);
  assert(adr_idx != AliasIdxBot && adr_idx != AliasIdxTop, "");
  return adr_idx == alias_idx;
}



//---------------------------pop_warm_call-------------------------------------
WarmCallInfo* Compile::pop_warm_call() {
  WarmCallInfo* wci = _warm_calls;
  if (wci != NULL)  _warm_calls = wci->remove_from(wci);
  return wci;
}

//----------------------------Inline_Warm--------------------------------------
int Compile::Inline_Warm() {
  // If there is room, try to inline some more warm call sites.
  // %%% Do a graph index compaction pass when we think we're out of space?
  if (!InlineWarmCalls)  return 0;

  int calls_made_hot = 0;
  int room_to_grow   = NodeCountInliningCutoff - unique();
  int amount_to_grow = MIN2(room_to_grow, (int)NodeCountInliningStep);
  int amount_grown   = 0;
  WarmCallInfo* call;
  while (amount_to_grow > 0 && (call = pop_warm_call()) != NULL) {
    int est_size = (int)call->size();
    if (est_size > (room_to_grow - amount_grown)) {
      // This one won't fit anyway.  Get rid of it.
      call->make_cold();
      continue;
    }
    call->make_hot();
    calls_made_hot++;
    amount_grown   += est_size;
    amount_to_grow -= est_size;
  }

  if (calls_made_hot > 0)  set_major_progress();
  return calls_made_hot;
}


//----------------------------Finish_Warm--------------------------------------
void Compile::Finish_Warm() {
  if (!InlineWarmCalls)  return;
  if (failing())  return;
  if (warm_calls() == NULL)  return;

  // Clean up loose ends, if we are out of space for inlining.
  WarmCallInfo* call;
  while ((call = pop_warm_call()) != NULL) {
    call->make_cold();
  }
}

//---------------------cleanup_loop_predicates-----------------------
// Remove the opaque nodes that protect the predicates so that all unused
// checks and uncommon_traps will be eliminated from the ideal graph
void Compile::cleanup_loop_predicates(PhaseIterGVN &igvn) {
  if (predicate_count()==0) return;
  for (int i = predicate_count(); i > 0; i--) {
    Node * n = predicate_opaque1_node(i-1);
    assert(n->Opcode() == Op_Opaque1, "must be");
    igvn.replace_node(n, n->in(1));
  }
  assert(predicate_count()==0, "should be clean!");
}

// StringOpts and late inlining of string methods
void Compile::inline_string_calls(bool parse_time) {
  {
    // remove useless nodes to make the usage analysis simpler
    ResourceMark rm;
    PhaseRemoveUseless pru(initial_gvn(), for_igvn());
  }

  {
    ResourceMark rm;
    print_method(PHASE_BEFORE_STRINGOPTS, 3);
    PhaseStringOpts pso(initial_gvn(), for_igvn());
    print_method(PHASE_AFTER_STRINGOPTS, 3);
  }

  // now inline anything that we skipped the first time around
  if (!parse_time) {
    _late_inlines_pos = _late_inlines.length();
  }

  while (_string_late_inlines.length() > 0) {
    CallGenerator* cg = _string_late_inlines.pop();
    cg->do_late_inline();
    if (failing())  return;
  }
  _string_late_inlines.trunc_to(0);
}

// Late inlining of boxing methods
void Compile::inline_boxing_calls(PhaseIterGVN& igvn) {
  if (_boxing_late_inlines.length() > 0) {
    assert(has_boxed_value(), "inconsistent");

    PhaseGVN* gvn = initial_gvn();
    set_inlining_incrementally(true);

    assert( igvn._worklist.size() == 0, "should be done with igvn" );
    for_igvn()->clear();
    gvn->replace_with(&igvn);

    _late_inlines_pos = _late_inlines.length();

    while (_boxing_late_inlines.length() > 0) {
      CallGenerator* cg = _boxing_late_inlines.pop();
      cg->do_late_inline();
      if (failing())  return;
    }
    _boxing_late_inlines.trunc_to(0);

    {
      ResourceMark rm;
      PhaseRemoveUseless pru(gvn, for_igvn());
    }

    igvn = PhaseIterGVN(gvn);
    igvn.optimize();

    set_inlining_progress(false);
    set_inlining_incrementally(false);
  }
}

void Compile::inline_incrementally_one(PhaseIterGVN& igvn) {
  assert(IncrementalInline, "incremental inlining should be on");
  PhaseGVN* gvn = initial_gvn();

  set_inlining_progress(false);
  for_igvn()->clear();
  gvn->replace_with(&igvn);

  int i = 0;

  for (; i <_late_inlines.length() && !inlining_progress(); i++) {
    CallGenerator* cg = _late_inlines.at(i);
    _late_inlines_pos = i+1;
    cg->do_late_inline();
    if (failing())  return;
  }
  int j = 0;
  for (; i < _late_inlines.length(); i++, j++) {
    _late_inlines.at_put(j, _late_inlines.at(i));
  }
  _late_inlines.trunc_to(j);

  {
    ResourceMark rm;
    PhaseRemoveUseless pru(gvn, for_igvn());
  }

  igvn = PhaseIterGVN(gvn);
}

// Perform incremental inlining until bound on number of live nodes is reached
void Compile::inline_incrementally(PhaseIterGVN& igvn) {
  PhaseGVN* gvn = initial_gvn();

  set_inlining_incrementally(true);
  set_inlining_progress(true);
  uint low_live_nodes = 0;

  while(inlining_progress() && _late_inlines.length() > 0) {

    if (live_nodes() > (uint)LiveNodeCountInliningCutoff) {
      if (low_live_nodes < (uint)LiveNodeCountInliningCutoff * 8 / 10) {
        // PhaseIdealLoop is expensive so we only try it once we are
        // out of live nodes and we only try it again if the previous
        // pass helped get the number of nodes down significantly
        PhaseIdealLoop ideal_loop( igvn, false, true );
        if (failing())  return;
        low_live_nodes = live_nodes();
        _major_progress = true;
      }

      if (live_nodes() > (uint)LiveNodeCountInliningCutoff) {
        break;
      }
    }

    inline_incrementally_one(igvn);

    if (failing())  return;

    igvn.optimize();

    if (failing())  return;
  }

  assert( igvn._worklist.size() == 0, "should be done with igvn" );

  if (_string_late_inlines.length() > 0) {
    assert(has_stringbuilder(), "inconsistent");
    for_igvn()->clear();
    initial_gvn()->replace_with(&igvn);

    inline_string_calls(false);

    if (failing())  return;

    {
      ResourceMark rm;
      PhaseRemoveUseless pru(initial_gvn(), for_igvn());
    }

    igvn = PhaseIterGVN(gvn);

    igvn.optimize();
  }

  set_inlining_incrementally(false);
}


//------------------------------Optimize---------------------------------------
// Given a graph, optimize it.
void Compile::Optimize() {
  TracePhase t1("optimizer", &_t_optimizer, true);

#ifndef PRODUCT
  if (env()->break_at_compile()) {
    BREAKPOINT;
  }

#endif

  ResourceMark rm;
  int          loop_opts_cnt;

  NOT_PRODUCT( verify_graph_edges(); )

  print_method(PHASE_AFTER_PARSING);

 {
  // Iterative Global Value Numbering, including ideal transforms
  // Initialize IterGVN with types and values from parse-time GVN
  PhaseIterGVN igvn(initial_gvn());
  {
    NOT_PRODUCT( TracePhase t2("iterGVN", &_t_iterGVN, TimeCompiler); )
    igvn.optimize();
  }

  print_method(PHASE_ITER_GVN1, 2);

  if (failing())  return;

  {
    NOT_PRODUCT( TracePhase t2("incrementalInline", &_t_incrInline, TimeCompiler); )
    inline_incrementally(igvn);
  }

  print_method(PHASE_INCREMENTAL_INLINE, 2);

  if (failing())  return;

  if (eliminate_boxing()) {
    NOT_PRODUCT( TracePhase t2("incrementalInline", &_t_incrInline, TimeCompiler); )
    // Inline valueOf() methods now.
    inline_boxing_calls(igvn);

    if (AlwaysIncrementalInline) {
      inline_incrementally(igvn);
    }

    print_method(PHASE_INCREMENTAL_BOXING_INLINE, 2);

    if (failing())  return;
  }

  // Remove the speculative part of types and clean up the graph from
  // the extra CastPP nodes whose only purpose is to carry them. Do
  // that early so that optimizations are not disrupted by the extra
  // CastPP nodes.
  remove_speculative_types(igvn);

  // No more new expensive nodes will be added to the list from here
  // so keep only the actual candidates for optimizations.
  cleanup_expensive_nodes(igvn);

  if (!failing() && RenumberLiveNodes && live_nodes() + NodeLimitFudgeFactor < unique()) {
    NOT_PRODUCT(Compile::TracePhase t2("", &_t_renumberLive, TimeCompiler);)
    initial_gvn()->replace_with(&igvn);
    for_igvn()->clear();
    Unique_Node_List new_worklist(C->comp_arena());
    {
      ResourceMark rm;
      PhaseRenumberLive prl = PhaseRenumberLive(initial_gvn(), for_igvn(), &new_worklist);
    }
    set_for_igvn(&new_worklist);
    igvn = PhaseIterGVN(initial_gvn());
    igvn.optimize();
  }

  // Perform escape analysis
  if (_do_escape_analysis && ConnectionGraph::has_candidates(this)) {
    if (has_loops()) {
      // Cleanup graph (remove dead nodes).
      TracePhase t2("idealLoop", &_t_idealLoop, true);
      PhaseIdealLoop ideal_loop( igvn, false, true );
      if (major_progress()) print_method(PHASE_PHASEIDEAL_BEFORE_EA, 2);
      if (failing())  return;
    }
    ConnectionGraph::do_analysis(this, &igvn);

    if (failing())  return;

    // Optimize out fields loads from scalar replaceable allocations.
    igvn.optimize();
    print_method(PHASE_ITER_GVN_AFTER_EA, 2);

    if (failing())  return;

    if (congraph() != NULL && macro_count() > 0) {
      NOT_PRODUCT( TracePhase t2("macroEliminate", &_t_macroEliminate, TimeCompiler); )
      PhaseMacroExpand mexp(igvn);
      mexp.eliminate_macro_nodes();
      igvn.set_delay_transform(false);

      igvn.optimize();
      print_method(PHASE_ITER_GVN_AFTER_ELIMINATION, 2);

      if (failing())  return;
    }
  }

  // Loop transforms on the ideal graph.  Range Check Elimination,
  // peeling, unrolling, etc.

  // Set loop opts counter
  loop_opts_cnt = num_loop_opts();
  if((loop_opts_cnt > 0) && (has_loops() || has_split_ifs())) {
    {
      TracePhase t2("idealLoop", &_t_idealLoop, true);
      PhaseIdealLoop ideal_loop( igvn, true );
      loop_opts_cnt--;
      if (major_progress()) print_method(PHASE_PHASEIDEALLOOP1, 2);
      if (failing())  return;
    }
    // Loop opts pass if partial peeling occurred in previous pass
    if(PartialPeelLoop && major_progress() && (loop_opts_cnt > 0)) {
      TracePhase t3("idealLoop", &_t_idealLoop, true);
      PhaseIdealLoop ideal_loop( igvn, false );
      loop_opts_cnt--;
      if (major_progress()) print_method(PHASE_PHASEIDEALLOOP2, 2);
      if (failing())  return;
    }
    // Loop opts pass for loop-unrolling before CCP
    if(major_progress() && (loop_opts_cnt > 0)) {
      TracePhase t4("idealLoop", &_t_idealLoop, true);
      PhaseIdealLoop ideal_loop( igvn, false );
      loop_opts_cnt--;
      if (major_progress()) print_method(PHASE_PHASEIDEALLOOP3, 2);
    }
    if (!failing()) {
      // Verify that last round of loop opts produced a valid graph
      NOT_PRODUCT( TracePhase t2("idealLoopVerify", &_t_idealLoopVerify, TimeCompiler); )
      PhaseIdealLoop::verify(igvn);
    }
  }
  if (failing())  return;

  // Conditional Constant Propagation;
  PhaseCCP ccp( &igvn );
  assert( true, "Break here to ccp.dump_nodes_and_types(_root,999,1)");
  {
    TracePhase t2("ccp", &_t_ccp, true);
    ccp.do_transform();
  }
  print_method(PHASE_CPP1, 2);

  assert( true, "Break here to ccp.dump_old2new_map()");

  // Iterative Global Value Numbering, including ideal transforms
  {
    NOT_PRODUCT( TracePhase t2("iterGVN2", &_t_iterGVN2, TimeCompiler); )
    igvn = ccp;
    igvn.optimize();
  }

  print_method(PHASE_ITER_GVN2, 2);

  if (failing())  return;

  // Loop transforms on the ideal graph.  Range Check Elimination,
  // peeling, unrolling, etc.
  if(loop_opts_cnt > 0) {
    debug_only( int cnt = 0; );
    while(major_progress() && (loop_opts_cnt > 0)) {
      TracePhase t2("idealLoop", &_t_idealLoop, true);
      assert( cnt++ < 40, "infinite cycle in loop optimization" );
      PhaseIdealLoop ideal_loop( igvn, true);
      loop_opts_cnt--;
      if (major_progress()) print_method(PHASE_PHASEIDEALLOOP_ITERATIONS, 2);
      if (failing())  return;
    }
  }

  {
    // Verify that all previous optimizations produced a valid graph
    // at least to this point, even if no loop optimizations were done.
    NOT_PRODUCT( TracePhase t2("idealLoopVerify", &_t_idealLoopVerify, TimeCompiler); )
    PhaseIdealLoop::verify(igvn);
  }

  {
    NOT_PRODUCT( TracePhase t2("macroExpand", &_t_macroExpand, TimeCompiler); )
    PhaseMacroExpand  mex(igvn);
    if (mex.expand_macro_nodes()) {
      assert(failing(), "must bail out w/ explicit message");
      return;
    }
  }

 } // (End scope of igvn; run destructor if necessary for asserts.)

  dump_inlining();
  // A method with only infinite loops has no edges entering loops from root
  {
    NOT_PRODUCT( TracePhase t2("graphReshape", &_t_graphReshaping, TimeCompiler); )
    if (final_graph_reshaping()) {
      assert(failing(), "must bail out w/ explicit message");
      return;
    }
  }

  print_method(PHASE_OPTIMIZE_FINISHED, 2);
}


//------------------------------Code_Gen---------------------------------------
// Given a graph, generate code for it
void Compile::Code_Gen() {
  if (failing()) {
    return;
  }

  // Perform instruction selection.  You might think we could reclaim Matcher
  // memory PDQ, but actually the Matcher is used in generating spill code.
  // Internals of the Matcher (including some VectorSets) must remain live
  // for a while - thus I cannot reclaim Matcher memory lest a VectorSet usage
  // set a bit in reclaimed memory.

  // In debug mode can dump m._nodes.dump() for mapping of ideal to machine
  // nodes.  Mapping is only valid at the root of each matched subtree.
  NOT_PRODUCT( verify_graph_edges(); )

  Matcher matcher;
  _matcher = &matcher;
  {
    TracePhase t2("matcher", &_t_matcher, true);
    matcher.match();
  }
  // In debug mode can dump m._nodes.dump() for mapping of ideal to machine
  // nodes.  Mapping is only valid at the root of each matched subtree.
  NOT_PRODUCT( verify_graph_edges(); )

  // If you have too many nodes, or if matching has failed, bail out
  check_node_count(0, "out of nodes matching instructions");
  if (failing()) {
    return;
  }

  // Build a proper-looking CFG
  PhaseCFG cfg(node_arena(), root(), matcher);
  _cfg = &cfg;
  {
    NOT_PRODUCT( TracePhase t2("scheduler", &_t_scheduler, TimeCompiler); )
    bool success = cfg.do_global_code_motion();
    if (!success) {
      return;
    }

    print_method(PHASE_GLOBAL_CODE_MOTION, 2);
    NOT_PRODUCT( verify_graph_edges(); )
    debug_only( cfg.verify(); )
  }

  PhaseChaitin regalloc(unique(), cfg, matcher);
  _regalloc = &regalloc;
  {
    TracePhase t2("regalloc", &_t_registerAllocation, true);
    // Perform register allocation.  After Chaitin, use-def chains are
    // no longer accurate (at spill code) and so must be ignored.
    // Node->LRG->reg mappings are still accurate.
    _regalloc->Register_Allocate();

    // Bail out if the allocator builds too many nodes
    if (failing()) {
      return;
    }
  }

  // Prior to register allocation we kept empty basic blocks in case the
  // allocator needed a place to spill.  After register allocation we
  // are not adding any new instructions.  If any basic block is empty, we
  // can now safely remove it.
  {
    NOT_PRODUCT( TracePhase t2("blockOrdering", &_t_blockOrdering, TimeCompiler); )
    cfg.remove_empty_blocks();
    if (do_freq_based_layout()) {
      PhaseBlockLayout layout(cfg);
    } else {
      cfg.set_loop_alignment();
    }
    cfg.fixup_flow();
  }

  // Apply peephole optimizations
  if( OptoPeephole ) {
    NOT_PRODUCT( TracePhase t2("peephole", &_t_peephole, TimeCompiler); )
    PhasePeephole peep( _regalloc, cfg);
    peep.do_transform();
  }

  // Do late expand if CPU requires this.
  if (Matcher::require_postalloc_expand) {
    NOT_PRODUCT(TracePhase t2c("postalloc_expand", &_t_postalloc_expand, true));
    cfg.postalloc_expand(_regalloc);
  }

  // Convert Nodes to instruction bits in a buffer
  {
    // %%%% workspace merge brought two timers together for one job
    TracePhase t2a("output", &_t_output, true);
    NOT_PRODUCT( TraceTime t2b(NULL, &_t_codeGeneration, TimeCompiler, false); )
    Output();
  }

  print_method(PHASE_FINAL_CODE);

  // He's dead, Jim.
  _cfg     = (PhaseCFG*)0xdeadbeef;
  _regalloc = (PhaseChaitin*)0xdeadbeef;
}


//------------------------------dump_asm---------------------------------------
// Dump formatted assembly
#ifndef PRODUCT
void Compile::dump_asm(int *pcs, uint pc_limit) {
  bool cut_short = false;
  tty->print_cr("#");
  tty->print("#  ");  _tf->dump();  tty->cr();
  tty->print_cr("#");

  // For all blocks
  int pc = 0x0;                 // Program counter
  char starts_bundle = ' ';
  _regalloc->dump_frame();

  Node *n = NULL;
  for (uint i = 0; i < _cfg->number_of_blocks(); i++) {
    if (VMThread::should_terminate()) {
      cut_short = true;
      break;
    }
    Block* block = _cfg->get_block(i);
    if (block->is_connector() && !Verbose) {
      continue;
    }
    n = block->head();
    if (pcs && n->_idx < pc_limit) {
      tty->print("%3.3x   ", pcs[n->_idx]);
    } else {
      tty->print("      ");
    }
    block->dump_head(_cfg);
    if (block->is_connector()) {
      tty->print_cr("        # Empty connector block");
    } else if (block->num_preds() == 2 && block->pred(1)->is_CatchProj() && block->pred(1)->as_CatchProj()->_con == CatchProjNode::fall_through_index) {
      tty->print_cr("        # Block is sole successor of call");
    }

    // For all instructions
    Node *delay = NULL;
    for (uint j = 0; j < block->number_of_nodes(); j++) {
      if (VMThread::should_terminate()) {
        cut_short = true;
        break;
      }
      n = block->get_node(j);
      if (valid_bundle_info(n)) {
        Bundle* bundle = node_bundling(n);
        if (bundle->used_in_unconditional_delay()) {
          delay = n;
          continue;
        }
        if (bundle->starts_bundle()) {
          starts_bundle = '+';
        }
      }

      if (WizardMode) {
        n->dump();
      }

      if( !n->is_Region() &&    // Don't print in the Assembly
          !n->is_Phi() &&       // a few noisily useless nodes
          !n->is_Proj() &&
          !n->is_MachTemp() &&
          !n->is_SafePointScalarObject() &&
          !n->is_Catch() &&     // Would be nice to print exception table targets
          !n->is_MergeMem() &&  // Not very interesting
          !n->is_top() &&       // Debug info table constants
          !(n->is_Con() && !n->is_Mach())// Debug info table constants
          ) {
        if (pcs && n->_idx < pc_limit)
          tty->print("%3.3x", pcs[n->_idx]);
        else
          tty->print("   ");
        tty->print(" %c ", starts_bundle);
        starts_bundle = ' ';
        tty->print("\t");
        n->format(_regalloc, tty);
        tty->cr();
      }

      // If we have an instruction with a delay slot, and have seen a delay,
      // then back up and print it
      if (valid_bundle_info(n) && node_bundling(n)->use_unconditional_delay()) {
        assert(delay != NULL, "no unconditional delay instruction");
        if (WizardMode) delay->dump();

        if (node_bundling(delay)->starts_bundle())
          starts_bundle = '+';
        if (pcs && n->_idx < pc_limit)
          tty->print("%3.3x", pcs[n->_idx]);
        else
          tty->print("   ");
        tty->print(" %c ", starts_bundle);
        starts_bundle = ' ';
        tty->print("\t");
        delay->format(_regalloc, tty);
        tty->cr();
        delay = NULL;
      }

      // Dump the exception table as well
      if( n->is_Catch() && (Verbose || WizardMode) ) {
        // Print the exception table for this offset
        _handler_table.print_subtable_for(pc);
      }
    }

    if (pcs && n->_idx < pc_limit)
      tty->print_cr("%3.3x", pcs[n->_idx]);
    else
      tty->cr();

    assert(cut_short || delay == NULL, "no unconditional delay branch");

  } // End of per-block dump
  tty->cr();

  if (cut_short)  tty->print_cr("*** disassembly is cut short ***");
}
#endif

//------------------------------Final_Reshape_Counts---------------------------
// This class defines counters to help identify when a method
// may/must be executed using hardware with only 24-bit precision.
struct Final_Reshape_Counts : public StackObj {
  int  _call_count;             // count non-inlined 'common' calls
  int  _float_count;            // count float ops requiring 24-bit precision
  int  _double_count;           // count double ops requiring more precision
  int  _java_call_count;        // count non-inlined 'java' calls
  int  _inner_loop_count;       // count loops which need alignment
  VectorSet _visited;           // Visitation flags
  Node_List _tests;             // Set of IfNodes & PCTableNodes

  Final_Reshape_Counts() :
    _call_count(0), _float_count(0), _double_count(0),
    _java_call_count(0), _inner_loop_count(0),
    _visited( Thread::current()->resource_area() ) { }

  void inc_call_count  () { _call_count  ++; }
  void inc_float_count () { _float_count ++; }
  void inc_double_count() { _double_count++; }
  void inc_java_call_count() { _java_call_count++; }
  void inc_inner_loop_count() { _inner_loop_count++; }

  int  get_call_count  () const { return _call_count  ; }
  int  get_float_count () const { return _float_count ; }
  int  get_double_count() const { return _double_count; }
  int  get_java_call_count() const { return _java_call_count; }
  int  get_inner_loop_count() const { return _inner_loop_count; }
};

#ifdef ASSERT
static bool oop_offset_is_sane(const TypeInstPtr* tp) {
  ciInstanceKlass *k = tp->klass()->as_instance_klass();
  // Make sure the offset goes inside the instance layout.
  return k->contains_field_offset(tp->offset());
  // Note that OffsetBot and OffsetTop are very negative.
}
#endif

// Eliminate trivially redundant StoreCMs and accumulate their
// precedence edges.
void Compile::eliminate_redundant_card_marks(Node* n) {
  assert(n->Opcode() == Op_StoreCM, "expected StoreCM");
  if (n->in(MemNode::Address)->outcnt() > 1) {
    // There are multiple users of the same address so it might be
    // possible to eliminate some of the StoreCMs
    Node* mem = n->in(MemNode::Memory);
    Node* adr = n->in(MemNode::Address);
    Node* val = n->in(MemNode::ValueIn);
    Node* prev = n;
    bool done = false;
    // Walk the chain of StoreCMs eliminating ones that match.  As
    // long as it's a chain of single users then the optimization is
    // safe.  Eliminating partially redundant StoreCMs would require
    // cloning copies down the other paths.
    while (mem->Opcode() == Op_StoreCM && mem->outcnt() == 1 && !done) {
      if (adr == mem->in(MemNode::Address) &&
          val == mem->in(MemNode::ValueIn)) {
        // redundant StoreCM
        if (mem->req() > MemNode::OopStore) {
          // Hasn't been processed by this code yet.
          n->add_prec(mem->in(MemNode::OopStore));
        } else {
          // Already converted to precedence edge
          for (uint i = mem->req(); i < mem->len(); i++) {
            // Accumulate any precedence edges
            if (mem->in(i) != NULL) {
              n->add_prec(mem->in(i));
            }
          }
          // Everything above this point has been processed.
          done = true;
        }
        // Eliminate the previous StoreCM
        prev->set_req(MemNode::Memory, mem->in(MemNode::Memory));
        assert(mem->outcnt() == 0, "should be dead");
        mem->disconnect_inputs(NULL, this);
      } else {
        prev = mem;
      }
      mem = prev->in(MemNode::Memory);
    }
  }
}

//------------------------------final_graph_reshaping_impl----------------------
// Implement items 1-5 from final_graph_reshaping below.
void Compile::final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc) {

  if ( n->outcnt() == 0 ) return; // dead node
  uint nop = n->Opcode();

  // Check for 2-input instruction with "last use" on right input.
  // Swap to left input.  Implements item (2).
  if( n->req() == 3 &&          // two-input instruction
      n->in(1)->outcnt() > 1 && // left use is NOT a last use
      (!n->in(1)->is_Phi() || n->in(1)->in(2) != n) && // it is not data loop
      n->in(2)->outcnt() == 1 &&// right use IS a last use
      !n->in(2)->is_Con() ) {   // right use is not a constant
    // Check for commutative opcode
    switch( nop ) {
    case Op_AddI:  case Op_AddF:  case Op_AddD:  case Op_AddL:
    case Op_MaxI:  case Op_MinI:
    case Op_MulI:  case Op_MulF:  case Op_MulD:  case Op_MulL:
    case Op_AndL:  case Op_XorL:  case Op_OrL:
    case Op_AndI:  case Op_XorI:  case Op_OrI: {
      // Move "last use" input to left by swapping inputs
      n->swap_edges(1, 2);
      break;
    }
    default:
      break;
    }
  }

#ifdef ASSERT
  if( n->is_Mem() ) {
    int alias_idx = get_alias_index(n->as_Mem()->adr_type());
    assert( n->in(0) != NULL || alias_idx != Compile::AliasIdxRaw ||
            // oop will be recorded in oop map if load crosses safepoint
            n->is_Load() && (n->as_Load()->bottom_type()->isa_oopptr() ||
                             LoadNode::is_immutable_value(n->in(MemNode::Address))),
            "raw memory operations should have control edge");
  }
#endif
  // Count FPU ops and common calls, implements item (3)
  switch( nop ) {
  // Count all float operations that may use FPU
  case Op_AddF:
  case Op_SubF:
  case Op_MulF:
  case Op_DivF:
  case Op_NegF:
  case Op_ModF:
  case Op_ConvI2F:
  case Op_ConF:
  case Op_CmpF:
  case Op_CmpF3:
  // case Op_ConvL2F: // longs are split into 32-bit halves
    frc.inc_float_count();
    break;

  case Op_ConvF2D:
  case Op_ConvD2F:
    frc.inc_float_count();
    frc.inc_double_count();
    break;

  // Count all double operations that may use FPU
  case Op_AddD:
  case Op_SubD:
  case Op_MulD:
  case Op_DivD:
  case Op_NegD:
  case Op_ModD:
  case Op_ConvI2D:
  case Op_ConvD2I:
  // case Op_ConvL2D: // handled by leaf call
  // case Op_ConvD2L: // handled by leaf call
  case Op_ConD:
  case Op_CmpD:
  case Op_CmpD3:
    frc.inc_double_count();
    break;
  case Op_Opaque1:              // Remove Opaque Nodes before matching
  case Op_Opaque2:              // Remove Opaque Nodes before matching
  case Op_Opaque3:
    n->subsume_by(n->in(1), this);
    break;
  case Op_CallStaticJava:
  case Op_CallJava:
  case Op_CallDynamicJava:
    frc.inc_java_call_count(); // Count java call site;
  case Op_CallRuntime:
  case Op_CallLeaf:
  case Op_CallLeafNoFP: {
    assert( n->is_Call(), "" );
    CallNode *call = n->as_Call();
    // Count call sites where the FP mode bit would have to be flipped.
    // Do not count uncommon runtime calls:
    // uncommon_trap, _complete_monitor_locking, _complete_monitor_unlocking,
    // _new_Java, _new_typeArray, _new_objArray, _rethrow_Java, ...
    if( !call->is_CallStaticJava() || !call->as_CallStaticJava()->_name ) {
      frc.inc_call_count();   // Count the call site
    } else {                  // See if uncommon argument is shared
      Node *n = call->in(TypeFunc::Parms);
      int nop = n->Opcode();
      // Clone shared simple arguments to uncommon calls, item (1).
      if( n->outcnt() > 1 &&
          !n->is_Proj() &&
          nop != Op_CreateEx &&
          nop != Op_CheckCastPP &&
          nop != Op_DecodeN &&
          nop != Op_DecodeNKlass &&
          !n->is_Mem() ) {
        Node *x = n->clone();
        call->set_req( TypeFunc::Parms, x );
      }
    }
    break;
  }

  case Op_StoreD:
  case Op_LoadD:
  case Op_LoadD_unaligned:
    frc.inc_double_count();
    goto handle_mem;
  case Op_StoreF:
  case Op_LoadF:
    frc.inc_float_count();
    goto handle_mem;

  case Op_StoreCM:
    {
      // Convert OopStore dependence into precedence edge
      Node* prec = n->in(MemNode::OopStore);
      n->del_req(MemNode::OopStore);
      n->add_prec(prec);
      eliminate_redundant_card_marks(n);
    }

    // fall through

  case Op_StoreB:
  case Op_StoreC:
  case Op_StorePConditional:
  case Op_StoreI:
  case Op_StoreL:
  case Op_StoreIConditional:
  case Op_StoreLConditional:
  case Op_CompareAndSwapI:
  case Op_CompareAndSwapL:
  case Op_CompareAndSwapP:
  case Op_CompareAndSwapN:
  case Op_GetAndAddI:
  case Op_GetAndAddL:
  case Op_GetAndSetI:
  case Op_GetAndSetL:
  case Op_GetAndSetP:
  case Op_GetAndSetN:
  case Op_StoreP:
  case Op_StoreN:
  case Op_StoreNKlass:
  case Op_LoadB:
  case Op_LoadUB:
  case Op_LoadUS:
  case Op_LoadI:
  case Op_LoadKlass:
  case Op_LoadNKlass:
  case Op_LoadL:
  case Op_LoadL_unaligned:
  case Op_LoadPLocked:
  case Op_LoadP:
  case Op_LoadN:
  case Op_LoadRange:
  case Op_LoadS: {
  handle_mem:
#ifdef ASSERT
    if( VerifyOptoOopOffsets ) {
      assert( n->is_Mem(), "" );
      MemNode *mem  = (MemNode*)n;
      // Check to see if address types have grounded out somehow.
      const TypeInstPtr *tp = mem->in(MemNode::Address)->bottom_type()->isa_instptr();
      assert( !tp || oop_offset_is_sane(tp), "" );
    }
#endif
    break;
  }

  case Op_AddP: {               // Assert sane base pointers
    Node *addp = n->in(AddPNode::Address);
    assert( !addp->is_AddP() ||
            addp->in(AddPNode::Base)->is_top() || // Top OK for allocation
            addp->in(AddPNode::Base) == n->in(AddPNode::Base),
            "Base pointers must match" );
#ifdef _LP64
    if ((UseCompressedOops || UseCompressedClassPointers) &&
        addp->Opcode() == Op_ConP &&
        addp == n->in(AddPNode::Base) &&
        n->in(AddPNode::Offset)->is_Con()) {
      // Use addressing with narrow klass to load with offset on x86.
      // On sparc, loading a 32-bit constant and decoding it takes fewer
      // instructions (4) than loading a 64-bit constant (7).
      // Do this transformation here since IGVN will convert ConN back to ConP.
      const Type* t = addp->bottom_type();
      if (t->isa_oopptr() || t->isa_klassptr()) {
        Node* nn = NULL;

        int op = t->isa_oopptr() ? Op_ConN : Op_ConNKlass;

        // Look for existing ConN node of the same exact type.
        Node* r  = root();
        uint cnt = r->outcnt();
        for (uint i = 0; i < cnt; i++) {
          Node* m = r->raw_out(i);
          if (m!= NULL && m->Opcode() == op &&
              m->bottom_type()->make_ptr() == t) {
            nn = m;
            break;
          }
        }
        if (nn != NULL) {
          // Decode a narrow oop to match address
          // [R12 + narrow_oop_reg<<3 + offset]
          if (t->isa_oopptr()) {
            nn = new (this) DecodeNNode(nn, t);
          } else {
            nn = new (this) DecodeNKlassNode(nn, t);
          }
          n->set_req(AddPNode::Base, nn);
          n->set_req(AddPNode::Address, nn);
          if (addp->outcnt() == 0) {
            addp->disconnect_inputs(NULL, this);
          }
        }
      }
    }
#endif
    break;
  }

#ifdef _LP64
  case Op_CastPP:
    if (n->in(1)->is_DecodeN() && Matcher::gen_narrow_oop_implicit_null_checks()) {
      Node* in1 = n->in(1);
      const Type* t = n->bottom_type();
      Node* new_in1 = in1->clone();
      new_in1->as_DecodeN()->set_type(t);

      if (!Matcher::narrow_oop_use_complex_address()) {
        //
        // x86, ARM and friends can handle 2 adds in addressing mode
        // and Matcher can fold a DecodeN node into address by using
        // a narrow oop directly and do implicit NULL check in address:
        //
        // [R12 + narrow_oop_reg<<3 + offset]
        // NullCheck narrow_oop_reg
        //
        // On other platforms (Sparc) we have to keep the new DecodeN node and
        // use it to do implicit NULL check in address:
        //
        // decode_not_null narrow_oop_reg, base_reg
        // [base_reg + offset]
        // NullCheck base_reg
        //
        // Pin the new DecodeN node to the non-null path on these platforms (Sparc)
        // so that we keep track of which NULL check the new DecodeN node
        // corresponds to; implicit_null_check() then uses it as the value.
        //
        new_in1->set_req(0, n->in(0));
      }

      n->subsume_by(new_in1, this);
      if (in1->outcnt() == 0) {
        in1->disconnect_inputs(NULL, this);
      }
    }
    break;

  case Op_CmpP:
    // Do this transformation here to preserve CmpPNode::sub() and
    // other TypePtr related Ideal optimizations (for example, ptr nullness).
    if (n->in(1)->is_DecodeNarrowPtr() || n->in(2)->is_DecodeNarrowPtr()) {
      Node* in1 = n->in(1);
      Node* in2 = n->in(2);
      if (!in1->is_DecodeNarrowPtr()) {
        in2 = in1;
        in1 = n->in(2);
      }
      assert(in1->is_DecodeNarrowPtr(), "sanity");

      Node* new_in2 = NULL;
      if (in2->is_DecodeNarrowPtr()) {
        assert(in2->Opcode() == in1->Opcode(), "must be same node type");
        new_in2 = in2->in(1);
      } else if (in2->Opcode() == Op_ConP) {
        const Type* t = in2->bottom_type();
        if (t == TypePtr::NULL_PTR) {
          assert(in1->is_DecodeN(), "compare klass to null?");
          // Don't convert CmpP null check into CmpN if compressed
          // oops implicit null check is not generated.
          // This allows generating a normal oop implicit null check instead.
          if (Matcher::gen_narrow_oop_implicit_null_checks())
            new_in2 = ConNode::make(this, TypeNarrowOop::NULL_PTR);
          //
          // This transformation together with CastPP transformation above
          // will generate code for implicit NULL checks for compressed oops.
          //
          // The original code after Optimize()
          //
          //    LoadN memory, narrow_oop_reg
          //    decode narrow_oop_reg, base_reg
          //    CmpP base_reg, NULL
          //    CastPP base_reg // NotNull
          //    Load [base_reg + offset], val_reg
          //
          // after these transformations will be
          //
          //    LoadN memory, narrow_oop_reg
          //    CmpN narrow_oop_reg, NULL
          //    decode_not_null narrow_oop_reg, base_reg
          //    Load [base_reg + offset], val_reg
          //
          // and the uncommon path (== NULL) will use narrow_oop_reg directly
          // since narrow oops can be used in debug info now (see the code in
          // final_graph_reshaping_walk()).
          //
          // At the end the code will be matched to
          // on x86:
          //
          //    Load_narrow_oop memory, narrow_oop_reg
          //    Load [R12 + narrow_oop_reg<<3 + offset], val_reg
          //    NullCheck narrow_oop_reg
          //
          // and on sparc:
          //
          //    Load_narrow_oop memory, narrow_oop_reg
          //    decode_not_null narrow_oop_reg, base_reg
          //    Load [base_reg + offset], val_reg
          //    NullCheck base_reg
          //
        } else if (t->isa_oopptr()) {
          new_in2 = ConNode::make(this, t->make_narrowoop());
        } else if (t->isa_klassptr()) {
          new_in2 = ConNode::make(this, t->make_narrowklass());
        }
      }
      if (new_in2 != NULL) {
        Node* cmpN = new (this) CmpNNode(in1->in(1), new_in2);
        n->subsume_by(cmpN, this);
        if (in1->outcnt() == 0) {
          in1->disconnect_inputs(NULL, this);
        }
        if (in2->outcnt() == 0) {
          in2->disconnect_inputs(NULL, this);
        }
      }
    }
    break;

  case Op_DecodeN:
  case Op_DecodeNKlass:
    assert(!n->in(1)->is_EncodeNarrowPtr(), "should be optimized out");
    // DecodeN could be pinned when it can't be folded into
    // an address expression, see the code for Op_CastPP above.
    assert(n->in(0) == NULL || (UseCompressedOops && !Matcher::narrow_oop_use_complex_address()), "no control");
    break;

  case Op_EncodeP:
  case Op_EncodePKlass: {
    Node* in1 = n->in(1);
    if (in1->is_DecodeNarrowPtr()) {
      n->subsume_by(in1->in(1), this);
    } else if (in1->Opcode() == Op_ConP) {
      const Type* t = in1->bottom_type();
      if (t == TypePtr::NULL_PTR) {
        assert(t->isa_oopptr(), "null klass?");
        n->subsume_by(ConNode::make(this, TypeNarrowOop::NULL_PTR), this);
      } else if (t->isa_oopptr()) {
        n->subsume_by(ConNode::make(this, t->make_narrowoop()), this);
      } else if (t->isa_klassptr()) {
        n->subsume_by(ConNode::make(this, t->make_narrowklass()), this);
      }
    }
    if (in1->outcnt() == 0) {
      in1->disconnect_inputs(NULL, this);
    }
    break;
  }

  case Op_Proj: {
    if (OptimizeStringConcat) {
      ProjNode* p = n->as_Proj();
      if (p->_is_io_use) {
        // Separate projections were used for the exception path which
        // are normally removed by a late inline.  If it wasn't inlined
        // then they will hang around and should just be replaced with
        // the original one.
        Node* proj = NULL;
        // Replace with just one
        for (SimpleDUIterator i(p->in(0)); i.has_next(); i.next()) {
          Node *use = i.get();
          if (use->is_Proj() && p != use && use->as_Proj()->_con == p->_con) {
            proj = use;
            break;
          }
        }
        assert(proj != NULL, "must be found");
        p->subsume_by(proj, this);
      }
    }
    break;
  }

  case Op_Phi:
    if (n->as_Phi()->bottom_type()->isa_narrowoop() || n->as_Phi()->bottom_type()->isa_narrowklass()) {
      // The EncodeP optimization may create a Phi with the same edges
      // for all paths. Such a Phi is not handled well by the register allocator.
      Node* unique_in = n->in(1);
      assert(unique_in != NULL, "");
      uint cnt = n->req();
      for (uint i = 2; i < cnt; i++) {
        Node* m = n->in(i);
        assert(m != NULL, "");
        if (unique_in != m)
          unique_in = NULL;
      }
      if (unique_in != NULL) {
2983
        n->subsume_by(unique_in, this);
2984 2985 2986 2987
      }
    }
    break;

#endif

  case Op_ModI:
    if (UseDivMod) {
      // Check if a%b and a/b both exist
      Node* d = n->find_similar(Op_DivI);
      if (d) {
        // Replace them with a fused divmod if supported
        if (Matcher::has_match_rule(Op_DivModI)) {
          DivModINode* divmod = DivModINode::make(this, n);
          d->subsume_by(divmod->div_proj(), this);
          n->subsume_by(divmod->mod_proj(), this);
        } else {
          // replace a%b with a-((a/b)*b)
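          // e.g. a=7, b=3: a/b=2, 2*3=6, 7-6=1 == 7%3; the identity also holds
          // for negative operands under Java's truncating division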
          Node* mult = new (this) MulINode(d, d->in(2));
          Node* sub  = new (this) SubINode(d->in(1), mult);
          n->subsume_by(sub, this);
        }
      }
    }
    break;

  case Op_ModL:
    if (UseDivMod) {
      // Check if a%b and a/b both exist
      Node* d = n->find_similar(Op_DivL);
      if (d) {
        // Replace them with a fused divmod if supported
        if (Matcher::has_match_rule(Op_DivModL)) {
          DivModLNode* divmod = DivModLNode::make(this, n);
          d->subsume_by(divmod->div_proj(), this);
          n->subsume_by(divmod->mod_proj(), this);
        } else {
          // replace a%b with a-((a/b)*b)
          Node* mult = new (this) MulLNode(d, d->in(2));
          Node* sub  = new (this) SubLNode(d->in(1), mult);
          n->subsume_by(sub, this);
        }
      }
    }
    break;

  case Op_LoadVector:
  case Op_StoreVector:
    break;

  case Op_PackB:
  case Op_PackS:
  case Op_PackI:
  case Op_PackF:
  case Op_PackL:
  case Op_PackD:
    if (n->req()-1 > 2) {
      // Replace many operand PackNodes with a binary tree for matching
      PackNode* p = (PackNode*) n;
      Node* btp = p->binary_tree_pack(this, 1, n->req());
      n->subsume_by(btp, this);
    }
    break;
  case Op_Loop:
  case Op_CountedLoop:
    if (n->as_Loop()->is_inner_loop()) {
      frc.inc_inner_loop_count();
    }
    break;
  case Op_LShiftI:
  case Op_RShiftI:
  case Op_URShiftI:
  case Op_LShiftL:
  case Op_RShiftL:
  case Op_URShiftL:
    if (Matcher::need_masked_shift_count) {
      // The cpu's shift instructions don't restrict the count to the
      // lower 5/6 bits. We need to do the masking ourselves.
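      // (Java semantics use only the low 5 bits of an int shift count and the
      // low 6 bits of a long shift count, e.g. (i << 37) on an int is (i << 5),
      // so the mask below is BitsPerInt-1 or BitsPerLong-1 respectively.)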
      Node* in2 = n->in(2);
      juint mask = (n->bottom_type() == TypeInt::INT) ? (BitsPerInt - 1) : (BitsPerLong - 1);
      const TypeInt* t = in2->find_int_type();
      if (t != NULL && t->is_con()) {
        juint shift = t->get_con();
        if (shift > mask) { // Unsigned cmp
          n->set_req(2, ConNode::make(this, TypeInt::make(shift & mask)));
        }
      } else {
        if (t == NULL || t->_lo < 0 || t->_hi > (int)mask) {
          Node* shift = new (this) AndINode(in2, ConNode::make(this, TypeInt::make(mask)));
          n->set_req(2, shift);
        }
      }
      if (in2->outcnt() == 0) { // Remove dead node
        in2->disconnect_inputs(NULL, this);
      }
    }
    break;
  case Op_MemBarStoreStore:
  case Op_MemBarRelease:
    // Break the link with AllocateNode: it is no longer useful and
    // confuses register allocation.
    if (n->req() > MemBarNode::Precedent) {
      n->set_req(MemBarNode::Precedent, top());
    }
    break;
  default:
    assert( !n->is_Call(), "" );
    assert( !n->is_Mem(), "" );
    assert( nop != Op_ProfileBoolean, "should be eliminated during IGVN");
    break;
  }

  // Collect CFG split points
  if (n->is_MultiBranch())
    frc._tests.push(n);
}

//------------------------------final_graph_reshaping_walk---------------------
// Replacing Opaque nodes with their input in final_graph_reshaping_impl(),
// requires that the walk visits a node's inputs before visiting the node.
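// The walk is an iterative post-order DFS over the node inputs, driven by the
// explicit Node_Stack passed in, so that large graphs do not overflow the
// native stack with recursion.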
void Compile::final_graph_reshaping_walk( Node_Stack &nstack, Node *root, Final_Reshape_Counts &frc ) {
  ResourceArea *area = Thread::current()->resource_area();
  Unique_Node_List sfpt(area);

  frc._visited.set(root->_idx); // first, mark node as visited
  uint cnt = root->req();
  Node *n = root;
  uint  i = 0;
  while (true) {
    if (i < cnt) {
      // Place all non-visited non-null inputs onto stack
      Node* m = n->in(i);
      ++i;
      if (m != NULL && !frc._visited.test_set(m->_idx)) {
        if (m->is_SafePoint() && m->as_SafePoint()->jvms() != NULL) {
          // compute worst case interpreter size in case of a deoptimization
          update_interpreter_frame_size(m->as_SafePoint()->jvms()->interpreter_frame_size());

          sfpt.push(m);
        }
        cnt = m->req();
        nstack.push(n, i); // put on stack parent and next input's index
        n = m;
        i = 0;
      }
    } else {
      // Now do post-visit work
      final_graph_reshaping_impl( n, frc );
      if (nstack.is_empty())
        break;             // finished
      n = nstack.node();   // Get node from stack
      cnt = n->req();
      i = nstack.index();
      nstack.pop();        // Shift to the next node on stack
    }
  }

  // Skip next transformation if compressed oops are not used.
  if ((UseCompressedOops && !Matcher::gen_narrow_oop_implicit_null_checks()) ||
      (!UseCompressedOops && !UseCompressedClassPointers))
    return;

  // Go over safepoint nodes to skip DecodeN/DecodeNKlass nodes for debug edges.
  // It could be done for an uncommon trap or for any safepoint/call
  // if the DecodeN/DecodeNKlass node is referenced only in debug info.
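  // Narrow oops are legal in debug info (see the Op_CmpP comment in
  // final_graph_reshaping_impl()), so on a debug-only edge the decode adds
  // nothing and the narrow value can be referenced directly.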
  while (sfpt.size() > 0) {
    n = sfpt.pop();
    JVMState *jvms = n->as_SafePoint()->jvms();
    assert(jvms != NULL, "sanity");
    int start = jvms->debug_start();
    int end   = n->req();
    bool is_uncommon = (n->is_CallStaticJava() &&
                        n->as_CallStaticJava()->uncommon_trap_request() != 0);
    for (int j = start; j < end; j++) {
      Node* in = n->in(j);
3159
      if (in->is_DecodeNarrowPtr()) {
3160 3161 3162 3163 3164 3165 3166 3167 3168 3169 3170 3171 3172 3173 3174
        bool safe_to_skip = true;
        if (!is_uncommon ) {
          // Is it safe to skip?
          for (uint i = 0; i < in->outcnt(); i++) {
            Node* u = in->raw_out(i);
            if (!u->is_SafePoint() ||
                 (u->is_Call() && u->as_Call()->has_non_debug_use(n))) {
              safe_to_skip = false;
            }
          }
        }
        if (safe_to_skip) {
          n->set_req(j, in->in(1));
        }
        if (in->outcnt() == 0) {
          in->disconnect_inputs(NULL, this);
        }
      }
    }
  }
}

//------------------------------final_graph_reshaping--------------------------
// Final Graph Reshaping.
//
// (1) Clone simple inputs to uncommon calls, so they can be scheduled late
//     and not commoned up and forced early.  Must come after regular
//     optimizations to avoid GVN undoing the cloning.  Clone constant
//     inputs to Loop Phis; these will be split by the allocator anyways.
//     Remove Opaque nodes.
// (2) Move last-uses by commutative operations to the left input to encourage
//     Intel update-in-place two-address operations and better register usage
//     on RISCs.  Must come after regular optimizations to avoid GVN Ideal
//     calls canonicalizing them back.
// (3) Count the number of double-precision FP ops, single-precision FP ops
//     and call sites.  On Intel, we can get correct rounding either by
//     forcing singles to memory (requires extra stores and loads after each
//     FP bytecode) or we can set a rounding mode bit (requires setting and
//     clearing the mode bit around call sites).  The mode bit is only used
//     if the relative frequency of single FP ops to calls is low enough.
//     This is a key transform for SPEC mpeg_audio.
// (4) Detect infinite loops; blobs of code reachable from above but not
//     below.  Several of the Code_Gen algorithms fail on such code shapes,
//     so we simply bail out.  Happens a lot in ZKM.jar, but also happens
//     from time to time in other code (such as -Xcomp finalizer loops, etc).
//     Detection is by looking for IfNodes where only 1 projection is
//     reachable from below or CatchNodes missing some targets.
// (5) Assert for insane oop offsets in debug mode.

bool Compile::final_graph_reshaping() {
  // an infinite loop may have been eliminated by the optimizer,
  // in which case the graph will be empty.
  if (root()->req() == 1) {
    record_method_not_compilable("trivial infinite loop");
    return true;
  }

  // Expensive nodes have their control input set to prevent the GVN
  // from freely commoning them. There's no GVN beyond this point so
  // no need to keep the control input. We want the expensive nodes to
  // be freely moved to the least frequent code path by gcm.
  assert(OptimizeExpensiveOps || expensive_count() == 0, "optimization off but list non empty?");
  for (int i = 0; i < expensive_count(); i++) {
    _expensive_nodes->at(i)->set_req(0, NULL);
  }

  Final_Reshape_Counts frc;

  // Visit everybody reachable!
  // Allocate stack of size C->live_nodes()/2 to avoid frequent realloc
  Node_Stack nstack(live_nodes() >> 1);
  final_graph_reshaping_walk(nstack, root(), frc);

  // Check for unreachable (from below) code (i.e., infinite loops).
  for( uint i = 0; i < frc._tests.size(); i++ ) {
    MultiBranchNode *n = frc._tests[i]->as_MultiBranch();
    // Get number of CFG targets.
    // Note that PCTables include exception targets after calls.
    uint required_outcnt = n->required_outcnt();
    if (n->outcnt() != required_outcnt) {
      // Check for a few special cases.  Rethrow Nodes never take the
      // 'fall-thru' path, so expected kids is 1 less.
      if (n->is_PCTable() && n->in(0) && n->in(0)->in(0)) {
        if (n->in(0)->in(0)->is_Call()) {
          CallNode *call = n->in(0)->in(0)->as_Call();
          if (call->entry_point() == OptoRuntime::rethrow_stub()) {
            required_outcnt--;      // Rethrow always has 1 less kid
          } else if (call->req() > TypeFunc::Parms &&
                     call->is_CallDynamicJava()) {
            // Check for null receiver. In such case, the optimizer has
            // detected that the virtual call will always result in a null
            // pointer exception. The fall-through projection of this CatchNode
            // will not be populated.
            Node *arg0 = call->in(TypeFunc::Parms);
            if (arg0->is_Type() &&
                arg0->as_Type()->type()->higher_equal(TypePtr::NULL_PTR)) {
              required_outcnt--;
            }
          } else if (call->entry_point() == OptoRuntime::new_array_Java() &&
                     call->req() > TypeFunc::Parms+1 &&
                     call->is_CallStaticJava()) {
            // Check for negative array length. In such case, the optimizer has
            // detected that the allocation attempt will always result in an
            // exception. There is no fall-through projection of this CatchNode.
            Node *arg1 = call->in(TypeFunc::Parms+1);
            if (arg1->is_Type() &&
                arg1->as_Type()->type()->join(TypeInt::POS)->empty()) {
              required_outcnt--;
            }
          }
        }
      }
      // Recheck with a better notion of 'required_outcnt'
      if (n->outcnt() != required_outcnt) {
        record_method_not_compilable("malformed control flow");
        return true;            // Not all targets reachable!
      }
    }
    // Check that I actually visited all kids.  Unreached kids
    // must be infinite loops.
    for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++)
      if (!frc._visited.test(n->fast_out(j)->_idx)) {
        record_method_not_compilable("infinite loop");
        return true;            // Found unvisited kid; must be unreach
      }
  }

  // If original bytecodes contained a mixture of floats and doubles
  // check if the optimizer has made it homogeneous, item (3).
  if( Use24BitFPMode && Use24BitFP && UseSSE == 0 &&
      frc.get_float_count() > 32 &&
      frc.get_double_count() == 0 &&
      (10 * frc.get_call_count() < frc.get_float_count()) ) {
    set_24_bit_selection_and_mode( false,  true );
  }

  set_java_calls(frc.get_java_call_count());
  set_inner_loops(frc.get_inner_loop_count());

  // No infinite loops, no reason to bail out.
  return false;
}

//-----------------------------too_many_traps----------------------------------
// Report if there are too many traps at the current method and bci.
// Return true if there was a trap, and/or PerMethodTrapLimit is exceeded.
bool Compile::too_many_traps(ciMethod* method,
                             int bci,
                             Deoptimization::DeoptReason reason) {
  ciMethodData* md = method->method_data();
  if (md->is_empty()) {
    // Assume the trap has not occurred, or that it occurred only
    // because of a transient condition during start-up in the interpreter.
    return false;
  }
  ciMethod* m = Deoptimization::reason_is_speculate(reason) ? this->method() : NULL;
  if (md->has_trap_at(bci, m, reason) != 0) {
    // Assume PerBytecodeTrapLimit==0, for a more conservative heuristic.
    // Also, if there are multiple reasons, or if there is no per-BCI record,
    // assume the worst.
    if (log())
      log()->elem("observe trap='%s' count='%d'",
                  Deoptimization::trap_reason_name(reason),
                  md->trap_count(reason));
    return true;
  } else {
    // Ignore method/bci and see if there have been too many globally.
    return too_many_traps(reason, md);
  }
}

// Less-accurate variant which does not require a method and bci.
bool Compile::too_many_traps(Deoptimization::DeoptReason reason,
                             ciMethodData* logmd) {
  if (trap_count(reason) >= Deoptimization::per_method_trap_limit(reason)) {
    // Too many traps globally.
    // Note that we use cumulative trap_count, not just md->trap_count.
    if (log()) {
      int mcount = (logmd == NULL)? -1: (int)logmd->trap_count(reason);
      log()->elem("observe trap='%s' count='0' mcount='%d' ccount='%d'",
                  Deoptimization::trap_reason_name(reason),
                  mcount, trap_count(reason));
    }
    return true;
  } else {
    // The coast is clear.
    return false;
  }
}

//--------------------------too_many_recompiles--------------------------------
// Report if there are too many recompiles at the current method and bci.
// Consults PerBytecodeRecompilationCutoff and PerMethodRecompilationCutoff.
// Is not eager to return true, since this will cause the compiler to use
// Action_none for a trap point, to avoid too many recompilations.
bool Compile::too_many_recompiles(ciMethod* method,
                                  int bci,
                                  Deoptimization::DeoptReason reason) {
  ciMethodData* md = method->method_data();
  if (md->is_empty()) {
    // Assume the trap has not occurred, or that it occurred only
    // because of a transient condition during start-up in the interpreter.
    return false;
  }
  // Pick a cutoff point well within PerBytecodeRecompilationCutoff.
  uint bc_cutoff = (uint) PerBytecodeRecompilationCutoff / 8;
  uint m_cutoff  = (uint) PerMethodRecompilationCutoff / 2 + 1;  // not zero
  Deoptimization::DeoptReason per_bc_reason
    = Deoptimization::reason_recorded_per_bytecode_if_any(reason);
  ciMethod* m = Deoptimization::reason_is_speculate(reason) ? this->method() : NULL;
  if ((per_bc_reason == Deoptimization::Reason_none
       || md->has_trap_at(bci, m, reason) != 0)
      // The trap frequency measure we care about is the recompile count:
      && md->trap_recompiled_at(bci, m)
      && md->overflow_recompile_count() >= bc_cutoff) {
    // Do not emit a trap here if it has already caused recompilations.
    // Also, if there are multiple reasons, or if there is no per-BCI record,
    // assume the worst.
    if (log())
      log()->elem("observe trap='%s recompiled' count='%d' recompiles2='%d'",
                  Deoptimization::trap_reason_name(reason),
                  md->trap_count(reason),
                  md->overflow_recompile_count());
    return true;
  } else if (trap_count(reason) != 0
             && decompile_count() >= m_cutoff) {
    // Too many recompiles globally, and we have seen this sort of trap.
    // Use cumulative decompile_count, not just md->decompile_count.
    if (log())
      log()->elem("observe trap='%s' count='%d' mcount='%d' decompiles='%d' mdecompiles='%d'",
                  Deoptimization::trap_reason_name(reason),
                  md->trap_count(reason), trap_count(reason),
                  md->decompile_count(), decompile_count());
    return true;
  } else {
    // The coast is clear.
    return false;
  }
}

// Compute when not to trap. Used by matching trap based nodes and
// NullCheck optimization.
void Compile::set_allowed_deopt_reasons() {
  _allowed_reasons = 0;
  if (is_method_compilation()) {
    for (int rs = (int)Deoptimization::Reason_none+1; rs < Compile::trapHistLength; rs++) {
      assert(rs < BitsPerInt, "recode bit map");
      if (!too_many_traps((Deoptimization::DeoptReason) rs)) {
        _allowed_reasons |= nth_bit(rs);
      }
    }
  }
}

#ifndef PRODUCT
//------------------------------verify_graph_edges---------------------------
// Walk the Graph and verify that there is a one-to-one correspondence
// between Use-Def edges and Def-Use edges in the graph.
void Compile::verify_graph_edges(bool no_dead_code) {
  if (VerifyGraphEdges) {
    ResourceArea *area = Thread::current()->resource_area();
    Unique_Node_List visited(area);
    // Call recursive graph walk to check edges
    _root->verify_edges(visited);
    if (no_dead_code) {
      // Now make sure that no visited node is used by an unvisited node.
      int dead_nodes = 0;
      Unique_Node_List checked(area);
      while (visited.size() > 0) {
        Node* n = visited.pop();
        checked.push(n);
        for (uint i = 0; i < n->outcnt(); i++) {
          Node* use = n->raw_out(i);
          if (checked.member(use))  continue;  // already checked
          if (visited.member(use))  continue;  // already in the graph
          if (use->is_Con())        continue;  // a dead ConNode is OK
          // At this point, we have found a dead node which is DU-reachable.
          if (dead_nodes++ == 0)
            tty->print_cr("*** Dead nodes reachable via DU edges:");
          use->dump(2);
          tty->print_cr("---");
          checked.push(use);  // No repeats; pretend it is now checked.
        }
      }
      assert(dead_nodes == 0, "using nodes must be reachable from root");
    }
  }
}

// Verify GC barriers consistency
// Currently supported:
// - G1 pre-barriers (see GraphKit::g1_write_barrier_pre())
void Compile::verify_barriers() {
  if (UseG1GC) {
    // Verify G1 pre-barriers
    const int marking_offset = in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_active());

    ResourceArea *area = Thread::current()->resource_area();
    Unique_Node_List visited(area);
    Node_List worklist(area);
    // We're going to walk control flow backwards starting from the Root
    worklist.push(_root);
    while (worklist.size() > 0) {
      Node* x = worklist.pop();
      if (x == NULL || x == top()) continue;
      if (visited.member(x)) {
        continue;
      } else {
        visited.push(x);
      }

      if (x->is_Region()) {
        for (uint i = 1; i < x->req(); i++) {
          worklist.push(x->in(i));
        }
      } else {
        worklist.push(x->in(0));
        // We are looking for the pattern:
        //                            /->ThreadLocal
        // If->Bool->CmpI->LoadB->AddP->ConL(marking_offset)
        //              \->ConI(0)
        // We want to verify that the If and the LoadB have the same control
        // See GraphKit::g1_write_barrier_pre()
        if (x->is_If()) {
          IfNode *iff = x->as_If();
          if (iff->in(1)->is_Bool() && iff->in(1)->in(1)->is_Cmp()) {
            CmpNode *cmp = iff->in(1)->in(1)->as_Cmp();
            if (cmp->Opcode() == Op_CmpI && cmp->in(2)->is_Con() && cmp->in(2)->bottom_type()->is_int()->get_con() == 0
                && cmp->in(1)->is_Load()) {
              LoadNode* load = cmp->in(1)->as_Load();
              if (load->Opcode() == Op_LoadB && load->in(2)->is_AddP() && load->in(2)->in(2)->Opcode() == Op_ThreadLocal
                  && load->in(2)->in(3)->is_Con()
                  && load->in(2)->in(3)->bottom_type()->is_intptr_t()->get_con() == marking_offset) {

                Node* if_ctrl = iff->in(0);
                Node* load_ctrl = load->in(0);

                if (if_ctrl != load_ctrl) {
                  // Skip possible CProj->NeverBranch in infinite loops
                  if ((if_ctrl->is_Proj() && if_ctrl->Opcode() == Op_CProj)
                      && (if_ctrl->in(0)->is_MultiBranch() && if_ctrl->in(0)->Opcode() == Op_NeverBranch)) {
                    if_ctrl = if_ctrl->in(0)->in(0);
                  }
                }
                assert(load_ctrl != NULL && if_ctrl == load_ctrl, "controls must match");
              }
            }
          }
        }
      }
    }
  }
}

#endif

// The Compile object keeps track of failure reasons separately from the ciEnv.
// This is required because there is not quite a 1-1 relation between the
// ciEnv and its compilation task and the Compile object.  Note that one
// ciEnv might use two Compile objects, if C2Compiler::compile_method decides
// to backtrack and retry without subsuming loads.  Other than this backtracking
// behavior, the Compile's failure reason is quietly copied up to the ciEnv
// by the logic in C2Compiler.
void Compile::record_failure(const char* reason) {
  if (log() != NULL) {
    log()->elem("failure reason='%s' phase='compile'", reason);
  }
  if (_failure_reason == NULL) {
    // Record the first failure reason.
    _failure_reason = reason;
  }

  EventCompilerFailure event;
  if (event.should_commit()) {
    event.set_compileID(Compile::compile_id());
    event.set_failure(reason);
    event.commit();
  }

  if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) {
    C->print_method(PHASE_FAILURE);
  }
  _root = NULL;  // flush the graph, too
}

Compile::TracePhase::TracePhase(const char* name, elapsedTimer* accumulator, bool dolog)
  : TraceTime(NULL, accumulator, false NOT_PRODUCT( || TimeCompiler ), false),
    _phase_name(name), _dolog(dolog)
{
  if (dolog) {
    C = Compile::current();
    _log = C->log();
  } else {
    C = NULL;
    _log = NULL;
  }
  if (_log != NULL) {
    _log->begin_head("phase name='%s' nodes='%d' live='%d'", _phase_name, C->unique(), C->live_nodes());
    _log->stamp();
    _log->end_head();
  }
}

Compile::TracePhase::~TracePhase() {

  C = Compile::current();
  if (_dolog) {
    _log = C->log();
  } else {
    _log = NULL;
  }

#ifdef ASSERT
  if (PrintIdealNodeCount) {
    tty->print_cr("phase name='%s' nodes='%d' live='%d' live_graph_walk='%d'",
                  _phase_name, C->unique(), C->live_nodes(), C->count_live_nodes_by_graph_walk());
  }

  if (VerifyIdealNodeCount) {
    Compile::current()->print_missing_nodes();
  }
#endif

  if (_log != NULL) {
    _log->done("phase name='%s' nodes='%d' live='%d'", _phase_name, C->unique(), C->live_nodes());
  }
}

//=============================================================================
// Two Constant's are equal when the type and the value are equal.
bool Compile::Constant::operator==(const Constant& other) {
  if (type()          != other.type()         )  return false;
  if (can_be_reused() != other.can_be_reused())  return false;
  // For floating point values we compare the bit pattern.
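  // (Comparing bit patterns keeps +0.0 and -0.0 distinct and treats constants
  // with identical NaN bits as equal, neither of which a value compare would do.)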
  switch (type()) {
  case T_FLOAT:   return (_v._value.i == other._v._value.i);
  case T_LONG:
  case T_DOUBLE:  return (_v._value.j == other._v._value.j);
  case T_OBJECT:
  case T_ADDRESS: return (_v._value.l == other._v._value.l);
  case T_VOID:    return (_v._value.l == other._v._value.l);  // jump-table entries
  case T_METADATA: return (_v._metadata == other._v._metadata);
  default: ShouldNotReachHere();
  }
  return false;
}

static int type_to_size_in_bytes(BasicType t) {
  switch (t) {
  case T_LONG:    return sizeof(jlong  );
  case T_FLOAT:   return sizeof(jfloat );
  case T_DOUBLE:  return sizeof(jdouble);
  case T_METADATA: return sizeof(Metadata*);
    // We use T_VOID as marker for jump-table entries (labels) which
    // need an internal word relocation.
  case T_VOID:
  case T_ADDRESS:
  case T_OBJECT:  return sizeof(jobject);
  }

  ShouldNotReachHere();
  return -1;
}

int Compile::ConstantTable::qsort_comparator(Constant* a, Constant* b) {
  // sort descending
  if (a->freq() > b->freq())  return -1;
  if (a->freq() < b->freq())  return  1;
  return 0;
}

void Compile::ConstantTable::calculate_offsets_and_size() {
  // First, sort the array by frequencies.
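  // Constants used most often then end up closest to the table base, i.e.
  // with the smallest offsets, since offsets are handed out in array order below.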
  _constants.sort(qsort_comparator);

#ifdef ASSERT
  // Make sure all jump-table entries were sorted to the end of the
  // array (they have a negative frequency).
  bool found_void = false;
  for (int i = 0; i < _constants.length(); i++) {
    Constant con = _constants.at(i);
    if (con.type() == T_VOID)
      found_void = true;  // jump-tables
    else
      assert(!found_void, "wrong sorting");
  }
#endif

  int offset = 0;
  for (int i = 0; i < _constants.length(); i++) {
    Constant* con = _constants.adr_at(i);

    // Align offset for type.
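    // (e.g. an 8-byte jlong at a current offset of 12 is placed at offset 16)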
    int typesize = type_to_size_in_bytes(con->type());
    offset = align_size_up(offset, typesize);
    con->set_offset(offset);   // set constant's offset

    if (con->type() == T_VOID) {
      MachConstantNode* n = (MachConstantNode*) con->get_jobject();
      offset = offset + typesize * n->outcnt();  // expand jump-table
    } else {
      offset = offset + typesize;
    }
  }

  // Align size up to the next section start (which is insts; see
  // CodeBuffer::align_at_start).
  assert(_size == -1, "already set?");
  _size = align_size_up(offset, CodeEntryAlignment);
}

void Compile::ConstantTable::emit(CodeBuffer& cb) {
  MacroAssembler _masm(&cb);
  for (int i = 0; i < _constants.length(); i++) {
    Constant con = _constants.at(i);
    address constant_addr;
    switch (con.type()) {
    case T_LONG:   constant_addr = _masm.long_constant(  con.get_jlong()  ); break;
    case T_FLOAT:  constant_addr = _masm.float_constant( con.get_jfloat() ); break;
    case T_DOUBLE: constant_addr = _masm.double_constant(con.get_jdouble()); break;
    case T_OBJECT: {
      jobject obj = con.get_jobject();
      int oop_index = _masm.oop_recorder()->find_index(obj);
      constant_addr = _masm.address_constant((address) obj, oop_Relocation::spec(oop_index));
      break;
    }
    case T_ADDRESS: {
      address addr = (address) con.get_jobject();
      constant_addr = _masm.address_constant(addr);
      break;
    }
    // We use T_VOID as marker for jump-table entries (labels) which
    // need an internal word relocation.
    case T_VOID: {
      MachConstantNode* n = (MachConstantNode*) con.get_jobject();
      // Fill the jump-table with a dummy word.  The real value is
      // filled in later in fill_jump_table.
      address dummy = (address) n;
      constant_addr = _masm.address_constant(dummy);
      // Expand jump-table
      for (uint i = 1; i < n->outcnt(); i++) {
        address temp_addr = _masm.address_constant(dummy + i);
        assert(temp_addr, "consts section too small");
3703
      }
3704 3705
      break;
    }
3706 3707 3708 3709 3710 3711
    case T_METADATA: {
      Metadata* obj = con.get_metadata();
      int metadata_index = _masm.oop_recorder()->find_index(obj);
      constant_addr = _masm.address_constant((address) obj, metadata_Relocation::spec(metadata_index));
      break;
    }
    default: ShouldNotReachHere();
    }
    assert(constant_addr, "consts section too small");
    assert((constant_addr - _masm.code()->consts()->start()) == con.offset(),
            err_msg_res("must be: %d == %d", (int) (constant_addr - _masm.code()->consts()->start()), (int)(con.offset())));
  }
}

int Compile::ConstantTable::find_offset(Constant& con) const {
  int idx = _constants.find(con);
  assert(idx != -1, "constant must be in constant table");
  int offset = _constants.at(idx).offset();
  assert(offset != -1, "constant table not emitted yet?");
  return offset;
}

void Compile::ConstantTable::add(Constant& con) {
  if (con.can_be_reused()) {
    int idx = _constants.find(con);
    if (idx != -1 && _constants.at(idx).can_be_reused()) {
      _constants.adr_at(idx)->inc_freq(con.freq());  // increase the frequency by the current value
      return;
    }
  }
  (void) _constants.append(con);
}

Compile::Constant Compile::ConstantTable::add(MachConstantNode* n, BasicType type, jvalue value) {
  Block* b = Compile::current()->cfg()->get_block_for_node(n);
  Constant con(type, value, b->_freq);
  add(con);
  return con;
}

Compile::Constant Compile::ConstantTable::add(Metadata* metadata) {
  Constant con(metadata);
  add(con);
  return con;
}

Compile::Constant Compile::ConstantTable::add(MachConstantNode* n, MachOper* oper) {
  jvalue value;
  BasicType type = oper->type()->basic_type();
  switch (type) {
  case T_LONG:    value.j = oper->constantL(); break;
  case T_FLOAT:   value.f = oper->constantF(); break;
  case T_DOUBLE:  value.d = oper->constantD(); break;
  case T_OBJECT:
  case T_ADDRESS: value.l = (jobject) oper->constant(); break;
3761 3762
  case T_METADATA: return add((Metadata*)oper->constant()); break;
  default: guarantee(false, err_msg_res("unhandled type: %s", type2name(type)));
3763
  }
3764
  return add(n, type, value);
3765 3766
}

3767
Compile::Constant Compile::ConstantTable::add_jump_table(MachConstantNode* n) {
3768 3769 3770 3771 3772 3773
  jvalue value;
  // We can use the node pointer here to identify the right jump-table
  // as this method is called from Compile::Fill_buffer right before
  // the MachNodes are emitted and the jump-table is filled (means the
  // MachNode pointers do not change anymore).
  value.l = (jobject) n;
3774 3775
  Constant con(T_VOID, value, next_jump_table_freq(), false);  // Labels of a jump-table cannot be reused.
  add(con);
3776 3777 3778 3779 3780 3781 3782 3783
  return con;
}

void Compile::ConstantTable::fill_jump_table(CodeBuffer& cb, MachConstantNode* n, GrowableArray<Label*> labels) const {
  // If called from Compile::scratch_emit_size do nothing.
  if (Compile::current()->in_scratch_emit_size())  return;

  assert(labels.is_nonempty(), "must be");
3784
  assert((uint) labels.length() == n->outcnt(), err_msg_res("must be equal: %d == %d", labels.length(), n->outcnt()));
3785 3786 3787 3788 3789 3790 3791 3792 3793

  // Since MachConstantNode::constant_offset() also contains
  // table_base_offset() we need to subtract the table_base_offset()
  // to get the plain offset into the constant table.
  int offset = n->constant_offset() - table_base_offset();

  MacroAssembler _masm(&cb);
  address* jump_table_base = (address*) (_masm.code()->consts()->start() + offset);

3794
  for (uint i = 0; i < n->outcnt(); i++) {
3795
    address* constant_addr = &jump_table_base[i];
3796
    assert(*constant_addr == (((address) n) + i), err_msg_res("all jump-table entries must contain adjusted node pointer: " INTPTR_FORMAT " == " INTPTR_FORMAT, p2i(*constant_addr), p2i(((address) n) + i)));
3797 3798 3799 3800
    *constant_addr = cb.consts()->target(*labels.at(i), (address) constant_addr);
    cb.consts()->relocate((address) constant_addr, relocInfo::internal_word_type);
  }
}
3801 3802

void Compile::dump_inlining() {
3803
  if (print_inlining() || print_intrinsics()) {
R
roland 已提交
3804 3805 3806 3807 3808 3809 3810 3811 3812 3813 3814 3815 3816 3817 3818 3819 3820 3821 3822 3823 3824 3825
    // Print inlining message for candidates that we couldn't inline
    // for lack of space or non constant receiver
    for (int i = 0; i < _late_inlines.length(); i++) {
      CallGenerator* cg = _late_inlines.at(i);
      cg->print_inlining_late("live nodes > LiveNodeCountInliningCutoff");
    }
    Unique_Node_List useful;
    useful.push(root());
    for (uint next = 0; next < useful.size(); ++next) {
      Node* n  = useful.at(next);
      if (n->is_Call() && n->as_Call()->generator() != NULL && n->as_Call()->generator()->call_node() == n) {
        CallNode* call = n->as_Call();
        CallGenerator* cg = call->generator();
        cg->print_inlining_late("receiver not constant");
      }
      uint max = n->len();
      for ( uint i = 0; i < max; ++i ) {
        Node *m = n->in(i);
        if ( m == NULL ) continue;
        useful.push(m);
      }
    }
    for (int i = 0; i < _print_inlining_list->length(); i++) {
      tty->print("%s", _print_inlining_list->adr_at(i)->ss()->as_string());
    }
  }
}

// Dump inlining replay data to the stream.
// Don't change thread state and acquire any locks.
void Compile::dump_inline_data(outputStream* out) {
  InlineTree* inl_tree = ilt();
  if (inl_tree != NULL) {
    out->print(" inline %d", inl_tree->count());
    inl_tree->dump_replay_data(out);
  }
}

int Compile::cmp_expensive_nodes(Node* n1, Node* n2) {
  if (n1->Opcode() < n2->Opcode())      return -1;
  else if (n1->Opcode() > n2->Opcode()) return 1;

  assert(n1->req() == n2->req(), err_msg_res("can't compare %s nodes: n1->req() = %d, n2->req() = %d", NodeClassNames[n1->Opcode()], n1->req(), n2->req()));
  for (uint i = 1; i < n1->req(); i++) {
    if (n1->in(i) < n2->in(i))      return -1;
    else if (n1->in(i) > n2->in(i)) return 1;
  }

  return 0;
}

int Compile::cmp_expensive_nodes(Node** n1p, Node** n2p) {
  Node* n1 = *n1p;
  Node* n2 = *n2p;

  return cmp_expensive_nodes(n1, n2);
}

void Compile::sort_expensive_nodes() {
  if (!expensive_nodes_sorted()) {
    _expensive_nodes->sort(cmp_expensive_nodes);
  }
}

bool Compile::expensive_nodes_sorted() const {
  for (int i = 1; i < _expensive_nodes->length(); i++) {
    if (cmp_expensive_nodes(_expensive_nodes->adr_at(i), _expensive_nodes->adr_at(i-1)) < 0) {
      return false;
    }
  }
  return true;
}

bool Compile::should_optimize_expensive_nodes(PhaseIterGVN &igvn) {
  if (_expensive_nodes->length() == 0) {
    return false;
  }

  assert(OptimizeExpensiveOps, "optimization off?");

  // Take this opportunity to remove dead nodes from the list
  int j = 0;
  for (int i = 0; i < _expensive_nodes->length(); i++) {
    Node* n = _expensive_nodes->at(i);
    if (!n->is_unreachable(igvn)) {
      assert(n->is_expensive(), "should be expensive");
      _expensive_nodes->at_put(j, n);
      j++;
    }
  }
  _expensive_nodes->trunc_to(j);

  // Then sort the list so that similar nodes are next to each other
  // and check for at least two nodes of identical kind with same data
  // inputs.
  sort_expensive_nodes();

  for (int i = 0; i < _expensive_nodes->length()-1; i++) {
    if (cmp_expensive_nodes(_expensive_nodes->adr_at(i), _expensive_nodes->adr_at(i+1)) == 0) {
      return true;
    }
  }

  return false;
}

void Compile::cleanup_expensive_nodes(PhaseIterGVN &igvn) {
  if (_expensive_nodes->length() == 0) {
    return;
  }

  assert(OptimizeExpensiveOps, "optimization off?");

  // Sort to bring similar nodes next to each other and clear the
  // control input of nodes for which there's only a single copy.
  sort_expensive_nodes();

  int j = 0;
  int identical = 0;
  int i = 0;
  for (; i < _expensive_nodes->length()-1; i++) {
    assert(j <= i, "can't write beyond current index");
    if (_expensive_nodes->at(i)->Opcode() == _expensive_nodes->at(i+1)->Opcode()) {
      identical++;
      _expensive_nodes->at_put(j++, _expensive_nodes->at(i));
      continue;
    }
    if (identical > 0) {
      _expensive_nodes->at_put(j++, _expensive_nodes->at(i));
      identical = 0;
    } else {
      Node* n = _expensive_nodes->at(i);
      igvn.hash_delete(n);
      n->set_req(0, NULL);
      igvn.hash_insert(n);
    }
  }
  if (identical > 0) {
    _expensive_nodes->at_put(j++, _expensive_nodes->at(i));
  } else if (_expensive_nodes->length() >= 1) {
    Node* n = _expensive_nodes->at(i);
    igvn.hash_delete(n);
    n->set_req(0, NULL);
    igvn.hash_insert(n);
  }
  _expensive_nodes->trunc_to(j);
}

void Compile::add_expensive_node(Node * n) {
  assert(!_expensive_nodes->contains(n), "duplicate entry in expensive list");
  assert(n->is_expensive(), "expensive nodes with non-null control here only");
  assert(!n->is_CFG() && !n->is_Mem(), "no cfg or memory nodes here");
  if (OptimizeExpensiveOps) {
    _expensive_nodes->append(n);
  } else {
    // Clear control input and let IGVN optimize expensive nodes if
    // OptimizeExpensiveOps is off.
    n->set_req(0, NULL);
  }
}

/**
 * Remove the speculative part of types and clean up the graph
 */
void Compile::remove_speculative_types(PhaseIterGVN &igvn) {
  if (UseTypeSpeculation) {
    Unique_Node_List worklist;
    worklist.push(root());
    int modified = 0;
    // Go over all type nodes that carry a speculative type, drop the
    // speculative part of the type and enqueue the node for an igvn
    // which may optimize it out.
    for (uint next = 0; next < worklist.size(); ++next) {
      Node *n  = worklist.at(next);
      if (n->is_Type()) {
        TypeNode* tn = n->as_Type();
        const Type* t = tn->type();
        const Type* t_no_spec = t->remove_speculative();
        if (t_no_spec != t) {
          bool in_hash = igvn.hash_delete(n);
          assert(in_hash, "node should be in igvn hash table");
          tn->set_type(t_no_spec);
          igvn.hash_insert(n);
          igvn._worklist.push(n); // give it a chance to go away
          modified++;
        }
3990 3991 3992 3993 3994 3995 3996 3997 3998 3999 4000 4001 4002
      }
      uint max = n->len();
      for( uint i = 0; i < max; ++i ) {
        Node *m = n->in(i);
        if (not_a_node(m))  continue;
        worklist.push(m);
      }
    }
    // Drop the speculative part of all types in the igvn's type table
    igvn.remove_speculative_types();
    if (modified > 0) {
      igvn.optimize();
    }
4003 4004 4005 4006 4007 4008
#ifdef ASSERT
    // Verify that after the IGVN is over no speculative type has resurfaced
    worklist.clear();
    worklist.push(root());
    for (uint next = 0; next < worklist.size(); ++next) {
      Node *n  = worklist.at(next);
4009 4010
      const Type* t = igvn.type_or_null(n);
      assert((t == NULL) || (t == t->remove_speculative()), "no more speculative types");
4011 4012 4013 4014 4015 4016 4017 4018 4019 4020 4021 4022 4023
      if (n->is_Type()) {
        t = n->as_Type()->type();
        assert(t == t->remove_speculative(), "no more speculative types");
      }
      uint max = n->len();
      for( uint i = 0; i < max; ++i ) {
        Node *m = n->in(i);
        if (not_a_node(m))  continue;
        worklist.push(m);
      }
    }
    igvn.check_no_speculative_types();
#endif
4024 4025 4026
  }
}

// Auxiliary method to support randomized stressing/fuzzing.
//
// This method can be called an arbitrary number of times, with the current count
// as the argument. The logic allows selecting a single candidate from the
// running list of candidates as follows:
//    int count = 0;
//    Cand* selected = null;
//    while(cand = cand->next()) {
//      if (randomized_select(++count)) {
//        selected = cand;
//      }
//    }
//
// Including count equalizes the chances any candidate is "selected".
// This is useful when we don't have the complete list of candidates to choose
// from uniformly. In this case, we need to adjust the randomness of the
// selection, or else we will end up biasing the selection towards the latter
// candidates.
//
// A quick back-of-the-envelope calculation shows that for a list of n candidates
// the equal probability for the candidate to persist as "best" can be
// achieved by replacing it with "next" k-th candidate with the probability
// of 1/k. It can be easily shown that by the end of the run, the
// probability for any candidate is converged to 1/n, thus giving the
// uniform distribution among all the candidates.
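//
// Worked example: this is reservoir sampling with a reservoir of size one.
// Candidate i is picked at step i with probability 1/i and then survives each
// later step k with probability (k-1)/k, so its final probability is
//   (1/i) * (i/(i+1)) * ((i+1)/(i+2)) * ... * ((n-1)/n) = 1/n.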
//
// We don't care about the domain size as long as (RANDOMIZED_DOMAIN / count) is large.
#define RANDOMIZED_DOMAIN_POW 29
#define RANDOMIZED_DOMAIN (1 << RANDOMIZED_DOMAIN_POW)
#define RANDOMIZED_DOMAIN_MASK ((1 << (RANDOMIZED_DOMAIN_POW + 1)) - 1)
bool Compile::randomized_select(int count) {
  assert(count > 0, "only positive");
  return (os::random() & RANDOMIZED_DOMAIN_MASK) < (RANDOMIZED_DOMAIN / count);
}