/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_MEMNODE_HPP
#define SHARE_VM_OPTO_MEMNODE_HPP

#include "opto/multnode.hpp"
#include "opto/node.hpp"
#include "opto/opcodes.hpp"
#include "opto/type.hpp"

// Portions of code courtesy of Clifford Click

class MultiNode;
class PhaseCCP;
class PhaseTransform;

//------------------------------MemNode----------------------------------------
// Load or Store, possibly throwing a NULL pointer exception
class MemNode : public Node {
private:
  bool _unaligned_access; // Unaligned access from unsafe
  bool _mismatched_access; // Mismatched access from unsafe: byte read in integer array for instance
protected:
#ifdef ASSERT
  const TypePtr* _adr_type;     // What kind of memory is being addressed?
#endif
  virtual uint size_of() const;
public:
  enum { Control,               // When is it safe to do this load?
         Memory,                // Chunk of memory is being loaded from
         Address,               // Actually address, derived from base
         ValueIn,               // Value to store
         OopStore               // Preceding oop store, only in StoreCM
  };
  typedef enum { unordered = 0,
                 acquire,       // Load has to acquire or be succeeded by MemBarAcquire.
                 release        // Store has to release or be preceded by MemBarRelease.
  } MemOrd;
protected:
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at )
    : Node(c0,c1,c2   ), _unaligned_access(false), _mismatched_access(false) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3 )
    : Node(c0,c1,c2,c3), _unaligned_access(false), _mismatched_access(false) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3, Node *c4)
    : Node(c0,c1,c2,c3,c4), _unaligned_access(false), _mismatched_access(false) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }

  static bool check_if_adr_maybe_raw(Node* adr);

public:
  // Helpers for the optimizer.  Documented in memnode.cpp.
  static bool detect_ptr_independence(Node* p1, AllocateNode* a1,
                                      Node* p2, AllocateNode* a2,
                                      PhaseTransform* phase);
  static bool adr_phi_is_loop_invariant(Node* adr_phi, Node* cast);

  static Node *optimize_simple_memory_chain(Node *mchain, const TypeOopPtr *t_oop, Node *load, PhaseGVN *phase);
  static Node *optimize_memory_chain(Node *mchain, const TypePtr *t_adr, Node *load, PhaseGVN *phase);
  // This one should probably be a phase-specific function:
  static bool all_controls_dominate(Node* dom, Node* sub);

  // Find any cast-away of null-ness and keep its control.
  static  Node *Ideal_common_DU_postCCP( PhaseCCP *ccp, Node* n, Node* adr );
  virtual Node *Ideal_DU_postCCP( PhaseCCP *ccp );

  virtual const class TypePtr *adr_type() const;  // returns bottom_type of address

  // Shared code for Ideal methods:
  Node *Ideal_common(PhaseGVN *phase, bool can_reshape);  // Return -1 for short-circuit NULL.

  // Helper function for adr_type() implementations.
  static const TypePtr* calculate_adr_type(const Type* t, const TypePtr* cross_check = NULL);

  // Raw access function, to allow copying of adr_type efficiently in
  // product builds and retain the debug info for debug builds.
  const TypePtr *raw_adr_type() const {
#ifdef ASSERT
    return _adr_type;
#else
    return 0;
#endif
  }

  // Map a load or store opcode to its corresponding store opcode.
  // (Return -1 if unknown.)
  virtual int store_Opcode() const { return -1; }

  // What is the type of the value in memory?  (T_VOID means "unspecified".)
  virtual BasicType memory_type() const = 0;
  virtual int memory_size() const {
#ifdef ASSERT
    return type2aelembytes(memory_type(), true);
#else
    return type2aelembytes(memory_type());
#endif
  }
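
  // For example, a LoadI/StoreI reports memory_size() == 4 and a
  // LoadB/StoreB reports 1, per type2aelembytes().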

  // Search through memory states which precede this node (load or store).
  // Look for an exact match for the address, with no intervening
  // aliased stores.
  Node* find_previous_store(PhaseTransform* phase);

  // Can this node (load or store) accurately see a stored value in
  // the given memory state?  (The state may or may not be in(Memory).)
  Node* can_see_stored_value(Node* st, PhaseTransform* phase) const;
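
  // Sketch of what this enables (illustrative): if the memory state seen by
  // a load is a store to the very same address, Identity() can fold the
  // load to the stored value:
  //
  //   mem' = StoreI(ctl, mem0, adr, val)
  //   LoadI(ctl, mem', adr)  ==>  val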

  void set_unaligned_access() { _unaligned_access = true; }
  bool is_unaligned_access() const { return _unaligned_access; }
  void set_mismatched_access() { _mismatched_access = true; }
  bool is_mismatched_access() const { return _mismatched_access; }

#ifndef PRODUCT
  static void dump_adr_type(const Node* mem, const TypePtr* adr_type, outputStream *st);
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------LoadNode---------------------------------------
// Load value; requires Memory and Address
class LoadNode : public MemNode {
public:
  // Some loads (from unsafe) should be pinned: they don't depend only
  // on the dominating test.  The boolean field _depends_only_on_test
  // below records whether that node depends only on the dominating
  // test.
  // Methods used to build LoadNodes pass an argument of type enum
  // ControlDependency instead of a boolean because those methods
  // typically have multiple boolean parameters with default values:
  // passing the wrong boolean to one of these parameters by mistake
  // goes easily unnoticed. Using an enum, the compiler can check that
  // the type of a value and the type of the parameter match.
  enum ControlDependency {
    Pinned,
    DependsOnlyOnTest
  };
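
  // Illustrative sketch (assumed context): a load generated for an Unsafe
  // access that must not float above its guarding checks is created with
  // LoadNode::Pinned rather than the default:
  //
  //   Node* ld = LoadNode::make(gvn, ctl, mem, adr, adr_type, TypeInt::INT,
  //                             T_INT, MemNode::unordered, LoadNode::Pinned);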
private:
  // LoadNode::hash() doesn't take the _depends_only_on_test field
  // into account: If the graph already has a non-pinned LoadNode and
  // we add a pinned LoadNode with the same inputs, it's safe for GVN
  // to replace the pinned LoadNode with the non-pinned LoadNode,
  // otherwise it wouldn't be safe to have a non-pinned LoadNode with
  // those inputs in the first place. If the graph already has a
  // pinned LoadNode and we add a non-pinned LoadNode with the same
  // inputs, it's safe (but suboptimal) for GVN to replace the
  // non-pinned LoadNode by the pinned LoadNode.
  bool _depends_only_on_test;

  // On platforms with weak memory ordering (e.g., PPC, Ia64) we distinguish
  // between loads that can be freely reordered and loads that require
  // acquire semantics to adhere to the Java specification.  The required
  // behaviour is stored in this field.
  const MemOrd _mo;

protected:
  virtual uint cmp(const Node &n) const;
  virtual uint size_of() const; // Size is bigger
  // Should LoadNode::Ideal() attempt to remove control edges?
  virtual bool can_remove_control() const;
  const Type* const _type;      // What kind of value is loaded?
public:

  LoadNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *rt, MemOrd mo, ControlDependency control_dependency)
    : MemNode(c,mem,adr,at), _type(rt), _mo(mo), _depends_only_on_test(control_dependency == DependsOnlyOnTest) {
    init_class_id(Class_Load);
  }
  inline bool is_unordered() const { return !is_acquire(); }
  inline bool is_acquire() const {
    assert(_mo == unordered || _mo == acquire, "unexpected");
    return _mo == acquire;
  }

  // Polymorphic factory method:
  static Node* make(PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
                    const TypePtr* at, const Type *rt, BasicType bt,
                    MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest);
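
  // Usage sketch (illustrative; gvn, ctl, mem, adr and adr_type are assumed
  // to come from the surrounding context): a plain int field read versus a
  // Java volatile int read, which needs acquire semantics:
  //
  //   Node* ld  = LoadNode::make(gvn, ctl, mem, adr, adr_type,
  //                              TypeInt::INT, T_INT, MemNode::unordered);
  //   Node* vld = LoadNode::make(gvn, ctl, mem, adr, adr_type,
  //                              TypeInt::INT, T_INT, MemNode::acquire);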

  virtual uint hash()   const;  // Check the type

  // Handle algebraic identities here.  If we have an identity, return the Node
  // we are equivalent to.  We look for Load of a Store.
  virtual Node *Identity( PhaseTransform *phase );

  // If the load is from Field memory and the pointer is non-null, it might be possible to
  // zero out the control input.
  // If the offset is constant and the base is an object allocation,
  // try to hook me up to the exact initializing store.
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Split instance field load through Phi.
  Node* split_through_phi(PhaseGVN *phase);

  // Recover original value from boxed values
  Node *eliminate_autobox(PhaseGVN *phase);

  // Compute a new Type for this node.  Basically we just do the pre-check,
  // then call the virtual add() to set the type.
  virtual const Type *Value( PhaseTransform *phase ) const;

  // Common methods for LoadKlass and LoadNKlass nodes.
  const Type *klass_value_common( PhaseTransform *phase ) const;
  Node *klass_identity_common( PhaseTransform *phase );

  virtual uint ideal_reg() const;
  virtual const Type *bottom_type() const;
  // Following method is copied from TypeNode:
  void set_type(const Type* t) {
    assert(t != NULL, "sanity");
    debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
    *(const Type**)&_type = t;   // cast away const-ness
    // If this node is in the hash table, make sure it doesn't need a rehash.
    assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code");
  }
  const Type* type() const { assert(_type != NULL, "sanity"); return _type; };

  // Do not match memory edge
  virtual uint match_edge(uint idx) const;

  // Map a load opcode to its corresponding store opcode.
  virtual int store_Opcode() const = 0;

  // Check if the load's memory input is a Phi node with the same control.
  bool is_instance_field_load_with_local_phi(Node* ctrl);

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
#ifdef ASSERT
  // Helper function to allow a raw load without control edge for some cases
  static bool is_immutable_value(Node* adr);
#endif
protected:
  const Type* load_array_final_field(const TypeKlassPtr *tkls,
                                     ciKlass* klass) const;
  // depends_only_on_test is almost always true, and needs to be almost always
  // true to enable key hoisting & commoning optimizations.  However, for the
  // special case of RawPtr loads from TLS top & end, and other loads performed by
  // GC barriers, the control edge carries the dependence preventing hoisting past
  // a Safepoint instead of the memory edge.  (An unfortunate consequence of having
  // Safepoints not set Raw Memory; itself an unfortunate consequence of having Nodes
  // which produce results (new raw memory state) inside of loops preventing all
  // manner of other optimizations).  Basically, it's ugly but so is the alternative.
  // See comment in macro.cpp, around line 125 expand_allocate_common().
  virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM && _depends_only_on_test; }
};

//------------------------------LoadBNode--------------------------------------
// Load a byte (8bits signed) from memory
class LoadBNode : public LoadNode {
public:
  LoadBNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value(PhaseTransform *phase) const;
  virtual int store_Opcode() const { return Op_StoreB; }
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------LoadUBNode-------------------------------------
// Load an unsigned byte (8 bits unsigned) from memory
class LoadUBNode : public LoadNode {
public:
  LoadUBNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeInt* ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node* Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value(PhaseTransform *phase) const;
  virtual int store_Opcode() const { return Op_StoreB; }
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------LoadUSNode-------------------------------------
// Load an unsigned short/char (16bits unsigned) from memory
class LoadUSNode : public LoadNode {
public:
  LoadUSNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value(PhaseTransform *phase) const;
  virtual int store_Opcode() const { return Op_StoreC; }
  virtual BasicType memory_type() const { return T_CHAR; }
};

//------------------------------LoadSNode--------------------------------------
// Load a short (16bits signed) from memory
class LoadSNode : public LoadNode {
public:
  LoadSNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value(PhaseTransform *phase) const;
  virtual int store_Opcode() const { return Op_StoreC; }
  virtual BasicType memory_type() const { return T_SHORT; }
};

//------------------------------LoadINode--------------------------------------
// Load an integer from memory
class LoadINode : public LoadNode {
public:
  LoadINode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual int store_Opcode() const { return Op_StoreI; }
  virtual BasicType memory_type() const { return T_INT; }
};

//------------------------------LoadRangeNode----------------------------------
// Load an array length from the array
class LoadRangeNode : public LoadINode {
public:
  LoadRangeNode(Node *c, Node *mem, Node *adr, const TypeInt *ti = TypeInt::POS)
    : LoadINode(c, mem, adr, TypeAryPtr::RANGE, ti, MemNode::unordered) {}
  virtual int Opcode() const;
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
};

//------------------------------LoadLNode--------------------------------------
// Load a long from memory
class LoadLNode : public LoadNode {
  virtual uint hash() const { return LoadNode::hash() + _require_atomic_access; }
  virtual uint cmp( const Node &n ) const {
    return _require_atomic_access == ((LoadLNode&)n)._require_atomic_access
      && LoadNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise load forbidden?

public:
  LoadLNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeLong *tl,
            MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest, bool require_atomic_access = false)
    : LoadNode(c, mem, adr, at, tl, mo, control_dependency), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegL; }
  virtual int store_Opcode() const { return Op_StoreL; }
  virtual BasicType memory_type() const { return T_LONG; }
  bool require_atomic_access() const { return _require_atomic_access; }
  static LoadLNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type,
                                const Type* rt, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest);
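
  // Sketch (illustrative): on a 32-bit VM a Java volatile long must not be
  // loaded piecewise, so such a load is created via make_atomic(), e.g.
  //
  //   LoadLNode* ld = LoadLNode::make_atomic(C, ctl, mem, adr, adr_type,
  //                                          TypeLong::LONG, MemNode::acquire);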
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    LoadNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif
};

//------------------------------LoadL_unalignedNode----------------------------
// Load a long from unaligned memory
class LoadL_unalignedNode : public LoadLNode {
public:
  LoadL_unalignedNode(Node *c, Node *mem, Node *adr, const TypePtr* at, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadLNode(c, mem, adr, at, TypeLong::LONG, mo, control_dependency) {}
  virtual int Opcode() const;
};

//------------------------------LoadFNode--------------------------------------
// Load a float (32 bits) from memory
class LoadFNode : public LoadNode {
public:
  LoadFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegF; }
  virtual int store_Opcode() const { return Op_StoreF; }
  virtual BasicType memory_type() const { return T_FLOAT; }
};

//------------------------------LoadDNode--------------------------------------
// Load a double (64 bits) from memory
class LoadDNode : public LoadNode {
  virtual uint hash() const { return LoadNode::hash() + _require_atomic_access; }
  virtual uint cmp( const Node &n ) const {
    return _require_atomic_access == ((LoadDNode&)n)._require_atomic_access
      && LoadNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise load forbidden?

public:
  LoadDNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t,
            MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest, bool require_atomic_access = false)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegD; }
  virtual int store_Opcode() const { return Op_StoreD; }
  virtual BasicType memory_type() const { return T_DOUBLE; }
426 427
  bool require_atomic_access() const { return _require_atomic_access; }
  static LoadDNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type,
428
                                const Type* rt, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest);
429 430 431 432 433 434
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    LoadNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif
};

//------------------------------LoadD_unalignedNode----------------------------
// Load a double from unaligned memory
class LoadD_unalignedNode : public LoadDNode {
public:
  LoadD_unalignedNode(Node *c, Node *mem, Node *adr, const TypePtr* at, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadDNode(c, mem, adr, at, Type::DOUBLE, mo, control_dependency) {}
  virtual int Opcode() const;
};

//------------------------------LoadPNode--------------------------------------
// Load a pointer from memory (either object or array)
class LoadPNode : public LoadNode {
public:
  LoadPNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypePtr* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual int store_Opcode() const { return Op_StoreP; }
  virtual BasicType memory_type() const { return T_ADDRESS; }
};


//------------------------------LoadNNode--------------------------------------
// Load a narrow oop from memory (either object or array)
class LoadNNode : public LoadNode {
public:
  LoadNNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const Type* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegN; }
  virtual int store_Opcode() const { return Op_StoreN; }
  virtual BasicType memory_type() const { return T_NARROWOOP; }
};

//------------------------------LoadKlassNode----------------------------------
// Load a Klass from an object
class LoadKlassNode : public LoadPNode {
protected:
  // In most cases, LoadKlassNode does not have the control input set. If the control
  // input is set, it must not be removed (by LoadNode::Ideal()).
  virtual bool can_remove_control() const;
public:
  LoadKlassNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeKlassPtr *tk, MemOrd mo)
    : LoadPNode(c, mem, adr, at, tk, mo) {}
  virtual int Opcode() const;
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual bool depends_only_on_test() const { return true; }

  // Polymorphic factory method:
  static Node* make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* at,
                    const TypeKlassPtr* tk = TypeKlassPtr::OBJECT);
};

//------------------------------LoadNKlassNode---------------------------------
// Load a narrow Klass from an object.
class LoadNKlassNode : public LoadNNode {
public:
  LoadNKlassNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeNarrowKlass *tk, MemOrd mo)
    : LoadNNode(c, mem, adr, at, tk, mo) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegN; }
  virtual int store_Opcode() const { return Op_StoreNKlass; }
  virtual BasicType memory_type() const { return T_NARROWKLASS; }

  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual bool depends_only_on_test() const { return true; }
};


//------------------------------StoreNode--------------------------------------
// Store value; requires Store, Address and Value
class StoreNode : public MemNode {
private:
  // On platforms with weak memory ordering (e.g., PPC, Ia64) we distinguish
  // between stores that can be freely reordered and stores that require
  // release semantics to adhere to the Java specification.  The required
  // behaviour is stored in this field.
  const MemOrd _mo;
  // Needed for proper cloning.
  virtual uint size_of() const { return sizeof(*this); }
protected:
  virtual uint cmp( const Node &n ) const;
  virtual bool depends_only_on_test() const { return false; }

  Node *Ideal_masked_input       (PhaseGVN *phase, uint mask);
  Node *Ideal_sign_extended_input(PhaseGVN *phase, int  num_bits);

public:
  // We must ensure that stores of object references will be visible
  // only after the object's initialization. So the callers of this
  // procedure must indicate that the store requires `release'
  // semantics, if the stored value is an object reference that might
  // point to a new object and may become externally visible.
  StoreNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : MemNode(c, mem, adr, at, val), _mo(mo) {
    init_class_id(Class_Store);
  }
  StoreNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store, MemOrd mo)
    : MemNode(c, mem, adr, at, val, oop_store), _mo(mo) {
    init_class_id(Class_Store);
  }

  inline bool is_unordered() const { return !is_release(); }
  inline bool is_release() const {
    assert((_mo == unordered || _mo == release), "unexpected");
    return _mo == release;
  }

  // Conservatively release stores of object references in order to
  // ensure visibility of object initialization.
  static inline MemOrd release_if_reference(const BasicType t) {
    const MemOrd mo = (t == T_ARRAY ||
                       t == T_ADDRESS || // Might be the address of an object reference (`boxing').
                       t == T_OBJECT) ? release : unordered;
    return mo;
  }

  // Polymorphic factory method
  //
  // We must ensure that stores of object references will be visible
  // only after the object's initialization. So the callers of this
  // procedure must indicate that the store requires `release'
  // semantics, if the stored value is an object reference that might
  // point to a new object and may become externally visible.
  static StoreNode* make(PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
                         const TypePtr* at, Node *val, BasicType bt, MemOrd mo);
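
  // Sketch (assumed surrounding context): an oop field store goes through
  // release_if_reference() so that stores of object references are
  // conservatively given release semantics:
  //
  //   Node* st = StoreNode::make(gvn, ctl, mem, adr, adr_type, val, T_OBJECT,
  //                              StoreNode::release_if_reference(T_OBJECT));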

  virtual uint hash() const;    // Check the type

  // If the store is to Field memory and the pointer is non-null, we can
  // zero out the control input.
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Compute a new Type for this node.  Basically we just do the pre-check,
  // then call the virtual add() to set the type.
  virtual const Type *Value( PhaseTransform *phase ) const;

  // Check for identity function on memory (Load then Store at same address)
  virtual Node *Identity( PhaseTransform *phase );

  // Do not match memory edge
  virtual uint match_edge(uint idx) const;

  virtual const Type *bottom_type() const;  // returns Type::MEMORY

  // Map a store opcode to its corresponding own opcode, trivially.
  virtual int store_Opcode() const { return Opcode(); }

  // have all possible loads of the value stored been optimized away?
  bool value_never_loaded(PhaseTransform *phase) const;
};

//------------------------------StoreBNode-------------------------------------
// Store byte to memory
class StoreBNode : public StoreNode {
public:
  StoreBNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------StoreCNode-------------------------------------
// Store char/short to memory
class StoreCNode : public StoreNode {
public:
  StoreCNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual BasicType memory_type() const { return T_CHAR; }
};

//------------------------------StoreINode-------------------------------------
// Store int to memory
class StoreINode : public StoreNode {
public:
  StoreINode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_INT; }
};

//------------------------------StoreLNode-------------------------------------
// Store long to memory
class StoreLNode : public StoreNode {
  virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
  virtual uint cmp( const Node &n ) const {
    return _require_atomic_access == ((StoreLNode&)n)._require_atomic_access
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise store forbidden?

public:
  StoreLNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo, bool require_atomic_access = false)
    : StoreNode(c, mem, adr, at, val, mo), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_LONG; }
  bool require_atomic_access() const { return _require_atomic_access; }
  static StoreLNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    StoreNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif
};

//------------------------------StoreFNode-------------------------------------
// Store float to memory
class StoreFNode : public StoreNode {
public:
  StoreFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_FLOAT; }
};

//------------------------------StoreDNode-------------------------------------
// Store double to memory
class StoreDNode : public StoreNode {
  virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
  virtual uint cmp( const Node &n ) const {
    return _require_atomic_access == ((StoreDNode&)n)._require_atomic_access
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise store forbidden?
public:
  StoreDNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val,
             MemOrd mo, bool require_atomic_access = false)
    : StoreNode(c, mem, adr, at, val, mo), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_DOUBLE; }
  bool require_atomic_access() const { return _require_atomic_access; }
  static StoreDNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    StoreNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif

};

//------------------------------StorePNode-------------------------------------
// Store pointer to memory
class StorePNode : public StoreNode {
public:
  StorePNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_ADDRESS; }
};

//------------------------------StoreNNode-------------------------------------
// Store narrow oop to memory
class StoreNNode : public StoreNode {
public:
  StoreNNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_NARROWOOP; }
};

//------------------------------StoreNKlassNode--------------------------------------
// Store narrow klass to memory
class StoreNKlassNode : public StoreNNode {
public:
  StoreNKlassNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_NARROWKLASS; }
};

//------------------------------StoreCMNode-----------------------------------
// Store card-mark byte to memory for CM
// The last StoreCM before a SafePoint must be preserved and occur after its "oop" store
// Preceding equivalent StoreCMs may be eliminated.
class StoreCMNode : public StoreNode {
 private:
  virtual uint hash() const { return StoreNode::hash() + _oop_alias_idx; }
  virtual uint cmp( const Node &n ) const {
    return _oop_alias_idx == ((StoreCMNode&)n)._oop_alias_idx
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  int _oop_alias_idx;   // The alias_idx of OopStore

public:
  StoreCMNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store, int oop_alias_idx ) :
    StoreNode(c, mem, adr, at, val, oop_store, MemNode::release),
    _oop_alias_idx(oop_alias_idx) {
    assert(_oop_alias_idx >= Compile::AliasIdxRaw ||
           _oop_alias_idx == Compile::AliasIdxBot && Compile::current()->AliasLevel() == 0,
           "bad oop alias idx");
  }
  virtual int Opcode() const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual BasicType memory_type() const { return T_VOID; } // unspecific
  int oop_alias_idx() const { return _oop_alias_idx; }
};

//------------------------------LoadPLockedNode---------------------------------
// Load-locked a pointer from memory (either object or array).
// On Sparc & Intel this is implemented as a normal pointer load.
// On PowerPC and friends it's a real load-locked.
class LoadPLockedNode : public LoadPNode {
public:
  LoadPLockedNode(Node *c, Node *mem, Node *adr, MemOrd mo)
    : LoadPNode(c, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM, mo) {}
  virtual int Opcode() const;
  virtual int store_Opcode() const { return Op_StorePConditional; }
  virtual bool depends_only_on_test() const { return true; }
};

//------------------------------SCMemProjNode---------------------------------------
// This class defines a projection of the memory state of a store conditional node.
// These nodes return a value, but also update memory.
class SCMemProjNode : public ProjNode {
public:
  enum {SCMEMPROJCON = (uint)-2};
  SCMemProjNode( Node *src) : ProjNode( src, SCMEMPROJCON) { }
  virtual int Opcode() const;
  virtual bool      is_CFG() const  { return false; }
  virtual const Type *bottom_type() const {return Type::MEMORY;}
  virtual const TypePtr *adr_type() const { return in(0)->in(MemNode::Memory)->adr_type();}
  virtual uint ideal_reg() const { return 0;} // memory projections don't have a register
  virtual const Type *Value( PhaseTransform *phase ) const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {};
#endif
};

//------------------------------LoadStoreNode---------------------------
// Note: is_Mem() method returns 'true' for this class.
class LoadStoreNode : public Node {
private:
  const Type* const _type;      // What kind of value is loaded?
  const TypePtr* _adr_type;     // What kind of memory is being addressed?
  virtual uint size_of() const; // Size is bigger
public:
  LoadStoreNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* rt, uint required );
  virtual bool depends_only_on_test() const { return false; }
  virtual uint match_edge(uint idx) const { return idx == MemNode::Address || idx == MemNode::ValueIn; }

  virtual const Type *bottom_type() const { return _type; }
  virtual uint ideal_reg() const;
  virtual const class TypePtr *adr_type() const { return _adr_type; }  // returns bottom_type of address

  bool result_not_used() const;
};

class LoadStoreConditionalNode : public LoadStoreNode {
public:
  enum {
    ExpectedIn = MemNode::ValueIn+1 // One more input than MemNode
  };
  LoadStoreConditionalNode(Node *c, Node *mem, Node *adr, Node *val, Node *ex);
};

//------------------------------StorePConditionalNode---------------------------
// Conditionally store pointer to memory, if no change since prior
// load-locked.  Sets flags for success or failure of the store.
class StorePConditionalNode : public LoadStoreConditionalNode {
public:
  StorePConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreConditionalNode(c, mem, adr, val, ll) { }
  virtual int Opcode() const;
  // Produces flags
  virtual uint ideal_reg() const { return Op_RegFlags; }
};

//------------------------------StoreIConditionalNode---------------------------
// Conditionally store int to memory, if no change since prior
// load-locked.  Sets flags for success or failure of the store.
class StoreIConditionalNode : public LoadStoreConditionalNode {
public:
  StoreIConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ii ) : LoadStoreConditionalNode(c, mem, adr, val, ii) { }
  virtual int Opcode() const;
  // Produces flags
  virtual uint ideal_reg() const { return Op_RegFlags; }
};

//------------------------------StoreLConditionalNode---------------------------
// Conditionally store long to memory, if no change since prior
// load-locked.  Sets flags for success or failure of the store.
class StoreLConditionalNode : public LoadStoreConditionalNode {
public:
  StoreLConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreConditionalNode(c, mem, adr, val, ll) { }
  virtual int Opcode() const;
  // Produces flags
  virtual uint ideal_reg() const { return Op_RegFlags; }
};


//------------------------------CompareAndSwapLNode---------------------------
class CompareAndSwapLNode : public LoadStoreConditionalNode {
public:
  CompareAndSwapLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreConditionalNode(c, mem, adr, val, ex) { }
  virtual int Opcode() const;
};


//------------------------------CompareAndSwapINode---------------------------
class CompareAndSwapINode : public LoadStoreConditionalNode {
public:
  CompareAndSwapINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreConditionalNode(c, mem, adr, val, ex) { }
  virtual int Opcode() const;
};


//------------------------------CompareAndSwapPNode---------------------------
class CompareAndSwapPNode : public LoadStoreConditionalNode {
public:
  CompareAndSwapPNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreConditionalNode(c, mem, adr, val, ex) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapNNode---------------------------
class CompareAndSwapNNode : public LoadStoreConditionalNode {
public:
  CompareAndSwapNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreConditionalNode(c, mem, adr, val, ex) { }
  virtual int Opcode() const;
};
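
// Wiring sketch (illustrative, JDK8-era allocation idiom): a CompareAndSwapP
// takes control, memory, the field address, the new value and the expected
// value; its memory effect is consumed through an SCMemProjNode while the
// node itself produces the boolean result:
//
//   Node* cas  = new (C) CompareAndSwapPNode(ctl, mem, adr, newval, oldval);
//   Node* proj = new (C) SCMemProjNode(cas);   // updated memory state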

//------------------------------GetAndAddINode---------------------------
class GetAndAddINode : public LoadStoreNode {
public:
  GetAndAddINode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::INT, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndAddLNode---------------------------
class GetAndAddLNode : public LoadStoreNode {
public:
  GetAndAddLNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeLong::LONG, 4) { }
  virtual int Opcode() const;
};


//------------------------------GetAndSetINode---------------------------
class GetAndSetINode : public LoadStoreNode {
public:
  GetAndSetINode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::INT, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetLNode---------------------------
class GetAndSetLNode : public LoadStoreNode {
public:
  GetAndSetLNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeLong::LONG, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetPNode---------------------------
class GetAndSetPNode : public LoadStoreNode {
public:
  GetAndSetPNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetNNode---------------------------
class GetAndSetNNode : public LoadStoreNode {
public:
  GetAndSetNNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
  virtual int Opcode() const;
};

//------------------------------ClearArray-------------------------------------
class ClearArrayNode: public Node {
public:
  ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base )
    : Node(ctrl,arymem,word_cnt,base) {
    init_class_id(Class_ClearArray);
  }
  virtual int         Opcode() const;
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  // ClearArray modifies array elements, and so affects only the
  // array memory addressed by the bottom_type of its base address.
  virtual const class TypePtr *adr_type() const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const;

  // Clear the given area of an object or array.
  // The start offset must always be aligned mod BytesPerInt.
  // The end offset must always be aligned mod BytesPerLong.
  // Return the new memory.
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            intptr_t start_offset,
                            intptr_t end_offset,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            intptr_t start_offset,
                            Node* end_offset,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            Node* start_offset,
                            Node* end_offset,
                            PhaseGVN* phase);
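
  // Illustrative use (assumed names, cf. InitializeNode::complete_stores):
  // zero a new object's body from the end of its header to its full size:
  //
  //   mem = ClearArrayNode::clear_memory(ctl, mem, rawoop,
  //                                      header_size, size_in_bytes, &gvn);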
  // Return allocation input memory edge if it is different instance
  // or itself if it is the one we are looking for.
  static bool step_through(Node** np, uint instance_id, PhaseTransform* phase);
};

//------------------------------StrIntrinsic-------------------------------
// Base class for Ideal nodes used in String intrinsic code.
class StrIntrinsicNode: public Node {
public:
  StrIntrinsicNode(Node* control, Node* char_array_mem,
                   Node* s1, Node* c1, Node* s2, Node* c2):
    Node(control, char_array_mem, s1, c1, s2, c2) {
  }

  StrIntrinsicNode(Node* control, Node* char_array_mem,
                   Node* s1, Node* s2, Node* c):
    Node(control, char_array_mem, s1, s2, c) {
  }

  StrIntrinsicNode(Node* control, Node* char_array_mem,
                   Node* s1, Node* s2):
    Node(control, char_array_mem, s1, s2) {
  }

  virtual bool depends_only_on_test() const { return false; }
  virtual const TypePtr* adr_type() const { return TypeAryPtr::CHARS; }
  virtual uint match_edge(uint idx) const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value(PhaseTransform *phase) const;
};

//------------------------------StrComp-------------------------------------
class StrCompNode: public StrIntrinsicNode {
public:
  StrCompNode(Node* control, Node* char_array_mem,
              Node* s1, Node* c1, Node* s2, Node* c2):
    StrIntrinsicNode(control, char_array_mem, s1, c1, s2, c2) {};
  virtual int Opcode() const;
  virtual const Type* bottom_type() const { return TypeInt::INT; }
};

//------------------------------StrEquals-------------------------------------
class StrEqualsNode: public StrIntrinsicNode {
public:
  StrEqualsNode(Node* control, Node* char_array_mem,
                Node* s1, Node* s2, Node* c):
    StrIntrinsicNode(control, char_array_mem, s1, s2, c) {};
  virtual int Opcode() const;
  virtual const Type* bottom_type() const { return TypeInt::BOOL; }
};

//------------------------------StrIndexOf-------------------------------------
class StrIndexOfNode: public StrIntrinsicNode {
public:
  StrIndexOfNode(Node* control, Node* char_array_mem,
              Node* s1, Node* c1, Node* s2, Node* c2):
    StrIntrinsicNode(control, char_array_mem, s1, c1, s2, c2) {};
  virtual int Opcode() const;
  virtual const Type* bottom_type() const { return TypeInt::INT; }
};

//------------------------------AryEq---------------------------------------
class AryEqNode: public StrIntrinsicNode {
public:
  AryEqNode(Node* control, Node* char_array_mem, Node* s1, Node* s2):
    StrIntrinsicNode(control, char_array_mem, s1, s2) {};
  virtual int Opcode() const;
  virtual const Type* bottom_type() const { return TypeInt::BOOL; }
};


//------------------------------EncodeISOArray--------------------------------
// encode char[] to byte[] in ISO_8859_1
class EncodeISOArrayNode: public Node {
public:
  EncodeISOArrayNode(Node *control, Node* arymem, Node* s1, Node* s2, Node* c): Node(control, arymem, s1, s2, c) {};
  virtual int Opcode() const;
  virtual bool depends_only_on_test() const { return false; }
  virtual const Type* bottom_type() const { return TypeInt::INT; }
  virtual const TypePtr* adr_type() const { return TypePtr::BOTTOM; }
  virtual uint match_edge(uint idx) const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value(PhaseTransform *phase) const;
};

//------------------------------MemBar-----------------------------------------
// There are different flavors of Memory Barriers to match the Java Memory
// Model.  Monitor-enter and volatile-load act as Acquires: no following ref
// can be moved to before them.  We insert a MemBar-Acquire after a FastLock or
// volatile-load.  Monitor-exit and volatile-store act as Release: no
// preceding ref can be moved to after them.  We insert a MemBar-Release
// before a FastUnlock or volatile-store.  All volatiles need to be
// serialized, so we follow all volatile-stores with a MemBar-Volatile to
// separate it from any following volatile-load.
class MemBarNode: public MultiNode {
  virtual uint hash() const ;                  // { return NO_HASH; }
  virtual uint cmp( const Node &n ) const ;    // Always fail, except on self

  virtual uint size_of() const { return sizeof(*this); }
  // Memory type this node is serializing.  Usually either rawptr or bottom.
  const TypePtr* _adr_type;

public:
  enum {
    Precedent = TypeFunc::Parms  // optional edge to force precedence
  };
  MemBarNode(Compile* C, int alias_idx, Node* precedent);
  virtual int Opcode() const = 0;
  virtual const class TypePtr *adr_type() const { return _adr_type; }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const { return 0; }
  virtual const Type *bottom_type() const { return TypeTuple::MEMBAR; }
  virtual Node *match( const ProjNode *proj, const Matcher *m );
  // Factory method.  Builds a wide or narrow membar.
  // Optional 'precedent' becomes an extra edge if not null.
  static MemBarNode* make(Compile* C, int opcode,
                          int alias_idx = Compile::AliasIdxBot,
                          Node* precedent = NULL);
};
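
// Usage sketch (illustrative; assumes the usual Op_MemBar* opcode constants):
// a Java volatile store is bracketed roughly as
//
//   Node* mb1 = MemBarNode::make(C, Op_MemBarRelease);  // before the store
//   ... the store itself, created with release semantics ...
//   Node* mb2 = MemBarNode::make(C, Op_MemBarVolatile); // after the store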

// "Acquire" - no following ref can move before (but earlier refs can
// follow, like an early Load stalled in cache).  Requires multi-cpu
// visibility.  Inserted after a volatile load.
class MemBarAcquireNode: public MemBarNode {
public:
  MemBarAcquireNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Acquire" - no following ref can move before (but earlier refs can
// follow, like an early Load stalled in cache).  Requires multi-cpu
// visibility.  Inserted independent of any load, as required
// for intrinsic sun.misc.Unsafe.loadFence().
class LoadFenceNode: public MemBarNode {
public:
  LoadFenceNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load).  Requires
// multi-cpu visibility.  Inserted before a volatile store.
class MemBarReleaseNode: public MemBarNode {
public:
  MemBarReleaseNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load).  Requires
// multi-cpu visibility.  Inserted independent of any store, as required
// for intrinsic sun.misc.Unsafe.storeFence().
class StoreFenceNode: public MemBarNode {
public:
  StoreFenceNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Acquire" - no following ref can move before (but earlier refs can
// follow, like an early Load stalled in cache).  Requires multi-cpu
// visibility.  Inserted after a FastLock.
class MemBarAcquireLockNode: public MemBarNode {
public:
  MemBarAcquireLockNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load).  Requires
// multi-cpu visibility.  Inserted before a FastUnLock.
class MemBarReleaseLockNode: public MemBarNode {
public:
  MemBarReleaseLockNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

class MemBarStoreStoreNode: public MemBarNode {
public:
  MemBarStoreStoreNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {
    init_class_id(Class_MemBarStoreStore);
  }
  virtual int Opcode() const;
};

// Ordering between a volatile store and a following volatile load.
// Requires multi-CPU visibility?
class MemBarVolatileNode: public MemBarNode {
public:
  MemBarVolatileNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// Ordering within the same CPU.  Used to order unsafe memory references
// inside the compiler when we lack alias info.  Not needed "outside" the
// compiler because the CPU does all the ordering for us.
class MemBarCPUOrderNode: public MemBarNode {
public:
  MemBarCPUOrderNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return 0; } // not matched in the AD file
};

// Isolation of object setup after an AllocateNode and before next safepoint.
// (See comment in memnode.cpp near InitializeNode::InitializeNode for semantics.)
class InitializeNode: public MemBarNode {
  friend class AllocateNode;

  enum {
    Incomplete    = 0,
    Complete      = 1,
    WithArraycopy = 2
  };
  int _is_complete;

  bool _does_not_escape;

public:
  enum {
    Control    = TypeFunc::Control,
    Memory     = TypeFunc::Memory,     // MergeMem for states affected by this op
    RawAddress = TypeFunc::Parms+0,    // the newly-allocated raw address
    RawStores  = TypeFunc::Parms+1     // zero or more stores (or TOP)
  };

  InitializeNode(Compile* C, int adr_type, Node* rawoop);
  virtual int Opcode() const;
  virtual uint size_of() const { return sizeof(*this); }
  virtual uint ideal_reg() const { return 0; } // not matched in the AD file
  virtual const RegMask &in_RegMask(uint) const;  // mask for RawAddress

  // Manage incoming memory edges via a MergeMem on in(Memory):
  Node* memory(uint alias_idx);

  // The raw memory edge coming directly from the Allocation.
  // The contents of this memory are *always* all-zero-bits.
  Node* zero_memory() { return memory(Compile::AliasIdxRaw); }

  // Return the corresponding allocation for this initialization (or null if none).
  // (Note: Both InitializeNode::allocation and AllocateNode::initialization
  // are defined in graphKit.cpp, which sets up the bidirectional relation.)
  AllocateNode* allocation();

  // Anything other than zeroing in this init?
  bool is_non_zero();

  // An InitializeNode must be completed before macro expansion is done.
  // Completion requires that the AllocateNode must be followed by
  // initialization of the new memory to zero, then by any initializers.
  bool is_complete() { return _is_complete != Incomplete; }
  bool is_complete_with_arraycopy() { return (_is_complete & WithArraycopy) != 0; }

  // Mark complete.  (Must not yet be complete.)
  void set_complete(PhaseGVN* phase);
  void set_complete_with_arraycopy() { _is_complete = Complete | WithArraycopy; }

  bool does_not_escape() { return _does_not_escape; }
  void set_does_not_escape() { _does_not_escape = true; }

#ifdef ASSERT
  // ensure all non-degenerate stores are ordered and non-overlapping
  bool stores_are_sane(PhaseTransform* phase);
#endif //ASSERT

  // See if this store can be captured; return offset where it initializes.
  // Return 0 if the store cannot be moved (any sort of problem).
  intptr_t can_capture_store(StoreNode* st, PhaseTransform* phase, bool can_reshape);

  // Capture another store; reformat it to write my internal raw memory.
  // Return the captured copy, else NULL if there is some sort of problem.
  Node* capture_store(StoreNode* st, intptr_t start, PhaseTransform* phase, bool can_reshape);
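
  // Illustrative use (an assumption about the caller, e.g. StoreNode::Ideal):
  //
  //   intptr_t offset = init->can_capture_store(st, phase, can_reshape);
  //   if (offset > 0) {  // 0 means the store cannot be captured
  //     Node* moved = init->capture_store(st, offset, phase, can_reshape);
  //     ...
  //   }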

  // Find captured store which corresponds to the range [start..start+size).
  // Return my own memory projection (meaning the initial zero bits)
  // if there is no such store.  Return NULL if there is a problem.
  Node* find_captured_store(intptr_t start, int size_in_bytes, PhaseTransform* phase);

  // Called when the associated AllocateNode is expanded into CFG.
  Node* complete_stores(Node* rawctl, Node* rawmem, Node* rawptr,
                        intptr_t header_size, Node* size_in_bytes,
                        PhaseGVN* phase);

 private:
  void remove_extra_zeroes();

  // Find out where a captured store should be placed (or already is placed).
  int captured_store_insertion_point(intptr_t start, int size_in_bytes,
                                     PhaseTransform* phase);

  static intptr_t get_store_offset(Node* st, PhaseTransform* phase);

  Node* make_raw_address(intptr_t offset, PhaseTransform* phase);

  bool detect_init_independence(Node* n, int& count);

  void coalesce_subword_stores(intptr_t header_size, Node* size_in_bytes,
                               PhaseGVN* phase);

  intptr_t find_next_fullword_store(uint i, PhaseGVN* phase);
};

//------------------------------MergeMem---------------------------------------
// (See comment in memnode.cpp near MergeMemNode::MergeMemNode for semantics.)
class MergeMemNode: public Node {
  virtual uint hash() const ;                  // { return NO_HASH; }
  virtual uint cmp( const Node &n ) const ;    // Always fail, except on self
  friend class MergeMemStream;
  MergeMemNode(Node* def);  // clients use MergeMemNode::make

public:
  // If the input is a whole memory state, clone it with all its slices intact.
  // Otherwise, make a new memory state with just that base memory input.
  // In either case, the result is a newly created MergeMem.
  static MergeMemNode* make(Compile* C, Node* base_memory);

  virtual int Opcode() const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return 0; }
  virtual const RegMask &out_RegMask() const;
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  // sparse accessors
  // Fetch the previously stored "set_memory_at", or else the base memory.
  // (Caller should clone it if it is a phi-nest.)
  Node* memory_at(uint alias_idx) const;
  // set the memory, regardless of its previous value
  void set_memory_at(uint alias_idx, Node* n);
  // the "base" is the memory that provides the non-finite support
  Node* base_memory() const       { return in(Compile::AliasIdxBot); }
  // warning: setting the base can implicitly set any of the other slices too
  void set_base_memory(Node* def);
  // sentinel value which denotes a copy of the base memory:
  Node*   empty_memory() const    { return in(Compile::AliasIdxTop); }
  static Node* make_empty_memory(); // where the sentinel comes from
  bool is_empty_memory(Node* n) const { assert((n == empty_memory()) == n->is_top(), "sanity"); return n->is_top(); }
  // hook for the iterator, to perform any necessary setup
  void iteration_setup(const MergeMemNode* other = NULL);
  // push sentinels until I am at least as long as the other (semantic no-op)
  void grow_to_match(const MergeMemNode* other);
  bool verify_sparse() const PRODUCT_RETURN0;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};
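
// Illustrative sketch of client use, built only from the declarations
// above:
//
//   MergeMemNode* mm = MergeMemNode::make(C, base_mem); // fresh MergeMem
//   mm->set_memory_at(alias_idx, new_slice);            // override one slice
//   Node* slice = mm->memory_at(other_idx); // previously set slice, else base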

class MergeMemStream : public StackObj {
 private:
  MergeMemNode*       _mm;
  const MergeMemNode* _mm2;  // optional second guy, contributes non-empty iterations
  Node*               _mm_base;  // loop-invariant base memory of _mm
  int                 _idx;
  int                 _cnt;
  Node*               _mem;
  Node*               _mem2;
  int                 _cnt2;

  void init(MergeMemNode* mm, const MergeMemNode* mm2 = NULL) {
    // subsume_node will break sparseness at times, whenever a memory slice
    // folds down to a copy of the base ("fat") memory.  In such a case,
    // the raw edge will update to base, although it should be top.
    // This iterator will recognize either top or base_memory as an
    // "empty" slice.  See is_empty, is_empty2, and next below.
    //
    // The sparseness property is repaired in MergeMemNode::Ideal.
    // As long as access to a MergeMem goes through this iterator
    // or the memory_at accessor, flaws in the sparseness will
    // never be observed.
    //
    // Also, iteration_setup repairs sparseness.
    assert(mm->verify_sparse(), "please, no dups of base");
    assert(mm2==NULL || mm2->verify_sparse(), "please, no dups of base");

    _mm  = mm;
    _mm_base = mm->base_memory();
    _mm2 = mm2;
    _cnt = mm->req();
    _idx = Compile::AliasIdxBot-1; // start at the base memory
    _mem = NULL;
    _mem2 = NULL;
  }

#ifdef ASSERT
  Node* check_memory() const {
    if (at_base_memory())
      return _mm->base_memory();
    else if ((uint)_idx < _mm->req() && !_mm->in(_idx)->is_top())
      return _mm->memory_at(_idx);
    else
      return _mm_base;
  }
  Node* check_memory2() const {
    return at_base_memory()? _mm2->base_memory(): _mm2->memory_at(_idx);
  }
#endif

  static bool match_memory(Node* mem, const MergeMemNode* mm, int idx) PRODUCT_RETURN0;
  void assert_synch() const {
    assert(!_mem || _idx >= _cnt || match_memory(_mem, _mm, _idx),
           "no side-effects except through the stream");
  }

 public:

  // expected usages:
  // for (MergeMemStream mms(mem->is_MergeMem()); next_non_empty(); ) { ... }
  // for (MergeMemStream mms(mem1, mem2); next_non_empty2(); ) { ... }

  // iterate over one merge
  MergeMemStream(MergeMemNode* mm) {
    mm->iteration_setup();
    init(mm);
    debug_only(_cnt2 = 999);
  }
  // iterate in parallel over two merges
  // only iterates through non-empty elements of mm2
  MergeMemStream(MergeMemNode* mm, const MergeMemNode* mm2) {
    assert(mm2, "second argument must be a MergeMem also");
    ((MergeMemNode*)mm2)->iteration_setup();  // update hidden state
    mm->iteration_setup(mm2);
    init(mm, mm2);
    _cnt2 = mm2->req();
  }
#ifdef ASSERT
  ~MergeMemStream() {
    assert_synch();
  }
#endif

  MergeMemNode* all_memory() const {
    return _mm;
  }
  Node* base_memory() const {
    assert(_mm_base == _mm->base_memory(), "no update to base memory, please");
    return _mm_base;
  }
  const MergeMemNode* all_memory2() const {
    assert(_mm2 != NULL, "");
    return _mm2;
  }
  bool at_base_memory() const {
    return _idx == Compile::AliasIdxBot;
  }
  int alias_idx() const {
    assert(_mem, "must call next 1st");
    return _idx;
  }

  const TypePtr* adr_type() const {
    return Compile::current()->get_adr_type(alias_idx());
  }

  const TypePtr* adr_type(Compile* C) const {
    return C->get_adr_type(alias_idx());
  }
  bool is_empty() const {
    assert(_mem, "must call next 1st");
    assert(_mem->is_top() == (_mem==_mm->empty_memory()), "correct sentinel");
    return _mem->is_top();
  }
  bool is_empty2() const {
    assert(_mem2, "must call next 1st");
    assert(_mem2->is_top() == (_mem2==_mm2->empty_memory()), "correct sentinel");
    return _mem2->is_top();
  }
  Node* memory() const {
    assert(!is_empty(), "must not be empty");
    assert_synch();
    return _mem;
  }
  // get the current memory, regardless of empty or non-empty status
  Node* force_memory() const {
    assert(!is_empty() || !at_base_memory(), "");
    // Use _mm_base to defend against updates to _mem->base_memory().
    Node *mem = _mem->is_top() ? _mm_base : _mem;
    assert(mem == check_memory(), "");
    return mem;
  }
  Node* memory2() const {
    assert(_mem2 == check_memory2(), "");
    return _mem2;
  }
  void set_memory(Node* mem) {
    if (at_base_memory()) {
      // Note that this does not change the invariant _mm_base.
      _mm->set_base_memory(mem);
    } else {
      _mm->set_memory_at(_idx, mem);
    }
    _mem = mem;
    assert_synch();
  }

  // Recover from a side effect to the MergeMemNode.
  void set_memory() {
    _mem = _mm->in(_idx);
  }

  bool next()  { return next(false); }
  bool next2() { return next(true); }

  bool next_non_empty()  { return next_non_empty(false); }
  bool next_non_empty2() { return next_non_empty(true); }
  // next_non_empty2 can yield states where is_empty() is true

 private:
  // find the next item, which might be empty
  bool next(bool have_mm2) {
    assert((_mm2 != NULL) == have_mm2, "use other next");
    assert_synch();
    if (++_idx < _cnt) {
      // Note:  This iterator allows _mm to be non-sparse.
      // It behaves the same whether _mem is top or base_memory.
      _mem = _mm->in(_idx);
      if (have_mm2)
        _mem2 = _mm2->in((_idx < _cnt2) ? _idx : Compile::AliasIdxTop);
      return true;
    }
    return false;
  }

  // find the next non-empty item
  bool next_non_empty(bool have_mm2) {
    while (next(have_mm2)) {
      if (!is_empty()) {
        // make sure _mem2 is filled in sensibly
        if (have_mm2 && _mem2->is_top())  _mem2 = _mm2->base_memory();
        return true;
      } else if (have_mm2 && !is_empty2()) {
        return true;   // is_empty() == true
      }
    }
    return false;
  }
};
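
// Illustrative sketch (an assumption about typical callers): parallel
// iteration combines two memory states slice by slice, e.g. copying any
// slice that is non-empty only in mm2:
//
//   for (MergeMemStream mms(mm, mm2); mms.next_non_empty2(); ) {
//     if (mms.is_empty())               // present in mm2 but not in mm
//       mms.set_memory(mms.memory2());
//   }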

//------------------------------Prefetch---------------------------------------

// Non-faulting prefetch load.  Prefetch for many reads.
class PrefetchReadNode : public Node {
public:
  PrefetchReadNode(Node *abio, Node *adr) : Node(0,abio,adr) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return idx==2; }
  virtual const Type *bottom_type() const { return Type::ABIO; }
};

// Non-faulting prefetch load.  Prefetch for many reads & many writes.
class PrefetchWriteNode : public Node {
public:
  PrefetchWriteNode(Node *abio, Node *adr) : Node(0,abio,adr) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return idx==2; }
  virtual const Type *bottom_type() const { return Type::ABIO; }
};

// Allocation prefetch which may fault, TLAB size have to be adjusted.
class PrefetchAllocationNode : public Node {
public:
  PrefetchAllocationNode(Node *mem, Node *adr) : Node(0,mem,adr) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return idx==2; }
  virtual const Type *bottom_type() const { return ( AllocatePrefetchStyle == 3 ) ? Type::MEMORY : Type::ABIO; }
};

#endif // SHARE_VM_OPTO_MEMNODE_HPP