/*
 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_CODE_COMPILEDIC_HPP
#define SHARE_VM_CODE_COMPILEDIC_HPP

#include "interpreter/linkResolver.hpp"
#include "oops/compiledICHolderKlass.hpp"
#include "oops/compiledICHolderOop.hpp"
#include "oops/klassOop.hpp"
#ifdef TARGET_ARCH_x86
# include "nativeInst_x86.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "nativeInst_sparc.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "nativeInst_zero.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "nativeInst_arm.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "nativeInst_ppc.hpp"
#endif

//-----------------------------------------------------------------------------
// The CompiledIC represents a compiled inline cache.
//
// In order to make patching of the inline cache MT-safe, we only allow the following
// transitions (when not at a safepoint):
//
//
//         [1] --<--  Clean -->---  [1]
//            /       (null)      \
//           /                     \      /-<-\
//          /          [2]          \    /     \
//      Interpreted  ---------> Monomorphic     | [3]
//  (compiledICHolderOop)        (klassOop)     |
//          \                        /   \     /
//       [4] \                      / [4] \->-/
//            \->-  Megamorphic -<-/
//                  (methodOop)
//
// The text in parentheses () refers to the value of the inline cache receiver (mov instruction)
//
// The numbers in square brackets refer to the kind of transition:
// [1]: Initial fixup. Receiver is found from debug information
// [2]: Compilation of a method
// [3]: Recompilation of a method (note: only entry is changed. The klassOop must stay the same)
// [4]: Inline cache miss. We go directly to megamorphic call.
//
// The class automatically inserts transition stubs (using the InlineCacheBuffer) when an MT-unsafe
// transition is made to a stub.
//
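// A hypothetical usage sketch (illustration only, not part of this interface): transitioning a
// clean inline cache to the monomorphic state (transition [2] above). The call-site address,
// callee method, receiver klass and CHECK propagation are assumed names; the real call sites
// live in the runtime, and this only sketches how the API declared below fits together.
//
//   {
//     MutexLocker ml(CompiledIC_lock);                  // patching requires the CompiledIC_lock
//     CompiledIC* ic = CompiledIC_at(call_site_addr);   // locate the IC at the call instruction
//     if (ic->is_clean()) {
//       CompiledICInfo info;
//       CompiledIC::compute_monomorphic_entry(callee_method, receiver_klass,
//                                             false /* is_optimized */, false /* static_bound */,
//                                             info, CHECK);
//       ic->set_to_monomorphic(info);                   // may go through a transition stub
//     }
//   }
//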
class CompiledIC;

class CompiledICInfo {
  friend class CompiledIC;
 private:
  address _entry;              // entry point for call
  Handle  _cached_oop;         // Value of cached_oop (either in stub or inline cache)
  bool    _is_optimized;       // it is an optimized virtual call (i.e., can be statically bound)
  bool    _to_interpreter;     // Call goes to the interpreter
 public:
  address entry() const        { return _entry; }
  Handle  cached_oop() const   { return _cached_oop; }
  bool    is_optimized() const { return _is_optimized; }
};

class CompiledIC: public ResourceObj {
  friend class InlineCacheBuffer;
  friend class ICStub;


 private:
  NativeCall*   _ic_call;       // the call instruction
  oop*          _oop_addr;      // patchable oop cell for this IC
  RelocIterator _oops;          // iteration over any and all set-oop instructions
  bool          _is_optimized;  // an optimized virtual call (i.e., no compiled IC)

  CompiledIC(NativeCall* ic_call);
  CompiledIC(Relocation* ic_reloc);    // Must be of virtual_call_type/opt_virtual_call_type

  // Low-level inline-cache manipulation. Cannot be accessed directly, since it might not be MT-safe
  // to change an inline cache. These change the underlying inline cache directly. They *never* make
  // changes to a transition stub.
  void set_ic_destination(address entry_point);
  void set_cached_oop(oop cache);

  // Reads the location of the transition stub. This will fail with an assertion if no transition stub
  // is associated with the inline cache.
  address stub_address() const;
  bool is_in_transition_state() const;  // Use InlineCacheBuffer

 public:
  // conversion (machine PC to CompiledIC*)
  friend CompiledIC* CompiledIC_before(address return_addr);
  friend CompiledIC* CompiledIC_at(address call_site);
  friend CompiledIC* CompiledIC_at(Relocation* call_site);

  // Return the cached_oop/destination associated with this inline cache. If the cache currently points
  // to a transition stub, it will read the values from the transition stub.
  oop  cached_oop() const;
  address ic_destination() const;

  bool is_optimized() const   { return _is_optimized; }

  // State
  bool is_clean() const;
  bool is_megamorphic() const;
  bool is_call_to_compiled() const;
  bool is_call_to_interpreted() const;

  address end_of_call() { return  _ic_call->return_address(); }

  // MT-safe patching of inline caches. Note: it is only safe to call is_xxx when holding the
  // CompiledIC_lock, so you are guaranteed that no patching takes place. The same goes for verify.
  //
  // Note: We do not provide any direct access to the stub code, to prevent parts of the code
  // from manipulating the inline cache in MT-unsafe ways.
  //
  // They all take a TRAPS argument, since they can cause a GC if the inline-cache buffer is full.
  //
  void set_to_clean();  // Can only be called during a safepoint operation
  void set_to_monomorphic(const CompiledICInfo& info);
  void set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS);

  static void compute_monomorphic_entry(methodHandle method, KlassHandle receiver_klass,
                                        bool is_optimized, bool static_bound, CompiledICInfo& info, TRAPS);

  // Location
  address instruction_address() const { return _ic_call->instruction_address(); }

  // Misc
  void print()             PRODUCT_RETURN;
  void print_compiled_ic() PRODUCT_RETURN;
  void verify()            PRODUCT_RETURN;
};
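
// A hypothetical sketch of handling an inline cache miss (transition [4] above) by going straight
// to a megamorphic call. The caller_return_pc, call_info, bytecode and CHECK propagation are
// assumed names for illustration; they are not defined in this header.
//
//   {
//     MutexLocker ml(CompiledIC_lock);                       // is_xxx and patching need the lock
//     CompiledIC* ic = CompiledIC_before(caller_return_pc);  // IC of the call that just missed
//     if (!ic->is_megamorphic()) {
//       ic->set_to_megamorphic(&call_info, bytecode, CHECK); // may GC if the IC buffer is full
//     }
//   }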

inline CompiledIC* CompiledIC_before(address return_addr) {
  CompiledIC* c_ic = new CompiledIC(nativeCall_before(return_addr));
  c_ic->verify();
  return c_ic;
}

inline CompiledIC* CompiledIC_at(address call_site) {
  CompiledIC* c_ic = new CompiledIC(nativeCall_at(call_site));
  c_ic->verify();
  return c_ic;
}

inline CompiledIC* CompiledIC_at(Relocation* call_site) {
  CompiledIC* c_ic = new CompiledIC(call_site);
  c_ic->verify();
  return c_ic;
}


//-----------------------------------------------------------------------------
// The CompiledStaticCall represents a call to a static method in compiled code.
//
// The transition diagram of a static call site is somewhat simpler than for an inline cache:
//
//
//           -----<----- Clean ----->-----
//          /                             \
//         /                               \
//    compiled code <-------------> interpreted code
//
//  Clean:            Calls directly to the runtime method for fixup
//  Compiled code:    Calls directly to compiled code
//  Interpreted code: Calls to a stub that sets the methodOop reference
//
//
class CompiledStaticCall;

class StaticCallInfo {
 private:
  address      _entry;          // Entrypoint
  methodHandle _callee;         // Callee (used when calling interpreter)
  bool         _to_interpreter; // call to interpreted method (otherwise compiled)

  friend class CompiledStaticCall;
 public:
  address      entry() const    { return _entry;  }
  methodHandle callee() const   { return _callee; }
};


class CompiledStaticCall: public NativeCall {
  friend class CompiledIC;

  // Also used by CompiledIC
  void set_to_interpreted(methodHandle callee, address entry);
  bool is_optimized_virtual();

 public:
  friend CompiledStaticCall* compiledStaticCall_before(address return_addr);
  friend CompiledStaticCall* compiledStaticCall_at(address native_call);
  friend CompiledStaticCall* compiledStaticCall_at(Relocation* call_site);

  // State
  bool is_clean() const;
  bool is_call_to_compiled() const;
  bool is_call_to_interpreted() const;

  // Clean static call (will force resolving on next use)
  void set_to_clean();

  // Set state. The entry must be the same as the one computed by compute_entry.
  // Computation and setting are split up, since the actions are separate during
  // an OptoRuntime::resolve_xxx.
  void set(const StaticCallInfo& info);

  // Compute entry point given a method
  static void compute_entry(methodHandle m, StaticCallInfo& info);

  // Stub support
  address find_stub();
  static void set_stub_to_clean(static_stub_Relocation* static_stub);

  // Misc.
  void print()  PRODUCT_RETURN;
  void verify() PRODUCT_RETURN;
};
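
// A hypothetical sketch of resolving a static call site, mirroring the compute_entry/set split
// described above: the entry is computed first, then the call is patched. The callee_method and
// call_site_addr names are assumptions for illustration only.
//
//   StaticCallInfo info;
//   CompiledStaticCall::compute_entry(callee_method, info);          // choose compiled vs. interpreted entry
//   CompiledStaticCall* csc = compiledStaticCall_at(call_site_addr);
//   if (csc->is_clean()) {
//     csc->set(info);                                                // patch the call to the computed entry
//   }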


inline CompiledStaticCall* compiledStaticCall_before(address return_addr) {
  CompiledStaticCall* st = (CompiledStaticCall*)nativeCall_before(return_addr);
  st->verify();
  return st;
}

inline CompiledStaticCall* compiledStaticCall_at(address native_call) {
  CompiledStaticCall* st = (CompiledStaticCall*)native_call;
  st->verify();
  return st;
}

inline CompiledStaticCall* compiledStaticCall_at(Relocation* call_site) {
  return compiledStaticCall_at(call_site->addr());
}

#endif // SHARE_VM_CODE_COMPILEDIC_HPP