/*
 * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "interpreter/bytecodes.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/rewriter.hpp"
#include "memory/gcLocker.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "oops/generateOopMap.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodComparator.hpp"

// Computes a CPC map (new_index -> original_index) for constant pool entries
// that are referred to by the interpreter at runtime via the constant pool cache.
// Also computes a CP map (original_index -> new_index).
// Marks entries in CP which require additional processing.
void Rewriter::compute_index_maps() {
  const int length  = _pool->length();
  init_cp_map(length);
  jint tag_mask = 0;
  for (int i = 0; i < length; i++) {
    int tag = _pool->tag_at(i).value();
    tag_mask |= (1 << tag);
    switch (tag) {
      case JVM_CONSTANT_InterfaceMethodref:
      case JVM_CONSTANT_Fieldref          : // fall through
      case JVM_CONSTANT_Methodref         : // fall through
      case JVM_CONSTANT_MethodHandle      : // fall through
      case JVM_CONSTANT_MethodType        : // fall through
      case JVM_CONSTANT_InvokeDynamic     : // fall through
      case JVM_CONSTANT_InvokeDynamicTrans: // fall through
        add_cp_cache_entry(i);
        break;
    }
  }

  guarantee((int)_cp_cache_map.length()-1 <= (int)((u2)-1),
            "all cp cache indexes fit in a u2");

  _have_invoke_dynamic = ((tag_mask & (1 << JVM_CONSTANT_InvokeDynamic)) != 0);
  _have_invoke_dynamic |= ((tag_mask & (1 << JVM_CONSTANT_InvokeDynamicTrans)) != 0);
}
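
// Illustrative sketch (editorial note, not in the original source): for a
// constant pool such as
//
//   #1 = Methodref   java/lang/Object.<init>:()V
//   #2 = Fieldref    Foo.count:I
//   #3 = Integer     42
//
// the loop above hands #1 and #2 to add_cp_cache_entry() because their tags
// appear in the switch, while #3 is skipped.  The resulting maps are, roughly,
//
//   cp_entry_to_cp_cache:  1 -> 0, 2 -> 1     (CP index -> cache index)
//   _cp_cache_map:         0 -> 1, 1 -> 2     (cache index -> CP index)
//
// so each rewritten bytecode can later translate its classfile CP index into
// the cache slot the interpreter actually uses.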


// Creates a constant pool cache given a CPC map
// This creates the constant pool cache initially in a state
// that is unsafe for concurrent GC processing but sets it to
// a safe mode before the constant pool cache is returned.
void Rewriter::make_constant_pool_cache(TRAPS) {
  const int length = _cp_cache_map.length();
  constantPoolCacheOop cache =
      oopFactory::new_constantPoolCache(length, methodOopDesc::IsUnsafeConc, CHECK);
  cache->initialize(_cp_cache_map);

  // Don't bother with the next pass if there is no JVM_CONSTANT_InvokeDynamic.
  if (_have_invoke_dynamic) {
    for (int i = 0; i < length; i++) {
      int pool_index = cp_cache_entry_pool_index(i);
      if (pool_index >= 0 &&
          _pool->tag_at(pool_index).is_invoke_dynamic()) {
        int bsm_index = _pool->invoke_dynamic_bootstrap_method_ref_index_at(pool_index);
        if (bsm_index != 0) {
          assert(_pool->tag_at(bsm_index).is_method_handle(), "must be a MH constant");
          // There is a CP cache entry holding the BSM for these calls.
          int bsm_cache_index = cp_entry_to_cp_cache(bsm_index);
          cache->entry_at(i)->initialize_bootstrap_method_index_in_cache(bsm_cache_index);
        } else {
          // There is no CP cache entry holding the BSM for these calls.
          // We will need to look for a class-global BSM, later.
          guarantee(AllowTransitionalJSR292, "");
        }
      }
    }
  }

  _pool->set_cache(cache);
  cache->set_constant_pool(_pool());
}
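
// Illustrative sketch (editorial note, indices are made up): if CP entry #20
// is a JVM_CONSTANT_InvokeDynamic whose bootstrap-method reference is CP
// entry #7, a JVM_CONSTANT_MethodHandle, then both #20 and #7 received cache
// entries in compute_index_maps().  The loop above stores #7's cache index
// into #20's cache entry via initialize_bootstrap_method_index_in_cache(), so
// each invokedynamic cache entry records where its BSM constant lives in the
// cache.  Only when bsm_index is 0 (transitional JSR 292 classfiles) does the
// lookup fall back to a class-global BSM later.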



// The new finalization semantics says that registration of
// finalizable objects must be performed on successful return from the
// Object.<init> constructor.  We could implement this trivially if
// <init> were never rewritten but since JVMTI allows this to occur, a
// more complicated solution is required.  A special return bytecode
// is used only by Object.<init> to signal the finalization
// registration point.  Additionally local 0 must be preserved so it's
// available to pass to the registration function.  For simplicity we
// require that local 0 is never overwritten so it's available as an
// argument for registration.

void Rewriter::rewrite_Object_init(methodHandle method, TRAPS) {
  RawBytecodeStream bcs(method);
  while (!bcs.is_last_bytecode()) {
    Bytecodes::Code opcode = bcs.raw_next();
    switch (opcode) {
      case Bytecodes::_return: *bcs.bcp() = Bytecodes::_return_register_finalizer; break;

      case Bytecodes::_istore:
      case Bytecodes::_lstore:
      case Bytecodes::_fstore:
      case Bytecodes::_dstore:
      case Bytecodes::_astore:
        if (bcs.get_index() != 0) continue;

        // fall through
      case Bytecodes::_istore_0:
      case Bytecodes::_lstore_0:
      case Bytecodes::_fstore_0:
      case Bytecodes::_dstore_0:
      case Bytecodes::_astore_0:
        THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(),
                  "can't overwrite local 0 in Object.<init>");
        break;
    }
  }
}
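
// Worked example (editorial note): java.lang.Object.<init> normally compiles
// to the single bytecode
//
//   0: return                          // 0xb1
//
// which the loop above turns into
//
//   0: return_register_finalizer
//
// giving the interpreter a hook at which to register the new instance for
// finalization.  Any istore_0/astore_0 (or istore/astore with index 0) found
// here raises IncompatibleClassChangeError instead, because local 0 ("this")
// must survive until that registration point.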


// Rewrite a classfile-order CP index into a native-order CPC index.
void Rewriter::rewrite_member_reference(address bcp, int offset) {
  address p = bcp + offset;
  int  cp_index    = Bytes::get_Java_u2(p);
  int  cache_index = cp_entry_to_cp_cache(cp_index);
  Bytes::put_native_u2(p, cache_index);
}
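
// A minimal sketch (plain C++, editorial note) of the byte swap performed
// above, assuming a little-endian host: the classfile stores the u2 operand
// in big-endian ("Java") order, while the cache index is written back in host
// order so the interpreter can load it without swapping.
//
//   uint16_t read_java_u2(const unsigned char* p) {      // cf. Bytes::get_Java_u2
//     return (uint16_t)((p[0] << 8) | p[1]);
//   }
//   void write_native_u2(unsigned char* p, uint16_t v) { // cf. Bytes::put_native_u2
//     memcpy(p, &v, sizeof v);                            // host byte order
//   }
//
// For example, "getfield #4" (b4 00 04) whose CP entry maps to cache slot 7
// ends up as b4 07 00 on x86.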


void Rewriter::rewrite_invokedynamic(address bcp, int offset) {
  address p = bcp + offset;
  assert(p[-1] == Bytecodes::_invokedynamic, "");
  int cp_index = Bytes::get_Java_u2(p);
  int cpc  = maybe_add_cp_cache_entry(cp_index);  // add lazily
  int cpc2 = add_secondary_cp_cache_entry(cpc);

  // Replace the trailing four bytes with a CPC index for the dynamic
  // call site.  Unlike other CPC entries, there is one per bytecode,
  // not just one per distinct CP entry.  In other words, the
  // CPC-to-CP relation is many-to-one for invokedynamic entries.
  // This means we must use a larger index size than u2 to address
  // all these entries.  That is the main reason invokedynamic
  // must have a five-byte instruction format.  (Of course, other JVM
  // implementations can use the bytes for other purposes.)
  Bytes::put_native_u4(p, constantPoolCacheOopDesc::encode_secondary_index(cpc2));
  // Note: We use native_u4 format exclusively for 4-byte indexes.
}
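
// Worked example (editorial note): in the classfile an invokedynamic
// instruction occupies five bytes,
//
//   ba  ihi ilo  00 00      // opcode, u2 CP index, two reserved zero bytes
//
// After rewriting, the four operand bytes are reused as a single native-order
// u4 holding the encoded secondary cache index (cpc2 above):
//
//   ba  xx xx xx xx         // encode_secondary_index(cpc2), host byte order
//
// The exact encoding is up to encode_secondary_index(); the point is that a
// u2 could run out once every invokedynamic site gets its own cache entry.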


// Rewrite some ldc bytecodes to _fast_aldc
void Rewriter::maybe_rewrite_ldc(address bcp, int offset, bool is_wide) {
  assert((*bcp) == (is_wide ? Bytecodes::_ldc_w : Bytecodes::_ldc), "");
  address p = bcp + offset;
  int cp_index = is_wide ? Bytes::get_Java_u2(p) : (u1)(*p);
  constantTag tag = _pool->tag_at(cp_index).value();
  if (tag.is_method_handle() || tag.is_method_type()) {
    int cache_index = cp_entry_to_cp_cache(cp_index);
    if (is_wide) {
      (*bcp) = Bytecodes::_fast_aldc_w;
      assert(cache_index == (u2)cache_index, "");
      Bytes::put_native_u2(p, cache_index);
    } else {
      (*bcp) = Bytecodes::_fast_aldc;
      assert(cache_index == (u1)cache_index, "");
      (*p) = (u1)cache_index;
    }
  }
}
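
// Worked example (editorial note): "ldc #9" (12 09), where CP entry #9 is a
// JVM_CONSTANT_MethodHandle, becomes _fast_aldc with a one-byte cache index,
// and "ldc_w #9" (13 00 09) becomes _fast_aldc_w with a native-order u2 cache
// index.  Ordinary String/int/float ldc operands are left untouched; only
// MethodHandle and MethodType constants are routed through the cache here.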


// Rewrites a method given the index_map information
void Rewriter::scan_method(methodOop method) {

  int nof_jsrs = 0;
  bool has_monitor_bytecodes = false;

  {
    // We cannot tolerate a GC in this block, because we've
    // cached the bytecodes in 'code_base'. If the methodOop
    // moves, the bytecodes will also move.
    No_Safepoint_Verifier nsv;
    Bytecodes::Code c;

    // Bytecodes and their length
    const address code_base = method->code_base();
    const int code_length = method->code_size();

    int bc_length;
    for (int bci = 0; bci < code_length; bci += bc_length) {
      address bcp = code_base + bci;
      int prefix_length = 0;
      c = (Bytecodes::Code)(*bcp);

      // Since we have the code, see if we can get the length
      // directly. Some more complicated bytecodes will report
      // a length of zero, meaning we need to make another method
      // call to calculate the length.
      bc_length = Bytecodes::length_for(c);
      if (bc_length == 0) {
        bc_length = Bytecodes::length_at(bcp);

        // length_at will put us at the bytecode after the one modified
        // by 'wide'. We don't currently examine any of the bytecodes
        // modified by wide, but in case we do in the future...
        if (c == Bytecodes::_wide) {
          prefix_length = 1;
          c = (Bytecodes::Code)bcp[1];
        }
      }

      assert(bc_length != 0, "impossible bytecode length");

      switch (c) {
        case Bytecodes::_lookupswitch   : {
#ifndef CC_INTERP
          Bytecode_lookupswitch* bc = Bytecode_lookupswitch_at(bcp);
          (*bcp) = (
            bc->number_of_pairs() < BinarySwitchThreshold
            ? Bytecodes::_fast_linearswitch
            : Bytecodes::_fast_binaryswitch
          );
#endif
          break;
        }
        case Bytecodes::_getstatic      : // fall through
        case Bytecodes::_putstatic      : // fall through
        case Bytecodes::_getfield       : // fall through
        case Bytecodes::_putfield       : // fall through
        case Bytecodes::_invokevirtual  : // fall through
        case Bytecodes::_invokespecial  : // fall through
        case Bytecodes::_invokestatic   :
        case Bytecodes::_invokeinterface:
          rewrite_member_reference(bcp, prefix_length+1);
          break;
        case Bytecodes::_invokedynamic:
          rewrite_invokedynamic(bcp, prefix_length+1);
          break;
        case Bytecodes::_ldc:
          maybe_rewrite_ldc(bcp, prefix_length+1, false);
          break;
        case Bytecodes::_ldc_w:
          maybe_rewrite_ldc(bcp, prefix_length+1, true);
          break;
        case Bytecodes::_jsr            : // fall through
        case Bytecodes::_jsr_w          : nof_jsrs++;                   break;
        case Bytecodes::_monitorenter   : // fall through
        case Bytecodes::_monitorexit    : has_monitor_bytecodes = true; break;
      }
    }
  }

  // Update access flags
  if (has_monitor_bytecodes) {
    method->set_has_monitor_bytecodes();
  }

  // The presence of a jsr bytecode implies that the method might have to be
  // rewritten, so we run the oopMapGenerator on the method
  if (nof_jsrs > 0) {
    method->set_has_jsrs();
    // Second pass will revisit this method.
    assert(method->has_jsrs(), "");
  }
}
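
// Illustrative note (editorial, assuming BinarySwitchThreshold is larger than
// the pair count shown): a lookupswitch such as
//
//   lookupswitch { 1 -> L1; 7 -> L2; default -> L3 }    // 2 pairs
//
// is rewritten to _fast_linearswitch, since a linear scan over a couple of
// pairs is cheaper, while a table with many pairs becomes _fast_binaryswitch.
// Only the opcode byte changes; the padded operand layout stays in place,
// which is why the rewrite above is a single store through bcp.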

// After constant pool is created, revisit methods containing jsrs.
methodHandle Rewriter::rewrite_jsrs(methodHandle method, TRAPS) {
  ResolveOopMapConflicts romc(method);
  methodHandle original_method = method;
  method = romc.do_potential_rewrite(CHECK_(methodHandle()));
  if (method() != original_method()) {
    // Insert invalid bytecode into original methodOop and set
    // interpreter entrypoint, so that executing this method
    // will manifest itself in an easily recognizable form.
    address bcp = original_method->bcp_from(0);
    *bcp = (u1)Bytecodes::_shouldnotreachhere;
    int kind = Interpreter::method_kind(original_method);
    original_method->set_interpreter_kind(kind);
  }

  // Update monitor matching info.
  if (romc.monitor_safe()) {
    method->set_guaranteed_monitor_matching();
  }

  return method;
}


void Rewriter::rewrite(instanceKlassHandle klass, TRAPS) {
  ResourceMark rm(THREAD);
  Rewriter     rw(klass, klass->constants(), klass->methods(), CHECK);
  // (That's all, folks.)
}


void Rewriter::rewrite(instanceKlassHandle klass, constantPoolHandle cpool, objArrayHandle methods, TRAPS) {
  ResourceMark rm(THREAD);
  Rewriter     rw(klass, cpool, methods, CHECK);
  // (That's all, folks.)
}


Rewriter::Rewriter(instanceKlassHandle klass, constantPoolHandle cpool, objArrayHandle methods, TRAPS)
  : _klass(klass),
    _pool(cpool),
    _methods(methods)
{
  assert(_pool->cache() == NULL, "constant pool cache must not be set yet");

  // determine index maps for methodOop rewriting
  compute_index_maps();

  if (RegisterFinalizersAtInit && _klass->name() == vmSymbols::java_lang_Object()) {
    bool did_rewrite = false;
    int i = _methods->length();
    while (i-- > 0) {
      methodOop method = (methodOop)_methods->obj_at(i);
      if (method->intrinsic_id() == vmIntrinsics::_Object_init) {
        // rewrite the return bytecodes of Object.<init> to register the
        // object for finalization if needed.
        methodHandle m(THREAD, method);
        rewrite_Object_init(m, CHECK);
        did_rewrite = true;
        break;
      }
    }
    assert(did_rewrite, "must find Object::<init> to rewrite it");
  }

  // rewrite methods, in two passes
  int i, len = _methods->length();

  for (i = len; --i >= 0; ) {
    methodOop method = (methodOop)_methods->obj_at(i);
    scan_method(method);
  }

  // allocate constant pool cache, now that we've seen all the bytecodes
  make_constant_pool_cache(CHECK);

  for (i = len; --i >= 0; ) {
    methodHandle m(THREAD, (methodOop)_methods->obj_at(i));

    if (m->has_jsrs()) {
      m = rewrite_jsrs(m, CHECK);
      // Method might have gotten rewritten.
      _methods->obj_at_put(i, m());
    }

    // Set up method entry points for compiler and interpreter.
    m->link_method(m, CHECK);

#ifdef ASSERT
    if (StressMethodComparator) {
      static int nmc = 0;
      for (int j = i; j >= 0 && j >= i-4; j--) {
        if ((++nmc % 1000) == 0)  tty->print_cr("Have run MethodComparator %d times...", nmc);
        bool z = MethodComparator::methods_EMCP(m(), (methodOop)_methods->obj_at(j));
        if (j == i && !z) {
          tty->print("MethodComparator FAIL: "); m->print(); m->print_codes();
          assert(z, "method must compare equal to itself");
        }
      }
    }
#endif //ASSERT
  }
}
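
// Editorial summary of the constructor above: every method is scanned first so
// that all needed cache entries are known before make_constant_pool_cache()
// allocates the cache; only then are jsr-bearing methods rewritten (which may
// replace the methodOop) and entry points linked.  StressMethodComparator is a
// debug-only stress test that runs MethodComparator against each method and up
// to four of its neighbours, asserting only that a method compares equal to
// itself.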