cpCache.cpp 25.0 KB
Newer Older
D
duke 已提交
1
/*
2
 * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
D
duke 已提交
3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
19 20 21
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
D
duke 已提交
22 23 24
 *
 */

25 26 27 28 29
#include "precompiled.hpp"
#include "gc_implementation/shared/markSweep.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/rewriter.hpp"
#include "memory/universe.inline.hpp"
30
#include "oops/cpCache.hpp"
31 32 33
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
34
#include "prims/methodHandles.hpp"
35
#include "runtime/handles.inline.hpp"
36 37 38
#ifndef SERIALGC
# include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
#endif
D
duke 已提交
39 40 41 42


// Implementation of ConstantPoolCacheEntry

43
// Initializes this cache entry to point back at constant pool slot 'index'.
// Only the low 16 bits of _indices are used for the cp index; index 0 is
// reserved, hence the exclusive lower bound in the assert.
void ConstantPoolCacheEntry::initialize_entry(int index) {
  assert(0 < index && index < 0x10000, "sanity check");
  _indices = index;
  // Round-trip check: the accessor must decode exactly what was stored.
  assert(constant_pool_index() == index, "");
}

49 50 51 52 53
// Composes a _flags word from the TosState, option bits, and the low-bits
// payload (a field index for field entries, or the method parameter count
// for method entries).  The result is OR-ed with the current _flags so that
// previously published flag bits are preserved.
int ConstantPoolCacheEntry::make_flags(TosState state,
                                       int option_bits,
                                       int field_index_or_method_params) {
  assert(state < number_of_states, "Invalid state in make_flags");
  int f = ((int)state << tos_state_shift) | option_bits | field_index_or_method_params;
  // Preserve existing flag bit values
  // The low bits are a field offset, or else the method parameter size.
#ifdef ASSERT
  // A previously-set TosState must agree with the one being installed now.
  TosState old_state = flag_state();
  assert(old_state == (TosState)0 || old_state == state,
         "inconsistent cpCache flags state");
#endif
  return (_flags | f) ;
}

// Publishes the first resolved-bytecode slot of _indices.  A non-zero value
// here is what marks the entry as resolved to readers, so the store uses
// release semantics to flush f1/f2/flags first.
void ConstantPoolCacheEntry::set_bytecode_1(Bytecodes::Code code) {
#ifdef ASSERT
  // Read once.
  volatile Bytecodes::Code c = bytecode_1();
  // Concurrent resolvers may race; any prior value must match what we write.
  assert(c == 0 || c == code || code == 0, "update must be consistent");
#endif
  // Need to flush pending stores here before bytecode is written.
  OrderAccess::release_store_ptr(&_indices, _indices | ((u_char)code << bytecode_1_shift));
}

// Publishes the second resolved-bytecode slot of _indices (used for the
// invokevirtual path).  Release store for the same reason as set_bytecode_1:
// readers must never observe the bytecode before f1/f2/flags.
void ConstantPoolCacheEntry::set_bytecode_2(Bytecodes::Code code) {
#ifdef ASSERT
  // Read once.
  volatile Bytecodes::Code c = bytecode_2();
  // Concurrent resolvers may race; any prior value must match what we write.
  assert(c == 0 || c == code || code == 0, "update must be consistent");
#endif
  // Need to flush pending stores here before bytecode is written.
  OrderAccess::release_store_ptr(&_indices, _indices | ((u_char)code << bytecode_2_shift));
}

84
// Sets f1, ordering with previous writes.
// Readers treat a non-null f1 as "entry linked", so all other fields must be
// flushed to memory before this store becomes visible.
void ConstantPoolCacheEntry::release_set_f1(Metadata* f1) {
  assert(f1 != NULL, "");
  OrderAccess::release_store_ptr((HeapWord*) &_f1, f1);
}

// Sets flags, but only if the value was previously zero.
// Returns true iff this thread performed the initialization.
bool ConstantPoolCacheEntry::init_flags_atomic(intptr_t flags) {
  // The CAS succeeds only for the thread that observes the pristine value.
  return Atomic::cmpxchg_ptr(flags, &_flags, 0) == 0;
}
95

D
duke 已提交
96 97 98 99 100 101 102 103
// Note that concurrent update of both bytecodes can leave one of them
// reset to zero.  This is harmless; the interpreter will simply re-resolve
// the damaged entry.  More seriously, the memory synchronization is needed
// to flush other fields (f1, f2) completely to memory before the bytecodes
// are updated, lest other processors see a non-zero bytecode but zero f1/f2.
//
// Links a resolved field reference: f1 = holder klass, f2 = field offset,
// flags = TosState + volatile/final bits + field index in the low bits.
void ConstantPoolCacheEntry::set_field(Bytecodes::Code get_code,
                                       Bytecodes::Code put_code,
                                       KlassHandle field_holder,
                                       int field_index,
                                       int field_offset,
                                       TosState field_type,
                                       bool is_final,
                                       bool is_volatile,
                                       Klass* root_klass) {
  set_f1(field_holder());
  set_f2(field_offset);
  // The field index shares the flag word's low bits; it must fit.
  assert((field_index & field_index_mask) == field_index,
         "field index does not fit in low flag bits");
  set_field_flags(field_type,
                  ((is_volatile ? 1 : 0) << is_volatile_shift) |
                  ((is_final    ? 1 : 0) << is_final_shift),
                  field_index);
  // Bytecodes are published last (release stores); see the note above.
  set_bytecode_1(get_code);
  set_bytecode_2(put_code);
  NOT_PRODUCT(verify(tty));
}

123 124 125 126 127 128 129 130 131 132 133 134 135 136 137
// Records only the parameter size in _flags, without marking the entry
// resolved.
void ConstantPoolCacheEntry::set_parameter_size(int value) {
  // This routine is called only in corner cases where the CPCE is not yet initialized.
  // See AbstractInterpreter::deopt_continue_after_entry.
  assert(_flags == 0 || parameter_size() == 0 || parameter_size() == value,
         err_msg("size must not change: parameter_size=%d, value=%d", parameter_size(), value));
  // Setting the parameter size by itself is only safe if the
  // current value of _flags is 0, otherwise another thread may have
  // updated it and we don't want to overwrite that value.  Don't
  // bother trying to update it once it's nonzero but always make
  // sure that the final parameter size agrees with what was passed.
  if (_flags == 0) {
    // CAS from 0 so we never clobber a concurrent full initialization.
    Atomic::cmpxchg_ptr((value & parameter_size_mask), &_flags, 0);
  }
  guarantee(parameter_size() == value,
            err_msg("size must not change: parameter_size=%d, value=%d", parameter_size(), value));
}

// Links a resolved invokevirtual/invokeinterface/invokespecial/invokestatic
// call site.  Depending on the bytecode, either f1 receives the Method*
// (byte_no == 1 cases) or f2 receives the vtable index / statically-bound
// Method* (byte_no == 2 cases).  The resolved bytecode is published last.
void ConstantPoolCacheEntry::set_method(Bytecodes::Code invoke_code,
                                        methodHandle method,
                                        int vtable_index) {
  assert(method->interpreter_entry() != NULL, "should have been set at this point");
  assert(!method->is_obsolete(),  "attempt to write obsolete method to cpCache");

  int byte_no = -1;
  bool change_to_virtual = false;

  switch (invoke_code) {
    case Bytecodes::_invokeinterface:
      // We get here from InterpreterRuntime::resolve_invoke when an invokeinterface
      // instruction somehow links to a non-interface method (in Object).
      // In that case, the method has no itable index and must be invoked as a virtual.
      // Set a flag to keep track of this corner case.
      change_to_virtual = true;

      // ...and fall through as if we were handling invokevirtual:
    case Bytecodes::_invokevirtual:
      {
        if (method->can_be_statically_bound()) {
          // set_f2_as_vfinal_method checks if is_vfinal flag is true.
          set_method_flags(as_TosState(method->result_type()),
                           (                             1      << is_vfinal_shift) |
                           ((method->is_final_method() ? 1 : 0) << is_final_shift)  |
                           ((change_to_virtual         ? 1 : 0) << is_forced_virtual_shift),
                           method()->size_of_parameters());
          set_f2_as_vfinal_method(method());
        } else {
          assert(vtable_index >= 0, "valid index");
          assert(!method->is_final_method(), "sanity");
          set_method_flags(as_TosState(method->result_type()),
                           ((change_to_virtual ? 1 : 0) << is_forced_virtual_shift),
                           method()->size_of_parameters());
          // Dispatch through the vtable at this index.
          set_f2(vtable_index);
        }
        byte_no = 2;
        break;
      }

    case Bytecodes::_invokespecial:
    case Bytecodes::_invokestatic:
      // Note:  Read and preserve the value of the is_vfinal flag on any
      // invokevirtual bytecode shared with this constant pool cache entry.
      // It is cheap and safe to consult is_vfinal() at all times.
      // Once is_vfinal is set, it must stay that way, lest we get a dangling oop.
      set_method_flags(as_TosState(method->result_type()),
                       ((is_vfinal()               ? 1 : 0) << is_vfinal_shift) |
                       ((method->is_final_method() ? 1 : 0) << is_final_shift),
                       method()->size_of_parameters());
      set_f1(method());
      byte_no = 1;
      break;
    default:
      ShouldNotReachHere();
      break;
  }

  // Note:  byte_no also appears in TemplateTable::resolve.
  if (byte_no == 1) {
    assert(invoke_code != Bytecodes::_invokevirtual &&
           invoke_code != Bytecodes::_invokeinterface, "");
    set_bytecode_1(invoke_code);
  } else if (byte_no == 2)  {
    if (change_to_virtual) {
      assert(invoke_code == Bytecodes::_invokeinterface, "");
      // NOTE: THIS IS A HACK - BE VERY CAREFUL!!!
      //
      // Workaround for the case where we encounter an invokeinterface, but we
      // should really have an _invokevirtual since the resolved method is a
      // virtual method in java.lang.Object. This is a corner case in the spec
      // but is presumably legal. javac does not generate this code.
      //
      // We set bytecode_1() to _invokeinterface, because that is the
      // bytecode # used by the interpreter to see if it is resolved.
      // We set bytecode_2() to _invokevirtual.
      // See also interpreterRuntime.cpp. (8/25/2000)
      // Only set resolved for the invokeinterface case if method is public.
      // Otherwise, the method needs to be reresolved with caller for each
      // interface call.
      if (method->is_public()) set_bytecode_1(invoke_code);
    } else {
      assert(invoke_code == Bytecodes::_invokevirtual, "");
    }
    // set up for invokevirtual, even if linking for invokeinterface also:
    set_bytecode_2(Bytecodes::_invokevirtual);
  } else {
    ShouldNotReachHere();
  }
  NOT_PRODUCT(verify(tty));
}


// Links a resolved invokeinterface call site: f1 holds the interface Klass*
// and f2 holds the itable index of the selected method.
void ConstantPoolCacheEntry::set_interface_call(methodHandle method, int index) {
  InstanceKlass* interf = method->method_holder();
  assert(interf->is_interface(), "must be an interface");
  assert(!method->is_final_method(), "interfaces do not have final methods; cannot link to one here");
  set_f1(interf);
  set_f2(index);
  set_method_flags(as_TosState(method->result_type()),
                   0,  // no option bits
                   method()->size_of_parameters());
  // Published last: set_bytecode_1 uses a release store, so readers that see
  // the resolved bytecode also see f1/f2/flags.
  set_bytecode_1(Bytecodes::_invokeinterface);
}


246
// Links an invokehandle call site; thin wrapper over set_method_handle_common.
void ConstantPoolCacheEntry::set_method_handle(constantPoolHandle cpool,
                                               methodHandle adapter,
                                               Handle appendix, Handle method_type,
                                               objArrayHandle resolved_references) {
  set_method_handle_common(cpool, Bytecodes::_invokehandle, adapter, appendix, method_type, resolved_references);
}

253
// Links an invokedynamic call site; thin wrapper over set_method_handle_common.
void ConstantPoolCacheEntry::set_dynamic_call(constantPoolHandle cpool,
                                              methodHandle adapter,
                                              Handle appendix, Handle method_type,
                                              objArrayHandle resolved_references) {
  set_method_handle_common(cpool, Bytecodes::_invokedynamic, adapter, appendix, method_type, resolved_references);
}

260 261
// Shared linking logic for invokehandle/invokedynamic.  Stores the adapter
// method in f1, and the optional appendix/MethodType into the resolved
// references array at offsets derived from f2.
void ConstantPoolCacheEntry::set_method_handle_common(constantPoolHandle cpool,
                                                      Bytecodes::Code invoke_code,
                                                      methodHandle adapter,
                                                      Handle appendix, Handle method_type,
                                                      objArrayHandle resolved_references) {
  // NOTE: This CPCE can be the subject of data races.
  // There are three words to update: flags, refs[f2], f1 (in that order).
  // Writers must store all other values before f1.
  // Readers must test f1 first for non-null before reading other fields.
  // Competing writers must acquire exclusive access via a lock.
  // A losing writer waits on the lock until the winner writes f1 and leaves
  // the lock, so that when the losing writer returns, he can use the linked
  // cache entry.

  MonitorLockerEx ml(cpool->lock());
  if (!is_f1_null()) {
    // A racing resolver won and already linked the entry; nothing to do.
    return;
  }

  const bool has_appendix    = appendix.not_null();
  const bool has_method_type = method_type.not_null();

  // Write the flags.
  set_method_flags(as_TosState(adapter->result_type()),
                   ((has_appendix    ? 1 : 0) << has_appendix_shift   ) |
                   ((has_method_type ? 1 : 0) << has_method_type_shift) |
                   (                   1      << is_final_shift       ),
                   adapter->size_of_parameters());

  if (TraceInvokeDynamic) {
    tty->print_cr("set_method_handle bc=%d appendix="PTR_FORMAT"%s method_type="PTR_FORMAT"%s method="PTR_FORMAT" ",
                  invoke_code,
                  (intptr_t)appendix(),    (has_appendix    ? "" : " (unused)"),
                  (intptr_t)method_type(), (has_method_type ? "" : " (unused)"),
                  (intptr_t)adapter());
    adapter->print();
    if (has_appendix)  appendix()->print();
  }

  // Method handle invokes and invokedynamic sites use both cp cache words.
  // refs[f2], if not null, contains a value passed as a trailing argument to the adapter.
  // In the general case, this could be the call site's MethodType,
  // for use with java.lang.Invokers.checkExactType, or else a CallSite object.
  // f1 contains the adapter method which manages the actual call.
  // In the general case, this is a compiled LambdaForm.
  // (The Java code is free to optimize these calls by binding other
  // sorts of methods and appendices to call sites.)
  // JVM-level linking is via f1, as if for invokespecial, and signatures are erased.
  // The appendix argument (if any) is added to the signature, and is counted in the parameter_size bits.
  // Even with the appendix, the method will never take more than 255 parameter slots.
  //
  // This means that given a call site like (List)mh.invoke("foo"),
  // the f1 method has signature '(Ljl/Object;Ljl/invoke/MethodType;)Ljl/Object;',
  // not '(Ljava/lang/String;)Ljava/util/List;'.
  // The fact that String and List are involved is encoded in the MethodType in refs[f2].
  // This allows us to create fewer method oops, while keeping type safety.
  //

  // Store appendix, if any.
  if (has_appendix) {
    const int appendix_index = f2_as_index() + _indy_resolved_references_appendix_offset;
    assert(appendix_index >= 0 && appendix_index < resolved_references->length(), "oob");
    assert(resolved_references->obj_at(appendix_index) == NULL, "init just once");
    resolved_references->obj_at_put(appendix_index, appendix());
  }

  // Store MethodType, if any.
  if (has_method_type) {
    const int method_type_index = f2_as_index() + _indy_resolved_references_method_type_offset;
    assert(method_type_index >= 0 && method_type_index < resolved_references->length(), "oob");
    assert(resolved_references->obj_at(method_type_index) == NULL, "init just once");
    resolved_references->obj_at_put(method_type_index, method_type());
  }

  release_set_f1(adapter());  // This must be the last one to set (see NOTE above)!

  // The interpreter assembly code does not check byte_2,
  // but it is used by is_resolved, method_if_resolved, etc.
  set_bytecode_1(invoke_code);
  NOT_PRODUCT(verify(tty));
  if (TraceInvokeDynamic) {
    this->print(tty, 0);
  }
}

345
// Returns the Method* this entry resolves to, or NULL if not (yet) resolved.
Method* ConstantPoolCacheEntry::method_if_resolved(constantPoolHandle cpool) {
  // Decode the action of set_method and set_interface_call
  Bytecodes::Code invoke_code = bytecode_1();
  if (invoke_code != (Bytecodes::Code)0) {
    Metadata* f1 = (Metadata*)_f1;
    if (f1 != NULL) {
      switch (invoke_code) {
      case Bytecodes::_invokeinterface:
        // f1 is the interface klass; f2 is the itable index.
        assert(f1->is_klass(), "");
        return klassItable::method_for_itable_index((Klass*)f1, f2_as_index());
      case Bytecodes::_invokestatic:
      case Bytecodes::_invokespecial:
        assert(!has_appendix(), "");
        // Deliberate fall-through: f1 holds the Method* for these cases too.
      case Bytecodes::_invokehandle:
      case Bytecodes::_invokedynamic:
        assert(f1->is_method(), "");
        return (Method*)f1;
      }
    }
  }
  // Not resolved via byte 1; try the invokevirtual slot.
  invoke_code = bytecode_2();
  if (invoke_code != (Bytecodes::Code)0) {
    switch (invoke_code) {
    case Bytecodes::_invokevirtual:
      if (is_vfinal()) {
        // invokevirtual
        Method* m = f2_as_vfinal_method();
        assert(m->is_method(), "");
        return m;
      } else {
        // f2 is a vtable index; look the method up through the holder klass.
        int holder_index = cpool->uncached_klass_ref_index_at(constant_pool_index());
        if (cpool->tag_at(holder_index).is_klass()) {
          Klass* klass = cpool->resolved_klass_at(holder_index);
          // Non-instance klasses (e.g. arrays) inherit their vtable from Object.
          if (!klass->oop_is_instance())
            klass = SystemDictionary::Object_klass();
          return InstanceKlass::cast(klass)->method_at_vtable(f2_as_index());
        }
      }
      break;
    }
  }
  return NULL;
}


390 391 392
// Returns the appendix oop for a linked invokehandle/invokedynamic entry,
// or NULL when the entry is unresolved or has no appendix.
oop ConstantPoolCacheEntry::appendix_if_resolved(constantPoolHandle cpool) {
  if (!is_f1_null() && has_appendix()) {
    objArrayOop refs = cpool->resolved_references();
    return refs->obj_at(f2_as_index() + _indy_resolved_references_appendix_offset);
  }
  return NULL;
}


// Returns the MethodType oop for a linked invokehandle/invokedynamic entry,
// or NULL when the entry is unresolved or has no MethodType.
oop ConstantPoolCacheEntry::method_type_if_resolved(constantPoolHandle cpool) {
  if (!is_f1_null() && has_method_type()) {
    objArrayOop refs = cpool->resolved_references();
    return refs->obj_at(f2_as_index() + _indy_resolved_references_method_type_offset);
  }
  return NULL;
}


// RedefineClasses() API support:
// If this constantPoolCacheEntry refers to old_method then update it
// to refer to new_method.
// Returns true iff an update was made; *trace_name_printed tracks whether the
// holder-class trace header has already been emitted for this redefinition.
bool ConstantPoolCacheEntry::adjust_method_entry(Method* old_method,
       Method* new_method, bool * trace_name_printed) {

  if (is_vfinal()) {
    // virtual and final so _f2 contains method ptr instead of vtable index
    if (f2_as_vfinal_method() == old_method) {
      // match old_method so need an update
      // NOTE: can't use set_f2_as_vfinal_method as it asserts on different values
      _f2 = (intptr_t)new_method;
      if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) {
        if (!(*trace_name_printed)) {
          // RC_TRACE_MESG macro has an embedded ResourceMark
          RC_TRACE_MESG(("adjust: name=%s",
            old_method->method_holder()->external_name()));
          *trace_name_printed = true;
        }
        // RC_TRACE macro has an embedded ResourceMark
        RC_TRACE(0x00400000, ("cpc vf-entry update: %s(%s)",
          new_method->name()->as_C_string(),
          new_method->signature()->as_C_string()));
      }

      return true;
    }

    // f1() is not used with virtual entries so bail out
    return false;
  }

  if (_f1 == NULL) {
    // NULL f1() means this is a virtual entry so bail out
    // We are assuming that the vtable index does not need change.
    return false;
  }

  if (_f1 == old_method) {
    _f1 = new_method;
    if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) {
      if (!(*trace_name_printed)) {
        // RC_TRACE_MESG macro has an embedded ResourceMark
        RC_TRACE_MESG(("adjust: name=%s",
          old_method->method_holder()->external_name()));
        *trace_name_printed = true;
      }
      // RC_TRACE macro has an embedded ResourceMark
      RC_TRACE(0x00400000, ("cpc entry update: %s(%s)",
        new_method->name()->as_C_string(),
        new_method->signature()->as_C_string()));
    }

    return true;
  }

  return false;
}

467 468 469 470 471 472 473 474 475 476 477 478
#ifndef PRODUCT
// Debug-only: verify that this entry does not reference an is_old() method.
bool ConstantPoolCacheEntry::check_no_old_entries() {
  if (is_vfinal()) {
    // For vfinal entries _f2 holds the Method* directly.
    Metadata* f2 = (Metadata*)_f2;
    return (f2->is_valid() && f2->is_method() && !((Method*)f2)->is_old());
  }
  // An unlinked (NULL) _f1 trivially passes the check.
  if (_f1 == NULL) {
    return true;
  }
  return _f1->is_valid() && _f1->is_method() && !((Method*)_f1)->is_old();
}
#endif

// RedefineClasses() helper: returns true iff this entry refers to a method
// whose holder is k (or to any method when k is NULL).
bool ConstantPoolCacheEntry::is_interesting_method_entry(Klass* k) {
  if (!is_method_entry()) {
    // not a method entry so not interesting by default
    return false;
  }

  Method* m = NULL;
  if (is_vfinal()) {
    // virtual and final so _f2 contains method ptr instead of vtable index
    m = f2_as_vfinal_method();
  } else if (is_f1_null()) {
    // NULL _f1 means this is a virtual entry so also not interesting
    return false;
  } else {
    if (!(_f1->is_method())) {
      // _f1 can also contain a Klass* for an interface
      return false;
    }
    m = f1_as_method();
  }

  assert(m != NULL && m->is_method(), "sanity check");
  // Deliberately re-checked in product builds for robustness (see comment below).
  if (m == NULL || !m->is_method() || (k != NULL && m->method_holder() != k)) {
    // robustness for above sanity checks or method is not in
    // the interesting class
    return false;
  }

  // the method is in the interesting class so the entry is interesting
  return true;
}

// Prints this entry's four words (indices, f1, f2, flags) to st.
void ConstantPoolCacheEntry::print(outputStream* st, int index) const {
  // Leading separator before the very first entry only.
  if (index == 0) st->print_cr("                 -------------");
  // Entry header: index, address, then the packed indices word.
  st->print("%3d  ("PTR_FORMAT")  ", index, (intptr_t)this);
  st->print_cr("[%02x|%02x|%5d]", bytecode_2(), bytecode_1(), constant_pool_index());
  // Remaining words, one per line, followed by a trailing separator.
  st->print_cr("                 [   "PTR_FORMAT"]", (intptr_t)_f1);
  st->print_cr("                 [   "PTR_FORMAT"]", (intptr_t)_f2);
  st->print_cr("                 [   "PTR_FORMAT"]", (intptr_t)_flags);
  st->print_cr("                 -------------");
}

// Placeholder for per-entry verification; intentionally empty for now.
void ConstantPoolCacheEntry::verify(outputStream* st) const {
  // not implemented yet
}

// Implementation of ConstantPoolCache

528 529 530 531 532 533 534
// Allocates a ConstantPoolCache with room for 'length' entries from the
// given class loader's metaspace.
ConstantPoolCache* ConstantPoolCache::allocate(ClassLoaderData* loader_data, int length, TRAPS) {
  const int word_size = ConstantPoolCache::size(length);
  return new (loader_data, word_size, false, THREAD) ConstantPoolCache(length);
}

// Initializes every cache entry with its original constant pool index, then
// wires invokedynamic/invokehandle entries to their resolved-reference slots.
void ConstantPoolCache::initialize(intArray& inverse_index_map, intArray& invokedynamic_references_map) {
  assert(inverse_index_map.length() == length(), "inverse index map must have same length as cache");
  for (int i = 0; i < length(); i++) {
    ConstantPoolCacheEntry* e = entry_at(i);
    int original_index = inverse_index_map[i];
    e->initialize_entry(original_index);
    assert(entry_at(i) == e, "sanity");
  }
  // Each mapped reference group points back to one cp cache entry.
  for (int ref = 0; ref < invokedynamic_references_map.length(); ref++) {
    const int cpci = invokedynamic_references_map[ref];
    if (cpci >= 0) {
#ifdef ASSERT
      // invokedynamic and invokehandle have more entries; check if they
      // all point to the same constant pool cache entry.
      for (int entry = 1; entry < ConstantPoolCacheEntry::_indy_resolved_references_entries; entry++) {
        const int cpci_next = invokedynamic_references_map[ref + entry];
        assert(cpci == cpci_next, err_msg_res("%d == %d", cpci, cpci_next));
      }
#endif
      entry_at(cpci)->initialize_resolved_reference_index(ref);
      ref += ConstantPoolCacheEntry::_indy_resolved_references_entries - 1;  // skip extra entries
    }
  }
}

// RedefineClasses() API support:
// If any entry of this constantPoolCache points to any of
// old_methods, replace it with the corresponding new_method.
// old_methods/new_methods are parallel arrays of length methods_length; all
// old methods are assumed to share the same holder class.
void ConstantPoolCache::adjust_method_entries(Method** old_methods, Method** new_methods,
                                                     int methods_length, bool * trace_name_printed) {

  if (methods_length == 0) {
    // nothing to do if there are no methods
    return;
  }

  // get shorthand for the interesting class
  Klass* old_holder = old_methods[0]->method_holder();

  for (int i = 0; i < length(); i++) {
    if (!entry_at(i)->is_interesting_method_entry(old_holder)) {
      // skip uninteresting methods
      continue;
    }

    // The constantPoolCache contains entries for several different
    // things, but we only care about methods. In fact, we only care
    // about methods in the same class as the one that contains the
    // old_methods. At this point, we have an interesting entry.

    for (int j = 0; j < methods_length; j++) {
      Method* old_method = old_methods[j];
      Method* new_method = new_methods[j];

      if (entry_at(i)->adjust_method_entry(old_method, new_method,
          trace_name_printed)) {
        // current old_method matched this entry and we updated it so
        // break out and get to the next interesting entry if there one
        break;
      }
    }
  }
}
597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635

#ifndef PRODUCT
bool ConstantPoolCache::check_no_old_entries() {
  for (int i = 1; i < length(); i++) {
    if (entry_at(i)->is_interesting_method_entry(NULL) &&
       !entry_at(i)->check_no_old_entries()) {
      return false;
    }
  }
  return true;
}
#endif // PRODUCT


// Printing

// Prints this cache's name followed by every entry.
void ConstantPoolCache::print_on(outputStream* st) const {
  assert(is_constantPoolCache(), "obj must be constant pool cache");
  st->print_cr(internal_name());
  // Dump each cache entry in order.
  for (int index = 0; index < length(); index++) {
    entry_at(index)->print(st, index);
  }
}

// Prints a compact one-line summary: length, address, and owning pool.
void ConstantPoolCache::print_value_on(outputStream* st) const {
  assert(is_constantPoolCache(), "obj must be constant pool cache");
  st->print("cache [%d]", length());
  print_address_on(st);
  st->print(" for ");
  constant_pool()->print_value_on(st);
}


// Verification

// Verifies every cache entry in turn.
void ConstantPoolCache::verify_on(outputStream* st) {
  guarantee(is_constantPoolCache(), "obj must be constant pool cache");
  for (int index = 0; index < length(); index++) {
    entry_at(index)->verify(st);
  }
}