/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "memory/allocation.inline.hpp"
#include "prims/methodHandles.hpp"

#define __ _masm->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#define STOP(error) stop(error)
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#define STOP(error) block_comment(error); __ stop(error)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

// Workaround for C++ overloading nastiness on '0' for RegisterOrConstant.
static RegisterOrConstant constant(int value) {
  return RegisterOrConstant(value);
}

void MethodHandles::load_klass_from_Class(MacroAssembler* _masm, Register klass_reg) {
  if (VerifyMethodHandles)
    verify_klass(_masm, klass_reg, SystemDictionary::WK_KLASS_ENUM_NAME(java_lang_Class),
                 "MH argument is a Class");
  __ movptr(klass_reg, Address(klass_reg, java_lang_Class::klass_offset_in_bytes()));
}

#ifdef ASSERT
static int check_nonzero(const char* xname, int x) {
  assert(x != 0, err_msg("%s should be nonzero", xname));
  return x;
}
#define NONZERO(x) check_nonzero(#x, x)
#else //ASSERT
#define NONZERO(x) (x)
#endif //ASSERT

#ifdef ASSERT
void MethodHandles::verify_klass(MacroAssembler* _masm,
                                 Register obj, SystemDictionary::WKID klass_id,
                                 const char* error_message) {
  Klass** klass_addr = SystemDictionary::well_known_klass_addr(klass_id);
  KlassHandle klass = SystemDictionary::well_known_klass(klass_id);
  Register temp = rdi;
  Register temp2 = noreg;
  LP64_ONLY(temp2 = rscratch1);  // used by MacroAssembler::cmpptr
  Label L_ok, L_bad;
  BLOCK_COMMENT("verify_klass {");
  __ verify_oop(obj);
  __ testptr(obj, obj);
  __ jcc(Assembler::zero, L_bad);
  __ push(temp); if (temp2 != noreg)  __ push(temp2);
#define UNPUSH { if (temp2 != noreg)  __ pop(temp2);  __ pop(temp); }
  __ load_klass(temp, obj);
  __ cmpptr(temp, ExternalAddress((address) klass_addr));
  __ jcc(Assembler::equal, L_ok);
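  // Exact klass match failed; fall back to the fast primary-supertype check:
  // read the slot at the expected klass's super_check_offset inside obj's
  // klass and compare it against the expected klass address.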
  intptr_t super_check_offset = klass->super_check_offset();
  __ movptr(temp, Address(temp, super_check_offset));
  __ cmpptr(temp, ExternalAddress((address) klass_addr));
  __ jcc(Assembler::equal, L_ok);
  UNPUSH;
  __ bind(L_bad);
  __ STOP(error_message);
  __ BIND(L_ok);
  UNPUSH;
  BLOCK_COMMENT("} verify_klass");
}

void MethodHandles::verify_ref_kind(MacroAssembler* _masm, int ref_kind, Register member_reg, Register temp) {
  Label L;
  BLOCK_COMMENT("verify_ref_kind {");
  __ movl(temp, Address(member_reg, NONZERO(java_lang_invoke_MemberName::flags_offset_in_bytes())));
  __ shrl(temp, java_lang_invoke_MemberName::MN_REFERENCE_KIND_SHIFT);
  __ andl(temp, java_lang_invoke_MemberName::MN_REFERENCE_KIND_MASK);
  __ cmpl(temp, ref_kind);
  __ jcc(Assembler::equal, L);
  { char* buf = NEW_C_HEAP_ARRAY(char, 100, mtInternal);
    jio_snprintf(buf, 100, "verify_ref_kind expected %x", ref_kind);
    if (ref_kind == JVM_REF_invokeVirtual ||
        ref_kind == JVM_REF_invokeSpecial)
      // could do this for all ref_kinds, but would explode assembly code size
      trace_method_handle(_masm, buf);
    __ STOP(buf);
  }
  BLOCK_COMMENT("} verify_ref_kind");
  __ bind(L);
}

#endif //ASSERT

void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register method, Register temp,
                                            bool for_compiler_entry) {
  assert(method == rbx, "interpreter calling convention");
  __ verify_oop(method);

  if (!for_compiler_entry && JvmtiExport::can_post_interpreter_events()) {
    Label run_compiled_code;
    // JVMTI events, such as single-stepping, are implemented partly by avoiding running
    // compiled code in threads for which the event is enabled.  Check here for
    // interp_only_mode if these events CAN be enabled.
#ifdef _LP64
    Register rthread = r15_thread;
#else
    Register rthread = temp;
    __ get_thread(rthread);
#endif
    // interp_only is an int; on little endian it is sufficient to test the byte only.
    // Is a cmpl faster?
    __ cmpb(Address(rthread, JavaThread::interp_only_mode_offset()), 0);
    __ jccb(Assembler::zero, run_compiled_code);
    __ jmp(Address(method, Method::interpreter_entry_offset()));
    __ BIND(run_compiled_code);
  }

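  // Choose the entry point: from_compiled_offset() is taken when the caller is
  // compiled code, from_interpreted_offset() when coming from the interpreter.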
  const ByteSize entry_offset = for_compiler_entry ? Method::from_compiled_offset() :
                                                     Method::from_interpreted_offset();
  __ jmp(Address(method, entry_offset));
}

void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
                                        Register recv, Register method_temp,
                                        Register temp2,
                                        bool for_compiler_entry) {
  BLOCK_COMMENT("jump_to_lambda_form {");
  // This is the initial entry point of a lazy method handle.
  // After type checking, it picks up the invoker from the LambdaForm.
  assert_different_registers(recv, method_temp, temp2);
  assert(recv != noreg, "required register");
  assert(method_temp == rbx, "required register for loading method");

  //NOT_PRODUCT({ FlagSetting fs(TraceMethodHandles, true); trace_method_handle(_masm, "LZMH"); });

  // Load the invoker, as MH -> MH.form -> LF.vmentry
  __ verify_oop(recv);
  __ load_heap_oop(method_temp, Address(recv, NONZERO(java_lang_invoke_MethodHandle::form_offset_in_bytes())));
  __ verify_oop(method_temp);
  __ load_heap_oop(method_temp, Address(method_temp, NONZERO(java_lang_invoke_LambdaForm::vmentry_offset_in_bytes())));
  __ verify_oop(method_temp);
  // the following assumes that a Method* is normally compressed in the vmtarget field:
  __ movptr(method_temp, Address(method_temp, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes())));

  if (VerifyMethodHandles && !for_compiler_entry) {
    // make sure recv is already on stack
    __ load_sized_value(temp2,
                        Address(method_temp, Method::size_of_parameters_offset()),
                        sizeof(u2), /*is_signed*/ false);
    // assert(sizeof(u2) == sizeof(Method::_size_of_parameters), "");
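    // argument_address(temp2, -1) is the interpreter stack slot of the first
    // (deepest) argument, i.e. the receiver, with temp2 holding the parameter
    // slot count loaded above.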
    Label L;
    __ cmpptr(recv, __ argument_address(temp2, -1));
    __ jcc(Assembler::equal, L);
    __ movptr(rax, __ argument_address(temp2, -1));
    __ STOP("receiver not on stack");
    __ BIND(L);
  }

  jump_from_method_handle(_masm, method_temp, temp2, for_compiler_entry);
  BLOCK_COMMENT("} jump_to_lambda_form");
}


// Code generation
address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm,
                                                                vmIntrinsics::ID iid) {
  const bool not_for_compiler_entry = false;  // this is the interpreter entry
  assert(is_signature_polymorphic(iid), "expected invoke iid");
  if (iid == vmIntrinsics::_invokeGeneric ||
      iid == vmIntrinsics::_compiledLambdaForm) {
    // Perhaps surprisingly, the symbolic references visible to Java are not directly used.
    // They are linked to Java-generated adapters via MethodHandleNatives.linkMethod.
    // They all allow an appendix argument.
    __ hlt();           // empty stubs make SG sick
    return NULL;
  }

  // rsi/r13: sender SP (must preserve; see prepare_to_jump_from_interpreted)
  // rbx: Method*
  // rdx: argument locator (parameter slot count, added to rsp)
  // rcx: used as temp to hold mh or receiver
  // rax, rdi: garbage temps, blown away
  Register rdx_argp   = rdx;   // argument list ptr, live on error paths
  Register rax_temp   = rax;
  Register rcx_mh     = rcx;   // MH receiver; dies quickly and is recycled
  Register rbx_method = rbx;   // eventual target of this invocation

  address code_start = __ pc();

  // here's where control starts out:
  __ align(CodeEntryAlignment);
  address entry_point = __ pc();

  if (VerifyMethodHandles) {
    Label L;
    BLOCK_COMMENT("verify_intrinsic_id {");
    __ cmpb(Address(rbx_method, Method::intrinsic_id_offset_in_bytes()), (int) iid);
    __ jcc(Assembler::equal, L);
    if (iid == vmIntrinsics::_linkToVirtual ||
        iid == vmIntrinsics::_linkToSpecial) {
      // could do this for all kinds, but would explode assembly code size
      trace_method_handle(_masm, "bad Method*::intrinsic_id");
    }
    __ STOP("bad Method*::intrinsic_id");
    __ bind(L);
    BLOCK_COMMENT("} verify_intrinsic_id");
  }

  // First task:  Find out how big the argument list is.
  Address rdx_first_arg_addr;
  int ref_kind = signature_polymorphic_intrinsic_ref_kind(iid);
  assert(ref_kind != 0 || iid == vmIntrinsics::_invokeBasic, "must be _invokeBasic or a linkTo intrinsic");
  if (ref_kind == 0 || MethodHandles::ref_kind_has_receiver(ref_kind)) {
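    // invokeBasic (ref_kind == 0) and the linkTo* kinds that take a receiver
    // need the parameter count to locate the deepest stacked argument.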
    __ load_sized_value(rdx_argp,
                        Address(rbx_method, Method::size_of_parameters_offset()),
                        sizeof(u2), /*is_signed*/ false);
    // assert(sizeof(u2) == sizeof(Method::_size_of_parameters), "");
    rdx_first_arg_addr = __ argument_address(rdx_argp, -1);
  } else {
    DEBUG_ONLY(rdx_argp = noreg);
  }

  if (!is_signature_polymorphic_static(iid)) {
    __ movptr(rcx_mh, rdx_first_arg_addr);
    DEBUG_ONLY(rdx_argp = noreg);
  }

  // rdx_first_arg_addr is live!

  if (TraceMethodHandles) {
    const char* name = vmIntrinsics::name_at(iid);
    if (*name == '_')  name += 1;
    const size_t len = strlen(name) + 50;
    char* qname = NEW_C_HEAP_ARRAY(char, len, mtInternal);
    const char* suffix = "";
    if (vmIntrinsics::method_for(iid) == NULL ||
        !vmIntrinsics::method_for(iid)->access_flags().is_public()) {
      if (is_signature_polymorphic_static(iid))
        suffix = "/static";
      else
        suffix = "/private";
    }
    jio_snprintf(qname, len, "MethodHandle::interpreter_entry::%s%s", name, suffix);
    // note: stub looks for mh in rcx
    trace_method_handle(_masm, qname);
  }

  if (iid == vmIntrinsics::_invokeBasic) {
    generate_method_handle_dispatch(_masm, iid, rcx_mh, noreg, not_for_compiler_entry);

  } else {
    // Adjust argument list by popping the trailing MemberName argument.
    Register rcx_recv = noreg;
    if (MethodHandles::ref_kind_has_receiver(ref_kind)) {
      // Load the receiver (not the MH; the actual MemberName's receiver) up from the interpreter stack.
      __ movptr(rcx_recv = rcx, rdx_first_arg_addr);
    }
    DEBUG_ONLY(rdx_argp = noreg);
    Register rbx_member = rbx_method;  // MemberName ptr; incoming method ptr is dead now
    __ pop(rax_temp);           // return address
    __ pop(rbx_member);         // extract last argument
    __ push(rax_temp);          // re-push return address
    generate_method_handle_dispatch(_masm, iid, rcx_recv, rbx_member, not_for_compiler_entry);
  }

  if (PrintMethodHandleStubs) {
    address code_end = __ pc();
    tty->print_cr("--------");
    tty->print_cr("method handle interpreter entry for %s", vmIntrinsics::name_at(iid));
    Disassembler::decode(code_start, code_end);
    tty->cr();
  }

  return entry_point;
}

void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
                                                    vmIntrinsics::ID iid,
                                                    Register receiver_reg,
                                                    Register member_reg,
                                                    bool for_compiler_entry) {
  assert(is_signature_polymorphic(iid), "expected invoke iid");
  Register rbx_method = rbx;   // eventual target of this invocation
  // temps used in this code are not used in *either* compiled or interpreted calling sequences
#ifdef _LP64
  Register temp1 = rscratch1;
  Register temp2 = rscratch2;
  Register temp3 = rax;
  if (for_compiler_entry) {
    assert(receiver_reg == (iid == vmIntrinsics::_linkToStatic ? noreg : j_rarg0), "only valid assignment");
    assert_different_registers(temp1,        j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5);
    assert_different_registers(temp2,        j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5);
    assert_different_registers(temp3,        j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5);
  }
#else
  Register temp1 = (for_compiler_entry ? rsi : rdx);
  Register temp2 = rdi;
  Register temp3 = rax;
  if (for_compiler_entry) {
    assert(receiver_reg == (iid == vmIntrinsics::_linkToStatic ? noreg : rcx), "only valid assignment");
    assert_different_registers(temp1,        rcx, rdx);
    assert_different_registers(temp2,        rcx, rdx);
    assert_different_registers(temp3,        rcx, rdx);
  }
#endif
  assert_different_registers(temp1, temp2, temp3, receiver_reg);
  assert_different_registers(temp1, temp2, temp3, member_reg);
  if (!for_compiler_entry)
    assert_different_registers(temp1, temp2, temp3, saved_last_sp_register());  // don't trash lastSP

  if (iid == vmIntrinsics::_invokeBasic) {
    // indirect through MH.form.vmentry.vmtarget
    jump_to_lambda_form(_masm, receiver_reg, rbx_method, temp1, for_compiler_entry);

  } else {
    // The method is a member invoker used by direct method handles.
    if (VerifyMethodHandles) {
      // make sure the trailing argument really is a MemberName (caller responsibility)
      verify_klass(_masm, member_reg, SystemDictionary::WK_KLASS_ENUM_NAME(java_lang_invoke_MemberName),
                   "MemberName required for invokeVirtual etc.");
    }

    Address member_clazz(    member_reg, NONZERO(java_lang_invoke_MemberName::clazz_offset_in_bytes()));
    Address member_vmindex(  member_reg, NONZERO(java_lang_invoke_MemberName::vmindex_offset_in_bytes()));
    Address member_vmtarget( member_reg, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes()));

    Register temp1_recv_klass = temp1;
    if (iid != vmIntrinsics::_linkToStatic) {
      __ verify_oop(receiver_reg);
      if (iid == vmIntrinsics::_linkToSpecial) {
        // Don't actually load the klass; just null-check the receiver.
        __ null_check(receiver_reg);
      } else {
        // load receiver klass itself
        __ null_check(receiver_reg, oopDesc::klass_offset_in_bytes());
        __ load_klass(temp1_recv_klass, receiver_reg);
        __ verify_oop(temp1_recv_klass);
      }
      BLOCK_COMMENT("check_receiver {");
      // The receiver for the MemberName must be in receiver_reg.
      // Check the receiver against the MemberName.clazz
      if (VerifyMethodHandles && iid == vmIntrinsics::_linkToSpecial) {
        // Did not load it above...
        __ load_klass(temp1_recv_klass, receiver_reg);
        __ verify_oop(temp1_recv_klass);
      }
      if (VerifyMethodHandles && iid != vmIntrinsics::_linkToInterface) {
        Label L_ok;
        Register temp2_defc = temp2;
        __ load_heap_oop(temp2_defc, member_clazz);
        load_klass_from_Class(_masm, temp2_defc);
        __ verify_oop(temp2_defc);
        __ check_klass_subtype(temp1_recv_klass, temp2_defc, temp3, L_ok);
        // If we get here, the type check failed!
        __ STOP("receiver class disagrees with MemberName.clazz");
        __ bind(L_ok);
      }
      BLOCK_COMMENT("} check_receiver");
    }
    if (iid == vmIntrinsics::_linkToSpecial ||
        iid == vmIntrinsics::_linkToStatic) {
      DEBUG_ONLY(temp1_recv_klass = noreg);  // these guys didn't load the recv_klass
    }

    // Live registers at this point:
    //  member_reg - MemberName that was the trailing argument
    //  temp1_recv_klass - klass of stacked receiver, if needed
    //  rsi/r13 - interpreter linkage (if interpreted)
    //  rcx, rdx, rsi, rdi, r8, r9 - compiler arguments (if compiled)

    bool method_is_live = false;
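    // method_is_live marks the cases that load rbx_method here and fall
    // through to the common jump below; linkToInterface dispatches and
    // jumps on its own.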
    switch (iid) {
    case vmIntrinsics::_linkToSpecial:
      if (VerifyMethodHandles) {
        verify_ref_kind(_masm, JVM_REF_invokeSpecial, member_reg, temp3);
      }
      __ movptr(rbx_method, member_vmtarget);
      method_is_live = true;
      break;

    case vmIntrinsics::_linkToStatic:
      if (VerifyMethodHandles) {
        verify_ref_kind(_masm, JVM_REF_invokeStatic, member_reg, temp3);
      }
      __ movptr(rbx_method, member_vmtarget);
      method_is_live = true;
      break;

    case vmIntrinsics::_linkToVirtual:
    {
      // same as TemplateTable::invokevirtual,
      // minus the CP setup and profiling:

      if (VerifyMethodHandles) {
        verify_ref_kind(_masm, JVM_REF_invokeVirtual, member_reg, temp3);
      }

      // pick out the vtable index from the MemberName, and then we can discard it:
      Register temp2_index = temp2;
      __ movptr(temp2_index, member_vmindex);

      if (VerifyMethodHandles) {
        Label L_index_ok;
        __ cmpl(temp2_index, 0);
        __ jcc(Assembler::greaterEqual, L_index_ok);
        __ STOP("no virtual index");
        __ BIND(L_index_ok);
      }

      // Note:  The verifier invariants allow us to ignore MemberName.clazz and vmtarget
      // at this point.  And VerifyMethodHandles has already checked clazz, if needed.

      // get target Method* & entry point
      __ lookup_virtual_method(temp1_recv_klass, temp2_index, rbx_method);
      method_is_live = true;
      break;
    }

    case vmIntrinsics::_linkToInterface:
    {
      // same as TemplateTable::invokeinterface
      // (minus the CP setup and profiling, with different argument motion)
      if (VerifyMethodHandles) {
        verify_ref_kind(_masm, JVM_REF_invokeInterface, member_reg, temp3);
      }

      Register temp3_intf = temp3;
      __ load_heap_oop(temp3_intf, member_clazz);
      load_klass_from_Class(_masm, temp3_intf);
      __ verify_oop(temp3_intf);

      Register rbx_index = rbx_method;
      __ movptr(rbx_index, member_vmindex);
      if (VerifyMethodHandles) {
        Label L;
        __ cmpl(rbx_index, 0);
        __ jcc(Assembler::greaterEqual, L);
        __ STOP("invalid vtable index for MH.invokeInterface");
        __ bind(L);
      }

      // given intf, index, and recv klass, dispatch to the implementation method
      Label L_no_such_interface;
      __ lookup_interface_method(temp1_recv_klass, temp3_intf,
                                 // note: next two args must be the same:
                                 rbx_index, rbx_method,
                                 temp2,
                                 L_no_such_interface);

      __ verify_oop(rbx_method);
      jump_from_method_handle(_masm, rbx_method, temp2, for_compiler_entry);
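      // jump_from_method_handle does not return; trap if it ever falls through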
      __ hlt();

      __ bind(L_no_such_interface);
      __ jump(RuntimeAddress(StubRoutines::throw_IncompatibleClassChangeError_entry()));
      break;
    }

    default:
      fatal(err_msg("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid)));
      break;
    }

    if (method_is_live) {
      // live at this point:  rbx_method, rsi/r13 (if interpreted)

      // After figuring out which concrete method to call, jump into it.
      // Note that this works in the interpreter with no data motion.
      // But the compiled version will require that rcx_recv be shifted out.
      __ verify_oop(rbx_method);
      jump_from_method_handle(_masm, rbx_method, temp1, for_compiler_entry);
    }
  }
}

#ifndef PRODUCT
void trace_method_handle_stub(const char* adaptername,
                              oop mh,
                              intptr_t* saved_regs,
                              intptr_t* entry_sp) {
  // called as a leaf from native code: do not block the JVM!
  bool has_mh = (strstr(adaptername, "/static") == NULL &&
                 strstr(adaptername, "linkTo") == NULL);    // static linkers don't have MH
  const char* mh_reg_name = has_mh ? "rcx_mh" : "rcx";
  tty->print_cr("MH %s %s="PTR_FORMAT" sp="PTR_FORMAT,
                adaptername, mh_reg_name,
                mh, entry_sp);

  if (Verbose) {
    tty->print_cr("Registers:");
    const int saved_regs_count = RegisterImpl::number_of_registers;
    for (int i = 0; i < saved_regs_count; i++) {
      Register r = as_Register(i);
      // The registers are stored in reverse order on the stack (by pusha).
      tty->print("%3s=" PTR_FORMAT, r->name(), saved_regs[((saved_regs_count - 1) - i)]);
      if ((i + 1) % 4 == 0) {
        tty->cr();
      } else {
        tty->print(", ");
      }
    }
    tty->cr();

    {
     // dumping last frame with frame::describe

      JavaThread* p = JavaThread::active();

      ResourceMark rm;
      PRESERVE_EXCEPTION_MARK; // may not be needed but safer and inexpensive here
      FrameValues values;

      // Note: We want to allow trace_method_handle from any call site.
      // While trace_method_handle creates a frame, it may be entered
      // without a PC on the stack top (e.g. not just after a call).
      // Walking that frame could lead to failures due to that invalid PC.
      // => carefully detect that frame when doing the stack walking

      // Current C frame
      frame cur_frame = os::current_frame();

      // Robust search of trace_calling_frame (independent of inlining).
      // Assumes saved_regs comes from a pusha in the trace_calling_frame.
      assert(cur_frame.sp() < saved_regs, "registers not saved on stack ?");
      frame trace_calling_frame = os::get_sender_for_C_frame(&cur_frame);
      while (trace_calling_frame.fp() < saved_regs) {
        trace_calling_frame = os::get_sender_for_C_frame(&trace_calling_frame);
      }

      // safely create a frame and call frame::describe
      intptr_t *dump_sp = trace_calling_frame.sender_sp();
      intptr_t *dump_fp = trace_calling_frame.link();

      bool walkable = has_mh; // whether the traced frame should be walkable

      if (walkable) {
        // The previous definition of walkable may have to be refined
        // if new call sites cause the next frame constructor to start
        // failing. Alternatively, frame constructors could be
        // modified to support the current or future non walkable
        // frames (but this is more intrusive and is not considered as
        // part of this RFE, which will instead use a simpler output).
        frame dump_frame = frame(dump_sp, dump_fp);
        dump_frame.describe(values, 1);
      } else {
        // Stack may not be walkable (invalid PC above FP):
        // Add descriptions without building a Java frame to avoid issues
        values.describe(-1, dump_fp, "fp for #1 <not parsed, cannot trust pc>");
        values.describe(-1, dump_sp, "sp for #1");
      }
      values.describe(-1, entry_sp, "raw top of stack");

      tty->print_cr("Stack layout:");
      values.print(p);
    }
    if (has_mh && mh->is_oop()) {
      mh->print();
      if (java_lang_invoke_MethodHandle::is_instance(mh)) {
        if (java_lang_invoke_MethodHandle::form_offset_in_bytes() != 0)
          java_lang_invoke_MethodHandle::form(mh)->print();
      }
    }
  }
}

// The stub wraps the arguments in a struct on the stack to avoid
// dealing with the different calling conventions for passing 6
// arguments.
struct MethodHandleStubArguments {
  const char* adaptername;
  oopDesc* mh;
  intptr_t* saved_regs;
  intptr_t* entry_sp;
};
void trace_method_handle_stub_wrapper(MethodHandleStubArguments* args) {
  trace_method_handle_stub(args->adaptername,
                           args->mh,
                           args->saved_regs,
                           args->entry_sp);
}

void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adaptername) {
  if (!TraceMethodHandles)  return;
  BLOCK_COMMENT("trace_method_handle {");
  __ enter();
  __ andptr(rsp, -16); // align stack if needed for FPU state
  __ pusha();
  __ mov(rbx, rsp); // for retrieving saved_regs
  // Note: saved_regs must be in the entered frame for the
  // robust stack walking implemented in trace_method_handle_stub.

  // save FP result, valid at some call sites (adapter_opt_return_float, ...)
  __ increment(rsp, -2 * wordSize);
  if  (UseSSE >= 2) {
    __ movdbl(Address(rsp, 0), xmm0);
  } else if (UseSSE == 1) {
    __ movflt(Address(rsp, 0), xmm0);
  } else {
    __ fst_d(Address(rsp, 0));
  }

  // Incoming state:
  // rcx: method handle
  //
  // To avoid calling convention issues, build a record on the stack
  // and pass the pointer to that instead.
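  // The fields are pushed in reverse declaration order (entry_sp, saved_regs,
  // mh, adaptername) so that rsp ends up pointing at the start of a
  // MethodHandleStubArguments record.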
  __ push(rbp);               // entry_sp (with extra align space)
  __ push(rbx);               // pusha saved_regs
  __ push(rcx);               // mh
  __ push(rcx);               // slot for adaptername
  __ movptr(Address(rsp, 0), (intptr_t) adaptername);
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, trace_method_handle_stub_wrapper), rsp);
  __ increment(rsp, sizeof(MethodHandleStubArguments));

  if  (UseSSE >= 2) {
    __ movdbl(xmm0, Address(rsp, 0));
  } else if (UseSSE == 1) {
    __ movflt(xmm0, Address(rsp, 0));
  } else {
    __ fld_d(Address(rsp, 0));
  }
  __ increment(rsp, 2 * wordSize);

  __ popa();
  __ leave();
  BLOCK_COMMENT("} trace_method_handle");
}
#endif //PRODUCT