/*
 * Copyright 2003-2008 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_sharedRuntime_x86_32.cpp.incl"

#define __ masm->
#ifdef COMPILER2
UncommonTrapBlob   *SharedRuntime::_uncommon_trap_blob;
#endif // COMPILER2

DeoptimizationBlob *SharedRuntime::_deopt_blob;
SafepointBlob      *SharedRuntime::_polling_page_safepoint_handler_blob;
SafepointBlob      *SharedRuntime::_polling_page_return_handler_blob;
RuntimeStub*       SharedRuntime::_wrong_method_blob;
RuntimeStub*       SharedRuntime::_ic_miss_blob;
RuntimeStub*       SharedRuntime::_resolve_opt_virtual_call_blob;
RuntimeStub*       SharedRuntime::_resolve_virtual_call_blob;
RuntimeStub*       SharedRuntime::_resolve_static_call_blob;

class RegisterSaver {
  enum { FPU_regs_live = 8 /*for the FPU stack*/+8/*eight more for XMM registers*/ };
  // Capture info about frame layout
  enum layout {
                fpu_state_off = 0,
                fpu_state_end = fpu_state_off+FPUStateSizeInWords-1,
                st0_off, st0H_off,
                st1_off, st1H_off,
                st2_off, st2H_off,
                st3_off, st3H_off,
                st4_off, st4H_off,
                st5_off, st5H_off,
                st6_off, st6H_off,
                st7_off, st7H_off,

                xmm0_off, xmm0H_off,
                xmm1_off, xmm1H_off,
                xmm2_off, xmm2H_off,
                xmm3_off, xmm3H_off,
                xmm4_off, xmm4H_off,
                xmm5_off, xmm5H_off,
                xmm6_off, xmm6H_off,
                xmm7_off, xmm7H_off,
                flags_off,
                rdi_off,
                rsi_off,
                ignore_off,  // extra copy of rbp
                rsp_off,
                rbx_off,
                rdx_off,
                rcx_off,
                rax_off,
                // The frame sender code expects that rbp will be in the "natural" place and
                // will override any oopMap setting for it. We must therefore force the layout
                // so that it agrees with the frame sender code.
                rbp_off,
                return_off,      // slot for return address
                reg_save_size };
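
  // Illustrative sketch of the resulting save area (word offsets from rsp
  // after save_live_registers, ascending toward high addresses):
  //   rsp -> [ FPU state, FPUStateSizeInWords words ]
  //          [ st0..st7, two words each             ]
  //          [ xmm0..xmm7, two words each           ]
  //          [ flags ][ rdi ][ rsi ][ rbp copy ][ rsp ][ rbx ][ rdx ][ rcx ][ rax ]
  //          [ rbp pushed by enter()                ]
  //          [ return address                       ]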


  public:

  static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words,
                                     int* total_frame_words, bool verify_fpu = true);
  static void restore_live_registers(MacroAssembler* masm);

  static int rax_offset() { return rax_off; }
  static int rbx_offset() { return rbx_off; }

  // Offsets into the register save area
  // Used by deoptimization when it is managing result register
  // values on its own

  static int raxOffset(void) { return rax_off; }
  static int rdxOffset(void) { return rdx_off; }
  static int rbxOffset(void) { return rbx_off; }
  static int xmm0Offset(void) { return xmm0_off; }
  // This really returns a slot in the fp save area; which one is not important
  static int fpResultOffset(void) { return st0_off; }

  // During deoptimization only the result registers need to be restored;
  // all the other values have already been extracted.

  static void restore_result_registers(MacroAssembler* masm);

};

OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words,
                                           int* total_frame_words, bool verify_fpu) {

  int frame_size_in_bytes =  (reg_save_size + additional_frame_words) * wordSize;
  int frame_words = frame_size_in_bytes / wordSize;
  *total_frame_words = frame_words;

  assert(FPUStateSizeInWords == 27, "update stack layout");

  // Save registers, FPU state, and flags.
  // We assume the caller has already pushed the return address slot on the stack.
  // We push rbp twice in this sequence because we want the real rbp
  // to be under the return address like a normal enter would leave it,
  // and we also want to use pusha.
  // We store the FPU registers by hand instead of pushing them.
  __ enter();
  __ pusha();
  __ pushf();
  __ subptr(rsp,FPU_regs_live*sizeof(jdouble)); // Push FPU registers space
  __ push_FPU_state();          // Save FPU state & init

  if (verify_fpu) {
    // Some stubs may have non standard FPU control word settings so
    // only check and reset the value when it is required to be the
    // standard value.  The safepoint blob in particular can be used
    // in methods which are using the 24 bit control word for
    // optimized float math.

#ifdef ASSERT
    // Make sure the control word has the expected value
    Label ok;
    __ cmpw(Address(rsp, 0), StubRoutines::fpu_cntrl_wrd_std());
    __ jccb(Assembler::equal, ok);
    __ stop("corrupted control word detected");
    __ bind(ok);
#endif

    // Reset the control word to guard against exceptions being unmasked
    // since fstp_d can cause FPU stack underflow exceptions.  Write it
    // into the on stack copy and then reload that to make sure that the
    // current and future values are correct.
    __ movw(Address(rsp, 0), StubRoutines::fpu_cntrl_wrd_std());
  }

  __ frstor(Address(rsp, 0));
  if (!verify_fpu) {
    // Set the control word so that exceptions are masked for the
    // following code.
    __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
  }

  // Save the FPU registers in de-opt-able form

  __ fstp_d(Address(rsp, st0_off*wordSize)); // st(0)
  __ fstp_d(Address(rsp, st1_off*wordSize)); // st(1)
  __ fstp_d(Address(rsp, st2_off*wordSize)); // st(2)
  __ fstp_d(Address(rsp, st3_off*wordSize)); // st(3)
  __ fstp_d(Address(rsp, st4_off*wordSize)); // st(4)
  __ fstp_d(Address(rsp, st5_off*wordSize)); // st(5)
  __ fstp_d(Address(rsp, st6_off*wordSize)); // st(6)
  __ fstp_d(Address(rsp, st7_off*wordSize)); // st(7)

  if( UseSSE == 1 ) {           // Save the XMM state
    __ movflt(Address(rsp,xmm0_off*wordSize),xmm0);
    __ movflt(Address(rsp,xmm1_off*wordSize),xmm1);
    __ movflt(Address(rsp,xmm2_off*wordSize),xmm2);
    __ movflt(Address(rsp,xmm3_off*wordSize),xmm3);
    __ movflt(Address(rsp,xmm4_off*wordSize),xmm4);
    __ movflt(Address(rsp,xmm5_off*wordSize),xmm5);
    __ movflt(Address(rsp,xmm6_off*wordSize),xmm6);
    __ movflt(Address(rsp,xmm7_off*wordSize),xmm7);
  } else if( UseSSE >= 2 ) {
    __ movdbl(Address(rsp,xmm0_off*wordSize),xmm0);
    __ movdbl(Address(rsp,xmm1_off*wordSize),xmm1);
    __ movdbl(Address(rsp,xmm2_off*wordSize),xmm2);
    __ movdbl(Address(rsp,xmm3_off*wordSize),xmm3);
    __ movdbl(Address(rsp,xmm4_off*wordSize),xmm4);
    __ movdbl(Address(rsp,xmm5_off*wordSize),xmm5);
    __ movdbl(Address(rsp,xmm6_off*wordSize),xmm6);
    __ movdbl(Address(rsp,xmm7_off*wordSize),xmm7);
  }

  // Set an oopmap for the call site.  This oopmap will map all
  // oop-registers and debug-info registers as callee-saved.  This
  // will allow deoptimization at this safepoint to find all possible
  // debug-info recordings, as well as let GC find all oops.

  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map =  new OopMap( frame_words, 0 );

#define STACK_OFFSET(x) VMRegImpl::stack2reg((x) + additional_frame_words)

  map->set_callee_saved(STACK_OFFSET( rax_off), rax->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( rcx_off), rcx->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( rdx_off), rdx->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( rbx_off), rbx->as_VMReg());
  // rbp, location is known implicitly, no oopMap
  map->set_callee_saved(STACK_OFFSET( rsi_off), rsi->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( rdi_off), rdi->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(st0_off), as_FloatRegister(0)->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(st1_off), as_FloatRegister(1)->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(st2_off), as_FloatRegister(2)->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(st3_off), as_FloatRegister(3)->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(st4_off), as_FloatRegister(4)->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(st5_off), as_FloatRegister(5)->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(st6_off), as_FloatRegister(6)->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(st7_off), as_FloatRegister(7)->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(xmm0_off), xmm0->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(xmm1_off), xmm1->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(xmm2_off), xmm2->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(xmm3_off), xmm3->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(xmm4_off), xmm4->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(xmm5_off), xmm5->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(xmm6_off), xmm6->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(xmm7_off), xmm7->as_VMReg());
  // %%% This is really a waste but we'll keep things as they were for now
  if (true) {
#define NEXTREG(x) (x)->as_VMReg()->next()
    map->set_callee_saved(STACK_OFFSET(st0H_off), NEXTREG(as_FloatRegister(0)));
    map->set_callee_saved(STACK_OFFSET(st1H_off), NEXTREG(as_FloatRegister(1)));
    map->set_callee_saved(STACK_OFFSET(st2H_off), NEXTREG(as_FloatRegister(2)));
    map->set_callee_saved(STACK_OFFSET(st3H_off), NEXTREG(as_FloatRegister(3)));
    map->set_callee_saved(STACK_OFFSET(st4H_off), NEXTREG(as_FloatRegister(4)));
    map->set_callee_saved(STACK_OFFSET(st5H_off), NEXTREG(as_FloatRegister(5)));
    map->set_callee_saved(STACK_OFFSET(st6H_off), NEXTREG(as_FloatRegister(6)));
    map->set_callee_saved(STACK_OFFSET(st7H_off), NEXTREG(as_FloatRegister(7)));
    map->set_callee_saved(STACK_OFFSET(xmm0H_off), NEXTREG(xmm0));
    map->set_callee_saved(STACK_OFFSET(xmm1H_off), NEXTREG(xmm1));
    map->set_callee_saved(STACK_OFFSET(xmm2H_off), NEXTREG(xmm2));
    map->set_callee_saved(STACK_OFFSET(xmm3H_off), NEXTREG(xmm3));
    map->set_callee_saved(STACK_OFFSET(xmm4H_off), NEXTREG(xmm4));
    map->set_callee_saved(STACK_OFFSET(xmm5H_off), NEXTREG(xmm5));
    map->set_callee_saved(STACK_OFFSET(xmm6H_off), NEXTREG(xmm6));
    map->set_callee_saved(STACK_OFFSET(xmm7H_off), NEXTREG(xmm7));
#undef NEXTREG
#undef STACK_OFFSET
  }

  return map;

}

void RegisterSaver::restore_live_registers(MacroAssembler* masm) {

  // Recover XMM & FPU state
  if( UseSSE == 1 ) {
    __ movflt(xmm0,Address(rsp,xmm0_off*wordSize));
    __ movflt(xmm1,Address(rsp,xmm1_off*wordSize));
    __ movflt(xmm2,Address(rsp,xmm2_off*wordSize));
    __ movflt(xmm3,Address(rsp,xmm3_off*wordSize));
    __ movflt(xmm4,Address(rsp,xmm4_off*wordSize));
    __ movflt(xmm5,Address(rsp,xmm5_off*wordSize));
    __ movflt(xmm6,Address(rsp,xmm6_off*wordSize));
    __ movflt(xmm7,Address(rsp,xmm7_off*wordSize));
  } else if( UseSSE >= 2 ) {
    __ movdbl(xmm0,Address(rsp,xmm0_off*wordSize));
    __ movdbl(xmm1,Address(rsp,xmm1_off*wordSize));
    __ movdbl(xmm2,Address(rsp,xmm2_off*wordSize));
    __ movdbl(xmm3,Address(rsp,xmm3_off*wordSize));
    __ movdbl(xmm4,Address(rsp,xmm4_off*wordSize));
    __ movdbl(xmm5,Address(rsp,xmm5_off*wordSize));
    __ movdbl(xmm6,Address(rsp,xmm6_off*wordSize));
    __ movdbl(xmm7,Address(rsp,xmm7_off*wordSize));
  }
  __ pop_FPU_state();
  __ addptr(rsp, FPU_regs_live*sizeof(jdouble)); // Pop FPU registers

  __ popf();
  __ popa();
  // Get the rbp described implicitly by the frame sender code (no oopMap)
  __ pop(rbp);

}

void RegisterSaver::restore_result_registers(MacroAssembler* masm) {

  // Just restore the result registers. Only used by deoptimization. By
  // now any callee-save register that needs to be restored to a c2
  // caller of the deoptee has been extracted into the vframeArray
  // and will be stuffed into the c2i adapter we create for later
  // restoration, so only result registers need to be restored here.

  __ frstor(Address(rsp, 0));      // Restore fpu state

  // Recover XMM & FPU state
  if( UseSSE == 1 ) {
    __ movflt(xmm0, Address(rsp, xmm0_off*wordSize));
  } else if( UseSSE >= 2 ) {
    __ movdbl(xmm0, Address(rsp, xmm0_off*wordSize));
  }
  __ movptr(rax, Address(rsp, rax_off*wordSize));
  __ movptr(rdx, Address(rsp, rdx_off*wordSize));
  // Pop all of the register save area off the stack except the return address
  __ addptr(rsp, return_off * wordSize);
}

// The java_calling_convention describes stack locations as ideal slots on
// a frame with no abi restrictions. Since we must observe abi restrictions
// (like the placement of the register window) the slots must be biased by
// the following value.
static int reg2offset_in(VMReg r) {
  // Account for saved rbp, and return address
  // This should really be in_preserve_stack_slots
  return (r->reg2stack() + 2) * VMRegImpl::stack_slot_size;
}
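
// For example, on x86_32 a stack slot is 4 bytes, so incoming stack slot 0
// maps to rbp + 8: the two extra slots skip the saved rbp and the return
// address.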

static int reg2offset_out(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}

// ---------------------------------------------------------------------------
// Read the array of BasicTypes from a signature, and compute where the
// arguments should go.  Values in the VMRegPair regs array refer to 4-byte
// quantities.  Values less than SharedInfo::stack0 are registers, those above
// refer to 4-byte stack slots.  All stack slots are based off of the stack pointer
// as framesizes are fixed.
// VMRegImpl::stack0 refers to the first slot 0(sp),
// and VMRegImpl::stack0+1 refers to the memory word 4 bytes higher.  Register
// values up to RegisterImpl::number_of_registers are the 32-bit
// integer registers.

// Pass first two oop/int args in registers ECX and EDX.
// Pass first two float/double args in registers XMM0 and XMM1.
// Doubles have precedence, so if you pass a mix of floats and doubles
// the doubles will grab the registers before the floats will.

// Note: the INPUTS in sig_bt are in units of Java argument words, which are
// either 32-bit or 64-bit depending on the build.  The OUTPUTS are in 32-bit
// units regardless of build. Of course for i486 there is no 64-bit build.


// ---------------------------------------------------------------------------
// The compiled Java calling convention.
// Pass first two oop/int args in registers ECX and EDX.
// Pass first two float/double args in registers XMM0 and XMM1.
// Doubles have precedence, so if you pass a mix of floats and doubles
// the doubles will grab the registers before the floats will.
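//
// Illustrative example (assuming UseSSE >= 2): for the signature
// (int, double, float, long) the assignments are:
//   int    -> ECX   (first int/oop register)
//   double -> XMM0  (doubles claim the XMM registers in the first pass)
//   float  -> XMM1  (floats take whatever XMM registers remain)
//   long   -> two aligned stack slots (longs never go in registers here)
// With two doubles in the signature, both XMM registers would already be
// taken and any floats would be passed on the stack.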
int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
                                           VMRegPair *regs,
                                           int total_args_passed,
                                           int is_outgoing) {
  uint    stack = 0;          // Starting stack position for args on stack


  // Pass first two oop/int args in registers ECX and EDX.
  uint reg_arg0 = 9999;
  uint reg_arg1 = 9999;

  // Pass first two float/double args in registers XMM0 and XMM1.
  // Doubles have precedence, so if you pass a mix of floats and doubles
  // the doubles will grab the registers before the floats will.
  // CNC - TURNED OFF FOR non-SSE.
  //       On Intel we have to round all doubles (and most floats) at
  //       call sites by storing to the stack in any case.
  // UseSSE=0 ==> Don't Use ==> 9999+0
  // UseSSE=1 ==> Floats only ==> 9999+1
  // UseSSE>=2 ==> Floats or doubles ==> 9999+2
  enum { fltarg_dontuse = 9999+0, fltarg_float_only = 9999+1, fltarg_flt_dbl = 9999+2 };
  uint fargs = (UseSSE>=2) ? 2 : UseSSE;
  uint freg_arg0 = 9999+fargs;
  uint freg_arg1 = 9999+fargs;

  // Pass doubles & longs aligned on the stack.  First count stack slots for doubles
  int i;
  for( i = 0; i < total_args_passed; i++) {
    if( sig_bt[i] == T_DOUBLE ) {
      // first 2 doubles go in registers
      if( freg_arg0 == fltarg_flt_dbl ) freg_arg0 = i;
      else if( freg_arg1 == fltarg_flt_dbl ) freg_arg1 = i;
      else // Else double is passed low on the stack to be aligned.
        stack += 2;
    } else if( sig_bt[i] == T_LONG ) {
      stack += 2;
    }
  }
  int dstack = 0;             // Separate counter for placing doubles

  // Now pick where all else goes.
  for( i = 0; i < total_args_passed; i++) {
    // From the type and the argument number (count) compute the location
    switch( sig_bt[i] ) {
    case T_SHORT:
    case T_CHAR:
    case T_BYTE:
    case T_BOOLEAN:
    case T_INT:
    case T_ARRAY:
    case T_OBJECT:
    case T_ADDRESS:
      if( reg_arg0 == 9999 )  {
        reg_arg0 = i;
        regs[i].set1(rcx->as_VMReg());
      } else if( reg_arg1 == 9999 )  {
        reg_arg1 = i;
        regs[i].set1(rdx->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stack++));
      }
      break;
    case T_FLOAT:
      if( freg_arg0 == fltarg_flt_dbl || freg_arg0 == fltarg_float_only ) {
        freg_arg0 = i;
        regs[i].set1(xmm0->as_VMReg());
      } else if( freg_arg1 == fltarg_flt_dbl || freg_arg1 == fltarg_float_only ) {
        freg_arg1 = i;
        regs[i].set1(xmm1->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stack++));
      }
      break;
    case T_LONG:
      assert(sig_bt[i+1] == T_VOID, "missing Half" );
      regs[i].set2(VMRegImpl::stack2reg(dstack));
      dstack += 2;
      break;
    case T_DOUBLE:
      assert(sig_bt[i+1] == T_VOID, "missing Half" );
      if( freg_arg0 == (uint)i ) {
        regs[i].set2(xmm0->as_VMReg());
      } else if( freg_arg1 == (uint)i ) {
        regs[i].set2(xmm1->as_VMReg());
      } else {
        regs[i].set2(VMRegImpl::stack2reg(dstack));
        dstack += 2;
      }
      break;
    case T_VOID: regs[i].set_bad(); break;
    default:
      ShouldNotReachHere();
      break;
    }
  }

  // The return value can be an odd number of VMRegImpl stack slots; make it a multiple of 2.
  return round_to(stack, 2);
}

// Patch the caller's callsite with the entry to compiled code, if it exists.
static void patch_callers_callsite(MacroAssembler *masm) {
  Label L;
  __ verify_oop(rbx);
  __ cmpptr(Address(rbx, in_bytes(methodOopDesc::code_offset())), (int32_t)NULL_WORD);
  __ jcc(Assembler::equal, L);
  // Schedule the branch target address early.
  // Call into the VM to patch the caller, then jump to compiled callee
  // rax isn't live, so capture the return address while we easily can
  __ movptr(rax, Address(rsp, 0));
  __ pusha();
  __ pushf();

  if (UseSSE == 1) {
    __ subptr(rsp, 2*wordSize);
    __ movflt(Address(rsp, 0), xmm0);
    __ movflt(Address(rsp, wordSize), xmm1);
  }
  if (UseSSE >= 2) {
    __ subptr(rsp, 4*wordSize);
    __ movdbl(Address(rsp, 0), xmm0);
    __ movdbl(Address(rsp, 2*wordSize), xmm1);
  }
#ifdef COMPILER2
  // C2 may leave the stack dirty if not in SSE2+ mode
  if (UseSSE >= 2) {
    __ verify_FPU(0, "c2i transition should have clean FPU stack");
  } else {
    __ empty_FPU_stack();
  }
#endif /* COMPILER2 */

  // VM needs caller's callsite
  __ push(rax);
  // VM needs target method
  __ push(rbx);
  __ verify_oop(rbx);
  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
  __ addptr(rsp, 2*wordSize);

  if (UseSSE == 1) {
    __ movflt(xmm0, Address(rsp, 0));
    __ movflt(xmm1, Address(rsp, wordSize));
    __ addptr(rsp, 2*wordSize);
  }
  if (UseSSE >= 2) {
    __ movdbl(xmm0, Address(rsp, 0));
    __ movdbl(xmm1, Address(rsp, 2*wordSize));
    __ addptr(rsp, 4*wordSize);
  }

  __ popf();
  __ popa();
  __ bind(L);
}


// Helper function to put tags in interpreter stack.
static void  tag_stack(MacroAssembler *masm, const BasicType sig, int st_off) {
  if (TaggedStackInterpreter) {
    int tag_offset = st_off + Interpreter::expr_tag_offset_in_bytes(0);
    if (sig == T_OBJECT || sig == T_ARRAY) {
      __ movptr(Address(rsp, tag_offset), frame::TagReference);
    } else if (sig == T_LONG || sig == T_DOUBLE) {
      int next_tag_offset = st_off + Interpreter::expr_tag_offset_in_bytes(1);
      __ movptr(Address(rsp, next_tag_offset), frame::TagValue);
      __ movptr(Address(rsp, tag_offset), frame::TagValue);
    } else {
      __ movptr(Address(rsp, tag_offset), frame::TagValue);
    }
  }
}

// Double and long values are not contiguous on tagged stacks.
static void move_c2i_double(MacroAssembler *masm, XMMRegister r, int st_off) {
  int next_off = st_off - Interpreter::stackElementSize();
  if (TaggedStackInterpreter) {
   __ movdbl(Address(rsp, next_off), r);
   // Move top half up and put tag in the middle.
   __ movl(rdi, Address(rsp, next_off+wordSize));
   __ movl(Address(rsp, st_off), rdi);
   tag_stack(masm, T_DOUBLE, next_off);
  } else {
   __ movdbl(Address(rsp, next_off), r);
  }
}

static void gen_c2i_adapter(MacroAssembler *masm,
                            int total_args_passed,
                            int comp_args_on_stack,
                            const BasicType *sig_bt,
                            const VMRegPair *regs,
                            Label& skip_fixup) {
  // Before we get into the guts of the C2I adapter, see if we should be here
  // at all.  We've come from compiled code and are attempting to jump to the
  // interpreter, which means the caller made a static call to get here
  // (vcalls always get a compiled target if there is one).  Check for a
  // compiled target.  If there is one, we need to patch the caller's call.
  patch_callers_callsite(masm);

  __ bind(skip_fixup);

#ifdef COMPILER2
  // C2 may leave the stack dirty if not in SSE2+ mode
  if (UseSSE >= 2) {
    __ verify_FPU(0, "c2i transition should have clean FPU stack");
  } else {
    __ empty_FPU_stack();
  }
#endif /* COMPILER2 */

  // Since all args are passed on the stack, total_args_passed *
  // Interpreter::stackElementSize() is the space we need.
  int extraspace = total_args_passed * Interpreter::stackElementSize();
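  // For example, with 3 args and an untagged stack this is 3 * wordSize
  // bytes; under TaggedStackInterpreter each element is twice as wide
  // (value plus tag word), doubling the figure.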

  // Get return address
  __ pop(rax);

  // set senderSP value
  __ movptr(rsi, rsp);

  __ subptr(rsp, extraspace);

  // Now write the args into the outgoing interpreter space
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // st_off points to lowest address on stack.
    int st_off = ((total_args_passed - 1) - i) * Interpreter::stackElementSize();
    int next_off = st_off - Interpreter::stackElementSize();

    // Say 4 args:
    // i   st_off
    // 0   12 T_LONG
    // 1    8 T_VOID
    // 2    4 T_OBJECT
    // 3    0 T_BOOL
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }

    if (r_1->is_stack()) {
      // memory to memory use fpu stack top
      int ld_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace;

      if (!r_2->is_valid()) {
        __ movl(rdi, Address(rsp, ld_off));
        __ movptr(Address(rsp, st_off), rdi);
        tag_stack(masm, sig_bt[i], st_off);
      } else {

        // ld_off == LSW, ld_off+VMRegImpl::stack_slot_size == MSW
        // st_off == MSW, st_off-wordSize == LSW

        __ movptr(rdi, Address(rsp, ld_off));
        __ movptr(Address(rsp, next_off), rdi);
#ifndef _LP64
        __ movptr(rdi, Address(rsp, ld_off + wordSize));
        __ movptr(Address(rsp, st_off), rdi);
#else
#ifdef ASSERT
        // Overwrite the unused slot with known junk
        __ mov64(rax, CONST64(0xdeadffffdeadaaaa));
        __ movptr(Address(rsp, st_off), rax);
#endif /* ASSERT */
#endif // _LP64
        tag_stack(masm, sig_bt[i], next_off);
      }
    } else if (r_1->is_Register()) {
      Register r = r_1->as_Register();
      if (!r_2->is_valid()) {
        __ movl(Address(rsp, st_off), r);
        tag_stack(masm, sig_bt[i], st_off);
      } else {
        // long/double in gpr
        NOT_LP64(ShouldNotReachHere());
        // Two VMRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
        // T_DOUBLE and T_LONG use two slots in the interpreter
        if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
          // long/double in gpr
#ifdef ASSERT
          // Overwrite the unused slot with known junk
          LP64_ONLY(__ mov64(rax, CONST64(0xdeadffffdeadaaab)));
          __ movptr(Address(rsp, st_off), rax);
#endif /* ASSERT */
          __ movptr(Address(rsp, next_off), r);
          tag_stack(masm, sig_bt[i], next_off);
        } else {
          __ movptr(Address(rsp, st_off), r);
          tag_stack(masm, sig_bt[i], st_off);
        }
      }
    } else {
      assert(r_1->is_XMMRegister(), "");
      if (!r_2->is_valid()) {
        __ movflt(Address(rsp, st_off), r_1->as_XMMRegister());
        tag_stack(masm, sig_bt[i], st_off);
      } else {
        assert(sig_bt[i] == T_DOUBLE || sig_bt[i] == T_LONG, "wrong type");
        move_c2i_double(masm, r_1->as_XMMRegister(), st_off);
      }
    }
  }

  // Schedule the branch target address early.
  __ movptr(rcx, Address(rbx, in_bytes(methodOopDesc::interpreter_entry_offset())));
  // And repush original return address
  __ push(rax);
  __ jmp(rcx);
}


// For tagged stacks, double or long values aren't contiguous on the stack,
// so get them contiguous for the xmm load
static void move_i2c_double(MacroAssembler *masm, XMMRegister r, Register saved_sp, int ld_off) {
  int next_val_off = ld_off - Interpreter::stackElementSize();
  if (TaggedStackInterpreter) {
    // use tag slot temporarily for MSW
    __ movptr(rsi, Address(saved_sp, ld_off));
    __ movptr(Address(saved_sp, next_val_off+wordSize), rsi);
    __ movdbl(r, Address(saved_sp, next_val_off));
    // restore tag
    __ movptr(Address(saved_sp, next_val_off+wordSize), frame::TagValue);
  } else {
    __ movdbl(r, Address(saved_sp, next_val_off));
  }
}

static void gen_i2c_adapter(MacroAssembler *masm,
                            int total_args_passed,
                            int comp_args_on_stack,
                            const BasicType *sig_bt,
                            const VMRegPair *regs) {
  // we're being called from the interpreter but need to find the
  // compiled return entry point.  The return address on the stack
  // should point at it and we just need to pull the old value out.
  // load up the pointer to the compiled return entry point and
  // rewrite our return pc. The code is arranged like so:
  //
  // .word Interpreter::return_sentinel
  // .word address_of_compiled_return_point
  // return_entry_point: blah_blah_blah
  //
  // So we can find the appropriate return point by loading up the word
  // just prior to the current return address we have on the stack.
  //
  // We will only enter here from an interpreted frame and never from after
  // passing thru a c2i. Azul allowed this but we do not. If we lose the
  // race and use a c2i we will remain interpreted for the race loser(s).
  // This removes all sorts of headaches on the x86 side and also eliminates
  // the possibility of having c2i -> i2c -> c2i -> ... endless transitions.


  // Note: rsi contains the senderSP on entry. We must preserve it since
  // we may do an i2c -> c2i transition if we lose a race where compiled
  // code goes non-entrant while we get args ready.

  // Pick up the return address
  __ movptr(rax, Address(rsp, 0));

  // If UseSSE >= 2 then no cleanup is needed on the return to the
  // interpreter so skip fixing up the return entry point unless
  // VerifyFPU is enabled.
  if (UseSSE < 2 || VerifyFPU) {
    Label skip, chk_int;
    // If we were called from the call stub we need to do a little bit different
    // cleanup than if the interpreter returned to the call stub.

    ExternalAddress stub_return_address(StubRoutines::_call_stub_return_address);
    __ cmpptr(rax, stub_return_address.addr());
    __ jcc(Assembler::notEqual, chk_int);
    assert(StubRoutines::x86::get_call_stub_compiled_return() != NULL, "must be set");
    __ lea(rax, ExternalAddress(StubRoutines::x86::get_call_stub_compiled_return()));
    __ jmp(skip);

    // It must be the interpreter since we never get here via a c2i (unlike Azul)

    __ bind(chk_int);
#ifdef ASSERT
    {
      Label ok;
      __ cmpl(Address(rax, -2*wordSize), Interpreter::return_sentinel);
      __ jcc(Assembler::equal, ok);
      __ int3();
      __ bind(ok);
    }
#endif // ASSERT
    __ movptr(rax, Address(rax, -wordSize));
    __ bind(skip);
  }

  // rax now contains the compiled return entry point, which will do any
  // cleanup needed for the return from compiled to interpreted code.

  // Must preserve original SP for loading incoming arguments because
  // we need to align the outgoing SP for compiled code.
  __ movptr(rdi, rsp);

  // Cut-out for having no stack args.  Since up to 2 int/oop args are passed
  // in registers, we will occasionally have no stack args.
  int comp_words_on_stack = 0;
  if (comp_args_on_stack) {
    // Sig words on the stack are greater-than VMRegImpl::stack0.  Those in
    // registers are below.  By subtracting stack0, we either get a negative
    // number (all values in registers) or the maximum stack slot accessed.
    // int comp_args_on_stack = VMRegImpl::reg2stack(max_arg);
    // Convert 4-byte stack slots to words.
    comp_words_on_stack = round_to(comp_args_on_stack*4, wordSize)>>LogBytesPerWord;
    // Round up to minimum stack alignment, in wordSize
    comp_words_on_stack = round_to(comp_words_on_stack, 2);
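    // For example, 3 argument slots -> round_to(12, 4) >> 2 == 3 words,
    // which then rounds up to 4 words for alignment.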
    __ subptr(rsp, comp_words_on_stack * wordSize);
  }

  // Align the outgoing SP
  __ andptr(rsp, -(StackAlignmentInBytes));

  // push the return address on the stack (note that pushing, rather
  // than storing it, yields the correct frame alignment for the callee)
  __ push(rax);

  // Put saved SP in another register
  const Register saved_sp = rax;
  __ movptr(saved_sp, rdi);


  // Will jump to the compiled code just as if compiled code was doing it.
  // Pre-load the register-jump target early, to schedule it better.
  __ movptr(rdi, Address(rbx, in_bytes(methodOopDesc::from_compiled_offset())));

  // Now generate the shuffle code.  Pick up all register args and move the
  // rest through the floating point stack top.
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      // Longs and doubles are passed in native word order, but misaligned
      // in the 32-bit build.
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // Pick up 0, 1 or 2 words from SP+offset.

    assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
            "scrambled load targets?");
    // Load in argument order going down.
    int ld_off = (total_args_passed - i)*Interpreter::stackElementSize() + Interpreter::value_offset_in_bytes();
    // Point to interpreter value (vs. tag)
    int next_off = ld_off - Interpreter::stackElementSize();
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {
      // Convert stack slot to an SP offset (+ wordSize to account for return address )
      int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size + wordSize;

      // We can use rsi as a temp here because compiled code doesn't need rsi as an input,
      // and if we end up going through a c2i because of a miss, a reasonable value of rsi
      // will be generated.
      if (!r_2->is_valid()) {
        // __ fld_s(Address(saved_sp, ld_off));
        // __ fstp_s(Address(rsp, st_off));
        __ movl(rsi, Address(saved_sp, ld_off));
        __ movptr(Address(rsp, st_off), rsi);
      } else {
        // Interpreter local[n] == MSW, local[n+1] == LSW however locals
        // are accessed as negative so LSW is at LOW address

        // ld_off is MSW so get LSW
        // st_off is LSW (i.e. reg.first())
        // __ fld_d(Address(saved_sp, next_off));
        // __ fstp_d(Address(rsp, st_off));
        //
        // We are using two VMRegs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE.
        // The interpreter allocates two slots but only uses one for the T_LONG or T_DOUBLE case,
        // so we must adjust where to pick up the data to match the interpreter.
        //
        // Interpreter local[n] == MSW, local[n+1] == LSW however locals
        // are accessed as negative so LSW is at LOW address

        // ld_off is MSW so get LSW
        const int offset = (NOT_LP64(true ||) sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
                           next_off : ld_off;
        __ movptr(rsi, Address(saved_sp, offset));
        __ movptr(Address(rsp, st_off), rsi);
#ifndef _LP64
        __ movptr(rsi, Address(saved_sp, ld_off));
        __ movptr(Address(rsp, st_off + wordSize), rsi);
#endif // _LP64
      }
    } else if (r_1->is_Register()) {  // Register argument
      Register r = r_1->as_Register();
      assert(r != rax, "must be different");
      if (r_2->is_valid()) {
        //
        // We are using two VMRegs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE.
        // The interpreter allocates two slots but only uses one for the T_LONG or T_DOUBLE case,
        // so we must adjust where to pick up the data to match the interpreter.

        const int offset = (NOT_LP64(true ||) sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
                           next_off : ld_off;

        // this can be a misaligned move
        __ movptr(r, Address(saved_sp, offset));
#ifndef _LP64
        assert(r_2->as_Register() != rax, "need another temporary register");
        // Remember r_1 is low address (and LSB on x86)
        // So r_2 gets loaded from high address regardless of the platform
        __ movptr(r_2->as_Register(), Address(saved_sp, ld_off));
#endif // _LP64
      } else {
        __ movl(r, Address(saved_sp, ld_off));
      }
    } else {
      assert(r_1->is_XMMRegister(), "");
      if (!r_2->is_valid()) {
        __ movflt(r_1->as_XMMRegister(), Address(saved_sp, ld_off));
      } else {
        move_i2c_double(masm, r_1->as_XMMRegister(), saved_sp, ld_off);
      }
    }
  }

  // 6243940 We might end up in handle_wrong_method if
  // the callee is deoptimized as we race thru here. If that
  // happens we don't want to take a safepoint because the
  // caller frame will look interpreted and arguments are now
  // "compiled" so it is much better to make this transition
  // invisible to the stack walking code. Unfortunately if
  // we try and find the callee by normal means a safepoint
  // is possible. So we stash the desired callee in the thread
  // and the VM will find it there should this case occur.

  __ get_thread(rax);
  __ movptr(Address(rax, JavaThread::callee_target_offset()), rbx);

  // Move the methodOop to rax in case we end up in a c2i adapter.
  // The c2i adapters expect the methodOop in rax (c2) because c2's
  // resolve stubs return the result (the method) in rax.
  // I'd love to fix this.
  __ mov(rax, rbx);

  __ jmp(rdi);
}

// ---------------------------------------------------------------
AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
                                                            int total_args_passed,
                                                            int comp_args_on_stack,
                                                            const BasicType *sig_bt,
                                                            const VMRegPair *regs) {
  address i2c_entry = __ pc();

  gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);

  // -------------------------------------------------------------------------
  // Generate a C2I adapter.  On entry we know rbx holds the methodOop during calls
  // to the interpreter.  The args start out packed in the compiled layout.  They
  // need to be unpacked into the interpreter layout.  This will almost always
  // require some stack space.  We grow the current (compiled) stack, then repack
  // the args.  We finally end in a jump to the generic interpreter entry point.
  // On exit from the interpreter, the interpreter will restore our SP (lest the
  // compiled code, which relies solely on SP and not EBP, get sick).

  address c2i_unverified_entry = __ pc();
  Label skip_fixup;

  Register holder = rax;
  Register receiver = rcx;
  Register temp = rbx;

  {

    Label missed;

    __ verify_oop(holder);
    __ movptr(temp, Address(receiver, oopDesc::klass_offset_in_bytes()));
    __ verify_oop(temp);

    __ cmpptr(temp, Address(holder, compiledICHolderOopDesc::holder_klass_offset()));
    __ movptr(rbx, Address(holder, compiledICHolderOopDesc::holder_method_offset()));
    __ jcc(Assembler::notEqual, missed);
    // Method might have been compiled since the call site was patched to
    // interpreted; if that is the case, treat it as a miss so we can get
    // the call site corrected.
    __ cmpptr(Address(rbx, in_bytes(methodOopDesc::code_offset())), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, skip_fixup);

    __ bind(missed);
    __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  }

  address c2i_entry = __ pc();

  gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);

  __ flush();
  return new AdapterHandlerEntry(i2c_entry, c2i_entry, c2i_unverified_entry);
}

int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                         VMRegPair *regs,
                                         int total_args_passed) {
// We return the number of VMRegImpl stack slots we need to reserve for all
// the arguments NOT counting out_preserve_stack_slots.

  uint    stack = 0;        // All arguments on stack

  for( int i = 0; i < total_args_passed; i++) {
    // From the type and the argument number (count) compute the location
    switch( sig_bt[i] ) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_FLOAT:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS:
      regs[i].set1(VMRegImpl::stack2reg(stack++));
      break;
    case T_LONG:
    case T_DOUBLE: // The stack numbering is reversed from Java
      // Since C arguments do not get reversed, the ordering for
      // doubles on the stack must be opposite the Java convention
      assert(sig_bt[i+1] == T_VOID, "missing Half" );
      regs[i].set2(VMRegImpl::stack2reg(stack));
      stack += 2;
      break;
    case T_VOID: regs[i].set_bad(); break;
    default:
      ShouldNotReachHere();
      break;
    }
  }
  return stack;
}
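
// For example, sig_bt = { T_INT, T_LONG, T_VOID, T_OBJECT } yields the int
// in slot 0, the long in slots 1-2, and the object in slot 3, returning 4:
// in this convention everything goes on the stack.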

// A simple move of an integer-like type
static void simple_move32(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      // __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
      // __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
      __ movl2ptr(rax, Address(rbp, reg2offset_in(src.first())));
      __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
    } else {
      // stack to reg
      __ movl2ptr(dst.first()->as_Register(),  Address(rbp, reg2offset_in(src.first())));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    // no need to sign extend on 64bit
    __ movptr(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
  } else {
    if (dst.first() != src.first()) {
      __ mov(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}

// An oop arg. Must pass a handle not the oop itself
static void object_move(MacroAssembler* masm,
                        OopMap* map,
                        int oop_handle_offset,
                        int framesize_in_slots,
                        VMRegPair src,
                        VMRegPair dst,
                        bool is_receiver,
                        int* receiver_offset) {

  // Because of the calling conventions we know that src can be a
  // register or a stack location. dst can only be a stack location.

  assert(dst.first()->is_stack(), "must be stack");
  // must pass a handle. First figure out the location we use as a handle

  if (src.first()->is_stack()) {
    // Oop is already on the stack as an argument
    Register rHandle = rax;
    Label nil;
    __ xorptr(rHandle, rHandle);
    __ cmpptr(Address(rbp, reg2offset_in(src.first())), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, nil);
    __ lea(rHandle, Address(rbp, reg2offset_in(src.first())));
    __ bind(nil);
    __ movptr(Address(rsp, reg2offset_out(dst.first())), rHandle);

    int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
    map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
    if (is_receiver) {
      *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
    }
  } else {
    // Oop is in a register; we must store it to the space we reserve
    // on the stack for oop_handles
    const Register rOop = src.first()->as_Register();
    const Register rHandle = rax;
    int oop_slot = (rOop == rcx ? 0 : 1) * VMRegImpl::slots_per_word + oop_handle_offset;
    int offset = oop_slot*VMRegImpl::stack_slot_size;
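    // For example, an oop arriving in rcx (the first int/oop arg register)
    // lands in the first handle slot; one arriving in rdx lands in the second.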
    Label skip;
    __ movptr(Address(rsp, offset), rOop);
    map->set_oop(VMRegImpl::stack2reg(oop_slot));
    __ xorptr(rHandle, rHandle);
    __ cmpptr(rOop, (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, skip);
    __ lea(rHandle, Address(rsp, offset));
    __ bind(skip);
    // Store the handle parameter
    __ movptr(Address(rsp, reg2offset_out(dst.first())), rHandle);
    if (is_receiver) {
      *receiver_offset = offset;
    }
  }
}

// A float arg may have to do a float-reg to int-reg conversion
static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");

  // Because of the calling convention we know that src is either a stack location
  // or an xmm register. dst can only be a stack location.

  assert(dst.first()->is_stack() && ( src.first()->is_stack() || src.first()->is_XMMRegister()), "bad parameters");

  if (src.first()->is_stack()) {
    __ movl(rax, Address(rbp, reg2offset_in(src.first())));
    __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
  } else {
    // reg to stack
    __ movflt(Address(rsp, reg2offset_out(dst.first())), src.first()->as_XMMRegister());
  }
}

// A long move
static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {

  // The only legal possibility for a long_move VMRegPair is
  // two stack slots (possibly unaligned), as neither the Java
  // nor the C calling convention will use registers for longs.

  if (src.first()->is_stack() && dst.first()->is_stack()) {
    assert(src.second()->is_stack() && dst.second()->is_stack(), "must be all stack");
    __ movptr(rax, Address(rbp, reg2offset_in(src.first())));
    NOT_LP64(__ movptr(rbx, Address(rbp, reg2offset_in(src.second()))));
    __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
    NOT_LP64(__ movptr(Address(rsp, reg2offset_out(dst.second())), rbx));
  } else {
    ShouldNotReachHere();
  }
}

// A double move
static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {

  // The only legal possibilities for a double_move VMRegPair are listed
  // below; the painful thing is that, as with long_move, the stack slots
  // may be unaligned.
  //
  // Because of the calling convention we know that src is either
  //   1: a single physical register (xmm registers only)
  //   2: two stack slots (possibly unaligned)
  // dst can only be a pair of stack slots.

  assert(dst.first()->is_stack() && (src.first()->is_XMMRegister() || src.first()->is_stack()), "bad args");

  if (src.first()->is_stack()) {
    // source is all stack
    __ movptr(rax, Address(rbp, reg2offset_in(src.first())));
    NOT_LP64(__ movptr(rbx, Address(rbp, reg2offset_in(src.second()))));
    __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
    NOT_LP64(__ movptr(Address(rsp, reg2offset_out(dst.second())), rbx));
  } else {
    // reg to stack
    // No worries about stack alignment
    __ movdbl(Address(rsp, reg2offset_out(dst.first())), src.first()->as_XMMRegister());
  }
}


void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  // We always ignore the frame_slots arg and just use the space just below frame pointer
  // which by this time is free to use
  switch (ret_type) {
  case T_FLOAT:
    __ fstp_s(Address(rbp, -wordSize));
    break;
  case T_DOUBLE:
    __ fstp_d(Address(rbp, -2*wordSize));
    break;
  case T_VOID:  break;
  case T_LONG:
    __ movptr(Address(rbp, -wordSize), rax);
    NOT_LP64(__ movptr(Address(rbp, -2*wordSize), rdx));
    break;
  default: {
    __ movptr(Address(rbp, -wordSize), rax);
    }
  }
}

void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  // We always ignore the frame_slots arg and just use the space just below frame pointer
  // which by this time is free to use
  switch (ret_type) {
  case T_FLOAT:
    __ fld_s(Address(rbp, -wordSize));
    break;
  case T_DOUBLE:
    __ fld_d(Address(rbp, -2*wordSize));
    break;
  case T_LONG:
    __ movptr(rax, Address(rbp, -wordSize));
    NOT_LP64(__ movptr(rdx, Address(rbp, -2*wordSize)));
    break;
  case T_VOID:  break;
  default: {
    __ movptr(rax, Address(rbp, -wordSize));
    }
  }
}

// ---------------------------------------------------------------------------
// Generate a native wrapper for a given method.  The method takes arguments
// in the Java compiled code convention, marshals them to the native
// convention (handlizes oops, etc), transitions to native, makes the call,
// returns to java state (possibly blocking), unhandlizes any result and
// returns.
nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
                                                methodHandle method,
                                                int total_in_args,
                                                int comp_args_on_stack,
                                                BasicType *in_sig_bt,
                                                VMRegPair *in_regs,
                                                BasicType ret_type) {

  // An OopMap for lock (and class if static)
  OopMapSet *oop_maps = new OopMapSet();

  // We have received a description of where all the java args are located
  // on entry to the wrapper. We need to convert these args to where
  // the jni function will expect them. To figure out where they go
  // we convert the java signature to a C signature by inserting
  // the hidden arguments as arg[0] and possibly arg[1] (static method)

  int total_c_args = total_in_args + 1;
  if (method->is_static()) {
    total_c_args++;
  }

  BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
  VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair,   total_c_args);

  int argc = 0;
  out_sig_bt[argc++] = T_ADDRESS;
  if (method->is_static()) {
    out_sig_bt[argc++] = T_OBJECT;
  }

  int i;
  for (i = 0; i < total_in_args ; i++ ) {
    out_sig_bt[argc++] = in_sig_bt[i];
  }


  // Now figure out where the args must be stored and how much stack space
  // they require (neglecting out_preserve_stack_slots but including space
  // for storing the 1st six register arguments). It's weird; see int_stk_helper.
  //
  int out_arg_slots;
  out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);

  // Compute framesize for the wrapper.  We need to handlize all oops in
  // registers, a max of 2 on x86.

  // Calculate the total number of stack slots we will need.

  // First count the abi requirement plus all of the outgoing args
  int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;

  // Now the space for the inbound oop handle area

  int oop_handle_offset = stack_slots;
  stack_slots += 2*VMRegImpl::slots_per_word;

  // Now any space we need for handlizing a klass if static method

  int klass_slot_offset = 0;
  int klass_offset = -1;
  int lock_slot_offset = 0;
  bool is_static = false;
  int oop_temp_slot_offset = 0;

  if (method->is_static()) {
    klass_slot_offset = stack_slots;
    stack_slots += VMRegImpl::slots_per_word;
    klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
    is_static = true;
  }

  // Plus a lock if needed

  if (method->is_synchronized()) {
    lock_slot_offset = stack_slots;
    stack_slots += VMRegImpl::slots_per_word;
  }

  // Now a place (+2) to save return values or temp during shuffling
  // + 2 for return address (which we own) and saved rbp
  stack_slots += 4;

  // Ok The space we have allocated will look like:
  //
  //
  // FP-> |                     |
  //      |---------------------|
  //      | 2 slots for moves   |
  //      |---------------------|
  //      | lock box (if sync)  |
  //      |---------------------| <- lock_slot_offset  (-lock_slot_rbp_offset)
  //      | klass (if static)   |
  //      |---------------------| <- klass_slot_offset
  //      | oopHandle area      |
  //      |---------------------| <- oop_handle_offset (a max of 2 registers)
  //      | outbound memory     |
  //      | based arguments     |
  //      |                     |
  //      |---------------------|
  //      |                     |
  // SP-> | out_preserved_slots |
  //
  //
  // ****************************************************************************
  // WARNING - on Windows Java Natives use pascal calling convention and pop the
  // arguments off of the stack after the jni call. Before the call we can use
  // instructions that are SP relative. After the jni call we switch to FP
  // relative instructions instead of re-adjusting the stack on windows.
  // ****************************************************************************


  // Now compute the actual number of stack words we need, rounding to keep
  // the stack properly aligned.
  stack_slots = round_to(stack_slots, 2 * VMRegImpl::slots_per_word);

  int stack_size = stack_slots * VMRegImpl::stack_slot_size;
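
  // Illustrative arithmetic (assuming 32-bit: VMRegImpl::slots_per_word == 1
  // and stack_slot_size == 4): 13 raw slots round up to 14, i.e. a 56 byte
  // frame, keeping rsp aligned to an 8 byte boundary.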

  intptr_t start = (intptr_t)__ pc();

  // First thing make an ic check to see if we should even be here

  // We are free to use all registers as temps without saving them and
  // restoring them except rbp. rbp is the only callee save register
  // as far as the interpreter and the compiler(s) are concerned.


  const Register ic_reg = rax;
  const Register receiver = rcx;
  Label hit;
  Label exception_pending;


  __ verify_oop(receiver);
  __ cmpptr(ic_reg, Address(receiver, oopDesc::klass_offset_in_bytes()));
  __ jcc(Assembler::equal, hit);

  __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));

  // The verified entry must be aligned for code patching,
  // and the first 5 bytes must be in the same cache line.
  // If we align at 8 then we are sure the 5 bytes are in the same line.
  __ align(8);

  __ bind(hit);

  int vep_offset = ((intptr_t)__ pc()) - start;

#ifdef COMPILER1
  if (InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hashCode) {
    // Object.hashCode can pull the hashCode from the header word
    // instead of doing a full VM transition once it's been computed.
    // Since hashCode is usually polymorphic at call sites we can't do
    // this optimization at the call site without a lot of work.
    Label slowCase;
    Register receiver = rcx;
    Register result = rax;
    __ movptr(result, Address(receiver, oopDesc::mark_offset_in_bytes()));

    // check if locked
    __ testptr(result, markOopDesc::unlocked_value);
    __ jcc (Assembler::zero, slowCase);

    if (UseBiasedLocking) {
      // Check if biased and fall through to runtime if so
      __ testptr(result, markOopDesc::biased_lock_bit_in_place);
      __ jcc (Assembler::notZero, slowCase);
    }

    // get hash
    __ andptr(result, markOopDesc::hash_mask_in_place);
    // test if hashCode exists
    __ jcc  (Assembler::zero, slowCase);
    __ shrptr(result, markOopDesc::hash_shift);
    __ ret(0);
    __ bind (slowCase);
  }
#endif // COMPILER1

  // The instruction at the verified entry point must be 5 bytes or longer
  // because it can be patched on the fly by make_non_entrant. The stack bang
  // instruction fits that requirement.

  // Generate stack overflow check

  if (UseStackBanging) {
    __ bang_stack_with_offset(StackShadowPages*os::vm_page_size());
  } else {
    // need a 5 byte instruction to allow MT safe patching to non-entrant
    __ fat_nop();
  }

  // Generate a new frame for the wrapper.
  __ enter();
  // -2 because return address is already present and so is saved rbp
  __ subptr(rsp, stack_size - 2*wordSize);

  // Frame is now complete as far as size and linkage go.

  int frame_complete = ((intptr_t)__ pc()) - start;

  // Calculate the difference between rsp and rbp. We need to know it
  // after the native call because on windows Java Natives will pop
  // the arguments and it is painful to do rsp relative addressing
  // in a platform independent way. So after the call we switch to
  // rbp relative addressing.

  int fp_adjustment = stack_size - 2*wordSize;

#ifdef COMPILER2
  // C2 may leave the stack dirty if not in SSE2+ mode
  if (UseSSE >= 2) {
    __ verify_FPU(0, "c2i transition should have clean FPU stack");
  } else {
    __ empty_FPU_stack();
  }
#endif /* COMPILER2 */

  // Compute the rbp offset for any slots used after the jni call

  int lock_slot_rbp_offset = (lock_slot_offset*VMRegImpl::stack_slot_size) - fp_adjustment;
  int oop_temp_slot_rbp_offset = (oop_temp_slot_offset*VMRegImpl::stack_slot_size) - fp_adjustment;
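
  // Illustrative only (assuming wordSize == 4): with a 56 byte frame,
  // fp_adjustment == 48, so a lock slot at stack slot 10 ends up at
  // rbp offset 10*4 - 48 == -8.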

  // We use rdi as a thread pointer because it is callee save and
  // if we load it once it is usable through the entire wrapper
  const Register thread = rdi;

  // We use rsi as the oop handle for the receiver/klass
  // It is callee save so it survives the call to native

  const Register oop_handle_reg = rsi;

  __ get_thread(thread);


  //
  // We immediately shuffle the arguments so that for any vm call we have
  // to make from here on out (sync slow path, jvmti, etc.) we will have
  // captured the oops from our caller and have a valid oopMap for them.

  // -----------------
  // The Grand Shuffle
  //
  // Natives require 1 or 2 extra arguments over the normal ones: the JNIEnv*
  // and, if static, the class mirror instead of a receiver.  This pretty much
  // guarantees that the register layout will not match (and x86 doesn't use
  // reg parms though amd64 does).  Since the native abi doesn't use register
  // args and the java convention does, we don't have to worry about
  // collisions.  All of our moves are reg->stack or stack->stack.
  // We ignore the extra arguments during the shuffle and handle them at the
  // last moment. The shuffle is described by the two calling convention
  // vectors we have in our possession. We simply walk the java vector to
  // get the source locations and the c vector to get the destinations.
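  //
  // Sketch of the walk (illustrative only): in_regs[i] is the java (source)
  // location of arg i and out_regs[c_arg] is its C (destination) slot;
  // c_arg starts past the hidden JNIEnv* (and mirror, if static) so the
  // two cursors stay in step.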

  int c_arg = method->is_static() ? 2 : 1 ;

  // Record rsp-based slot for receiver on stack for non-static methods
  int receiver_offset = -1;

  // This is a trick. We double the stack slots so we can claim
  // the oops in the caller's frame. Since we are sure to have
  // more args than the caller, doubling is enough to make
  // sure we can capture all the incoming oop args from the
  // caller.
  //
  OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);

  // Mark location of rbp,
  // map->set_callee_saved(VMRegImpl::stack2reg( stack_slots - 2), stack_slots * 2, 0, rbp->as_VMReg());

  // We know that we only have args in at most two integer registers (rcx, rdx).
  // So rax and rbx are free as temporaries if we have to do stack to stack moves.
  // All inbound args are referenced based on rbp and all outbound args via rsp.

  for (i = 0; i < total_in_args ; i++, c_arg++ ) {
    switch (in_sig_bt[i]) {
      case T_ARRAY:
      case T_OBJECT:
        object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
                    ((i == 0) && (!is_static)),
                    &receiver_offset);
        break;
      case T_VOID:
        break;

      case T_FLOAT:
        float_move(masm, in_regs[i], out_regs[c_arg]);
          break;

      case T_DOUBLE:
        assert( i + 1 < total_in_args &&
                in_sig_bt[i + 1] == T_VOID &&
                out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
        double_move(masm, in_regs[i], out_regs[c_arg]);
        break;

      case T_LONG :
        long_move(masm, in_regs[i], out_regs[c_arg]);
        break;

      case T_ADDRESS: assert(false, "found T_ADDRESS in java args");

      default:
        simple_move32(masm, in_regs[i], out_regs[c_arg]);
    }
  }

  // Pre-load a static method's oop into rsi.  Used both by locking code and
  // the normal JNI call code.
  if (method->is_static()) {

    //  load oop into a register
    __ movoop(oop_handle_reg, JNIHandles::make_local(Klass::cast(method->method_holder())->java_mirror()));

    // Now handlize the static class mirror; it's known not-null.
    __ movptr(Address(rsp, klass_offset), oop_handle_reg);
    map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));

    // Now get the handle
    __ lea(oop_handle_reg, Address(rsp, klass_offset));
    // store the klass handle as second argument
    __ movptr(Address(rsp, wordSize), oop_handle_reg);
  }

  // Change state to native (we save the return address in the thread, since it might not
  // be pushed on the stack when we do a stack traversal). It is enough that the pc()
  // points into the right code segment. It does not have to be the correct return pc.
  // We use the same pc/oopMap repeatedly when we call out.

  intptr_t the_pc = (intptr_t) __ pc();
  oop_maps->add_gc_map(the_pc - start, map);

  __ set_last_Java_frame(thread, rsp, noreg, (address)the_pc);


  // We have all of the arguments set up at this point. We must not touch any register
  // argument registers from here on (even if we save/restore them there is no oopMap for them).

  {
    SkipIfEqual skip_if(masm, &DTraceMethodProbes, 0);
    __ movoop(rax, JNIHandles::make_local(method()));
    __ call_VM_leaf(
         CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
         thread, rax);
  }


  // These are register definitions we need for locking/unlocking
  const Register swap_reg = rax;  // Must use rax for cmpxchg instruction
  const Register obj_reg  = rcx;  // Will contain the oop
  const Register lock_reg = rdx;  // Address of compiler lock object (BasicLock)

  Label slow_path_lock;
  Label lock_done;

  // Lock a synchronized method
  if (method->is_synchronized()) {


    const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();

    // Get the handle (the 2nd argument)
    __ movptr(oop_handle_reg, Address(rsp, wordSize));

    // Get address of the box

    __ lea(lock_reg, Address(rbp, lock_slot_rbp_offset));

    // Load the oop from the handle
    __ movptr(obj_reg, Address(oop_handle_reg, 0));

    if (UseBiasedLocking) {
      // Note that oop_handle_reg is trashed during this call
      __ biased_locking_enter(lock_reg, obj_reg, swap_reg, oop_handle_reg, false, lock_done, &slow_path_lock);
    }

    // Load immediate 1 into swap_reg %rax
    __ movptr(swap_reg, 1);

    // Load (object->mark() | 1) into swap_reg %rax
    __ orptr(swap_reg, Address(obj_reg, 0));

    // Save (object->mark() | 1) into BasicLock's displaced header
    __ movptr(Address(lock_reg, mark_word_offset), swap_reg);

    if (os::is_MP()) {
      __ lock();
    }

    // src -> dest iff dest == rax; else rax <- dest
    // *obj_reg = lock_reg iff *obj_reg == rax; else rax = *(obj_reg)
    __ cmpxchgptr(lock_reg, Address(obj_reg, 0));
    __ jcc(Assembler::equal, lock_done);

    // Test if the oopMark is an obvious stack pointer, i.e.,
    //  1) (mark & 3) == 0, and
    //  2) rsp <= mark < rsp + os::vm_page_size()
    // These 3 tests can be done by evaluating the following
    // expression: ((mark - rsp) & (3 - os::vm_page_size())),
    // assuming both the stack pointer and the page size have their
    // least significant 2 bits clear.
    // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg
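    //
    // Worked example (assuming a 4096 byte page): 3 - 4096 == -4093 ==
    // 0xfffff003, so ((mark - rsp) & 0xfffff003) == 0 exactly when
    // mark - rsp is in [0, 4096) with its low two bits clear.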

    __ subptr(swap_reg, rsp);
    __ andptr(swap_reg, 3 - os::vm_page_size());

    // Save the test result; for the recursive case, the result is zero
    __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
    __ jcc(Assembler::notEqual, slow_path_lock);
    // Slow path will re-enter here
    __ bind(lock_done);

    if (UseBiasedLocking) {
      // Re-fetch oop_handle_reg as we trashed it above
      __ movptr(oop_handle_reg, Address(rsp, wordSize));
    }
  }


  // Finally just about ready to make the JNI call


  // get JNIEnv* which is first argument to native

  __ lea(rdx, Address(thread, in_bytes(JavaThread::jni_environment_offset())));
  __ movptr(Address(rsp, 0), rdx);

  // Now set thread in native
  __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native);

  __ call(RuntimeAddress(method->native_function()));

  // WARNING - on Windows Java Natives use pascal calling convention and pop the
  // arguments off of the stack. We could just re-adjust the stack pointer here
  // and continue to do SP relative addressing but we instead switch to FP
  // relative addressing.

  // Unpack native results.
  switch (ret_type) {
  case T_BOOLEAN: __ c2bool(rax);            break;
  case T_CHAR   : __ andptr(rax, 0xFFFF);    break;
  case T_BYTE   : __ sign_extend_byte (rax); break;
  case T_SHORT  : __ sign_extend_short(rax); break;
  case T_INT    : /* nothing to do */        break;
  case T_DOUBLE :
  case T_FLOAT  :
    // Result is in st0; we'll save as needed
    break;
  case T_ARRAY:                 // Really a handle
  case T_OBJECT:                // Really a handle
      break; // can't de-handlize until after safepoint check
  case T_VOID: break;
  case T_LONG: break;
  default       : ShouldNotReachHere();
  }

  // Switch thread to "native transition" state before reading the synchronization state.
  // This additional state is necessary because reading and testing the synchronization
  // state is not atomic w.r.t. GC, as this scenario demonstrates:
  //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
  //     VM thread changes sync state to synchronizing and suspends threads for GC.
  //     Thread A is resumed to finish this native method, but doesn't block here since it
  //     didn't see any synchronization in progress, and escapes.
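  //
  // The resulting transition, sketched:
  //   _thread_in_native -> _thread_in_native_trans -> (safepoint / suspend
  //   check below) -> _thread_in_Java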
  __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native_trans);

  if(os::is_MP()) {
    if (UseMembar) {
      // Force this write out before the read below
      __ membar(Assembler::Membar_mask_bits(
           Assembler::LoadLoad | Assembler::LoadStore |
           Assembler::StoreLoad | Assembler::StoreStore));
    } else {
      // Write serialization page so VM thread can do a pseudo remote membar.
      // We use the current thread pointer to calculate a thread specific
      // offset to write to within the page. This minimizes bus traffic
      // due to cache line collision.
      __ serialize_memory(thread, rcx);
    }
  }

  if (AlwaysRestoreFPU) {
    // Make sure the control word is correct.
    __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
  }

  // check for safepoint operation in progress and/or pending suspend requests
  { Label Continue;

    __ cmp32(ExternalAddress((address)SafepointSynchronize::address_of_state()),
             SafepointSynchronize::_not_synchronized);

    Label L;
    __ jcc(Assembler::notEqual, L);
    __ cmpl(Address(thread, JavaThread::suspend_flags_offset()), 0);
    __ jcc(Assembler::equal, Continue);
    __ bind(L);

    // Don't use call_VM as it will see a possible pending exception and forward it
    // and never return here, preventing us from clearing _last_native_pc down below.
    // Nor can we use call_VM_leaf, as it will check to see if rsi & rdi are
    // preserved and correspond to the bcp/locals pointers. So we do a runtime call
    // by hand.
    //
    save_native_result(masm, ret_type, stack_slots);
    __ push(thread);
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
                                            JavaThread::check_special_condition_for_native_trans)));
    __ increment(rsp, wordSize);
    // Restore any method result value
    restore_native_result(masm, ret_type, stack_slots);

    __ bind(Continue);
  }

  // change thread state
  __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_Java);

  Label reguard;
  Label reguard_done;
  __ cmpl(Address(thread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_yellow_disabled);
  __ jcc(Assembler::equal, reguard);

  // slow path reguard  re-enters here
  __ bind(reguard_done);

  // Handle possible exception (will unlock if necessary)

  // native result if any is live

  // Unlock
  Label slow_path_unlock;
  Label unlock_done;
  if (method->is_synchronized()) {

    Label done;

    // Get locked oop from the handle we passed to jni
    __ movptr(obj_reg, Address(oop_handle_reg, 0));

    if (UseBiasedLocking) {
      __ biased_locking_exit(obj_reg, rbx, done);
    }

    // Simple recursive lock?

    __ cmpptr(Address(rbp, lock_slot_rbp_offset), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, done);

    // Must save rax if it is live now, because cmpxchg must use it
    if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
      save_native_result(masm, ret_type, stack_slots);
    }

    //  get old displaced header
    __ movptr(rbx, Address(rbp, lock_slot_rbp_offset));

    // get address of the stack lock
    __ lea(rax, Address(rbp, lock_slot_rbp_offset));

    // Atomic swap old header if oop still contains the stack lock
    if (os::is_MP()) {
    __ lock();
    }

    // src -> dest iff dest == rax; else rax <- dest
    // *obj_reg = rbx iff *obj_reg == rax; else rax = *(obj_reg)
    __ cmpxchgptr(rbx, Address(obj_reg, 0));
    __ jcc(Assembler::notEqual, slow_path_unlock);

    // slow path re-enters here
    __ bind(unlock_done);
    if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
      restore_native_result(masm, ret_type, stack_slots);
    }

    __ bind(done);

  }

  {
    SkipIfEqual skip_if(masm, &DTraceMethodProbes, 0);
    // Tell dtrace about this method exit
    save_native_result(masm, ret_type, stack_slots);
    __ movoop(rax, JNIHandles::make_local(method()));
    __ call_VM_leaf(
         CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
         thread, rax);
    restore_native_result(masm, ret_type, stack_slots);
  }

  // We can finally stop using that last_Java_frame we set up ages ago

  __ reset_last_Java_frame(thread, false, true);

  // Unpack oop result
  if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
      Label L;
      __ cmpptr(rax, (int32_t)NULL_WORD);
      __ jcc(Assembler::equal, L);
      __ movptr(rax, Address(rax, 0));
      __ bind(L);
      __ verify_oop(rax);
  }

  // reset handle block
  __ movptr(rcx, Address(thread, JavaThread::active_handles_offset()));

  __ movptr(Address(rcx, JNIHandleBlock::top_offset_in_bytes()), NULL_WORD);

  // Any exception pending?
  __ cmpptr(Address(thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
  __ jcc(Assembler::notEqual, exception_pending);


  // no exception, we're almost done

  // check that only result value is on FPU stack
  __ verify_FPU(ret_type == T_FLOAT || ret_type == T_DOUBLE ? 1 : 0, "native_wrapper normal exit");

  // Fixup floating point results so that the result looks like a return from a compiled method
  if (ret_type == T_FLOAT) {
    if (UseSSE >= 1) {
      // Pop st0 and store as float and reload into xmm register
      __ fstp_s(Address(rbp, -4));
      __ movflt(xmm0, Address(rbp, -4));
    }
  } else if (ret_type == T_DOUBLE) {
    if (UseSSE >= 2) {
      // Pop st0 and store as double and reload into xmm register
      __ fstp_d(Address(rbp, -8));
      __ movdbl(xmm0, Address(rbp, -8));
    }
  }

  // Return

  __ leave();
  __ ret(0);

  // Unexpected paths are out of line and go here

  // Slow path locking & unlocking
  if (method->is_synchronized()) {

    // BEGIN Slow path lock

    __ bind(slow_path_lock);

    // has last_Java_frame setup. No exceptions, so do a vanilla call, not call_VM
    // args are (oop obj, BasicLock* lock, JavaThread* thread)
    __ push(thread);
    __ push(lock_reg);
    __ push(obj_reg);
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C)));
    __ addptr(rsp, 3*wordSize);

#ifdef ASSERT
    { Label L;
    __ cmpptr(Address(thread, in_bytes(Thread::pending_exception_offset())), (int)NULL_WORD);
    __ jcc(Assembler::equal, L);
    __ stop("no pending exception allowed on exit from monitorenter");
    __ bind(L);
    }
#endif
    __ jmp(lock_done);

    // END Slow path lock

    // BEGIN Slow path unlock
    __ bind(slow_path_unlock);

    // Slow path unlock

    if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
      save_native_result(masm, ret_type, stack_slots);
    }
    // Save pending exception around call to VM (which contains an EXCEPTION_MARK)

    __ pushptr(Address(thread, in_bytes(Thread::pending_exception_offset())));
    __ movptr(Address(thread, in_bytes(Thread::pending_exception_offset())), NULL_WORD);


    // push the address of the box (BasicLock)
    // +wordSize because of the push above
    __ lea(rax, Address(rbp, lock_slot_rbp_offset));
    __ push(rax);

    __ push(obj_reg);
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C)));
    __ addptr(rsp, 2*wordSize);
#ifdef ASSERT
    {
      Label L;
      __ cmpptr(Address(thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
      __ jcc(Assembler::equal, L);
      __ stop("no pending exception allowed on exit complete_monitor_unlocking_C");
      __ bind(L);
    }
#endif /* ASSERT */

    __ popptr(Address(thread, in_bytes(Thread::pending_exception_offset())));

    if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
      restore_native_result(masm, ret_type, stack_slots);
    }
    __ jmp(unlock_done);
    // END Slow path unlock

  }

  // SLOW PATH Reguard the stack if needed

  __ bind(reguard);
  save_native_result(masm, ret_type, stack_slots);
  {
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
  }
  restore_native_result(masm, ret_type, stack_slots);
  __ jmp(reguard_done);


  // BEGIN EXCEPTION PROCESSING

  // Forward  the exception
  __ bind(exception_pending);

  // remove possible return value from FPU register stack
  __ empty_FPU_stack();

  // pop our frame
  __ leave();
  // and forward the exception
  __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));

  __ flush();

  nmethod *nm = nmethod::new_native_nmethod(method,
                                            masm->code(),
                                            vep_offset,
                                            frame_complete,
                                            stack_slots / VMRegImpl::slots_per_word,
                                            (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
                                            in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
                                            oop_maps);
  return nm;

}

#ifdef HAVE_DTRACE_H
// ---------------------------------------------------------------------------
// Generate a dtrace nmethod for a given signature.  The method takes arguments
// in the Java compiled code convention, marshals them to the native
// abi and then leaves nops at the position you would expect to call a native
// function. When the probe is enabled the nops are replaced with a trap
// instruction that dtrace inserts and the trace will cause a notification
// to dtrace.
//
// The probes are only able to take primitive types and java/lang/String as
// arguments.  No other java types are allowed. Strings are converted to utf8
// strings so that from dtrace's point of view java strings are converted to C
// strings. There is an arbitrary fixed limit on the total space that a method
// can use for converting the strings (256 chars per string in the signature),
// so any java string larger than this is truncated.

nmethod *SharedRuntime::generate_dtrace_nmethod(
    MacroAssembler *masm, methodHandle method) {

  // generate_dtrace_nmethod is guarded by a mutex so we are sure to
  // be single threaded in this method.
  assert(AdapterHandlerLibrary_lock->owned_by_self(), "must be");

  // Fill in the signature array, for the calling-convention call.
  int total_args_passed = method->size_of_parameters();

  BasicType* in_sig_bt  = NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
  VMRegPair  *in_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);

  // The signature we are going to use for the trap that dtrace will see:
  // java/lang/String is converted, "this" is dropped, and any other object
  // is converted to NULL.  (A one-slot java/lang/Long object reference
  // is converted to a two-slot long, which is why we double the allocation).
  BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed * 2);
  VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed * 2);

  int i=0;
  int total_strings = 0;
  int first_arg_to_pass = 0;
  int total_c_args = 0;

  if( !method->is_static() ) {  // Pass in receiver first
    in_sig_bt[i++] = T_OBJECT;
    first_arg_to_pass = 1;
  }

  // We need to convert the java args to where a native (non-jni) function
  // would expect them. To figure out where they go we convert the java
  // signature to a C signature.

  SignatureStream ss(method->signature());
  for ( ; !ss.at_return_type(); ss.next()) {
    BasicType bt = ss.type();
    in_sig_bt[i++] = bt;  // Collect remaining bits of signature
    out_sig_bt[total_c_args++] = bt;
    if( bt == T_OBJECT) {
      symbolOop s = ss.as_symbol_or_null();
      if (s == vmSymbols::java_lang_String()) {
        total_strings++;
        out_sig_bt[total_c_args-1] = T_ADDRESS;
      } else if (s == vmSymbols::java_lang_Boolean() ||
                 s == vmSymbols::java_lang_Character() ||
                 s == vmSymbols::java_lang_Byte() ||
                 s == vmSymbols::java_lang_Short() ||
                 s == vmSymbols::java_lang_Integer() ||
                 s == vmSymbols::java_lang_Float()) {
        out_sig_bt[total_c_args-1] = T_INT;
      } else if (s == vmSymbols::java_lang_Long() ||
                 s == vmSymbols::java_lang_Double()) {
        out_sig_bt[total_c_args-1] = T_LONG;
        out_sig_bt[total_c_args++] = T_VOID;
      }
    } else if ( bt == T_LONG || bt == T_DOUBLE ) {
      in_sig_bt[i++] = T_VOID;   // Longs & doubles take 2 Java slots
      out_sig_bt[total_c_args++] = T_VOID;
    }
  }
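
  // For example (illustrative only): a non-static "void m(String s, Long l)"
  // yields in_sig_bt  = { T_OBJECT /* this */, T_OBJECT, T_OBJECT } and
  //        out_sig_bt = { T_ADDRESS /* utf8 of s */, T_LONG, T_VOID }.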

  assert(i==total_args_passed, "validly parsed signature");

  // Now get the compiled-Java layout as input arguments
  int comp_args_on_stack;
  comp_args_on_stack = SharedRuntime::java_calling_convention(
      in_sig_bt, in_regs, total_args_passed, false);

  // Now figure out where the args must be stored and how much stack space
  // they require (neglecting out_preserve_stack_slots).

  int out_arg_slots;
  out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);

  // Calculate the total number of stack slots we will need.

  // First count the abi requirement plus all of the outgoing args
  int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;

  // Now space for the string(s) we must convert

  int* string_locs   = NEW_RESOURCE_ARRAY(int, total_strings + 1);
  for (i = 0; i < total_strings ; i++) {
    string_locs[i] = stack_slots;
    stack_slots += max_dtrace_string_size / VMRegImpl::stack_slot_size;
  }
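
  // Illustrative only: assuming max_dtrace_string_size == 256 and a 4 byte
  // stack slot, each string reserves 64 slots of conversion space here.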

  // + 2 for return address (which we own) and saved rbp,

  stack_slots += 2;

  // Ok The space we have allocated will look like:
  //
  //
  // FP-> |                     |
  //      |---------------------|
  //      | string[n]           |
  //      |---------------------| <- string_locs[n]
  //      | string[n-1]         |
  //      |---------------------| <- string_locs[n-1]
  //      | ...                 |
  //      | ...                 |
  //      |---------------------| <- string_locs[1]
  //      | string[0]           |
  //      |---------------------| <- string_locs[0]
  //      | outbound memory     |
  //      | based arguments     |
  //      |                     |
  //      |---------------------|
  //      |                     |
  // SP-> | out_preserved_slots |
  //
  //

  // Now compute the actual number of stack words we need, rounding to keep
  // the stack properly aligned.
  stack_slots = round_to(stack_slots, 2 * VMRegImpl::slots_per_word);

  int stack_size = stack_slots * VMRegImpl::stack_slot_size;

  intptr_t start = (intptr_t)__ pc();

  // First thing make an ic check to see if we should even be here

  // We are free to use all registers as temps without saving them and
  // restoring them except rbp. rbp is the only callee save register
  // as far as the interpreter and the compiler(s) are concerned.

  const Register ic_reg = rax;
  const Register receiver = rcx;
  Label hit;
  Label exception_pending;


  __ verify_oop(receiver);
  __ cmpl(ic_reg, Address(receiver, oopDesc::klass_offset_in_bytes()));
  __ jcc(Assembler::equal, hit);

  __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));

  // The verified entry must be aligned for code patching,
  // and the first 5 bytes must be in the same cache line.
  // If we align at 8 then we are sure the 5 bytes are in the same line.
  __ align(8);

  __ bind(hit);

  int vep_offset = ((intptr_t)__ pc()) - start;


  // The instruction at the verified entry point must be 5 bytes or longer
  // because it can be patched on the fly by make_non_entrant. The stack bang
  // instruction fits that requirement.

  // Generate stack overflow check


  if (UseStackBanging) {
    if (stack_size <= StackShadowPages*os::vm_page_size()) {
      __ bang_stack_with_offset(StackShadowPages*os::vm_page_size());
    } else {
      __ movl(rax, stack_size);
      __ bang_stack_size(rax, rbx);
    }
  } else {
    // need a 5 byte instruction to allow MT safe patching to non-entrant
    __ fat_nop();
  }

  assert(((int)__ pc() - start - vep_offset) >= 5,
         "valid size for make_non_entrant");

  // Generate a new frame for the wrapper.
  __ enter();

  // -2 because return address is already present and so is saved rbp
  if (stack_size - 2*wordSize != 0) {
    __ subl(rsp, stack_size - 2*wordSize);
  }

  // Frame is now complete as far as size and linkage go.

  int frame_complete = ((intptr_t)__ pc()) - start;

  // First thing we do is store all the args as if we are doing the call.
  // Since the C calling convention is stack based, that ensures that
  // all the Java register args are stored before we need to convert any
  // string we might have.

  int sid = 0;
  int c_arg, j_arg;
  int string_reg = 0;

  for (j_arg = first_arg_to_pass, c_arg = 0 ;
       j_arg < total_args_passed ; j_arg++, c_arg++ ) {

    VMRegPair src = in_regs[j_arg];
    VMRegPair dst = out_regs[c_arg];
    assert(dst.first()->is_stack() || in_sig_bt[j_arg] == T_VOID,
           "stack based abi assumed");

    switch (in_sig_bt[j_arg]) {

      case T_ARRAY:
      case T_OBJECT:
        if (out_sig_bt[c_arg] == T_ADDRESS) {
          // Any register based arg for a java string after the first
          // will be destroyed by the call to get_utf so we store
          // the original value in the location the utf string address
          // will eventually be stored.
          if (src.first()->is_reg()) {
            if (string_reg++ != 0) {
              simple_move32(masm, src, dst);
            }
          }
        } else if (out_sig_bt[c_arg] == T_INT || out_sig_bt[c_arg] == T_LONG) {
          // need to unbox a one-word value
          Register in_reg = rax;
          if ( src.first()->is_reg() ) {
            in_reg = src.first()->as_Register();
          } else {
            simple_move32(masm, src, in_reg->as_VMReg());
          }
          Label skipUnbox;
          __ movl(Address(rsp, reg2offset_out(dst.first())), NULL_WORD);
          if ( out_sig_bt[c_arg] == T_LONG ) {
            __ movl(Address(rsp, reg2offset_out(dst.second())), NULL_WORD);
          }
          __ testl(in_reg, in_reg);
          __ jcc(Assembler::zero, skipUnbox);
          assert(dst.first()->is_stack() &&
                 (!dst.second()->is_valid() || dst.second()->is_stack()),
                 "value(s) must go into stack slots");

          BasicType bt = out_sig_bt[c_arg];
          int box_offset = java_lang_boxing_object::value_offset_in_bytes(bt);
          if ( bt == T_LONG ) {
            __ movl(rbx, Address(in_reg,
                                 box_offset + VMRegImpl::stack_slot_size));
            __ movl(Address(rsp, reg2offset_out(dst.second())), rbx);
          }
          __ movl(in_reg,  Address(in_reg, box_offset));
          __ movl(Address(rsp, reg2offset_out(dst.first())), in_reg);
          __ bind(skipUnbox);
        } else {
          // Convert the arg to NULL
          __ movl(Address(rsp, reg2offset_out(dst.first())), NULL_WORD);
        }
        if (out_sig_bt[c_arg] == T_LONG) {
          assert(out_sig_bt[c_arg+1] == T_VOID, "must be");
          ++c_arg; // Move over the T_VOID to keep the loop indices in sync
        }
        break;

      case T_VOID:
        break;

      case T_FLOAT:
        float_move(masm, src, dst);
        break;

      case T_DOUBLE:
        assert( j_arg + 1 < total_args_passed &&
                in_sig_bt[j_arg + 1] == T_VOID, "bad arg list");
        double_move(masm, src, dst);
        break;

      case T_LONG :
        long_move(masm, src, dst);
        break;

      case T_ADDRESS: assert(false, "found T_ADDRESS in java args");

      default:
        simple_move32(masm, src, dst);
    }
  }

  // Now we must convert any string we have to utf8
  //

  for (sid = 0, j_arg = first_arg_to_pass, c_arg = 0 ;
       sid < total_strings ; j_arg++, c_arg++ ) {

    if (out_sig_bt[c_arg] == T_ADDRESS) {

      Address utf8_addr = Address(
          rsp, string_locs[sid++] * VMRegImpl::stack_slot_size);
      __ leal(rax, utf8_addr);

      // The first string we find might still be in the original java arg
      // register
      VMReg orig_loc = in_regs[j_arg].first();
      Register string_oop;

      // This is where the argument will eventually reside
      Address dest = Address(rsp, reg2offset_out(out_regs[c_arg].first()));

      if (sid == 1 && orig_loc->is_reg()) {
        string_oop = orig_loc->as_Register();
        assert(string_oop != rax, "smashed arg");
      } else {

        if (orig_loc->is_reg()) {
          // Get the copy of the jls object
          __ movl(rcx, dest);
        } else {
          // arg is still in the original location
          __ movl(rcx, Address(rbp, reg2offset_in(orig_loc)));
        }
        string_oop = rcx;

      }
      Label nullString;
      __ movl(dest, NULL_WORD);
      __ testl(string_oop, string_oop);
      __ jcc(Assembler::zero, nullString);

      // Now we can store the address of the utf string as the argument
      __ movl(dest, rax);

      // And do the conversion
      __ call_VM_leaf(CAST_FROM_FN_PTR(
             address, SharedRuntime::get_utf), string_oop, rax);
      __ bind(nullString);
    }

    if (in_sig_bt[j_arg] == T_OBJECT && out_sig_bt[c_arg] == T_LONG) {
      assert(out_sig_bt[c_arg+1] == T_VOID, "must be");
      ++c_arg; // Move over the T_VOID to keep the loop indices in sync
    }
  }


  // Ok now we are done. Need to place the nop that dtrace wants in order to
  // patch in the trap

  int patch_offset = ((intptr_t)__ pc()) - start;

  __ nop();


  // Return

  __ leave();
  __ ret(0);

  __ flush();

  nmethod *nm = nmethod::new_dtrace_nmethod(
      method, masm->code(), vep_offset, patch_offset, frame_complete,
      stack_slots / VMRegImpl::slots_per_word);
  return nm;

}

#endif // HAVE_DTRACE_H

// this function returns the adjusted size (in number of words) of a c2i adapter
// activation for use during deoptimization
int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals ) {
  return (callee_locals - callee_parameters) * Interpreter::stackElementWords();
}
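
// Illustrative only: a callee with 2 parameters and 5 locals needs
// (5 - 2) * Interpreter::stackElementWords() extra words to hold the
// non-parameter locals.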


uint SharedRuntime::out_preserve_stack_slots() {
  return 0;
}


//------------------------------generate_deopt_blob----------------------------
void SharedRuntime::generate_deopt_blob() {
  // allocate space for the code
  ResourceMark rm;
  // setup code generation tools
  CodeBuffer   buffer("deopt_blob", 1024, 1024);
  MacroAssembler* masm = new MacroAssembler(&buffer);
  int frame_size_in_words;
  OopMap* map = NULL;
  // Account for the extra args we place on the stack
  // by the time we call fetch_unroll_info
  const int additional_words = 2; // deopt kind, thread

  OopMapSet *oop_maps = new OopMapSet();

  // -------------
  // This code enters when returning to a de-optimized nmethod.  A return
  // address has been pushed on the stack, and return values are in
  // registers.
  // If we are doing a normal deopt then we were called from the patched
  // nmethod from the point we returned to the nmethod. So the return
  // address on the stack is wrong by NativeCall::instruction_size.
  // We will adjust the value so it looks like we have the original return
  // address on the stack (like when we eagerly deoptimized).
  // In the case of an exception pending when deoptimizing, we enter
  // with a return address on the stack that points after the call we patched
  // into the exception handler. We have the following register state:
  //    rax,: exception
  //    rbx,: exception handler
  //    rdx: throwing pc
  // So in this case we simply jam rdx into the useless return address and
  // the stack looks just like we want.
  //
  // At this point we need to de-opt.  We save the argument return
  // registers.  We call the first C routine, fetch_unroll_info().  This
  // routine captures the return values and returns a structure which
  // describes the current frame size and the sizes of all replacement frames.
  // The current frame is compiled code and may contain many inlined
  // functions, each with their own JVM state.  We pop the current frame, then
  // push all the new frames.  Then we call the C routine unpack_frames() to
  // populate these frames.  Finally unpack_frames() returns us the new target
  // address.  Notice that callee-save registers are BLOWN here; they have
  // already been captured in the vframeArray at the time the return PC was
  // patched.
  address start = __ pc();
  Label cont;

  // Prolog for non exception case!

  // Save everything in sight.

  map = RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words);
  // Normal deoptimization
  __ push(Deoptimization::Unpack_deopt);
  __ jmp(cont);

  int reexecute_offset = __ pc() - start;

  // Reexecute case
  // the return address is the pc that describes what bci to re-execute at

  // No need to update map as each call to save_live_registers will produce identical oopmap
  (void) RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words);

  __ push(Deoptimization::Unpack_reexecute);
  __ jmp(cont);

  int exception_offset = __ pc() - start;

  // Prolog for exception case

  // all registers are dead at this entry point, except for rax and
  // rdx which contain the exception oop and exception pc
  // respectively.  Set them in TLS and fall thru to the
  // unpack_with_exception_in_tls entry point.

  __ get_thread(rdi);
  __ movptr(Address(rdi, JavaThread::exception_pc_offset()), rdx);
  __ movptr(Address(rdi, JavaThread::exception_oop_offset()), rax);

  int exception_in_tls_offset = __ pc() - start;

  // new implementation because exception oop is now passed in JavaThread

  // Prolog for exception case
  // All registers must be preserved because they might be used by LinearScan
  // Exception oop and throwing PC are passed in JavaThread
  // tos: stack at point of call to method that threw the exception (i.e. only
  // args are on the stack, no return address)

  // make room on stack for the return address
  // It will be patched later with the throwing pc. The correct value is not
  // available now because loading it from memory would destroy registers.
  __ push(0);

  // Save everything in sight.

  // No need to update map as each call to save_live_registers will produce identical oopmap
  (void) RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words);

  // Now it is safe to overwrite any register

  // store the correct deoptimization type
  __ push(Deoptimization::Unpack_exception);

  // load throwing pc from JavaThread and patch it as the return address
  // of the current frame. Then clear the field in JavaThread
  __ get_thread(rdi);
  __ movptr(rdx, Address(rdi, JavaThread::exception_pc_offset()));
  __ movptr(Address(rbp, wordSize), rdx);
  __ movptr(Address(rdi, JavaThread::exception_pc_offset()), NULL_WORD);

#ifdef ASSERT
  // verify that there is really an exception oop in JavaThread
  __ movptr(rax, Address(rdi, JavaThread::exception_oop_offset()));
  __ verify_oop(rax);

  // verify that there is no pending exception
  Label no_pending_exception;
  __ movptr(rax, Address(rdi, Thread::pending_exception_offset()));
  __ testptr(rax, rax);
  __ jcc(Assembler::zero, no_pending_exception);
  __ stop("must not have pending exception here");
  __ bind(no_pending_exception);
#endif

  __ bind(cont);

  // Compiled code leaves the floating point stack dirty, empty it.
  __ empty_FPU_stack();


  // Call C code.  Need thread and this frame, but NOT official VM entry
  // crud.  We cannot block on this call, no GC can happen.
  __ get_thread(rcx);
  __ push(rcx);
  // fetch_unroll_info needs to call last_java_frame()
  __ set_last_Java_frame(rcx, noreg, noreg, NULL);

  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info)));

  // Need to have an oopmap that tells fetch_unroll_info where to
  // find any register it might need.

  oop_maps->add_gc_map( __ pc()-start, map);

  // Discard arg to fetch_unroll_info
  __ pop(rcx);

  __ get_thread(rcx);
  __ reset_last_Java_frame(rcx, false, false);

  // Load UnrollBlock into EDI
  __ mov(rdi, rax);

  // Move the unpack kind to a safe place in the UnrollBlock because
  // we are very short of registers

  Address unpack_kind(rdi, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes());
  // retrieve the deopt kind from where we left it.
  __ pop(rax);
  __ movl(unpack_kind, rax);                      // save the unpack_kind value

  Label noException;
  __ cmpl(rax, Deoptimization::Unpack_exception);   // Was exception pending?
  __ jcc(Assembler::notEqual, noException);
  __ movptr(rax, Address(rcx, JavaThread::exception_oop_offset()));
  __ movptr(rdx, Address(rcx, JavaThread::exception_pc_offset()));
  __ movptr(Address(rcx, JavaThread::exception_oop_offset()), NULL_WORD);
  __ movptr(Address(rcx, JavaThread::exception_pc_offset()), NULL_WORD);

  __ verify_oop(rax);

  // Overwrite the result registers with the exception results.
  __ movptr(Address(rsp, RegisterSaver::raxOffset()*wordSize), rax);
  __ movptr(Address(rsp, RegisterSaver::rdxOffset()*wordSize), rdx);

  __ bind(noException);

  // Stack is back to only having register save data on the stack.
  // Now restore the result registers. Everything else is either dead or captured
  // in the vframeArray.

  RegisterSaver::restore_result_registers(masm);

  // All of the register save area has been popped off the stack. Only the
  // return address remains.

  // Pop all the frames we must move/replace.
  //
  // Frame picture (youngest to oldest)
  // 1: self-frame (no frame link)
  // 2: deopting frame  (no frame link)
  // 3: caller of deopting frame (could be compiled/interpreted).
  //
  // Note: by leaving the return address of self-frame on the stack
  // and using the size of frame 2 to adjust the stack
  // when we are done, the return to frame 3 will still be on the stack.

  // Pop deoptimized frame
  __ addptr(rsp, Address(rdi,Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes()));

  // sp should be pointing at the return address to the caller (3)

  // Stack bang to make sure there's enough room for these interpreter frames.
  if (UseStackBanging) {
    __ movl(rbx, Address(rdi ,Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
    __ bang_stack_size(rbx, rcx);
  }

  // Load array of frame pcs into ECX
  __ movptr(rcx,Address(rdi,Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));

  __ pop(rsi); // trash the old pc

  // Load array of frame sizes into ESI
  __ movptr(rsi,Address(rdi,Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));

  Address counter(rdi, Deoptimization::UnrollBlock::counter_temp_offset_in_bytes());

  __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()));
  __ movl(counter, rbx);

  // Pick up the initial fp we should save
  __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_fp_offset_in_bytes()));

  // Now adjust the caller's stack to make up for the extra locals,
  // but record the original sp so that we can save it in the skeletal interpreter
  // frame and the stack walking of interpreter_sender will get the unextended sp
  // value and not the "real" sp value.

  Address sp_temp(rdi, Deoptimization::UnrollBlock::sender_sp_temp_offset_in_bytes());
  __ movptr(sp_temp, rsp);
  __ movl2ptr(rbx, Address(rdi, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes()));
  __ subptr(rsp, rbx);

  // Push interpreter frames in a loop
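  // Illustrative shape of one iteration (frame sizes s[i], return pcs pc[i]):
  //   push pc[i]; enter (pushes rbp); rsp -= adjusted s[i]; record sender sp
  // so each skeletal interpreter frame is walkable before the next is pushed.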
  Label loop;
  __ bind(loop);
  __ movptr(rbx, Address(rsi, 0));      // Load frame size
#ifdef CC_INTERP
  __ subptr(rbx, 4*wordSize);           // we'll push pc and ebp by hand plus two filler words
#ifdef ASSERT
  __ push(0xDEADDEAD);                  // Make a recognizable pattern
  __ push(0xDEADDEAD);
#else /* ASSERT */
  __ subptr(rsp, 2*wordSize);           // skip the "static long no_param"
#endif /* ASSERT */
#else /* CC_INTERP */
  __ subptr(rbx, 2*wordSize);           // we'll push pc and rbp, by hand
#endif /* CC_INTERP */
  __ pushptr(Address(rcx, 0));          // save return address
  __ enter();                           // save old & set new rbp,
  __ subptr(rsp, rbx);                  // Prolog!
  __ movptr(rbx, sp_temp);              // sender's sp
#ifdef CC_INTERP
  __ movptr(Address(rbp,
                  -(sizeof(BytecodeInterpreter)) + in_bytes(byte_offset_of(BytecodeInterpreter, _sender_sp))),
          rbx); // Make it walkable
#else /* CC_INTERP */
  // This value is corrected by layout_activation_impl
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
  __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize), rbx); // Make it walkable
#endif /* CC_INTERP */
  __ movptr(sp_temp, rsp);              // pass to next frame
  __ addptr(rsi, wordSize);             // Bump array pointer (sizes)
  __ addptr(rcx, wordSize);             // Bump array pointer (pcs)
  __ decrementl(counter);             // decrement counter
  __ jcc(Assembler::notZero, loop);
  __ pushptr(Address(rcx, 0));          // save final return address

  // Re-push self-frame
  __ enter();                           // save old & set new rbp,

  //  Return address and rbp are in place
  // We'll push additional args later. Just allocate a full sized
  // register save area
  __ subptr(rsp, (frame_size_in_words-additional_words - 2) * wordSize);

  // Restore frame locals after moving the frame
  __ movptr(Address(rsp, RegisterSaver::raxOffset()*wordSize), rax);
  __ movptr(Address(rsp, RegisterSaver::rdxOffset()*wordSize), rdx);
  __ fstp_d(Address(rsp, RegisterSaver::fpResultOffset()*wordSize));   // Pop float stack and store in local
  if( UseSSE>=2 ) __ movdbl(Address(rsp, RegisterSaver::xmm0Offset()*wordSize), xmm0);
  if( UseSSE==1 ) __ movflt(Address(rsp, RegisterSaver::xmm0Offset()*wordSize), xmm0);

  // Set up the args to unpack_frame

  __ pushl(unpack_kind);                     // get the unpack_kind value
  __ get_thread(rcx);
  __ push(rcx);

  // set last_Java_sp, last_Java_fp
  __ set_last_Java_frame(rcx, noreg, rbp, NULL);

  // Call C code.  Need thread but NOT official VM entry
  // crud.  We cannot block on this call, no GC can happen.  Call should
  // restore return values to their stack-slots with the new SP.
  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
  // Set an oopmap for the call site
  oop_maps->add_gc_map( __ pc()-start, new OopMap( frame_size_in_words, 0 ));

  // rax contains the return result type
  __ push(rax);

  __ get_thread(rcx);
  __ reset_last_Java_frame(rcx, false, false);

  // Collect return values
  __ movptr(rax,Address(rsp, (RegisterSaver::raxOffset() + additional_words + 1)*wordSize));
  __ movptr(rdx,Address(rsp, (RegisterSaver::rdxOffset() + additional_words + 1)*wordSize));

  // Clear floating point stack before returning to interpreter
  __ empty_FPU_stack();

  // Check if we should push the float or double return value.
  Label results_done, yes_double_value;
  __ cmpl(Address(rsp, 0), T_DOUBLE);
  __ jcc (Assembler::zero, yes_double_value);
  __ cmpl(Address(rsp, 0), T_FLOAT);
  __ jcc (Assembler::notZero, results_done);

  // return float value as expected by interpreter
  if( UseSSE>=1 ) __ movflt(xmm0, Address(rsp, (RegisterSaver::xmm0Offset() + additional_words + 1)*wordSize));
  else            __ fld_d(Address(rsp, (RegisterSaver::fpResultOffset() + additional_words + 1)*wordSize));
  __ jmp(results_done);

  // return double value as expected by interpreter
  __ bind(yes_double_value);
  if( UseSSE>=2 ) __ movdbl(xmm0, Address(rsp, (RegisterSaver::xmm0Offset() + additional_words + 1)*wordSize));
  else            __ fld_d(Address(rsp, (RegisterSaver::fpResultOffset() + additional_words + 1)*wordSize));

  __ bind(results_done);

  // Pop self-frame.
  __ leave();                              // Epilog!

  // Jump to interpreter
  __ ret(0);

  // -------------
  // make sure all code is generated
  masm->flush();

  _deopt_blob = DeoptimizationBlob::create( &buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words);
  _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
}


#ifdef COMPILER2
//------------------------------generate_uncommon_trap_blob--------------------
void SharedRuntime::generate_uncommon_trap_blob() {
  // allocate space for the code
  ResourceMark rm;
  // setup code generation tools
  CodeBuffer   buffer("uncommon_trap_blob", 512, 512);
  MacroAssembler* masm = new MacroAssembler(&buffer);

  enum frame_layout {
    arg0_off,      // thread                     sp + 0 // Arg location for
    arg1_off,      // unloaded_class_index       sp + 1 // calling C
    // The frame sender code expects that rbp will be in the "natural" place and
    // will override any oopMap setting for it. We must therefore force the layout
    // so that it agrees with the frame sender code.
    rbp_off,       // callee saved register      sp + 2
    return_off,    // slot for return address    sp + 3
    framesize
  };

  address start = __ pc();
  // Push self-frame.
  __ subptr(rsp, return_off*wordSize);     // Prolog!

  // rbp is an implicitly saved callee-saved register (i.e. the calling
  // convention will save/restore it in the prolog/epilog). Other than that
  // there are no callee-saved registers now that adapter frames are gone.
  __ movptr(Address(rsp, rbp_off*wordSize), rbp);

  // Clear the floating point exception stack
  __ empty_FPU_stack();

  // set last_Java_sp
  __ get_thread(rdx);
  __ set_last_Java_frame(rdx, noreg, noreg, NULL);

  // Call C code.  Need thread but NOT official VM entry
  // crud.  We cannot block on this call, no GC can happen.  Call should
  // capture callee-saved registers as well as return values.
  __ movptr(Address(rsp, arg0_off*wordSize), rdx);
  // argument already in ECX
  __ movl(Address(rsp, arg1_off*wordSize),rcx);
  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap)));

  // Set an oopmap for the call site
  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map =  new OopMap( framesize, 0 );
  // No oopMap for rbp, it is known implicitly

  oop_maps->add_gc_map( __ pc()-start, map);

  __ get_thread(rcx);

  __ reset_last_Java_frame(rcx, false, false);

  // Load UnrollBlock into EDI
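  // (Deoptimization::uncommon_trap returns the UnrollBlock* in rax.)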
  __ movptr(rdi, rax);

  // Pop all the frames we must move/replace.
  //
  // Frame picture (youngest to oldest)
  // 1: self-frame (no frame link)
  // 2: deopting frame  (no frame link)
  // 3: caller of deopting frame (could be compiled/interpreted).

  // Pop self-frame.  We have no frame, and must rely only on EAX and ESP.
  __ addptr(rsp,(framesize-1)*wordSize);     // Epilog!

  // Pop deoptimized frame
  __ movl2ptr(rcx, Address(rdi,Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes()));
  __ addptr(rsp, rcx);

  // sp should be pointing at the return address to the caller (3)

  // Stack bang to make sure there's enough room for these interpreter frames.
  if (UseStackBanging) {
    __ movl(rbx, Address(rdi ,Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
    __ bang_stack_size(rbx, rcx);
  }


  // Load array of frame pcs into ECX
  __ movl(rcx,Address(rdi,Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));

  __ pop(rsi); // trash the pc

  // Load array of frame sizes into ESI
  __ movptr(rsi,Address(rdi,Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));

  Address counter(rdi, Deoptimization::UnrollBlock::counter_temp_offset_in_bytes());

  __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()));
  __ movl(counter, rbx);

  // Pick up the initial fp we should save
  __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_fp_offset_in_bytes()));

  // Now adjust the caller's stack to make up for the extra locals
  // but record the original sp so that we can save it in the skeletal interpreter
  // frame and the stack walking of interpreter_sender will get the unextended sp
  // value and not the "real" sp value.

  Address sp_temp(rdi, Deoptimization::UnrollBlock::sender_sp_temp_offset_in_bytes());
  __ movptr(sp_temp, rsp);
  __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes()));
  __ subptr(rsp, rbx);

  // Push interpreter frames in a loop
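  // Each iteration builds one skeletal interpreter frame: push the saved
  // return pc, link rbp via enter(), allocate the frame body, and record
  // the sender sp so the frame is walkable.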
  Label loop;
  __ bind(loop);
  __ movptr(rbx, Address(rsi, 0));      // Load frame size
#ifdef CC_INTERP
  __ subptr(rbx, 4*wordSize);           // we'll push pc and ebp by hand and
#ifdef ASSERT
  __ push(0xDEADDEAD);                  // Make a recognizable pattern
  __ push(0xDEADDEAD);                  // (parm to RecursiveInterpreter...)
#else /* ASSERT */
  __ subptr(rsp, 2*wordSize);           // skip the "static long no_param"
#endif /* ASSERT */
#else /* CC_INTERP */
  __ subptr(rbx, 2*wordSize);           // we'll push pc and rbp, by hand
#endif /* CC_INTERP */
  __ pushptr(Address(rcx, 0));          // save return address
  __ enter();                           // save old & set new rbp,
  __ subptr(rsp, rbx);                  // Prolog!
  __ movptr(rbx, sp_temp);              // sender's sp
#ifdef CC_INTERP
  __ movptr(Address(rbp,
                  -(sizeof(BytecodeInterpreter)) + in_bytes(byte_offset_of(BytecodeInterpreter, _sender_sp))),
          rbx); // Make it walkable
#else /* CC_INTERP */
  // This value is corrected by layout_activation_impl
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD );
  __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize), rbx); // Make it walkable
#endif /* CC_INTERP */
  __ movptr(sp_temp, rsp);              // pass to next frame
  __ addptr(rsi, wordSize);             // Bump array pointer (sizes)
  __ addptr(rcx, wordSize);             // Bump array pointer (pcs)
  __ decrementl(counter);             // decrement counter
  __ jcc(Assembler::notZero, loop);
  __ pushptr(Address(rcx, 0));            // save final return address

  // Re-push self-frame
  __ enter();                           // save old & set new rbp,
  __ subptr(rsp, (framesize-2) * wordSize);   // Prolog!
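  // (framesize - 2): the return pc pushed above and the old rbp pushed by
  // enter() already occupy the return_off and rbp_off slots.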


  // set last_Java_sp, last_Java_fp
  __ get_thread(rdi);
  __ set_last_Java_frame(rdi, noreg, rbp, NULL);

  // Call C code.  Need thread but NOT official VM entry
  // crud.  We cannot block on this call, no GC can happen.  Call should
  // restore return values to their stack-slots with the new SP.
  __ movptr(Address(rsp,arg0_off*wordSize),rdi);
  __ movl(Address(rsp,arg1_off*wordSize), Deoptimization::Unpack_uncommon_trap);
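  // unpack_frames is the same routine the deopt blob calls; the
  // Unpack_uncommon_trap mode tells it we arrived via an uncommon trap.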
  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
  // Set an oopmap for the call site
  oop_maps->add_gc_map( __ pc()-start, new OopMap( framesize, 0 ) );

  __ get_thread(rdi);
  __ reset_last_Java_frame(rdi, true, false);

  // Pop self-frame.
  __ leave();     // Epilog!

  // Jump to interpreter
  __ ret(0);

  // -------------
  // make sure all code is generated
  masm->flush();

  _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, oop_maps, framesize);
}
#endif // COMPILER2

//------------------------------generate_handler_blob------
//
// Generate a special Compile2Runtime blob that saves all registers,
// sets up an oopmap, and calls safepoint code to stop the compiled code
// at a safepoint.
//
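// This blob is built in two flavors (see generate_stubs below): with
// cause_return == true for polls taken at returns, and with false for
// polls taken elsewhere in compiled code.
//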
static SafepointBlob* generate_handler_blob(address call_ptr, bool cause_return) {

  // Account for thread arg in our frame
  const int additional_words = 1;
  int frame_size_in_words;

  assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");

  ResourceMark rm;
  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map;

  // allocate space for the code
  // setup code generation tools
  CodeBuffer   buffer("handler_blob", 1024, 512);
  MacroAssembler* masm = new MacroAssembler(&buffer);

  const Register java_thread = rdi; // callee-saved for VC++
  address start   = __ pc();
  address call_pc = NULL;

  // If cause_return is true we are at a poll_return, so the return
  // address to the caller is already on the stack for the nmethod
  // being safepointed. We can leave this return address on the stack
  // and effectively complete the return and safepoint in the caller.
  // Otherwise we push space for a return address that the safepoint
  // handler will install later to make the stack walking sensible.
  if( !cause_return )
    __ push(rbx);                // Make room for return address (or push it again)

  map = RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words, false);

  // The following is basically a call_VM. However, we need the precise
  // address of the call in order to generate an oopmap. Hence, we do all the
  // work ourselves.

  // Push thread argument and setup last_Java_sp
  __ get_thread(java_thread);
  __ push(java_thread);
  __ set_last_Java_frame(java_thread, noreg, noreg, NULL);

  // if this was not a poll_return then we need to correct the return address now.
  if( !cause_return ) {
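    // saved_exception_pc holds the pc at which the nmethod was interrupted
    // by the poll; store it into the return-address slot (rbp, wordSize) of
    // the frame just built by save_live_registers.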
    __ movptr(rax, Address(java_thread, JavaThread::saved_exception_pc_offset()));
    __ movptr(Address(rbp, wordSize), rax);
  }

  // do the call
  __ call(RuntimeAddress(call_ptr));

  // Set an oopmap for the call site.  This oopmap will map all
  // oop-registers and debug-info registers as callee-saved.  This
  // will allow deoptimization at this safepoint to find all possible
  // debug-info recordings, as well as let GC find all oops.

  oop_maps->add_gc_map( __ pc() - start, map);

  // Discard arg
  __ pop(rcx);

  Label noException;

  // Clear last_Java_sp again
  __ get_thread(java_thread);
  __ reset_last_Java_frame(java_thread, false, false);

  __ cmpptr(Address(java_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
  __ jcc(Assembler::equal, noException);

  // Exception pending

  RegisterSaver::restore_live_registers(masm);

  __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));

  __ bind(noException);

  // Normal exit, register restoring and exit
  RegisterSaver::restore_live_registers(masm);

  __ ret(0);

  // make sure all code is generated
  masm->flush();

  // Fill-out other meta info
  return SafepointBlob::create(&buffer, oop_maps, frame_size_in_words);
}

//
// generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
//
// Generate a stub that calls into the VM to find the proper destination
// of a Java call. All the argument registers are live at this point,
// but since this is generic code we don't know what they are and the
// caller must do any gc of the args.
//
static RuntimeStub* generate_resolve_blob(address destination, const char* name) {
  assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");

  // allocate space for the code
  ResourceMark rm;

  CodeBuffer buffer(name, 1000, 512);
  MacroAssembler* masm                = new MacroAssembler(&buffer);

  int frame_size_words;
  enum frame_layout {
                thread_off,
                extra_words };

  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map = NULL;

  int start = __ offset();

  map = RegisterSaver::save_live_registers(masm, extra_words, &frame_size_words);

  int frame_complete = __ offset();

  const Register thread = rdi;
  __ get_thread(rdi);

  __ push(thread);
  __ set_last_Java_frame(thread, noreg, rbp, NULL);

  __ call(RuntimeAddress(destination));


  // Set an oopmap for the call site.
  // We need this not only for callee-saved registers, but also for volatile
  // registers that the compiler might be keeping live across a safepoint.

  oop_maps->add_gc_map( __ offset() - start, map);

  // rax contains the address we are going to jump to, assuming no exception was installed

  __ addptr(rsp, wordSize);

  // clear last_Java_sp
  __ reset_last_Java_frame(thread, true, false);
  // check for pending exceptions
  Label pending;
  __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
  __ jcc(Assembler::notEqual, pending);

  // get the returned methodOop
  __ movptr(rbx, Address(thread, JavaThread::vm_result_offset()));
  __ movptr(Address(rsp, RegisterSaver::rbx_offset() * wordSize), rbx);

  __ movptr(Address(rsp, RegisterSaver::rax_offset() * wordSize), rax);
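  // Patching the saved-register slots means restore_live_registers below
  // reloads rbx with the resolved methodOop and rax with the destination,
  // which is what the jmp(rax) at the end expects.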

  RegisterSaver::restore_live_registers(masm);

  // We are back to the original state on entry and ready to go.

  __ jmp(rax);

  // Pending exception after the safepoint

  __ bind(pending);

  RegisterSaver::restore_live_registers(masm);

  // exception pending => remove activation and forward to exception handler

  __ get_thread(thread);
  __ movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
  __ movptr(rax, Address(thread, Thread::pending_exception_offset()));
  __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));

  // -------------
  // make sure all code is generated
  masm->flush();

  // return the blob
  // (frame size is given in words, as new_runtime_stub expects)
  return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true);
}

void SharedRuntime::generate_stubs() {

  _wrong_method_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method),
                                        "wrong_method_stub");

  _ic_miss_blob      = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_ic_miss),
                                        "ic_miss_stub");

  _resolve_opt_virtual_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_opt_virtual_call_C),
                                        "resolve_opt_virtual_call");

  _resolve_virtual_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_virtual_call_C),
                                        "resolve_virtual_call");

  _resolve_static_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_static_call_C),
                                        "resolve_static_call");

  _polling_page_safepoint_handler_blob =
    generate_handler_blob(CAST_FROM_FN_PTR(address,
                   SafepointSynchronize::handle_polling_page_exception), false);

  _polling_page_return_handler_blob =
    generate_handler_blob(CAST_FROM_FN_PTR(address,
                   SafepointSynchronize::handle_polling_page_exception), true);

  generate_deopt_blob();
#ifdef COMPILER2
  generate_uncommon_trap_blob();
#endif // COMPILER2
}