/*
 * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "assembler_sparc.inline.hpp"
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "oops/compiledICHolder.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
#include "vmreg_sparc.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
#ifdef SHARK
#include "compiler/compileBroker.hpp"
#include "shark/sharkCompiler.hpp"
#endif

#define __ masm->


class RegisterSaver {

  // Used for saving volatile registers. This is Gregs, Fregs, I/L/O.
  // The Oregs are problematic. In the 32bit build the compiler can
  // have O registers live with 64 bit quantities. A window save will
  // cut the heads off of the registers. We have to do a very extensive
  // stack dance to save and restore these properly.

  // Note that the Oregs problem only exists if we block at either a polling
  // page exception or a compiled-code safepoint that was not originally a call,
  // or deoptimize following one of these kinds of safepoints.

  // Lots of registers to save.  For all builds, a window save will preserve
  // the %i and %l registers.  For the 32-bit longs-in-two entries and 64-bit
  // builds a window-save will preserve the %o registers.  In the LION build
  // we need to save the 64-bit %o registers which requires we save them
  // before the window-save (as then they become %i registers and get their
  // heads chopped off on interrupt).  We have to save some %g registers here
  // as well.
  enum {
    // This frame's save area.  Includes extra space for the native call:
    // vararg's layout space and the like.  Briefly holds the caller's
    // register save area.
    call_args_area = frame::register_save_words_sp_offset +
                     frame::memory_parameter_word_sp_offset*wordSize,
    // Make sure save locations are always 8 byte aligned.
    // can't use round_to because it doesn't produce a compile-time constant
    start_of_extra_save_area = ((call_args_area + 7) & ~7),
    g1_offset = start_of_extra_save_area, // g-regs needing saving
    g3_offset = g1_offset+8,
    g4_offset = g3_offset+8,
    g5_offset = g4_offset+8,
    o0_offset = g5_offset+8,
    o1_offset = o0_offset+8,
    o2_offset = o1_offset+8,
    o3_offset = o2_offset+8,
    o4_offset = o3_offset+8,
    o5_offset = o4_offset+8,
    start_of_flags_save_area = o5_offset+8,
    ccr_offset = start_of_flags_save_area,
    fsr_offset = ccr_offset + 8,
    d00_offset = fsr_offset+8,  // Start of float save area
    register_save_size = d00_offset+8*32
  };


  public:

  static int Oexception_offset() { return o0_offset; };
  static int G3_offset() { return g3_offset; };
  static int G5_offset() { return g5_offset; };
  static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words);
  static void restore_live_registers(MacroAssembler* masm);

  // During deoptimization only the result registers need to be restored;
  // all the other values have already been extracted.

  static void restore_result_registers(MacroAssembler* masm);
};
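
// A sketch of the typical usage pattern in the runtime blobs generated later in
// this file: save_live_registers() pushes the save frame and returns an OopMap
// describing the save locations, the runtime call is made, and then
// restore_live_registers() (or restore_result_registers() on the deopt path)
// unwinds it again.  Roughly:
//
//   int frame_size_words;
//   OopMap* map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
//   // ... emit the call into the VM ...
//   RegisterSaver::restore_live_registers(masm);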

OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words) {
  // Record volatile registers as callee-save values in an OopMap so their save locations will be
  // propagated to the caller frame's RegisterMap during StackFrameStream construction (needed for
  // deoptimization; see compiledVFrame::create_stack_value).  The caller's I, L and O registers
  // are saved in register windows - I's and L's in the caller's frame and O's in the stub frame
  // (as the stub's I's) when the runtime routine called by the stub creates its frame.
  int i;
  // Always make the frame size 16 byte aligned.
  int frame_size = round_to(additional_frame_words + register_save_size, 16);
  // OopMap frame size is in c2 stack slots (sizeof(jint)) not bytes or words
  int frame_size_in_slots = frame_size / sizeof(jint);
  // CodeBlob frame size is in words.
  *total_frame_words = frame_size / wordSize;
  // OopMap* map = new OopMap(*total_frame_words, 0);
  OopMap* map = new OopMap(frame_size_in_slots, 0);

#if !defined(_LP64)

  // Save 64-bit O registers; they will get their heads chopped off on a 'save'.
  __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
  __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
  __ stx(O2, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8);
  __ stx(O3, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8);
  __ stx(O4, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8);
  __ stx(O5, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8);
#endif /* _LP64 */

  __ save(SP, -frame_size, SP);

#ifndef _LP64
  // Reload the 64-bit Oregs. Although they are now Iregs we load them
  // to Oregs here to avoid interrupts cutting off their heads.

  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8, O2);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8, O3);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8, O4);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8, O5);

  __ stx(O0, SP, o0_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o0_offset + 4)>>2), O0->as_VMReg());

  __ stx(O1, SP, o1_offset+STACK_BIAS);

  map->set_callee_saved(VMRegImpl::stack2reg((o1_offset + 4)>>2), O1->as_VMReg());

  __ stx(O2, SP, o2_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o2_offset + 4)>>2), O2->as_VMReg());

  __ stx(O3, SP, o3_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o3_offset + 4)>>2), O3->as_VMReg());

  __ stx(O4, SP, o4_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o4_offset + 4)>>2), O4->as_VMReg());

  __ stx(O5, SP, o5_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o5_offset + 4)>>2), O5->as_VMReg());
#endif /* _LP64 */


#ifdef _LP64
  int debug_offset = 0;
#else
  int debug_offset = 4;
#endif
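
  // Note: debug_offset adjusts the OopMap slot so it points at the live half of
  // each 64-bit save.  On the 32-bit (big-endian) build a 32-bit value stored
  // with stx lands in the higher-addressed word of the slot (offset + 4); on the
  // 64-bit build the whole slot is live, so the offset is 0.
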
  // Save the G's
  __ stx(G1, SP, g1_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g1_offset + debug_offset)>>2), G1->as_VMReg());

  __ stx(G3, SP, g3_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g3_offset + debug_offset)>>2), G3->as_VMReg());

  __ stx(G4, SP, g4_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g4_offset + debug_offset)>>2), G4->as_VMReg());

  __ stx(G5, SP, g5_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g5_offset + debug_offset)>>2), G5->as_VMReg());

  // This is really a waste but we'll keep things as they were for now
  if (true) {
#ifndef _LP64
    map->set_callee_saved(VMRegImpl::stack2reg((o0_offset)>>2), O0->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o1_offset)>>2), O1->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o2_offset)>>2), O2->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o3_offset)>>2), O3->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o4_offset)>>2), O4->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o5_offset)>>2), O5->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((g1_offset)>>2), G1->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((g3_offset)>>2), G3->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((g4_offset)>>2), G4->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((g5_offset)>>2), G5->as_VMReg()->next());
#endif /* _LP64 */
  }


  // Save the flags
  __ rdccr( G5 );
  __ stx(G5, SP, ccr_offset+STACK_BIAS);
  __ stxfsr(SP, fsr_offset+STACK_BIAS);

  // Save all the FP registers: 32 doubles (32 floats correspond to the 2 halves of the first 16 doubles)
  int offset = d00_offset;
  for( int i=0; i<FloatRegisterImpl::number_of_registers; i+=2 ) {
    FloatRegister f = as_FloatRegister(i);
    __ stf(FloatRegisterImpl::D,  f, SP, offset+STACK_BIAS);
    // Record as callee saved both halves of double registers (2 float registers).
    map->set_callee_saved(VMRegImpl::stack2reg(offset>>2), f->as_VMReg());
    map->set_callee_saved(VMRegImpl::stack2reg((offset + sizeof(float))>>2), f->as_VMReg()->next());
    offset += sizeof(double);
  }

  // And we're done.

  return map;
}


// Pop the current frame and restore all the registers that we
// saved.
void RegisterSaver::restore_live_registers(MacroAssembler* masm) {

  // Restore all the FP registers
  for( int i=0; i<FloatRegisterImpl::number_of_registers; i+=2 ) {
    __ ldf(FloatRegisterImpl::D, SP, d00_offset+i*sizeof(float)+STACK_BIAS, as_FloatRegister(i));
  }

  __ ldx(SP, ccr_offset+STACK_BIAS, G1);
  __ wrccr (G1) ;

  // Restore the G's
  // Note that G2 (AKA GThread) must be saved and restored separately.
  // TODO-FIXME: save and restore some of the other ASRs, viz., %asi and %gsr.

  __ ldx(SP, g1_offset+STACK_BIAS, G1);
  __ ldx(SP, g3_offset+STACK_BIAS, G3);
  __ ldx(SP, g4_offset+STACK_BIAS, G4);
  __ ldx(SP, g5_offset+STACK_BIAS, G5);


#if !defined(_LP64)
  // Restore the 64-bit O's.
  __ ldx(SP, o0_offset+STACK_BIAS, O0);
  __ ldx(SP, o1_offset+STACK_BIAS, O1);
  __ ldx(SP, o2_offset+STACK_BIAS, O2);
  __ ldx(SP, o3_offset+STACK_BIAS, O3);
  __ ldx(SP, o4_offset+STACK_BIAS, O4);
  __ ldx(SP, o5_offset+STACK_BIAS, O5);

  // And temporarily place them in TLS

  __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
  __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
  __ stx(O2, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8);
  __ stx(O3, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8);
  __ stx(O4, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8);
  __ stx(O5, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8);
#endif /* _LP64 */

  // Restore flags

  __ ldxfsr(SP, fsr_offset+STACK_BIAS);

  __ restore();

#if !defined(_LP64)
  // Now reload the 64-bit Oregs after we've restored the window.
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8, O2);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8, O3);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8, O4);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8, O5);
#endif /* _LP64 */

}

// Pop the current frame and restore the registers that might be holding
// a result.
void RegisterSaver::restore_result_registers(MacroAssembler* masm) {

#if !defined(_LP64)
  // 32bit build returns longs in G1
  __ ldx(SP, g1_offset+STACK_BIAS, G1);

  // Retrieve the 64-bit O's.
  __ ldx(SP, o0_offset+STACK_BIAS, O0);
  __ ldx(SP, o1_offset+STACK_BIAS, O1);
  // and save to TLS
  __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
  __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
#endif /* _LP64 */

  __ ldf(FloatRegisterImpl::D, SP, d00_offset+STACK_BIAS, as_FloatRegister(0));

  __ restore();

#if !defined(_LP64)
  // Now reload the 64-bit Oregs after we've restored the window.
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
#endif /* _LP64 */

}

// The java_calling_convention describes stack locations as ideal slots on
// a frame with no abi restrictions. Since we must observe abi restrictions
// (like the placement of the register window) the slots must be biased by
// the following value.
static int reg2offset(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}

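// Describe a full 64-bit register value as a VMRegPair: on a 64-bit build the
// register covers the value by itself; on a 32-bit build the value spans the
// register and its successor.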
static VMRegPair reg64_to_VMRegPair(Register r) {
  VMRegPair ret;
  if (wordSize == 8) {
    ret.set2(r->as_VMReg());
  } else {
    ret.set_pair(r->successor()->as_VMReg(), r->as_VMReg());
  }
  return ret;
}

// ---------------------------------------------------------------------------
// Read the array of BasicTypes from a signature, and compute where the
// arguments should go.  Values in the VMRegPair regs array refer to 4-byte (VMRegImpl::stack_slot_size)
// quantities.  Values less than VMRegImpl::stack0 are registers, those above
// refer to 4-byte stack slots.  All stack slots are based off of the window
// top.  VMRegImpl::stack0 refers to the first slot past the 16-word window,
// and VMRegImpl::stack0+1 refers to the memory word 4 bytes higher.  Register
// values 0-63 (up to RegisterImpl::number_of_registers) are the 64-bit
// integer registers.  Values 64-95 are the (32-bit only) float registers.
// Each 32-bit quantity is given its own number, so the integer registers
// (in either 32- or 64-bit builds) use 2 numbers.  For example, there is
// an O0-low and an O0-high.  Essentially, all int register numbers are doubled.

// Register results are passed in O0-O5, for outgoing call arguments.  To
// convert to incoming arguments, convert all O's to I's.  The regs array
// refers to the low and hi 32-bit words of 64-bit registers or stack slots.
// If the regs[].second() field is set to VMRegImpl::Bad(), it means it's unused (a
// 32-bit value was passed).  If both are VMRegImpl::Bad(), it means no value was
// passed (used as a placeholder for the other half of longs and doubles in
// the 64-bit build).  regs[].second() is either VMRegImpl::Bad() or regs[].second() is
// regs[].first()+1 (regs[].first() may be misaligned in the C calling convention).
// Sparc never passes a value in regs[].second() but not regs[].first() (regs[].first()
// == VMRegImpl::Bad() && regs[].second() != VMRegImpl::Bad()) nor unrelated values in the
// same VMRegPair.

// Note: the INPUTS in sig_bt are in units of Java argument words, which are
// either 32-bit or 64-bit depending on the build.  The OUTPUTS are in 32-bit
// units regardless of build.


// ---------------------------------------------------------------------------
// The compiled Java calling convention.  The Java convention always passes
// 64-bit values in adjacent aligned locations (either registers or stack),
// floats in float registers and doubles in aligned float pairs.  Values are
// packed in the registers.  There is no backing varargs store for values in
// registers.  In the 32-bit build, longs are passed in G1 and G4 (cannot be
// passed in I's, because longs in I's get their heads chopped off at
// interrupt).
int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
                                           VMRegPair *regs,
                                           int total_args_passed,
                                           int is_outgoing) {
  assert(F31->as_VMReg()->is_reg(), "overlapping stack/register numbers");

  // Convention is to pack the first 6 int/oop args into the first 6 registers
  // (I0-I5), extras spill to the stack.  Then pack the first 8 float args
  // into F0-F7, extras spill to the stack.  Then pad all register sets to
  // align.  Then put longs and doubles into the same registers as they fit,
  // else spill to the stack.
  const int int_reg_max = SPARC_ARGS_IN_REGS_NUM;
  const int flt_reg_max = 8;
  //
  // Where 32-bit 1-reg longs start being passed
  // In tiered we must pass on stack because c1 can't use a "pair" in a single reg.
  // So make it look like we've filled all the G regs that c2 wants to use.
  Register g_reg = TieredCompilation ? noreg : G1;

  // Count int/oop and float args.  See how many stack slots we'll need and
  // where the longs & doubles will go.
  int int_reg_cnt   = 0;
  int flt_reg_cnt   = 0;
  // int stk_reg_pairs = frame::register_save_words*(wordSize>>2);
  // int stk_reg_pairs = SharedRuntime::out_preserve_stack_slots();
  int stk_reg_pairs = 0;
  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_LONG:                // LP64, longs compete with int args
      assert(sig_bt[i+1] == T_VOID, "");
#ifdef _LP64
      if (int_reg_cnt < int_reg_max)  int_reg_cnt++;
#endif
      break;
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
      if (int_reg_cnt < int_reg_max)  int_reg_cnt++;
#ifndef _LP64
      else                            stk_reg_pairs++;
#endif
      break;
    case T_INT:
    case T_SHORT:
    case T_CHAR:
    case T_BYTE:
    case T_BOOLEAN:
      if (int_reg_cnt < int_reg_max)  int_reg_cnt++;
      else                            stk_reg_pairs++;
      break;
    case T_FLOAT:
      if (flt_reg_cnt < flt_reg_max)  flt_reg_cnt++;
      else                            stk_reg_pairs++;
      break;
    case T_DOUBLE:
      assert(sig_bt[i+1] == T_VOID, "");
      break;
    case T_VOID:
      break;
    default:
      ShouldNotReachHere();
    }
  }

  // This is where the longs/doubles start on the stack.
  stk_reg_pairs = (stk_reg_pairs+1) & ~1; // Round

  int flt_reg_pairs = (flt_reg_cnt+1) & ~1;

  // int stk_reg = frame::register_save_words*(wordSize>>2);
  // int stk_reg = SharedRuntime::out_preserve_stack_slots();
  int stk_reg = 0;
  int int_reg = 0;
  int flt_reg = 0;

  // Now do the signature layout
  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_INT:
    case T_SHORT:
    case T_CHAR:
    case T_BYTE:
    case T_BOOLEAN:
#ifndef _LP64
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
#endif // _LP64
      if (int_reg < int_reg_max) {
        Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
        regs[i].set1(r->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stk_reg++));
      }
      break;

#ifdef _LP64
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
      if (int_reg < int_reg_max) {
        Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
        regs[i].set2(r->as_VMReg());
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
        stk_reg_pairs += 2;
      }
      break;
#endif // _LP64

    case T_LONG:
      assert(sig_bt[i+1] == T_VOID, "expecting VOID in other half");
#ifdef _LP64
        if (int_reg < int_reg_max) {
          Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
          regs[i].set2(r->as_VMReg());
        } else {
          regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
          stk_reg_pairs += 2;
        }
#else
#ifdef COMPILER2
        // For 32-bit build, can't pass longs in O-regs because they become
        // I-regs and get trashed.  Use G-regs instead.  G1 and G4 are almost
        // spare and available.  This convention isn't used by the Sparc ABI or
        // anywhere else. If we're tiered then we don't use G-regs because c1
        // can't deal with them as a "pair". (Tiered makes this code think g's are filled)
        // G0: zero
        // G1: 1st Long arg
        // G2: global allocated to TLS
        // G3: used in inline cache check
        // G4: 2nd Long arg
        // G5: used in inline cache check
        // G6: used by OS
        // G7: used by OS

        if (g_reg == G1) {
          regs[i].set2(G1->as_VMReg()); // This long arg in G1
          g_reg = G4;                  // Where the next arg goes
        } else if (g_reg == G4) {
          regs[i].set2(G4->as_VMReg()); // The 2nd long arg in G4
          g_reg = noreg;               // No more longs in registers
        } else {
          regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
          stk_reg_pairs += 2;
        }
#else // COMPILER2
          regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
          stk_reg_pairs += 2;
#endif // COMPILER2
#endif // _LP64
      break;

    case T_FLOAT:
      if (flt_reg < flt_reg_max) regs[i].set1(as_FloatRegister(flt_reg++)->as_VMReg());
      else                       regs[i].set1(VMRegImpl::stack2reg(stk_reg++));
      break;
    case T_DOUBLE:
      assert(sig_bt[i+1] == T_VOID, "expecting half");
      if (flt_reg_pairs + 1 < flt_reg_max) {
        regs[i].set2(as_FloatRegister(flt_reg_pairs)->as_VMReg());
        flt_reg_pairs += 2;
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
        stk_reg_pairs += 2;
      }
      break;
    case T_VOID: regs[i].set_bad();  break; // Halves of longs & doubles
    default:
      ShouldNotReachHere();
    }
  }

  // Return the amount of stack space these arguments will need.
  return stk_reg_pairs;

}

// Helper class mostly to avoid passing masm everywhere, and handle
// store displacement overflow logic.
class AdapterGenerator {
  MacroAssembler *masm;
  Register Rdisp;
  void set_Rdisp(Register r)  { Rdisp = r; }

  void patch_callers_callsite();

  // base+st_off points to top of argument
  int arg_offset(const int st_off) { return st_off; }
  int next_arg_offset(const int st_off) {
    return st_off - Interpreter::stackElementSize;
  }

  // Argument slot values may be loaded first into a register because
  // they might not fit into the displacement field.
  RegisterOrConstant arg_slot(const int st_off);
  RegisterOrConstant next_arg_slot(const int st_off);

  // Stores long into offset pointed to by base
  void store_c2i_long(Register r, Register base,
                      const int st_off, bool is_stack);
  void store_c2i_object(Register r, Register base,
                        const int st_off);
  void store_c2i_int(Register r, Register base,
                     const int st_off);
  void store_c2i_double(VMReg r_2,
                        VMReg r_1, Register base, const int st_off);
  void store_c2i_float(FloatRegister f, Register base,
                       const int st_off);

 public:
  void gen_c2i_adapter(int total_args_passed,
                              // VMReg max_arg,
                              int comp_args_on_stack, // VMRegStackSlots
                              const BasicType *sig_bt,
                              const VMRegPair *regs,
                              Label& skip_fixup);
  void gen_i2c_adapter(int total_args_passed,
                              // VMReg max_arg,
                              int comp_args_on_stack, // VMRegStackSlots
                              const BasicType *sig_bt,
                              const VMRegPair *regs);

  AdapterGenerator(MacroAssembler *_masm) : masm(_masm) {}
};
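
// Both adapter bodies below are emitted into one code buffer by
// SharedRuntime::generate_i2c2i_adapters() near the end of this section:
// first the i2c adapter, then the inline-cache check that forms the
// unverified c2i entry, and finally the c2i adapter itself.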


// Patch the caller's callsite with entry to compiled code if it exists.
void AdapterGenerator::patch_callers_callsite() {
  Label L;
  __ ld_ptr(G5_method, in_bytes(Method::code_offset()), G3_scratch);
  __ br_null(G3_scratch, false, Assembler::pt, L);
  // Schedule the branch target address early.
  __ delayed()->ld_ptr(G5_method, in_bytes(Method::interpreter_entry_offset()), G3_scratch);
  // Call into the VM to patch the caller, then jump to compiled callee
  __ save_frame(4);     // Args in compiled layout; do not blow them

  // Must save all the live Gregs; the list is:
  // G1: 1st Long arg (32bit build)
  // G2: global allocated to TLS
  // G3: used in inline cache check (scratch)
  // G4: 2nd Long arg (32bit build);
  // G5: used in inline cache check (Method*)

  // The longs must go to the stack by hand since in the 32-bit build they can be trashed by window ops.

#ifdef _LP64
  // mov(s,d)
  __ mov(G1, L1);
  __ mov(G4, L4);
  __ mov(G5_method, L5);
  __ mov(G5_method, O0);         // VM needs target method
  __ mov(I7, O1);                // VM needs caller's callsite
  // Must be a leaf call...
  // can be very far once the blob has been relocated
  AddressLiteral dest(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite));
  __ relocate(relocInfo::runtime_call_type);
  __ jumpl_to(dest, O7, O7);
  __ delayed()->mov(G2_thread, L7_thread_cache);
  __ mov(L7_thread_cache, G2_thread);
  __ mov(L1, G1);
  __ mov(L4, G4);
  __ mov(L5, G5_method);
#else
  __ stx(G1, FP, -8 + STACK_BIAS);
  __ stx(G4, FP, -16 + STACK_BIAS);
  __ mov(G5_method, L5);
  __ mov(G5_method, O0);         // VM needs target method
  __ mov(I7, O1);                // VM needs caller's callsite
  // Must be a leaf call...
  __ call(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite), relocInfo::runtime_call_type);
  __ delayed()->mov(G2_thread, L7_thread_cache);
  __ mov(L7_thread_cache, G2_thread);
  __ ldx(FP, -8 + STACK_BIAS, G1);
  __ ldx(FP, -16 + STACK_BIAS, G4);
  __ mov(L5, G5_method);
  __ ld_ptr(G5_method, in_bytes(Method::interpreter_entry_offset()), G3_scratch);
#endif /* _LP64 */

  __ restore();      // Restore args
  __ bind(L);
}


RegisterOrConstant AdapterGenerator::arg_slot(const int st_off) {
  RegisterOrConstant roc(arg_offset(st_off));
  return __ ensure_simm13_or_reg(roc, Rdisp);
}

RegisterOrConstant AdapterGenerator::next_arg_slot(const int st_off) {
  RegisterOrConstant roc(next_arg_offset(st_off));
  return __ ensure_simm13_or_reg(roc, Rdisp);
}


// Stores long into offset pointed to by base
void AdapterGenerator::store_c2i_long(Register r, Register base,
                                      const int st_off, bool is_stack) {
#ifdef _LP64
  // In V9, longs are given 2 64-bit slots in the interpreter, but the
  // data is passed in only 1 slot.
  __ stx(r, base, next_arg_slot(st_off));
#else
#ifdef COMPILER2
  // Misaligned store of 64-bit data
  __ stw(r, base, arg_slot(st_off));    // lo bits
  __ srlx(r, 32, r);
  __ stw(r, base, next_arg_slot(st_off));  // hi bits
#else
  if (is_stack) {
    // Misaligned store of 64-bit data
    __ stw(r, base, arg_slot(st_off));    // lo bits
    __ srlx(r, 32, r);
    __ stw(r, base, next_arg_slot(st_off));  // hi bits
  } else {
    __ stw(r->successor(), base, arg_slot(st_off)     ); // lo bits
    __ stw(r             , base, next_arg_slot(st_off)); // hi bits
  }
#endif // COMPILER2
#endif // _LP64
}

void AdapterGenerator::store_c2i_object(Register r, Register base,
                      const int st_off) {
  __ st_ptr (r, base, arg_slot(st_off));
}

void AdapterGenerator::store_c2i_int(Register r, Register base,
                   const int st_off) {
  __ st (r, base, arg_slot(st_off));
}

// Stores into offset pointed to by base
void AdapterGenerator::store_c2i_double(VMReg r_2,
                      VMReg r_1, Register base, const int st_off) {
#ifdef _LP64
  // In V9, doubles are given 2 64-bit slots in the interpreter, but the
  // data is passed in only 1 slot.
  __ stf(FloatRegisterImpl::D, r_1->as_FloatRegister(), base, next_arg_slot(st_off));
#else
  // Need to marshal 64-bit value from misaligned Lesp loads
  __ stf(FloatRegisterImpl::S, r_1->as_FloatRegister(), base, next_arg_slot(st_off));
  __ stf(FloatRegisterImpl::S, r_2->as_FloatRegister(), base, arg_slot(st_off) );
#endif
}

void AdapterGenerator::store_c2i_float(FloatRegister f, Register base,
                                       const int st_off) {
  __ stf(FloatRegisterImpl::S, f, base, arg_slot(st_off));
}

void AdapterGenerator::gen_c2i_adapter(
                            int total_args_passed,
                            // VMReg max_arg,
                            int comp_args_on_stack, // VMRegStackSlots
                            const BasicType *sig_bt,
                            const VMRegPair *regs,
                            Label& skip_fixup) {

  // Before we get into the guts of the C2I adapter, see if we should be here
  // at all.  We've come from compiled code and are attempting to jump to the
  // interpreter, which means the caller made a static call to get here
  // (vcalls always get a compiled target if there is one).  Check for a
  // compiled target.  If there is one, we need to patch the caller's call.
  // However we will run interpreted if we come thru here. The next pass
  // thru the call site will run compiled. If we ran compiled here then
  // we can (theoretically) do endless i2c->c2i->i2c transitions during
  // deopt/uncommon trap cycles. If we always go interpreted here then
  // we can have at most one and don't need to play any tricks to keep
  // from endlessly growing the stack.
  //
  // Actually if we detected that we had an i2c->c2i transition here we
  // ought to be able to reset the world back to the state of the interpreted
  // call and not bother building another interpreter arg area. We don't
  // do that at this point.

  patch_callers_callsite();

  __ bind(skip_fixup);

  // Since all args are passed on the stack, total_args_passed*wordSize is the
  // space we need.  Add in varargs area needed by the interpreter. Round up
  // to stack alignment.
  const int arg_size = total_args_passed * Interpreter::stackElementSize;
  const int varargs_area =
                 (frame::varargs_offset - frame::register_save_words)*wordSize;
  const int extraspace = round_to(arg_size + varargs_area, 2*wordSize);

  int bias = STACK_BIAS;
  const int interp_arg_offset = frame::varargs_offset*wordSize +
                        (total_args_passed-1)*Interpreter::stackElementSize;

  Register base = SP;

#ifdef _LP64
  // In the 64-bit build, because of wider slots and STACK_BIAS, we can run
  // out of bits in the displacement to do loads and stores.  Use g3 as
  // temporary displacement.
  if (!Assembler::is_simm13(extraspace)) {
    __ set(extraspace, G3_scratch);
    __ sub(SP, G3_scratch, SP);
  } else {
    __ sub(SP, extraspace, SP);
  }
  set_Rdisp(G3_scratch);
#else
  __ sub(SP, extraspace, SP);
#endif // _LP64

  // First write G1 (if used) to wherever it must go
  for (int i=0; i<total_args_passed; i++) {
    const int st_off = interp_arg_offset - (i*Interpreter::stackElementSize) + bias;
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (r_1 == G1_scratch->as_VMReg()) {
      if (sig_bt[i] == T_OBJECT || sig_bt[i] == T_ARRAY) {
        store_c2i_object(G1_scratch, base, st_off);
      } else if (sig_bt[i] == T_LONG) {
        assert(!TieredCompilation, "should not use register args for longs");
        store_c2i_long(G1_scratch, base, st_off, false);
      } else {
        store_c2i_int(G1_scratch, base, st_off);
      }
    }
  }

  // Now write the args into the outgoing interpreter space
  for (int i=0; i<total_args_passed; i++) {
    const int st_off = interp_arg_offset - (i*Interpreter::stackElementSize) + bias;
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    // Skip G1 if found as we did it first in order to free it up
    if (r_1 == G1_scratch->as_VMReg()) {
      continue;
    }
#ifdef ASSERT
    bool G1_forced = false;
#endif // ASSERT
    if (r_1->is_stack()) {        // Pretend stack targets are loaded into G1
#ifdef _LP64
      Register ld_off = Rdisp;
      __ set(reg2offset(r_1) + extraspace + bias, ld_off);
#else
      int ld_off = reg2offset(r_1) + extraspace + bias;
#endif // _LP64
#ifdef ASSERT
      G1_forced = true;
#endif // ASSERT
      r_1 = G1_scratch->as_VMReg();// as part of the load/store shuffle
      if (!r_2->is_valid()) __ ld (base, ld_off, G1_scratch);
      else                  __ ldx(base, ld_off, G1_scratch);
    }

    if (r_1->is_Register()) {
      Register r = r_1->as_Register()->after_restore();
      if (sig_bt[i] == T_OBJECT || sig_bt[i] == T_ARRAY) {
        store_c2i_object(r, base, st_off);
      } else if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
#ifndef _LP64
        if (TieredCompilation) {
          assert(G1_forced || sig_bt[i] != T_LONG, "should not use register args for longs");
        }
#endif // _LP64
        store_c2i_long(r, base, st_off, r_2->is_stack());
      } else {
        store_c2i_int(r, base, st_off);
      }
    } else {
      assert(r_1->is_FloatRegister(), "");
      if (sig_bt[i] == T_FLOAT) {
        store_c2i_float(r_1->as_FloatRegister(), base, st_off);
      } else {
        assert(sig_bt[i] == T_DOUBLE, "wrong type");
        store_c2i_double(r_2, r_1, base, st_off);
      }
    }
  }

#ifdef _LP64
  // Need to reload G3_scratch, used for temporary displacements.
  __ ld_ptr(G5_method, in_bytes(Method::interpreter_entry_offset()), G3_scratch);

  // Pass O5_savedSP as an argument to the interpreter.
  // The interpreter will restore SP to this value before returning.
  __ set(extraspace, G1);
  __ add(SP, G1, O5_savedSP);
#else
  // Pass O5_savedSP as an argument to the interpreter.
  // The interpreter will restore SP to this value before returning.
  __ add(SP, extraspace, O5_savedSP);
#endif // _LP64

  __ mov((frame::varargs_offset)*wordSize -
         1*Interpreter::stackElementSize+bias+BytesPerWord, G1);
  // Jump to the interpreter just as if interpreter was doing it.
  __ jmpl(G3_scratch, 0, G0);
  // Setup Lesp for the call.  Cannot actually set Lesp as the current Lesp
  // (really L0) is in use by the compiled frame as a generic temp.  However,
  // the interpreter does not know where its args are without some kind of
  // arg pointer being passed in.  Pass it in Gargs.
  __ delayed()->add(SP, G1, Gargs);
}

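// Used by the VerifyAdapterCalls check in gen_i2c_adapter below: branches to
// L_ok when pc_reg lies within the given code range and falls through
// otherwise, so the caller can chain several checks before stopping.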
static void range_check(MacroAssembler* masm, Register pc_reg, Register temp_reg, Register temp2_reg,
                        address code_start, address code_end,
                        Label& L_ok) {
  Label L_fail;
  __ set(ExternalAddress(code_start), temp_reg);
  __ set(pointer_delta(code_end, code_start, 1), temp2_reg);
  __ cmp(pc_reg, temp_reg);
  __ brx(Assembler::lessEqualUnsigned, false, Assembler::pn, L_fail);
  __ delayed()->add(temp_reg, temp2_reg, temp_reg);
  __ cmp(pc_reg, temp_reg);
  __ cmp_and_brx_short(pc_reg, temp_reg, Assembler::lessUnsigned, Assembler::pt, L_ok);
  __ bind(L_fail);
}

void AdapterGenerator::gen_i2c_adapter(
                            int total_args_passed,
                            // VMReg max_arg,
                            int comp_args_on_stack, // VMRegStackSlots
                            const BasicType *sig_bt,
                            const VMRegPair *regs) {

  // Generate an I2C adapter: adjust the I-frame to make space for the C-frame
  // layout.  Lesp was saved by the calling I-frame and will be restored on
  // return.  Meanwhile, outgoing arg space is all owned by the callee
  // C-frame, so we can mangle it at will.  After adjusting the frame size,
  // hoist register arguments and repack other args according to the compiled
  // code convention.  Finally, end in a jump to the compiled code.  The entry
  // point address is the start of the buffer.

  // We will only enter here from an interpreted frame and never from after
  // passing thru a c2i. Azul allowed this but we do not. If we lose the
  // race and use a c2i we will remain interpreted for the race loser(s).
  // This removes all sorts of headaches on the x86 side and also eliminates
  // the possibility of having c2i -> i2c -> c2i -> ... endless transitions.

  // More detail:
  // Adapters can be frameless because they do not require the caller
  // to perform additional cleanup work, such as correcting the stack pointer.
  // An i2c adapter is frameless because the *caller* frame, which is interpreted,
  // routinely repairs its own stack pointer (from interpreter_frame_last_sp),
  // even if a callee has modified the stack pointer.
  // A c2i adapter is frameless because the *callee* frame, which is interpreted,
  // routinely repairs its caller's stack pointer (from sender_sp, which is set
  // up via the senderSP register).
  // In other words, if *either* the caller or callee is interpreted, we can
  // get the stack pointer repaired after a call.
  // This is why c2i and i2c adapters cannot be indefinitely composed.
  // In particular, if a c2i adapter were to somehow call an i2c adapter,
  // both caller and callee would be compiled methods, and neither would
  // clean up the stack pointer changes performed by the two adapters.
  // If this happens, control eventually transfers back to the compiled
  // caller, but with an uncorrected stack, causing delayed havoc.

  if (VerifyAdapterCalls &&
      (Interpreter::code() != NULL || StubRoutines::code1() != NULL)) {
    // So, let's test for cascading c2i/i2c adapters right now.
    //  assert(Interpreter::contains($return_addr) ||
    //         StubRoutines::contains($return_addr),
    //         "i2c adapter must return to an interpreter frame");
    __ block_comment("verify_i2c { ");
    Label L_ok;
    if (Interpreter::code() != NULL)
      range_check(masm, O7, O0, O1,
                  Interpreter::code()->code_start(), Interpreter::code()->code_end(),
                  L_ok);
    if (StubRoutines::code1() != NULL)
      range_check(masm, O7, O0, O1,
                  StubRoutines::code1()->code_begin(), StubRoutines::code1()->code_end(),
                  L_ok);
    if (StubRoutines::code2() != NULL)
      range_check(masm, O7, O0, O1,
                  StubRoutines::code2()->code_begin(), StubRoutines::code2()->code_end(),
                  L_ok);
    const char* msg = "i2c adapter must return to an interpreter frame";
    __ block_comment(msg);
    __ stop(msg);
    __ bind(L_ok);
    __ block_comment("} verify_i2ce ");
  }

  // As you can see from the list of inputs & outputs there are not a lot
  // of temp registers to work with: mostly G1, G3 & G4.

  // Inputs:
  // G2_thread      - TLS
  // G5_method      - Method oop
  // G4 (Gargs)     - Pointer to interpreter's args
  // O0..O4         - free for scratch
  // O5_savedSP     - Caller's saved SP, to be restored if needed
  // O6             - Current SP!
  // O7             - Valid return address
  // L0-L7, I0-I7   - Caller's temps (no frame pushed yet)

  // Outputs:
  // G2_thread      - TLS
  // G1, G4         - Outgoing long args in 32-bit build
  // O0-O5          - Outgoing args in compiled layout
  // O6             - Adjusted or restored SP
  // O7             - Valid return address
  // L0-L7, I0-I7   - Caller's temps (no frame pushed yet)
  // F0-F7          - more outgoing args


  // Gargs is the incoming argument base, and also an outgoing argument.
  __ sub(Gargs, BytesPerWord, Gargs);

  // ON ENTRY TO THE CODE WE ARE MAKING, WE HAVE AN INTERPRETED FRAME
  // WITH O7 HOLDING A VALID RETURN PC
  //
  // |              |
  // :  java stack  :
  // |              |
  // +--------------+ <--- start of outgoing args
  // |   receiver   |   |
  // : rest of args :   |---size is java-arg-words
  // |              |   |
  // +--------------+ <--- O4_args (misaligned) and Lesp if prior is not C2I
  // |              |   |
  // :    unused    :   |---Space for max Java stack, plus stack alignment
  // |              |   |
  // +--------------+ <--- SP + 16*wordsize
  // |              |
  // :    window    :
  // |              |
  // +--------------+ <--- SP

  // WE REPACK THE STACK.  We use the common calling convention layout as
  // discovered by calling SharedRuntime::calling_convention.  We assume it
  // causes an arbitrary shuffle of memory, which may require some register
  // temps to do the shuffle.  We hope for (and optimize for) the case where
  // temps are not needed.  We may have to resize the stack slightly, in case
  // we need alignment padding (32-bit interpreter can pass longs & doubles
  // misaligned, but the compilers expect them aligned).
  //
  // |              |
  // :  java stack  :
  // |              |
  // +--------------+ <--- start of outgoing args
  // |  pad, align  |   |
  // +--------------+   |
  // | ints, floats |   |---Outgoing stack args, packed low.
  // +--------------+   |   First few args in registers.
  // :   doubles    :   |
  // |   longs      |   |
  // +--------------+ <--- SP' + 16*wordsize
  // |              |
  // :    window    :
  // |              |
  // +--------------+ <--- SP'

  // ON EXIT FROM THE CODE WE ARE MAKING, WE STILL HAVE AN INTERPRETED FRAME
  // WITH O7 HOLDING A VALID RETURN PC - ITS JUST THAT THE ARGS ARE NOW SETUP
  // FOR COMPILED CODE AND THE FRAME SLIGHTLY GROWN.

  // Cut-out for having no stack args.  Since up to 6 args are passed
  // in registers, we will commonly have no stack args.
  if (comp_args_on_stack > 0) {

    // Convert VMReg stack slots to words.
    int comp_words_on_stack = round_to(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
    // Round up to minimum stack alignment, in wordSize
    comp_words_on_stack = round_to(comp_words_on_stack, 2);
    // Now compute the distance from Lesp to SP.  This calculation does not
    // include the space for total_args_passed because Lesp has not yet popped
    // the arguments.
    __ sub(SP, (comp_words_on_stack)*wordSize, SP);
  }

  // Will jump to the compiled code just as if compiled code was doing it.
  // Pre-load the register-jump target early, to schedule it better.
  __ ld_ptr(G5_method, in_bytes(Method::from_compiled_offset()), G3);

  // Now generate the shuffle code.  Pick up all register args and move the
  // rest through G1_scratch.
  for (int i=0; i<total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      // Longs and doubles are passed in native word order, but misaligned
      // in the 32-bit build.
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // Pick up 0, 1 or 2 words from Lesp+offset.  Assume mis-aligned in the
    // 32-bit build and aligned in the 64-bit build.  Look for the obvious
    // ldx/lddf optimizations.

    // Load in argument order going down.
    const int ld_off = (total_args_passed-i)*Interpreter::stackElementSize;
    set_Rdisp(G1_scratch);

    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {        // Pretend stack targets are loaded into F8/F9
      r_1 = F8->as_VMReg();        // as part of the load/store shuffle
      if (r_2->is_valid()) r_2 = r_1->next();
    }
    if (r_1->is_Register()) {  // Register argument
      Register r = r_1->as_Register()->after_restore();
      if (!r_2->is_valid()) {
        __ ld(Gargs, arg_slot(ld_off), r);
      } else {
#ifdef _LP64
        // In V9, longs are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot.
        RegisterOrConstant slot = (sig_bt[i] == T_LONG) ?
              next_arg_slot(ld_off) : arg_slot(ld_off);
        __ ldx(Gargs, slot, r);
#else
        // Need to load a 64-bit value into G1/G4, but G1/G4 is being used in the
        // stack shuffle.  Load the first 2 longs into G1/G4 later.
#endif
      }
    } else {
      assert(r_1->is_FloatRegister(), "");
      if (!r_2->is_valid()) {
        __ ldf(FloatRegisterImpl::S, Gargs, arg_slot(ld_off), r_1->as_FloatRegister());
      } else {
#ifdef _LP64
        // In V9, doubles are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot.  This code also handles longs that
        // are passed on the stack, but need a stack-to-stack move through a
        // spare float register.
        RegisterOrConstant slot = (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) ?
              next_arg_slot(ld_off) : arg_slot(ld_off);
        __ ldf(FloatRegisterImpl::D, Gargs, slot, r_1->as_FloatRegister());
#else
        // Need to marshal 64-bit value from misaligned Lesp loads
        __ ldf(FloatRegisterImpl::S, Gargs, next_arg_slot(ld_off), r_1->as_FloatRegister());
        __ ldf(FloatRegisterImpl::S, Gargs, arg_slot(ld_off), r_2->as_FloatRegister());
#endif
      }
    }
    // Was the argument really intended to be on the stack, but was loaded
    // into F8/F9?
    if (regs[i].first()->is_stack()) {
      assert(r_1->as_FloatRegister() == F8, "fix this code");
      // Convert stack slot to an SP offset
      int st_off = reg2offset(regs[i].first()) + STACK_BIAS;
      // Store down the shuffled stack word.  Target address _is_ aligned.
      RegisterOrConstant slot = __ ensure_simm13_or_reg(st_off, Rdisp);
      if (!r_2->is_valid()) __ stf(FloatRegisterImpl::S, r_1->as_FloatRegister(), SP, slot);
      else                  __ stf(FloatRegisterImpl::D, r_1->as_FloatRegister(), SP, slot);
    }
  }
  bool made_space = false;
#ifndef _LP64
  // May need to pick up a few long args in G1/G4
  bool g4_crushed = false;
  bool g3_crushed = false;
  for (int i=0; i<total_args_passed; i++) {
    if (regs[i].first()->is_Register() && regs[i].second()->is_valid()) {
      // Load in argument order going down
      int ld_off = (total_args_passed-i)*Interpreter::stackElementSize;
      // Need to marshal 64-bit value from misaligned Lesp loads
      Register r = regs[i].first()->as_Register()->after_restore();
      if (r == G1 || r == G4) {
        assert(!g4_crushed, "ordering problem");
        if (r == G4){
          g4_crushed = true;
          __ lduw(Gargs, arg_slot(ld_off)     , G3_scratch); // Load lo bits
          __ ld  (Gargs, next_arg_slot(ld_off), r);          // Load hi bits
        } else {
          // better schedule this way
          __ ld  (Gargs, next_arg_slot(ld_off), r);          // Load hi bits
          __ lduw(Gargs, arg_slot(ld_off)     , G3_scratch); // Load lo bits
        }
        g3_crushed = true;
        __ sllx(r, 32, r);
        __ or3(G3_scratch, r, r);
      } else {
        assert(r->is_out(), "longs passed in two O registers");
        __ ld  (Gargs, arg_slot(ld_off)     , r->successor()); // Load lo bits
        __ ld  (Gargs, next_arg_slot(ld_off), r);              // Load hi bits
      }
    }
  }
#endif

  // Jump to the compiled code just as if compiled code was doing it.
  //
#ifndef _LP64
    if (g3_crushed) {
      // Rats, the load was wasted; at least it is in cache...
      __ ld_ptr(G5_method, Method::from_compiled_offset(), G3);
    }
#endif /* _LP64 */

    // 6243940 We might end up in handle_wrong_method if
    // the callee is deoptimized as we race thru here. If that
    // happens we don't want to take a safepoint because the
    // caller frame will look interpreted and arguments are now
    // "compiled" so it is much better to make this transition
    // invisible to the stack walking code. Unfortunately if
    // we try and find the callee by normal means a safepoint
    // is possible. So we stash the desired callee in the thread
    // and the VM will find it there should this case occur.
    Address callee_target_addr(G2_thread, JavaThread::callee_target_offset());
    __ st_ptr(G5_method, callee_target_addr);

    if (StressNonEntrant) {
      // Open a big window for deopt failure
      __ save_frame(0);
      __ mov(G0, L0);
      Label loop;
      __ bind(loop);
      __ sub(L0, 1, L0);
      __ br_null_short(L0, Assembler::pt, loop);

      __ restore();
    }


    __ jmpl(G3, 0, G0);
    __ delayed()->nop();
}

// ---------------------------------------------------------------
AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
                                                            int total_args_passed,
                                                            // VMReg max_arg,
                                                            int comp_args_on_stack, // VMRegStackSlots
                                                            const BasicType *sig_bt,
                                                            const VMRegPair *regs,
                                                            AdapterFingerPrint* fingerprint) {
  address i2c_entry = __ pc();

  AdapterGenerator agen(masm);

  agen.gen_i2c_adapter(total_args_passed, comp_args_on_stack, sig_bt, regs);


  // -------------------------------------------------------------------------
  // Generate a C2I adapter.  On entry we know G5 holds the Method*.  The
  // args start out packed in the compiled layout.  They need to be unpacked
  // into the interpreter layout.  This will almost always require some stack
  // space.  We grow the current (compiled) stack, then repack the args.  We
  // finally end in a jump to the generic interpreter entry point.  On exit
  // from the interpreter, the interpreter will restore our SP (lest the
  // compiled code, which relies solely on SP and not FP, get sick).

  address c2i_unverified_entry = __ pc();
  Label skip_fixup;
  {
#if !defined(_LP64) && defined(COMPILER2)
    Register R_temp   = L0;   // another scratch register
#else
    Register R_temp   = G1;   // another scratch register
#endif

    AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());

    __ verify_oop(O0);
    __ load_klass(O0, G3_scratch);

#if !defined(_LP64) && defined(COMPILER2)
    __ save(SP, -frame::register_save_words*wordSize, SP);
    __ ld_ptr(G5_method, CompiledICHolder::holder_klass_offset(), R_temp);
    __ cmp(G3_scratch, R_temp);
    __ restore();
#else
    __ ld_ptr(G5_method, CompiledICHolder::holder_klass_offset(), R_temp);
    __ cmp(G3_scratch, R_temp);
#endif

    Label ok, ok2;
    __ brx(Assembler::equal, false, Assembler::pt, ok);
    __ delayed()->ld_ptr(G5_method, CompiledICHolder::holder_method_offset(), G5_method);
    __ jump_to(ic_miss, G3_scratch);
    __ delayed()->nop();

    __ bind(ok);
    // Method might have been compiled since the call site was patched to
    // interpreted; if that is the case, treat it as a miss so we can get
    // the call site corrected.
    __ ld_ptr(G5_method, in_bytes(Method::code_offset()), G3_scratch);
    __ bind(ok2);
    __ br_null(G3_scratch, false, Assembler::pt, skip_fixup);
    __ delayed()->ld_ptr(G5_method, in_bytes(Method::interpreter_entry_offset()), G3_scratch);
    __ jump_to(ic_miss, G3_scratch);
    __ delayed()->nop();

  }

  address c2i_entry = __ pc();

  agen.gen_c2i_adapter(total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);

  __ flush();
  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);

}

// Helper function for native calling conventions
static VMReg int_stk_helper( int i ) {
  // Bias any stack based VMReg we get by ignoring the window area
  // but not the register parameter save area.
  //
  // This is strange for the following reasons. We'd normally expect
  // the calling convention to return a VMReg for a stack slot
  // completely ignoring any abi reserved area. C2 thinks of that
  // abi area as only out_preserve_stack_slots. This does not include
  // the area allocated by the C abi to store down integer arguments
  // because the java calling convention does not use it. So, since
  // C2 assumes that only out_preserve_stack_slots bias the optoregs
  // (which impacts VMRegs), the C calling convention must add in this
  // bias amount when actually referencing any stack location, to make
  // up for the fact that out_preserve_stack_slots is insufficient for
  // C calls. What a mess. I sure hope those 6
  // stack words were worth it on every java call!

  // Another way of cleaning this up would be for out_preserve_stack_slots
  // to take a parameter to say whether it was C or java calling conventions.
  // Then things might look a little better (but not much).

  int mem_parm_offset = i - SPARC_ARGS_IN_REGS_NUM;
  if( mem_parm_offset < 0 ) {
    return as_oRegister(i)->as_VMReg();
  } else {
    int actual_offset = (mem_parm_offset + frame::memory_parameter_word_sp_offset) * VMRegImpl::slots_per_word;
    // Now return a biased offset that will be correct when out_preserve_slots is added back in
    return VMRegImpl::stack2reg(actual_offset - SharedRuntime::out_preserve_stack_slots());
  }
}
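
// Illustrative sketch (comment only, assuming SPARC_ARGS_IN_REGS_NUM == 6 as
// used above): int_stk_helper(0) .. int_stk_helper(5) yield %o0 .. %o5, while
// int_stk_helper(6) yields the first memory-parameter word of the frame,
// biased down by out_preserve_stack_slots() so that adding the bias back in
// (as C2 does when it references the slot) lands on the real location.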


int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                         VMRegPair *regs,
                                         int total_args_passed) {

    // Return the number of VMReg stack_slots needed for the args.
    // This value does not include an abi space (like register window
    // save area).

    // The native convention is V8 if !LP64
    // The LP64 convention is the V9 convention which is slightly more sane.

    // We return the amount of VMReg stack slots we need to reserve for all
    // the arguments NOT counting out_preserve_stack_slots. Since we always
    // have space for storing at least 6 registers to memory we start with that.
    // See int_stk_helper for a further discussion.
    int max_stack_slots = (frame::varargs_offset * VMRegImpl::slots_per_word) - SharedRuntime::out_preserve_stack_slots();

#ifdef _LP64
    // V9 convention: All things "as-if" on double-wide stack slots.
    // Hoist any int/ptr/long's in the first 6 to int regs.
    // Hoist any flt/dbl's in the first 16 dbl regs.
    int j = 0;                  // Count of actual args, not HALVES
    for( int i=0; i<total_args_passed; i++, j++ ) {
      switch( sig_bt[i] ) {
      case T_BOOLEAN:
      case T_BYTE:
      case T_CHAR:
      case T_INT:
      case T_SHORT:
        regs[i].set1( int_stk_helper( j ) ); break;
      case T_LONG:
        assert( sig_bt[i+1] == T_VOID, "expecting half" );
      case T_ADDRESS: // raw pointers, like current thread, for VM calls
      case T_ARRAY:
      case T_OBJECT:
      case T_METADATA:
        regs[i].set2( int_stk_helper( j ) );
        break;
      case T_FLOAT:
        if ( j < 16 ) {
          // V9ism: floats go in ODD registers
          regs[i].set1(as_FloatRegister(1 + (j<<1))->as_VMReg());
        } else {
          // V9ism: floats go in ODD stack slot
          regs[i].set1(VMRegImpl::stack2reg(1 + (j<<1)));
        }
        break;
      case T_DOUBLE:
        assert( sig_bt[i+1] == T_VOID, "expecting half" );
        if ( j < 16 ) {
          // V9ism: doubles go in EVEN/ODD regs
          regs[i].set2(as_FloatRegister(j<<1)->as_VMReg());
        } else {
          // V9ism: doubles go in EVEN/ODD stack slots
          regs[i].set2(VMRegImpl::stack2reg(j<<1));
        }
        break;
      case T_VOID:  regs[i].set_bad(); j--; break; // Do not count HALVES
      default:
        ShouldNotReachHere();
      }
      if (regs[i].first()->is_stack()) {
        int off =  regs[i].first()->reg2stack();
        if (off > max_stack_slots) max_stack_slots = off;
      }
      if (regs[i].second()->is_stack()) {
        int off =  regs[i].second()->reg2stack();
        if (off > max_stack_slots) max_stack_slots = off;
      }
    }

#else // _LP64
    // V8 convention: first 6 things in O-regs, rest on stack.
    // Alignment is willy-nilly.
    for( int i=0; i<total_args_passed; i++ ) {
      switch( sig_bt[i] ) {
      case T_ADDRESS: // raw pointers, like current thread, for VM calls
      case T_ARRAY:
      case T_BOOLEAN:
      case T_BYTE:
      case T_CHAR:
      case T_FLOAT:
      case T_INT:
      case T_OBJECT:
      case T_METADATA:
      case T_SHORT:
        regs[i].set1( int_stk_helper( i ) );
        break;
      case T_DOUBLE:
      case T_LONG:
        assert( sig_bt[i+1] == T_VOID, "expecting half" );
        regs[i].set_pair( int_stk_helper( i+1 ), int_stk_helper( i ) );
        break;
      case T_VOID: regs[i].set_bad(); break;
      default:
        ShouldNotReachHere();
      }
      if (regs[i].first()->is_stack()) {
        int off =  regs[i].first()->reg2stack();
        if (off > max_stack_slots) max_stack_slots = off;
      }
      if (regs[i].second()->is_stack()) {
        int off =  regs[i].second()->reg2stack();
        if (off > max_stack_slots) max_stack_slots = off;
      }
    }
#endif // _LP64

  return round_to(max_stack_slots + 1, 2);

}
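
// Worked example (comment only; a sketch of the LP64/V9 path above): for an
// argument list (jint, jlong, jdouble) the assignments are
//   jint    -> %o0               (set1, int_stk_helper(0))
//   jlong   -> %o1               (set2, int_stk_helper(1))
//   jdouble -> %d4 (%f4:%f5)     (set2, double register for arg index 2)
// No argument lands on the stack, so the result is just the rounded default
// reservation for the six memory-parameter words.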


// ---------------------------------------------------------------------------
void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  switch (ret_type) {
  case T_FLOAT:
    __ stf(FloatRegisterImpl::S, F0, SP, frame_slots*VMRegImpl::stack_slot_size - 4+STACK_BIAS);
    break;
  case T_DOUBLE:
    __ stf(FloatRegisterImpl::D, F0, SP, frame_slots*VMRegImpl::stack_slot_size - 8+STACK_BIAS);
    break;
  }
}

void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  switch (ret_type) {
  case T_FLOAT:
    __ ldf(FloatRegisterImpl::S, SP, frame_slots*VMRegImpl::stack_slot_size - 4+STACK_BIAS, F0);
    break;
  case T_DOUBLE:
    __ ldf(FloatRegisterImpl::D, SP, frame_slots*VMRegImpl::stack_slot_size - 8+STACK_BIAS, F0);
    break;
  }
}

// Check and forward any pending exception.  Thread is stored in
// L7_thread_cache and possibly NOT in G2_thread.  Since this is a native call, there
// is no exception handler.  We merely pop this frame off and throw the
// exception in the caller's frame.
static void check_forward_pending_exception(MacroAssembler *masm, Register Rex_oop) {
  Label L;
  __ br_null(Rex_oop, false, Assembler::pt, L);
  __ delayed()->mov(L7_thread_cache, G2_thread); // restore in case we have exception
  // Since this is a native call, we *know* the proper exception handler
  // without calling into the VM: it's the empty function.  Just pop this
  // frame and then jump to forward_exception_entry; O7 will contain the
  // native caller's return PC.
  AddressLiteral exception_entry(StubRoutines::forward_exception_entry());
  __ jump_to(exception_entry, G3_scratch);
  __ delayed()->restore();      // Pop this frame off.
  __ bind(L);
}

// A simple move of integer like type
static void simple_move32(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
      __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
    } else {
      // stack to reg
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
  } else {
    __ mov(src.first()->as_Register(), dst.first()->as_Register());
  }
}

// On 64 bit we will store integer like items to the stack as 64-bit
// items (sparc abi) even though java would only store 32 bits for a
// parameter. On 32 bit it will simply be 32 bits. So this routine
// will do 32->32 on 32 bit and 32->64 on 64 bit.
static void move32_64(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
      __ st_ptr(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
    } else {
      // stack to reg
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ st_ptr(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
  } else {
    __ mov(src.first()->as_Register(), dst.first()->as_Register());
  }
}


static void move_ptr(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, L5);
      __ st_ptr(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
    } else {
      // stack to reg
      __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ st_ptr(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
  } else {
    __ mov(src.first()->as_Register(), dst.first()->as_Register());
  }
}


// An oop arg. Must pass a handle not the oop itself
static void object_move(MacroAssembler* masm,
                        OopMap* map,
                        int oop_handle_offset,
                        int framesize_in_slots,
                        VMRegPair src,
                        VMRegPair dst,
                        bool is_receiver,
                        int* receiver_offset) {

  // must pass a handle. First figure out the location we use as a handle

  if (src.first()->is_stack()) {
    // Oop is already on the stack
    Register rHandle = dst.first()->is_stack() ? L5 : dst.first()->as_Register();
    __ add(FP, reg2offset(src.first()) + STACK_BIAS, rHandle);
    __ ld_ptr(rHandle, 0, L4);
#ifdef _LP64
    __ movr( Assembler::rc_z, L4, G0, rHandle );
#else
    __ tst( L4 );
    __ movcc( Assembler::zero, false, Assembler::icc, G0, rHandle );
#endif
    if (dst.first()->is_stack()) {
      __ st_ptr(rHandle, SP, reg2offset(dst.first()) + STACK_BIAS);
    }
    int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
    if (is_receiver) {
      *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
    }
    map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
  } else {
    // Oop is in an input register; we must flush it to the stack
    const Register rOop = src.first()->as_Register();
    const Register rHandle = L5;
    int oop_slot = rOop->input_number() * VMRegImpl::slots_per_word + oop_handle_offset;
    int offset = oop_slot*VMRegImpl::stack_slot_size;
    Label skip;
    __ st_ptr(rOop, SP, offset + STACK_BIAS);
    if (is_receiver) {
      *receiver_offset = oop_slot * VMRegImpl::stack_slot_size;
    }
    map->set_oop(VMRegImpl::stack2reg(oop_slot));
    __ add(SP, offset + STACK_BIAS, rHandle);
#ifdef _LP64
    __ movr( Assembler::rc_z, rOop, G0, rHandle );
#else
    __ tst( rOop );
    __ movcc( Assembler::zero, false, Assembler::icc, G0, rHandle );
#endif

    if (dst.first()->is_stack()) {
      __ st_ptr(rHandle, SP, reg2offset(dst.first()) + STACK_BIAS);
    } else {
      __ mov(rHandle, dst.first()->as_Register());
    }
  }
}
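
// Note (added commentary): the conditional moves above (movr on V9, movcc on
// V8) implement the JNI rule that a NULL oop is passed as a NULL pointer
// rather than as a handle to a NULL slot -- when the oop register is zero the
// handle register is zeroed as well.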

// A float arg may have to do float reg to int reg conversion
static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");

  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack the easiest of the bunch
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
      __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
    } else {
      // stack to reg
      if (dst.first()->is_Register()) {
        __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
      } else {
        __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
      }
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    if (src.first()->is_Register()) {
      __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
    } else {
      __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
    }
  } else {
    // reg to reg
    if (src.first()->is_Register()) {
      if (dst.first()->is_Register()) {
        // gpr -> gpr
        __ mov(src.first()->as_Register(), dst.first()->as_Register());
      } else {
        // gpr -> fpr
        __ st(src.first()->as_Register(), FP, -4 + STACK_BIAS);
        __ ldf(FloatRegisterImpl::S, FP, -4 + STACK_BIAS, dst.first()->as_FloatRegister());
      }
    } else if (dst.first()->is_Register()) {
      // fpr -> gpr
      __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), FP, -4 + STACK_BIAS);
      __ ld(FP, -4 + STACK_BIAS, dst.first()->as_Register());
    } else {
      // fpr -> fpr
      // In theory these overlap but the ordering is such that this is likely a nop
      if ( src.first() != dst.first()) {
        __ fmov(FloatRegisterImpl::S, src.first()->as_FloatRegister(), dst.first()->as_FloatRegister());
      }
    }
  }
}

static void split_long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  VMRegPair src_lo(src.first());
  VMRegPair src_hi(src.second());
  VMRegPair dst_lo(dst.first());
  VMRegPair dst_hi(dst.second());
  simple_move32(masm, src_lo, dst_lo);
  simple_move32(masm, src_hi, dst_hi);
}

// A long move
static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {

  // Do the simple ones here else do two int moves
  if (src.is_single_phys_reg() ) {
    if (dst.is_single_phys_reg()) {
      __ mov(src.first()->as_Register(), dst.first()->as_Register());
    } else {
      // split src into two separate registers
      // Remember hi means hi address or lsw on sparc
      // Move msw to lsw
      if (dst.second()->is_reg()) {
        // MSW -> MSW
        __ srax(src.first()->as_Register(), 32, dst.first()->as_Register());
        // Now LSW -> LSW
        // this will only move lo -> lo and ignore hi
        VMRegPair split(dst.second());
        simple_move32(masm, src, split);
      } else {
        VMRegPair split(src.first(), L4->as_VMReg());
        // MSW -> MSW (lo ie. first word)
        __ srax(src.first()->as_Register(), 32, L4);
        split_long_move(masm, split, dst);
      }
    }
  } else if (dst.is_single_phys_reg()) {
    if (src.is_adjacent_aligned_on_stack(2)) {
      __ ldx(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
    } else {
      // dst is a single reg.
      // Remember lo is low address not msb for stack slots
      // and lo is the "real" register for registers
      // src is split across two locations (regs and/or stack)

      VMRegPair split;

      if (src.first()->is_reg()) {
        // src.lo (msw) is a reg, src.hi is stk/reg
        // we will move: src.hi (LSW) -> dst.lo, src.lo (MSW) -> src.lo [the MSW is in the LSW of the reg]
        split.set_pair(dst.first(), src.first());
      } else {
        // msw is stack move to L5
        // lsw is stack move to dst.lo (real reg)
        // we will move: src.hi (LSW) -> dst.lo, src.lo (MSW) -> L5
        split.set_pair(dst.first(), L5->as_VMReg());
      }

      // src.lo -> src.lo/L5, src.hi -> dst.lo (the real reg)
      // msw   -> src.lo/L5,  lsw -> dst.lo
      split_long_move(masm, src, split);

      // So dst now has the low order correct; position the
      // msw half
      __ sllx(split.first()->as_Register(), 32, L5);

      const Register d = dst.first()->as_Register();
      __ or3(L5, d, d);
    }
  } else {
    // For LP64 we can probably do better.
    split_long_move(masm, src, dst);
  }
}

// A double move
static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {

  // The painful thing here is that like long_move a VMRegPair might be
  // 1: a single physical register
  // 2: two physical registers (v8)
  // 3: a physical reg [lo] and a stack slot [hi] (v8)
  // 4: two stack slots

  // Since src is always a java calling convention we know that the src pair
  // is always either all registers or all stack (and aligned?)

  // in a register [lo] and a stack slot [hi]
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack the easiest of the bunch
      // ought to be a way to do this where if alignment is ok we use ldd/std when possible
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
      __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
      __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
      __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
    } else {
      // stack to reg
      if (dst.second()->is_stack()) {
        // stack -> reg, stack -> stack
        __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
        if (dst.first()->is_Register()) {
          __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
        } else {
          __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
        }
        // This was missing. (very rare case)
        __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
      } else {
        // stack -> reg
        // Eventually optimize for alignment QQQ
        if (dst.first()->is_Register()) {
          __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
          __ ld(FP, reg2offset(src.second()) + STACK_BIAS, dst.second()->as_Register());
        } else {
          __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
          __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.second()) + STACK_BIAS, dst.second()->as_FloatRegister());
        }
      }
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    if (src.first()->is_Register()) {
      // Eventually optimize for alignment QQQ
      __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
      if (src.second()->is_stack()) {
        __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
        __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
      } else {
        __ st(src.second()->as_Register(), SP, reg2offset(dst.second()) + STACK_BIAS);
      }
    } else {
      // fpr to stack
      if (src.second()->is_stack()) {
        ShouldNotReachHere();
      } else {
        // Is the stack aligned?
        if (reg2offset(dst.first()) & 0x7) {
          // No do as pairs
          __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
          __ stf(FloatRegisterImpl::S, src.second()->as_FloatRegister(), SP, reg2offset(dst.second()) + STACK_BIAS);
        } else {
          __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
        }
      }
    }
  } else {
    // reg to reg
    if (src.first()->is_Register()) {
      if (dst.first()->is_Register()) {
        // gpr -> gpr
        __ mov(src.first()->as_Register(), dst.first()->as_Register());
        __ mov(src.second()->as_Register(), dst.second()->as_Register());
      } else {
        // gpr -> fpr
        // ought to be able to do a single store
        __ st(src.first()->as_Register(), FP, -8 + STACK_BIAS);
        __ st(src.second()->as_Register(), FP, -4 + STACK_BIAS);
        // ought to be able to do a single load
        __ ldf(FloatRegisterImpl::S, FP, -8 + STACK_BIAS, dst.first()->as_FloatRegister());
        __ ldf(FloatRegisterImpl::S, FP, -4 + STACK_BIAS, dst.second()->as_FloatRegister());
      }
    } else if (dst.first()->is_Register()) {
      // fpr -> gpr
      // ought to be able to do a single store
      __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(), FP, -8 + STACK_BIAS);
      // ought to be able to do a single load
      // REMEMBER first() is low address not LSB
      __ ld(FP, -8 + STACK_BIAS, dst.first()->as_Register());
      if (dst.second()->is_Register()) {
        __ ld(FP, -4 + STACK_BIAS, dst.second()->as_Register());
      } else {
        __ ld(FP, -4 + STACK_BIAS, L4);
        __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
      }
    } else {
      // fpr -> fpr
      // In theory these overlap but the ordering is such that this is likely a nop
      if ( src.first() != dst.first()) {
        __ fmov(FloatRegisterImpl::D, src.first()->as_FloatRegister(), dst.first()->as_FloatRegister());
      }
    }
  }
}

// Creates an inner frame if one hasn't already been created, and
// saves a copy of the thread in L7_thread_cache
static void create_inner_frame(MacroAssembler* masm, bool* already_created) {
  if (!*already_created) {
    __ save_frame(0);
    // Save thread in L7 (INNER FRAME); it crosses a bunch of VM calls below
    // Don't use save_thread because it smashes G2 and we merely want to save a
    // copy
    __ mov(G2_thread, L7_thread_cache);
    *already_created = true;
  }
}


static void save_or_restore_arguments(MacroAssembler* masm,
                                      const int stack_slots,
                                      const int total_in_args,
                                      const int arg_save_area,
                                      OopMap* map,
                                      VMRegPair* in_regs,
                                      BasicType* in_sig_bt) {
  // if map is non-NULL then the code should store the values,
  // otherwise it should load them.
  if (map != NULL) {
    // Fill in the map
    for (int i = 0; i < total_in_args; i++) {
      if (in_sig_bt[i] == T_ARRAY) {
        if (in_regs[i].first()->is_stack()) {
          int offset_in_older_frame = in_regs[i].first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
          map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + stack_slots));
        } else if (in_regs[i].first()->is_Register()) {
          map->set_oop(in_regs[i].first());
        } else {
          ShouldNotReachHere();
        }
      }
    }
  }

  // Save or restore double word values
  int handle_index = 0;
  for (int i = 0; i < total_in_args; i++) {
    int slot = handle_index + arg_save_area;
    int offset = slot * VMRegImpl::stack_slot_size;
    if (in_sig_bt[i] == T_LONG && in_regs[i].first()->is_Register()) {
      const Register reg = in_regs[i].first()->as_Register();
      if (reg->is_global()) {
        handle_index += 2;
        assert(handle_index <= stack_slots, "overflow");
        if (map != NULL) {
          __ stx(reg, SP, offset + STACK_BIAS);
        } else {
          __ ldx(SP, offset + STACK_BIAS, reg);
        }
      }
    } else if (in_sig_bt[i] == T_DOUBLE && in_regs[i].first()->is_FloatRegister()) {
      handle_index += 2;
      assert(handle_index <= stack_slots, "overflow");
      if (map != NULL) {
        __ stf(FloatRegisterImpl::D, in_regs[i].first()->as_FloatRegister(), SP, offset + STACK_BIAS);
      } else {
        __ ldf(FloatRegisterImpl::D, SP, offset + STACK_BIAS, in_regs[i].first()->as_FloatRegister());
      }
    }
  }
  // Save floats
  for (int i = 0; i < total_in_args; i++) {
    int slot = handle_index + arg_save_area;
    int offset = slot * VMRegImpl::stack_slot_size;
    if (in_sig_bt[i] == T_FLOAT && in_regs[i].first()->is_FloatRegister()) {
      handle_index++;
      assert(handle_index <= stack_slots, "overflow");
      if (map != NULL) {
        __ stf(FloatRegisterImpl::S, in_regs[i].first()->as_FloatRegister(), SP, offset + STACK_BIAS);
      } else {
        __ ldf(FloatRegisterImpl::S, SP, offset + STACK_BIAS, in_regs[i].first()->as_FloatRegister());
      }
    }
  }

}


// Check GC_locker::needs_gc and enter the runtime if it's true.  This
// keeps a new JNI critical region from starting until a GC has been
// forced.  Save down any oops in registers and describe them in an
// OopMap.
static void check_needs_gc_for_critical_native(MacroAssembler* masm,
                                               const int stack_slots,
                                               const int total_in_args,
                                               const int arg_save_area,
                                               OopMapSet* oop_maps,
                                               VMRegPair* in_regs,
                                               BasicType* in_sig_bt) {
  __ block_comment("check GC_locker::needs_gc");
  Label cont;
  AddressLiteral sync_state(GC_locker::needs_gc_address());
  __ load_bool_contents(sync_state, G3_scratch);
  __ cmp_zero_and_br(Assembler::equal, G3_scratch, cont);
  __ delayed()->nop();

  // Save down any values that are live in registers and call into the
  // runtime to halt for a GC
  OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
  save_or_restore_arguments(masm, stack_slots, total_in_args,
                            arg_save_area, map, in_regs, in_sig_bt);

  __ mov(G2_thread, L7_thread_cache);

  __ set_last_Java_frame(SP, noreg);

  __ block_comment("block_for_jni_critical");
  __ call(CAST_FROM_FN_PTR(address, SharedRuntime::block_for_jni_critical), relocInfo::runtime_call_type);
  __ delayed()->mov(L7_thread_cache, O0);
  oop_maps->add_gc_map( __ offset(), map);

  __ restore_thread(L7_thread_cache); // restore G2_thread
  __ reset_last_Java_frame();

  // Reload all the register arguments
  save_or_restore_arguments(masm, stack_slots, total_in_args,
                            arg_save_area, NULL, in_regs, in_sig_bt);

  __ bind(cont);
#ifdef ASSERT
  if (StressCriticalJNINatives) {
    // Stress register saving
    OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
    save_or_restore_arguments(masm, stack_slots, total_in_args,
                              arg_save_area, map, in_regs, in_sig_bt);
    // Destroy argument registers
    for (int i = 0; i < total_in_args; i++) {
      if (in_regs[i].first()->is_Register()) {
        const Register reg = in_regs[i].first()->as_Register();
        if (reg->is_global()) {
          __ mov(G0, reg);
        }
      } else if (in_regs[i].first()->is_FloatRegister()) {
        __ fneg(FloatRegisterImpl::D, in_regs[i].first()->as_FloatRegister(), in_regs[i].first()->as_FloatRegister());
      }
    }

    save_or_restore_arguments(masm, stack_slots, total_in_args,
                              arg_save_area, NULL, in_regs, in_sig_bt);
  }
#endif
}

// Unpack an array argument into a pointer to the body and the length
// if the array is non-null, otherwise pass 0 for both.
static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type, VMRegPair body_arg, VMRegPair length_arg) {
  // Pass the length, ptr pair
  Label is_null, done;
  if (reg.first()->is_stack()) {
    VMRegPair tmp  = reg64_to_VMRegPair(L2);
    // Load the arg up from the stack
    move_ptr(masm, reg, tmp);
    reg = tmp;
  }
  __ cmp(reg.first()->as_Register(), G0);
  __ brx(Assembler::equal, false, Assembler::pt, is_null);
  __ delayed()->add(reg.first()->as_Register(), arrayOopDesc::base_offset_in_bytes(in_elem_type), L4);
  move_ptr(masm, reg64_to_VMRegPair(L4), body_arg);
  __ ld(reg.first()->as_Register(), arrayOopDesc::length_offset_in_bytes(), L4);
  move32_64(masm, reg64_to_VMRegPair(L4), length_arg);
  __ ba_short(done);
  __ bind(is_null);
  // Pass zeros
  move_ptr(masm, reg64_to_VMRegPair(G0), body_arg);
  move32_64(masm, reg64_to_VMRegPair(G0), length_arg);
  __ bind(done);
}
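
// C-side view of the expansion above (hypothetical signature, for
// illustration only): a critical native taking a Java int[] parameter
// receives the pair produced here, roughly
//   ... f(..., jint length, jint* body, ...)
// with both values zero when the Java array reference is NULL.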

static void verify_oop_args(MacroAssembler* masm,
                            int total_args_passed,
                            const BasicType* sig_bt,
                            const VMRegPair* regs) {
  Register temp_reg = G5_method;  // not part of any compiled calling seq
  if (VerifyOops) {
    for (int i = 0; i < total_args_passed; i++) {
      if (sig_bt[i] == T_OBJECT ||
          sig_bt[i] == T_ARRAY) {
        VMReg r = regs[i].first();
        assert(r->is_valid(), "bad oop arg");
        if (r->is_stack()) {
          RegisterOrConstant ld_off = reg2offset(r) + STACK_BIAS;
          ld_off = __ ensure_simm13_or_reg(ld_off, temp_reg);
          __ ld_ptr(SP, ld_off, temp_reg);
          __ verify_oop(temp_reg);
        } else {
          __ verify_oop(r->as_Register());
        }
      }
    }
  }
}

static void gen_special_dispatch(MacroAssembler* masm,
                                 int total_args_passed,
                                 int comp_args_on_stack,
                                 vmIntrinsics::ID special_dispatch,
                                 const BasicType* sig_bt,
                                 const VMRegPair* regs) {
  verify_oop_args(masm, total_args_passed, sig_bt, regs);

  // Now write the args into the outgoing interpreter space
  bool     has_receiver   = false;
  Register receiver_reg   = noreg;
  int      member_arg_pos = -1;
  Register member_reg     = noreg;
  int      ref_kind       = MethodHandles::signature_polymorphic_intrinsic_ref_kind(special_dispatch);
  if (ref_kind != 0) {
    member_arg_pos = total_args_passed - 1;  // trailing MemberName argument
    member_reg = G5_method;  // known to be free at this point
    has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
  } else if (special_dispatch == vmIntrinsics::_invokeBasic) {
    has_receiver = true;
  } else {
    fatal(err_msg("special_dispatch=%d", special_dispatch));
  }

  if (member_reg != noreg) {
    // Load the member_arg into register, if necessary.
    assert(member_arg_pos >= 0 && member_arg_pos < total_args_passed, "oob");
    assert(sig_bt[member_arg_pos] == T_OBJECT, "dispatch argument must be an object");
    VMReg r = regs[member_arg_pos].first();
    assert(r->is_valid(), "bad member arg");
    if (r->is_stack()) {
      RegisterOrConstant ld_off = reg2offset(r) + STACK_BIAS;
      ld_off = __ ensure_simm13_or_reg(ld_off, member_reg);
      __ ld_ptr(SP, ld_off, member_reg);
    } else {
      // no data motion is needed
      member_reg = r->as_Register();
    }
  }

  if (has_receiver) {
    // Make sure the receiver is loaded into a register.
    assert(total_args_passed > 0, "oob");
    assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
    VMReg r = regs[0].first();
    assert(r->is_valid(), "bad receiver arg");
    if (r->is_stack()) {
      // Porting note:  This assumes that compiled calling conventions always
      // pass the receiver oop in a register.  If this is not true on some
      // platform, pick a temp and load the receiver from stack.
      assert(false, "receiver always in a register");
      receiver_reg = G3_scratch;  // known to be free at this point
      RegisterOrConstant ld_off = reg2offset(r) + STACK_BIAS;
      ld_off = __ ensure_simm13_or_reg(ld_off, member_reg);
      __ ld_ptr(SP, ld_off, receiver_reg);
    } else {
      // no data motion is needed
      receiver_reg = r->as_Register();
    }
  }

  // Figure out which address we are really jumping to:
  MethodHandles::generate_method_handle_dispatch(masm, special_dispatch,
                                                 receiver_reg, member_reg, /*for_compiler_entry:*/ true);
}

// ---------------------------------------------------------------------------
// Generate a native wrapper for a given method.  The method takes arguments
// in the Java compiled code convention, marshals them to the native
// convention (handlizes oops, etc), transitions to native, makes the call,
// returns to java state (possibly blocking), unhandlizes any result and
// returns.
//
// Critical native functions are a shorthand for the use of
// GetPrimitiveArrayCritical and disallow the use of any other JNI
// functions.  The wrapper is expected to unpack the arguments before
// passing them to the callee and perform checks before and after the
// native call to ensure that the GC_locker
// lock_critical/unlock_critical semantics are followed.  Some other
// parts of JNI setup are skipped, like the tear down of the JNI handle
// block and the check for pending exceptions, since it's impossible
// for them to be thrown.
//
// They are roughly structured like this:
//    if (GC_locker::needs_gc())
//      SharedRuntime::block_for_jni_critical();
//    transition to thread_in_native
//    unpack array arguments and call native entry point
//    check for safepoint in progress
//    check if any thread suspend flags are set
//      call into the JVM and possibly unlock the JNI critical
//      if a GC was suppressed while in the critical native.
//    transition back to thread_in_Java
//    return to caller
//
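// A hedged sketch of what such a native looks like on the C side (the
// JavaCritical_ lookup itself happens outside this file; names below are
// hypothetical):
//
//   JNIEXPORT jint JNICALL
//   JavaCritical_com_example_Sums_sum(jint length, jint* data);
//
// There is no JNIEnv* and no jclass/jobject receiver, and each array
// argument is expanded into a (length, pointer) pair as described above.
//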
nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
                                                methodHandle method,
                                                int compile_id,
                                                int total_in_args,
                                                int comp_args_on_stack, // in VMRegStackSlots
                                                BasicType* in_sig_bt,
                                                VMRegPair* in_regs,
                                                BasicType ret_type) {
  if (method->is_method_handle_intrinsic()) {
    vmIntrinsics::ID iid = method->intrinsic_id();
    intptr_t start = (intptr_t)__ pc();
    int vep_offset = ((intptr_t)__ pc()) - start;
    gen_special_dispatch(masm,
                         total_in_args,
                         comp_args_on_stack,
                         method->intrinsic_id(),
                         in_sig_bt,
                         in_regs);
    int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period
    __ flush();
    int stack_slots = SharedRuntime::out_preserve_stack_slots();  // no out slots at all, actually
    return nmethod::new_native_nmethod(method,
                                       compile_id,
                                       masm->code(),
                                       vep_offset,
                                       frame_complete,
                                       stack_slots / VMRegImpl::slots_per_word,
                                       in_ByteSize(-1),
                                       in_ByteSize(-1),
                                       (OopMapSet*)NULL);
  }
  bool is_critical_native = true;
  address native_func = method->critical_native_function();
  if (native_func == NULL) {
    native_func = method->native_function();
    is_critical_native = false;
  }
  assert(native_func != NULL, "must have function");

  // Native nmethod wrappers never take possession of the oop arguments.
  // So the caller will gc the arguments. The only thing we need an
  // oopMap for is if the call is static
  //
  // An OopMap for lock (and class if static), and one for the VM call itself
  OopMapSet *oop_maps = new OopMapSet();
  intptr_t start = (intptr_t)__ pc();

  // First thing make an ic check to see if we should even be here
  {
    Label L;
    const Register temp_reg = G3_scratch;
    AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());
    __ verify_oop(O0);
    __ load_klass(O0, temp_reg);
    __ cmp_and_brx_short(temp_reg, G5_inline_cache_reg, Assembler::equal, Assembler::pt, L);

    __ jump_to(ic_miss, temp_reg);
    __ delayed()->nop();
    __ align(CodeEntryAlignment);
    __ bind(L);
  }

  int vep_offset = ((intptr_t)__ pc()) - start;

#ifdef COMPILER1
  if (InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hashCode) {
    // Object.hashCode can pull the hashCode from the header word
    // instead of doing a full VM transition once it's been computed.
    // Since hashCode is usually polymorphic at call sites we can't do
    // this optimization at the call site without a lot of work.
    Label slowCase;
    Register receiver             = O0;
    Register result               = O0;
    Register header               = G3_scratch;
    Register hash                 = G3_scratch; // overwrite header value with hash value
    Register mask                 = G1;         // to get hash field from header

    // Read the header and build a mask to get its hash field.  Give up if the object is not unlocked.
    // We depend on hash_mask being at most 32 bits and avoid the use of
    // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
    // vm: see markOop.hpp.
    __ ld_ptr(receiver, oopDesc::mark_offset_in_bytes(), header);
    __ sethi(markOopDesc::hash_mask, mask);
    __ btst(markOopDesc::unlocked_value, header);
    __ br(Assembler::zero, false, Assembler::pn, slowCase);
    if (UseBiasedLocking) {
      // Check if biased and fall through to runtime if so
      __ delayed()->nop();
      __ btst(markOopDesc::biased_lock_bit_in_place, header);
      __ br(Assembler::notZero, false, Assembler::pn, slowCase);
    }
    __ delayed()->or3(mask, markOopDesc::hash_mask & 0x3ff, mask);

    // Check for a valid (non-zero) hash code and get its value.
#ifdef _LP64
    __ srlx(header, markOopDesc::hash_shift, hash);
#else
    __ srl(header, markOopDesc::hash_shift, hash);
#endif
    __ andcc(hash, mask, hash);
    __ br(Assembler::equal, false, Assembler::pn, slowCase);
    __ delayed()->nop();

    // leaf return.
    __ retl();
    __ delayed()->mov(hash, result);
    __ bind(slowCase);
  }
#endif // COMPILER1


  // We have received a description of where all the java args are located
  // on entry to the wrapper. We need to convert these args to where
  // the jni function will expect them. To figure out where they go
  // we convert the java signature to a C signature by inserting
  // the hidden arguments as arg[0] and possibly arg[1] (static method)

  int total_c_args = total_in_args;
  int total_save_slots = 6 * VMRegImpl::slots_per_word;
  if (!is_critical_native) {
    total_c_args += 1;
    if (method->is_static()) {
      total_c_args++;
    }
  } else {
    for (int i = 0; i < total_in_args; i++) {
      if (in_sig_bt[i] == T_ARRAY) {
        // These have to be saved and restored across the safepoint
        total_c_args++;
      }
    }
  }

  BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
  VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
  BasicType* in_elem_bt = NULL;

  int argc = 0;
  if (!is_critical_native) {
    out_sig_bt[argc++] = T_ADDRESS;
    if (method->is_static()) {
      out_sig_bt[argc++] = T_OBJECT;
    }

    for (int i = 0; i < total_in_args ; i++ ) {
      out_sig_bt[argc++] = in_sig_bt[i];
    }
  } else {
    Thread* THREAD = Thread::current();
    in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_in_args);
    SignatureStream ss(method->signature());
    for (int i = 0; i < total_in_args ; i++ ) {
      if (in_sig_bt[i] == T_ARRAY) {
        // Arrays are passed as int, elem* pair
        out_sig_bt[argc++] = T_INT;
        out_sig_bt[argc++] = T_ADDRESS;
        Symbol* atype = ss.as_symbol(CHECK_NULL);
        const char* at = atype->as_C_string();
        if (strlen(at) == 2) {
          assert(at[0] == '[', "must be");
          switch (at[1]) {
            case 'B': in_elem_bt[i]  = T_BYTE; break;
            case 'C': in_elem_bt[i]  = T_CHAR; break;
            case 'D': in_elem_bt[i]  = T_DOUBLE; break;
            case 'F': in_elem_bt[i]  = T_FLOAT; break;
            case 'I': in_elem_bt[i]  = T_INT; break;
            case 'J': in_elem_bt[i]  = T_LONG; break;
            case 'S': in_elem_bt[i]  = T_SHORT; break;
            case 'Z': in_elem_bt[i]  = T_BOOLEAN; break;
            default: ShouldNotReachHere();
          }
        }
      } else {
        out_sig_bt[argc++] = in_sig_bt[i];
        in_elem_bt[i] = T_VOID;
      }
      if (in_sig_bt[i] != T_VOID) {
        assert(in_sig_bt[i] == ss.type(), "must match");
        ss.next();
      }
    }
  }

  // Now figure out where the args must be stored and how much stack space
  // they require (neglecting out_preserve_stack_slots but space for storing
  // the 1st six register arguments). It's weird; see int_stk_helper.
  //
  int out_arg_slots;
  out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);

  if (is_critical_native) {
    // Critical natives may have to call out so they need a save area
    // for register arguments.
    int double_slots = 0;
    int single_slots = 0;
    for ( int i = 0; i < total_in_args; i++) {
      if (in_regs[i].first()->is_Register()) {
        const Register reg = in_regs[i].first()->as_Register();
        switch (in_sig_bt[i]) {
          case T_ARRAY:
          case T_BOOLEAN:
          case T_BYTE:
          case T_SHORT:
          case T_CHAR:
          case T_INT:  assert(reg->is_in(), "don't need to save these"); break;
          case T_LONG: if (reg->is_global()) double_slots++; break;
          default:  ShouldNotReachHere();
        }
      } else if (in_regs[i].first()->is_FloatRegister()) {
        switch (in_sig_bt[i]) {
          case T_FLOAT:  single_slots++; break;
          case T_DOUBLE: double_slots++; break;
          default:  ShouldNotReachHere();
        }
      }
    }
    total_save_slots = double_slots * 2 + single_slots;
  }
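
  // Example of the accounting above (comment only): a critical native whose
  // register arguments are a long in a global register, a double and a float
  // gets double_slots == 2 and single_slots == 1, i.e. total_save_slots == 5
  // 32-bit stack slots of save area.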

  // Compute framesize for the wrapper.  We need to handlize all oops in
  // registers. We must create space for them here that is disjoint from
  // the windowed save area because we have no control over when we might
  // flush the window again and overwrite values that gc has since modified.
  // (The live window race)
  //
  // We always just allocate 6 words for storing down these objects. This allows
  // us to simply record the base and use the Ireg number to decide which
  // slot to use. (Note that the reg number is the inbound number not the
  // outbound number).
  // We must shuffle args to match the native convention, and include var-args space.

  // Calculate the total number of stack slots we will need.

  // First count the abi requirement plus all of the outgoing args
  int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;

  // Now the space for the inbound oop handle area

  int oop_handle_offset = round_to(stack_slots, 2);
  stack_slots += total_save_slots;

  // Now any space we need for handlizing a klass if static method

  int klass_slot_offset = 0;
  int klass_offset = -1;
  int lock_slot_offset = 0;
  bool is_static = false;

  if (method->is_static()) {
    klass_slot_offset = stack_slots;
    stack_slots += VMRegImpl::slots_per_word;
    klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
    is_static = true;
  }

  // Plus a lock if needed

  if (method->is_synchronized()) {
    lock_slot_offset = stack_slots;
    stack_slots += VMRegImpl::slots_per_word;
  }

  // Now a place to save return value or as a temporary for any gpr -> fpr moves
  stack_slots += 2;

  // Ok The space we have allocated will look like:
  //
  //
  // FP-> |                     |
  //      |---------------------|
  //      | 2 slots for moves   |
  //      |---------------------|
  //      | lock box (if sync)  |
  //      |---------------------| <- lock_slot_offset
  //      | klass (if static)   |
  //      |---------------------| <- klass_slot_offset
  //      | oopHandle area      |
  //      |---------------------| <- oop_handle_offset
  //      | outbound memory     |
  //      | based arguments     |
  //      |                     |
  //      |---------------------|
  //      | vararg area         |
  //      |---------------------|
  //      |                     |
  // SP-> | out_preserved_slots |
  //
  //


  // Now compute actual number of stack words we need rounding to make
  // stack properly aligned.
  stack_slots = round_to(stack_slots, 2 * VMRegImpl::slots_per_word);

  int stack_size = stack_slots * VMRegImpl::stack_slot_size;
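
  // Arithmetic sketch (comment only): VMReg stack slots are 4 bytes wide, and
  // rounding stack_slots to a multiple of 2 * VMRegImpl::slots_per_word keeps
  // stack_size a multiple of 16 bytes on 64-bit (8 bytes on 32-bit), so the
  // save instruction below produces a properly aligned SP.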

  // Generate stack overflow check before creating frame
  __ generate_stack_overflow_check(stack_size);

  // Generate a new frame for the wrapper.
  __ save(SP, -stack_size, SP);

  int frame_complete = ((intptr_t)__ pc()) - start;

  __ verify_thread();

  if (is_critical_native) {
    check_needs_gc_for_critical_native(masm, stack_slots,  total_in_args,
                                       oop_handle_offset, oop_maps, in_regs, in_sig_bt);
  }

  //
  // We immediately shuffle the arguments so that any vm call we have to
  // make from here on out (sync slow path, jvmti, etc.) we will have
  // captured the oops from our caller and have a valid oopMap for
  // them.

  // -----------------
  // The Grand Shuffle
  //
  // Natives require 1 or 2 extra arguments over the normal ones: the JNIEnv*
  // (derived from JavaThread* which is in L7_thread_cache) and, if static,
  // the class mirror instead of a receiver.  This pretty much guarantees that
  // register layout will not match.  We ignore these extra arguments during
  // the shuffle. The shuffle is described by the two calling convention
  // vectors we have in our possession. We simply walk the java vector to
  // get the source locations and the c vector to get the destinations.
  // Because we have a new window and the argument registers are completely
  // disjoint ( I0 -> O1, I1 -> O2, ...) we have nothing to worry about
  // here.

  // This is a trick. We double the stack slots so we can claim
  // the oops in the caller's frame. Since we are sure to have
  // more args than the caller doubling is enough to make
  // sure we can capture all the incoming oop args from the
  // caller.
  //
  OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
  // Record sp-based slot for receiver on stack for non-static methods
  int receiver_offset = -1;

  // We move the arguments backward because the floating point register
  // destination will always be a register with a greater or equal register
  // number, or the stack.

#ifdef ASSERT
  bool reg_destroyed[RegisterImpl::number_of_registers];
  bool freg_destroyed[FloatRegisterImpl::number_of_registers];
  for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) {
    reg_destroyed[r] = false;
  }
  for ( int f = 0 ; f < FloatRegisterImpl::number_of_registers ; f++ ) {
    freg_destroyed[f] = false;
  }

#endif /* ASSERT */

  for ( int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0 ; i--, c_arg-- ) {

#ifdef ASSERT
    if (in_regs[i].first()->is_Register()) {
      assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "ack!");
    } else if (in_regs[i].first()->is_FloatRegister()) {
      assert(!freg_destroyed[in_regs[i].first()->as_FloatRegister()->encoding(FloatRegisterImpl::S)], "ack!");
    }
    if (out_regs[c_arg].first()->is_Register()) {
      reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
    } else if (out_regs[c_arg].first()->is_FloatRegister()) {
      freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding(FloatRegisterImpl::S)] = true;
    }
#endif /* ASSERT */

    switch (in_sig_bt[i]) {
      case T_ARRAY:
        if (is_critical_native) {
          unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg], out_regs[c_arg - 1]);
          c_arg--;
          break;
        }
      case T_OBJECT:
        assert(!is_critical_native, "no oop arguments");
        object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
                    ((i == 0) && (!is_static)),
                    &receiver_offset);
        break;
      case T_VOID:
        break;

      case T_FLOAT:
        float_move(masm, in_regs[i], out_regs[c_arg]);
        break;

      case T_DOUBLE:
        assert( i + 1 < total_in_args &&
                in_sig_bt[i + 1] == T_VOID &&
                out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
        double_move(masm, in_regs[i], out_regs[c_arg]);
        break;

      case T_LONG :
        long_move(masm, in_regs[i], out_regs[c_arg]);
        break;

      case T_ADDRESS: assert(false, "found T_ADDRESS in java args");

      default:
        move32_64(masm, in_regs[i], out_regs[c_arg]);
    }
  }

  // Pre-load a static method's oop into O1.  Used both by locking code and
  // the normal JNI call code.
  if (method->is_static() && !is_critical_native) {
    __ set_oop_constant(JNIHandles::make_local(Klass::cast(method->method_holder())->java_mirror()), O1);

    // Now handlize the static class mirror in O1.  It's known not-null.
    __ st_ptr(O1, SP, klass_offset + STACK_BIAS);
    map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
    __ add(SP, klass_offset + STACK_BIAS, O1);
  }


  const Register L6_handle = L6;

  if (method->is_synchronized()) {
    assert(!is_critical_native, "unhandled");
    __ mov(O1, L6_handle);
  }

  // We have all of the arguments set up at this point. We MUST NOT touch any Oregs
  // except O6/O7. So if we must call out we must push a new frame. We immediately
  // push a new frame and flush the windows.
#ifdef _LP64
  intptr_t thepc = (intptr_t) __ pc();
  {
    address here = __ pc();
    // Call the next instruction
    __ call(here + 8, relocInfo::none);
    __ delayed()->nop();
  }
#else
  intptr_t thepc = __ load_pc_address(O7, 0);
#endif /* _LP64 */

  // We use the same pc/oopMap repeatedly when we call out
  oop_maps->add_gc_map(thepc - start, map);

  // O7 now has the pc loaded that we will use when we finally call to native.

  // Save thread in L7; it crosses a bunch of VM calls below
  // Don't use save_thread because it smashes G2 and we merely
  // want to save a copy
  __ mov(G2_thread, L7_thread_cache);


  // If we create an inner frame, once is plenty;
  // when we create it we must also save G2_thread.
  bool inner_frame_created = false;

  // dtrace method entry support
  {
    SkipIfEqual skip_if(
      masm, G3_scratch, &DTraceMethodProbes, Assembler::zero);
    // create inner frame
    __ save_frame(0);
    __ mov(G2_thread, L7_thread_cache);
    __ set_metadata_constant(method(), O1);
    __ call_VM_leaf(L7_thread_cache,
         CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
         G2_thread, O1);
    __ restore();
  }

  // RedefineClasses() tracing support for obsolete method entry
  if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
    // create inner frame
    __ save_frame(0);
    __ mov(G2_thread, L7_thread_cache);
    __ set_metadata_constant(method(), O1);
    __ call_VM_leaf(L7_thread_cache,
         CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
         G2_thread, O1);
    __ restore();
  }

  // We are in the jni frame unless saved_frame is true, in which case
  // we are one frame deeper (the "inner" frame). If we are in the
  // "inner" frame the args are in the Iregs; if in the jni frame they
  // are in the Oregs.
  // If we ever need to go to the VM (for locking, jvmti) then
  // we will always be in the "inner" frame.

  // Lock a synchronized method
  int lock_offset = -1;         // Set if locked
  if (method->is_synchronized()) {
    Register Roop = O1;
    const Register L3_box = L3;

    create_inner_frame(masm, &inner_frame_created);

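    // I1 holds the JNI handle of the object to lock (the receiver, or the
    // class mirror for a static method); dereference it to get the raw oop.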
    __ ld_ptr(I1, 0, O1);
    Label done;

    lock_offset = (lock_slot_offset * VMRegImpl::stack_slot_size);
    __ add(FP, lock_offset+STACK_BIAS, L3_box);
#ifdef ASSERT
    if (UseBiasedLocking) {
      // making the box point to itself will make it clear it went unused
      // but also be obviously invalid
      __ st_ptr(L3_box, L3_box, 0);
    }
#endif // ASSERT
    //
    // Compiler_lock_object (Roop, Rmark, Rbox, Rscratch) -- kills Rmark, Rbox, Rscratch
    //
    __ compiler_lock_object(Roop, L1,    L3_box, L2);
    __ br(Assembler::equal, false, Assembler::pt, done);
    __ delayed() -> add(FP, lock_offset+STACK_BIAS, L3_box);


    // None of the above fast optimizations worked so we have to get into the
    // slow case of monitor enter.  Inline a special case of call_VM that
    // disallows any pending_exception.
    __ mov(Roop, O0);            // Need oop in O0
    __ mov(L3_box, O1);

    // Record last_Java_sp, in case the VM code releases the JVM lock.

    __ set_last_Java_frame(FP, I7);

    // do the call
    __ call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), relocInfo::runtime_call_type);
    __ delayed()->mov(L7_thread_cache, O2);

    __ restore_thread(L7_thread_cache); // restore G2_thread
    __ reset_last_Java_frame();

#ifdef ASSERT
    { Label L;
    __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O0);
    __ br_null_short(O0, Assembler::pt, L);
    __ stop("no pending exception allowed on exit from IR::monitorenter");
    __ bind(L);
    }
#endif
    __ bind(done);
  }


  // Finally just about ready to make the JNI call

  __ flush_windows();
  if (inner_frame_created) {
    __ restore();
  } else {
    // Store only what we need from this frame
    // QQQ I think that on non-v9 (not that we care) we don't need these saves
    // either, as the flush traps and the current window goes too.
    __ st_ptr(FP, SP, FP->sp_offset_in_saved_window()*wordSize + STACK_BIAS);
    __ st_ptr(I7, SP, I7->sp_offset_in_saved_window()*wordSize + STACK_BIAS);
  }

  // get JNIEnv* which is first argument to native
  if (!is_critical_native) {
    __ add(G2_thread, in_bytes(JavaThread::jni_environment_offset()), O0);
  }

  // Use that pc we placed in O7 a while back as the current frame anchor
  __ set_last_Java_frame(SP, O7);

  // We flushed the windows ages ago; now mark them as flushed before transitioning.
  __ set(JavaFrameAnchor::flushed, G3_scratch);
  __ st(G3_scratch, G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());

  // Transition from _thread_in_Java to _thread_in_native.
  __ set(_thread_in_native, G3_scratch);

#ifdef _LP64
  AddressLiteral dest(native_func);
  __ relocate(relocInfo::runtime_call_type);
  __ jumpl_to(dest, O7, O7);
#else
  __ call(native_func, relocInfo::runtime_call_type);
#endif
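  // The store of the new thread state sits in the delay slot of the call/jump
  // above, so the transition takes effect together with the native call.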
  __ delayed()->st(G3_scratch, G2_thread, JavaThread::thread_state_offset());

  __ restore_thread(L7_thread_cache); // restore G2_thread

  // Unpack native results.  For int-types, we do any needed sign-extension
  // and move things into I0.  The return value there will survive any VM
  // calls for blocking or unlocking.  An FP or OOP result (handle) is done
  // specially in the slow-path code.
  switch (ret_type) {
  case T_VOID:    break;        // Nothing to do!
  case T_FLOAT:   break;        // Got it where we want it (unless slow-path)
  case T_DOUBLE:  break;        // Got it where we want it (unless slow-path)
  // In the 64-bit build the result is in O0; in the 32-bit build it is in O0,O1
  case T_LONG:
#ifndef _LP64
                  __ mov(O1, I1);
#endif
                  // Fall thru
  case T_OBJECT:                // Really a handle
  case T_ARRAY:
  case T_INT:
                  __ mov(O0, I0);
                  break;
  case T_BOOLEAN: __ subcc(G0, O0, G0); __ addc(G0, 0, I0); break; // !0 => true; 0 => false
  case T_BYTE   : __ sll(O0, 24, O0); __ sra(O0, 24, I0);   break;
  case T_CHAR   : __ sll(O0, 16, O0); __ srl(O0, 16, I0);   break; // cannot use and3, 0xFFFF too big as immediate value!
  case T_SHORT  : __ sll(O0, 16, O0); __ sra(O0, 16, I0);   break;
    break;                      // Cannot de-handlize until after reclaiming jvm_lock
  default:
    ShouldNotReachHere();
  }

  Label after_transition;
  // must we block?

  // Block, if necessary, before resuming in _thread_in_Java state.
  // In order for GC to work, don't clear the last_Java_sp until after blocking.
  { Label no_block;
    AddressLiteral sync_state(SafepointSynchronize::address_of_state());

    // Switch thread to "native transition" state before reading the synchronization state.
    // This additional state is necessary because reading and testing the synchronization
    // state is not atomic w.r.t. GC, as this scenario demonstrates:
    //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
    //     VM thread changes sync state to synchronizing and suspends threads for GC.
    //     Thread A is resumed to finish this native method, but doesn't block here since it
    //     didn't see any synchronization in progress, and escapes.
    __ set(_thread_in_native_trans, G3_scratch);
    __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
    if(os::is_MP()) {
      if (UseMembar) {
        // Force this write out before the read below
        __ membar(Assembler::StoreLoad);
      } else {
        // Write serialization page so VM thread can do a pseudo remote membar.
        // We use the current thread pointer to calculate a thread specific
        // offset to write to within the page. This minimizes bus traffic
        // due to cache line collision.
        __ serialize_memory(G2_thread, G1_scratch, G3_scratch);
      }
    }
    __ load_contents(sync_state, G3_scratch);
    __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);

    Label L;
    Address suspend_state(G2_thread, JavaThread::suspend_flags_offset());
    __ br(Assembler::notEqual, false, Assembler::pn, L);
    __ delayed()->ld(suspend_state, G3_scratch);
    __ cmp_and_br_short(G3_scratch, 0, Assembler::equal, Assembler::pt, no_block);
    __ bind(L);

    // Block.  Save any potential method result value before the operation and
    // use a leaf call to leave the last_Java_frame setup undisturbed. Doing this
    // lets us share the oopMap we used when we went native rather than create
    // a distinct one for this pc
    //
    save_native_result(masm, ret_type, stack_slots);
    if (!is_critical_native) {
      __ call_VM_leaf(L7_thread_cache,
                      CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
                      G2_thread);
    } else {
      __ call_VM_leaf(L7_thread_cache,
                      CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans_and_transition),
                      G2_thread);
    }

    // Restore any method result value
    restore_native_result(masm, ret_type, stack_slots);

    if (is_critical_native) {
      // The call above performed the transition to thread_in_Java so
      // skip the transition logic below.
      __ ba(after_transition);
      __ delayed()->nop();
    }

    __ bind(no_block);
  }

  // thread state is thread_in_native_trans. Any safepoint blocking has already
  // happened so we can now change state to _thread_in_Java.
  __ set(_thread_in_Java, G3_scratch);
  __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
  __ bind(after_transition);

  Label no_reguard;
  __ ld(G2_thread, JavaThread::stack_guard_state_offset(), G3_scratch);
  __ cmp_and_br_short(G3_scratch, JavaThread::stack_guard_yellow_disabled, Assembler::notEqual, Assembler::pt, no_reguard);

  save_native_result(masm, ret_type, stack_slots);
  __ call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
  __ delayed()->nop();

  __ restore_thread(L7_thread_cache); // restore G2_thread
  restore_native_result(masm, ret_type, stack_slots);

  __ bind(no_reguard);

  // Handle possible exception (will unlock if necessary)

  // native result if any is live in freg or I0 (and I1 if long and 32bit vm)

  // Unlock
  if (method->is_synchronized()) {
    Label done;
    Register I2_ex_oop = I2;
    const Register L3_box = L3;
    // Get locked oop from the handle we passed to jni
    __ ld_ptr(L6_handle, 0, L4);
    __ add(SP, lock_offset+STACK_BIAS, L3_box);
    // Must save pending exception around the slow-path VM call.  Since it's a
    // leaf call, the pending exception (if any) can be kept in a register.
    __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), I2_ex_oop);
    // Now unlock
    //                       (Roop, Rmark, Rbox,   Rscratch)
    __ compiler_unlock_object(L4,   L1,    L3_box, L2);
    __ br(Assembler::equal, false, Assembler::pt, done);
    __ delayed()-> add(SP, lock_offset+STACK_BIAS, L3_box);

    // save and restore any potential method result value around the unlocking
    // operation.  Will save in I0 (or stack for FP returns).
    save_native_result(masm, ret_type, stack_slots);

    // Must clear pending-exception before re-entering the VM.  Since this is
    // a leaf call, pending-exception-oop can be safely kept in a register.
    __ st_ptr(G0, G2_thread, in_bytes(Thread::pending_exception_offset()));

    // slow case of monitor exit.  Inline a special case of call_VM that
    // disallows any pending_exception.
    __ mov(L3_box, O1);

    __ call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C), relocInfo::runtime_call_type);
    __ delayed()->mov(L4, O0);              // Need oop in O0

    __ restore_thread(L7_thread_cache); // restore G2_thread

#ifdef ASSERT
    { Label L;
    __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O0);
    __ br_null_short(O0, Assembler::pt, L);
    __ stop("no pending exception allowed on exit from IR::monitorexit");
    __ bind(L);
    }
#endif
    restore_native_result(masm, ret_type, stack_slots);
    // check_forward_pending_exception jumps to forward_exception if any pending
    // exception is set.  The forward_exception routine expects to see the
    // exception in pending_exception and not in a register.  Kind of clumsy,
    // since all folks who branch to forward_exception must have tested
    // pending_exception first and hence have it in a register already.
    __ st_ptr(I2_ex_oop, G2_thread, in_bytes(Thread::pending_exception_offset()));
    __ bind(done);
  }

  // Tell dtrace about this method exit
  {
    SkipIfEqual skip_if(
      masm, G3_scratch, &DTraceMethodProbes, Assembler::zero);
    save_native_result(masm, ret_type, stack_slots);
    __ set_metadata_constant(method(), O1);
    __ call_VM_leaf(L7_thread_cache,
       CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
       G2_thread, O1);
    restore_native_result(masm, ret_type, stack_slots);
  }

  // Clear "last Java frame" SP and PC.
  __ verify_thread(); // G2_thread must be correct
  __ reset_last_Java_frame();

  // Unpack oop result
  if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
      Label L;
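      // A non-NULL result is a JNI handle: dereference it in the branch delay
      // slot; a NULL handle falls through and stays a NULL oop.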
      __ addcc(G0, I0, G0);
      __ brx(Assembler::notZero, true, Assembler::pt, L);
      __ delayed()->ld_ptr(I0, 0, I0);
      __ mov(G0, I0);
      __ bind(L);
      __ verify_oop(I0);
  }

  if (!is_critical_native) {
    // reset handle block
    __ ld_ptr(G2_thread, in_bytes(JavaThread::active_handles_offset()), L5);
    __ st_ptr(G0, L5, JNIHandleBlock::top_offset_in_bytes());

    __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), G3_scratch);
    check_forward_pending_exception(masm, G3_scratch);
  }


  // Return

#ifndef _LP64
  if (ret_type == T_LONG) {

    // Must leave proper result in O0,O1 and G1 (c2/tiered only)
    __ sllx(I0, 32, G1);          // Shift bits into high G1
    __ srl (I1, 0, I1);           // Zero extend O1 (harmless?)
    __ or3 (I1, G1, G1);          // OR 64 bits into G1
  }
#endif

  __ ret();
  __ delayed()->restore();

  __ flush();

  nmethod *nm = nmethod::new_native_nmethod(method,
                                            compile_id,
                                            masm->code(),
                                            vep_offset,
                                            frame_complete,
                                            stack_slots / VMRegImpl::slots_per_word,
                                            (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
                                            in_ByteSize(lock_offset),
                                            oop_maps);

  if (is_critical_native) {
    nm->set_lazy_critical_native(true);
  }
  return nm;

}

#ifdef HAVE_DTRACE_H
// ---------------------------------------------------------------------------
// Generate a dtrace nmethod for a given signature.  The method takes arguments
// in the Java compiled code convention, marshals them to the native
// abi and then leaves nops at the position you would expect to call a native
// function. When the probe is enabled the nops are replaced with a trap
// instruction that dtrace inserts and the trace will cause a notification
// to dtrace.
//
// The probes are only able to take primitive types and java/lang/String as
// arguments.  No other java types are allowed. Strings are converted to utf8
// strings so that from dtrace point of view java strings are converted to C
// strings. There is an arbitrary fixed limit on the total space that a method
// can use for converting the strings. (256 chars per string in the signature).
// So any java string larger then this is truncated.

static int  fp_offset[ConcreteRegisterImpl::number_of_registers] = { 0 };
static bool offsets_initialized = false;

nmethod *SharedRuntime::generate_dtrace_nmethod(
    MacroAssembler *masm, methodHandle method) {


  // generate_dtrace_nmethod is guarded by a mutex so we are sure to
  // be single threaded in this method.
  assert(AdapterHandlerLibrary_lock->owned_by_self(), "must be");

  // Fill in the signature array, for the calling-convention call.
  int total_args_passed = method->size_of_parameters();

  BasicType* in_sig_bt  = NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
  VMRegPair  *in_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);

  // The signature we are going to use for the trap that dtrace will see:
  // java/lang/String is converted, we drop "this", and any other object
  // is converted to NULL.  (A one-slot java/lang/Long object reference
  // is converted to a two-slot long, which is why we double the allocation).
  BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed * 2);
  VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed * 2);

  int i=0;
  int total_strings = 0;
  int first_arg_to_pass = 0;
  int total_c_args = 0;

  // Skip the receiver as dtrace doesn't want to see it
  if( !method->is_static() ) {
    in_sig_bt[i++] = T_OBJECT;
    first_arg_to_pass = 1;
  }

  SignatureStream ss(method->signature());
  for ( ; !ss.at_return_type(); ss.next()) {
    BasicType bt = ss.type();
    in_sig_bt[i++] = bt;  // Collect remaining bits of signature
    out_sig_bt[total_c_args++] = bt;
    if( bt == T_OBJECT) {
      Symbol* s = ss.as_symbol_or_null();
      if (s == vmSymbols::java_lang_String()) {
        total_strings++;
        out_sig_bt[total_c_args-1] = T_ADDRESS;
      } else if (s == vmSymbols::java_lang_Boolean() ||
                 s == vmSymbols::java_lang_Byte()) {
        out_sig_bt[total_c_args-1] = T_BYTE;
      } else if (s == vmSymbols::java_lang_Character() ||
                 s == vmSymbols::java_lang_Short()) {
        out_sig_bt[total_c_args-1] = T_SHORT;
      } else if (s == vmSymbols::java_lang_Integer() ||
                 s == vmSymbols::java_lang_Float()) {
        out_sig_bt[total_c_args-1] = T_INT;
      } else if (s == vmSymbols::java_lang_Long() ||
                 s == vmSymbols::java_lang_Double()) {
        out_sig_bt[total_c_args-1] = T_LONG;
        out_sig_bt[total_c_args++] = T_VOID;
      }
    } else if ( bt == T_LONG || bt == T_DOUBLE ) {
      in_sig_bt[i++] = T_VOID;   // Longs & doubles take 2 Java slots
      // We convert double to long
      out_sig_bt[total_c_args-1] = T_LONG;
      out_sig_bt[total_c_args++] = T_VOID;
    } else if ( bt == T_FLOAT) {
      // We convert float to int
      out_sig_bt[total_c_args-1] = T_INT;
    }
  }

  assert(i==total_args_passed, "validly parsed signature");

  // Now get the compiled-Java layout as input arguments
  int comp_args_on_stack;
  comp_args_on_stack = SharedRuntime::java_calling_convention(
      in_sig_bt, in_regs, total_args_passed, false);

  // We have received a description of where all the java args are located
  // on entry to the wrapper. We need to convert these args to where
  // a native (non-jni) function would expect them. To figure out
  // where they go we convert the java signature to a C signature and remove
  // T_VOID for any long/double we might have received.


  // Now figure out where the args must be stored and how much stack space
  // they require (neglecting out_preserve_stack_slots but including space for
  // storing the 1st six register arguments). It's weird; see int_stk_helper.
  //
  int out_arg_slots;
  out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);

  // Calculate the total number of stack slots we will need.

  // First count the abi requirement plus all of the outgoing args
  int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;

  // Plus a temp for possible conversion of float/double/long register args

  int conversion_temp = stack_slots;
  stack_slots += 2;


  // Now space for the string(s) we must convert

  int string_locs = stack_slots;
  stack_slots += total_strings *
                   (max_dtrace_string_size / VMRegImpl::stack_slot_size);

  // OK, the space we have allocated will look like:
  //
  //
  // FP-> |                     |
  //      |---------------------|
  //      | string[n]           |
  //      |---------------------| <- string_locs[n]
  //      | string[n-1]         |
  //      |---------------------| <- string_locs[n-1]
  //      | ...                 |
  //      | ...                 |
  //      |---------------------| <- string_locs[1]
  //      | string[0]           |
  //      |---------------------| <- string_locs[0]
  //      | temp                |
  //      |---------------------| <- conversion_temp
  //      | outbound memory     |
  //      | based arguments     |
  //      |                     |
  //      |---------------------|
  //      |                     |
  // SP-> | out_preserved_slots |
  //
  //

  // Now compute the actual number of stack words we need, rounding to keep
  // the stack properly aligned.
  stack_slots = round_to(stack_slots, 4 * VMRegImpl::slots_per_word);

  int stack_size = stack_slots * VMRegImpl::stack_slot_size;

  intptr_t start = (intptr_t)__ pc();

  // First thing make an ic check to see if we should even be here

  {
    Label L;
    const Register temp_reg = G3_scratch;
    AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());
    __ verify_oop(O0);
    __ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), temp_reg);
    __ cmp_and_brx_short(temp_reg, G5_inline_cache_reg, Assembler::equal, Assembler::pt, L);

    __ jump_to(ic_miss, temp_reg);
    __ delayed()->nop();
    __ align(CodeEntryAlignment);
    __ bind(L);
  }

  int vep_offset = ((intptr_t)__ pc()) - start;


  // The instruction at the verified entry point must be 5 bytes or longer
  // because it can be patched on the fly by make_non_entrant. The stack bang
  // instruction fits that requirement.

  // Generate stack overflow check before creating frame
  __ generate_stack_overflow_check(stack_size);

  assert(((intptr_t)__ pc() - start - vep_offset) >= 5,
         "valid size for make_non_entrant");

  // Generate a new frame for the wrapper.
  __ save(SP, -stack_size, SP);

  // Frame is now completed as far as size and linkage.

  int frame_complete = ((intptr_t)__ pc()) - start;

#ifdef ASSERT
  bool reg_destroyed[RegisterImpl::number_of_registers];
  bool freg_destroyed[FloatRegisterImpl::number_of_registers];
  for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) {
    reg_destroyed[r] = false;
  }
  for ( int f = 0 ; f < FloatRegisterImpl::number_of_registers ; f++ ) {
    freg_destroyed[f] = false;
  }

#endif /* ASSERT */

  VMRegPair zero;
  const Register g0 = G0; // without this we get a compiler warning (why??)
  zero.set2(g0->as_VMReg());

  int c_arg, j_arg;

  Register conversion_off = noreg;

  for (j_arg = first_arg_to_pass, c_arg = 0 ;
       j_arg < total_args_passed ; j_arg++, c_arg++ ) {

    VMRegPair src = in_regs[j_arg];
    VMRegPair dst = out_regs[c_arg];

#ifdef ASSERT
    if (src.first()->is_Register()) {
      assert(!reg_destroyed[src.first()->as_Register()->encoding()], "ack!");
    } else if (src.first()->is_FloatRegister()) {
      assert(!freg_destroyed[src.first()->as_FloatRegister()->encoding(
                                               FloatRegisterImpl::S)], "ack!");
    }
    if (dst.first()->is_Register()) {
      reg_destroyed[dst.first()->as_Register()->encoding()] = true;
    } else if (dst.first()->is_FloatRegister()) {
      freg_destroyed[dst.first()->as_FloatRegister()->encoding(
                                                 FloatRegisterImpl::S)] = true;
    }
#endif /* ASSERT */

    switch (in_sig_bt[j_arg]) {
      case T_ARRAY:
      case T_OBJECT:
        {
          if (out_sig_bt[c_arg] == T_BYTE  || out_sig_bt[c_arg] == T_SHORT ||
              out_sig_bt[c_arg] == T_INT || out_sig_bt[c_arg] == T_LONG) {
            // need to unbox a one-slot value
            Register in_reg = L0;
            Register tmp = L2;
            if ( src.first()->is_reg() ) {
              in_reg = src.first()->as_Register();
            } else {
              assert(Assembler::is_simm13(reg2offset(src.first()) + STACK_BIAS),
                     "must be");
              __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, in_reg);
            }
            // If the final destination is an acceptable register
            if ( dst.first()->is_reg() ) {
              if ( dst.is_single_phys_reg() || out_sig_bt[c_arg] != T_LONG ) {
                tmp = dst.first()->as_Register();
              }
            }

            Label skipUnbox;
            if ( wordSize == 4 && out_sig_bt[c_arg] == T_LONG ) {
              __ mov(G0, tmp->successor());
            }
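            // A NULL box unboxes to zero: the delay-slot mov below (plus the
            // clear of the high word above for 32-bit longs) supplies the zero.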
            __ br_null(in_reg, true, Assembler::pn, skipUnbox);
            __ delayed()->mov(G0, tmp);

            BasicType bt = out_sig_bt[c_arg];
            int box_offset = java_lang_boxing_object::value_offset_in_bytes(bt);
            switch (bt) {
                case T_BYTE:
                  __ ldub(in_reg, box_offset, tmp); break;
                case T_SHORT:
                  __ lduh(in_reg, box_offset, tmp); break;
                case T_INT:
                  __ ld(in_reg, box_offset, tmp); break;
                case T_LONG:
                  __ ld_long(in_reg, box_offset, tmp); break;
                default: ShouldNotReachHere();
            }

            __ bind(skipUnbox);
            // If tmp wasn't final destination copy to final destination
            if (tmp == L2) {
              VMRegPair tmp_as_VM = reg64_to_VMRegPair(L2);
              if (out_sig_bt[c_arg] == T_LONG) {
                long_move(masm, tmp_as_VM, dst);
              } else {
                move32_64(masm, tmp_as_VM, out_regs[c_arg]);
              }
            }
            if (out_sig_bt[c_arg] == T_LONG) {
              assert(out_sig_bt[c_arg+1] == T_VOID, "must be");
              ++c_arg; // move over the T_VOID to keep the loop indices in sync
            }
          } else if (out_sig_bt[c_arg] == T_ADDRESS) {
            Register s =
                src.first()->is_reg() ? src.first()->as_Register() : L2;
            Register d =
                dst.first()->is_reg() ? dst.first()->as_Register() : L2;

            // We store the oop now so that the conversion pass can reach it
            // while in the inner frame. This will be the only store if
            // the oop is NULL.
            if (s != L2) {
              // src is register
              if (d != L2) {
                // dst is register
                __ mov(s, d);
              } else {
                assert(Assembler::is_simm13(reg2offset(dst.first()) +
                          STACK_BIAS), "must be");
                __ st_ptr(s, SP, reg2offset(dst.first()) + STACK_BIAS);
              }
            } else {
                // src not a register
                assert(Assembler::is_simm13(reg2offset(src.first()) +
                           STACK_BIAS), "must be");
                __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, d);
                if (d == L2) {
                  assert(Assembler::is_simm13(reg2offset(dst.first()) +
                             STACK_BIAS), "must be");
                  __ st_ptr(d, SP, reg2offset(dst.first()) + STACK_BIAS);
                }
            }
          } else if (out_sig_bt[c_arg] != T_VOID) {
            // Convert the arg to NULL
            if (dst.first()->is_reg()) {
              __ mov(G0, dst.first()->as_Register());
            } else {
              assert(Assembler::is_simm13(reg2offset(dst.first()) +
                         STACK_BIAS), "must be");
              __ st_ptr(G0, SP, reg2offset(dst.first()) + STACK_BIAS);
            }
          }
        }
        break;
      case T_VOID:
        break;

      case T_FLOAT:
        if (src.first()->is_stack()) {
          // Stack to stack/reg is simple
          move32_64(masm, src, dst);
        } else {
          if (dst.first()->is_reg()) {
            // freg -> reg
            int off =
              STACK_BIAS + conversion_temp * VMRegImpl::stack_slot_size;
            Register d = dst.first()->as_Register();
            if (Assembler::is_simm13(off)) {
              __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(),
                     SP, off);
              __ ld(SP, off, d);
            } else {
              if (conversion_off == noreg) {
                __ set(off, L6);
                conversion_off = L6;
              }
              __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(),
                     SP, conversion_off);
              __ ld(SP, conversion_off , d);
            }
          } else {
            // freg -> mem
            int off = STACK_BIAS + reg2offset(dst.first());
            if (Assembler::is_simm13(off)) {
              __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(),
                     SP, off);
            } else {
              if (conversion_off == noreg) {
                __ set(off, L6);
                conversion_off = L6;
              }
              __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(),
                     SP, conversion_off);
            }
          }
        }
        break;

      case T_DOUBLE:
        assert( j_arg + 1 < total_args_passed &&
                in_sig_bt[j_arg + 1] == T_VOID &&
                out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
        if (src.first()->is_stack()) {
          // Stack to stack/reg is simple
          long_move(masm, src, dst);
        } else {
          Register d = dst.first()->is_reg() ? dst.first()->as_Register() : L2;

          // Destination could be an odd reg on 32bit in which case
          // we can't load directly to the destination.

          if (!d->is_even() && wordSize == 4) {
            d = L2;
          }
          int off = STACK_BIAS + conversion_temp * VMRegImpl::stack_slot_size;
          if (Assembler::is_simm13(off)) {
            __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(),
                   SP, off);
            __ ld_long(SP, off, d);
          } else {
            if (conversion_off == noreg) {
              __ set(off, L6);
              conversion_off = L6;
            }
            __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(),
                   SP, conversion_off);
            __ ld_long(SP, conversion_off, d);
          }
          if (d == L2) {
            long_move(masm, reg64_to_VMRegPair(L2), dst);
          }
        }
        break;

      case T_LONG :
        // 32bit can't do a split move of something like g1 -> O0, O1
        // so use a memory temp
        if (src.is_single_phys_reg() && wordSize == 4) {
          Register tmp = L2;
          if (dst.first()->is_reg() &&
              (wordSize == 8 || dst.first()->as_Register()->is_even())) {
            tmp = dst.first()->as_Register();
          }

          int off = STACK_BIAS + conversion_temp * VMRegImpl::stack_slot_size;
          if (Assembler::is_simm13(off)) {
            __ stx(src.first()->as_Register(), SP, off);
            __ ld_long(SP, off, tmp);
          } else {
            if (conversion_off == noreg) {
              __ set(off, L6);
              conversion_off = L6;
            }
            __ stx(src.first()->as_Register(), SP, conversion_off);
            __ ld_long(SP, conversion_off, tmp);
          }

          if (tmp == L2) {
            long_move(masm, reg64_to_VMRegPair(L2), dst);
          }
        } else {
          long_move(masm, src, dst);
        }
        break;

      case T_ADDRESS: assert(false, "found T_ADDRESS in java args");

      default:
        move32_64(masm, src, dst);
    }
  }


  // If we have any strings we must store any register based arg to the stack
  // This includes any still live xmm registers too.

  if (total_strings > 0 ) {

    // protect all the arg registers
    __ save_frame(0);
    __ mov(G2_thread, L7_thread_cache);
    const Register L2_string_off = L2;

    // Get first string offset
    __ set(string_locs * VMRegImpl::stack_slot_size, L2_string_off);

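    // For each non-NULL string argument: compute a buffer address in the
    // reserved string area, rewrite the outgoing arg to point at it, and call
    // get_utf to fill the buffer; NULL strings are passed through as NULL.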
    for (c_arg = 0 ; c_arg < total_c_args ; c_arg++ ) {
      if (out_sig_bt[c_arg] == T_ADDRESS) {

        VMRegPair dst = out_regs[c_arg];
        const Register d = dst.first()->is_reg() ?
            dst.first()->as_Register()->after_save() : noreg;

        // It's a string; the oop was already copied to the out arg
        // position
        if (d != noreg) {
          __ mov(d, O0);
        } else {
          assert(Assembler::is_simm13(reg2offset(dst.first()) + STACK_BIAS),
                 "must be");
          __ ld_ptr(FP,  reg2offset(dst.first()) + STACK_BIAS, O0);
        }
        Label skip;

        __ br_null(O0, false, Assembler::pn, skip);
        __ delayed()->add(FP, L2_string_off, O1);

        if (d != noreg) {
          __ mov(O1, d);
        } else {
          assert(Assembler::is_simm13(reg2offset(dst.first()) + STACK_BIAS),
                 "must be");
          __ st_ptr(O1, FP,  reg2offset(dst.first()) + STACK_BIAS);
        }

        __ call(CAST_FROM_FN_PTR(address, SharedRuntime::get_utf),
                relocInfo::runtime_call_type);
        __ delayed()->add(L2_string_off, max_dtrace_string_size, L2_string_off);

        __ bind(skip);

      }

    }
    __ mov(L7_thread_cache, G2_thread);
    __ restore();

  }


  // Ok now we are done. Need to place the nop that dtrace wants in order to
  // patch in the trap

  int patch_offset = ((intptr_t)__ pc()) - start;

  __ nop();


  // Return

  __ ret();
  __ delayed()->restore();

  __ flush();

  nmethod *nm = nmethod::new_dtrace_nmethod(
      method, masm->code(), vep_offset, patch_offset, frame_complete,
      stack_slots / VMRegImpl::slots_per_word);
  return nm;

}

#endif // HAVE_DTRACE_H

// This function returns the adjustment size (in number of words) to a c2i adapter
// activation for use during deoptimization
int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
  assert(callee_locals >= callee_parameters,
          "test and remove; got more parms than locals");
  if (callee_locals < callee_parameters)
    return 0;                   // No adjustment for negative locals
  int diff = (callee_locals - callee_parameters) * Interpreter::stackElementWords;
  return round_to(diff, WordsPerLong);
}

// "Top of Stack" slots that may be unused by the calling convention but must
// otherwise be preserved.
// On Intel these are not necessary and the value can be zero.
// On Sparc this describes the words reserved for storing a register window
// when an interrupt occurs.
uint SharedRuntime::out_preserve_stack_slots() {
  return frame::register_save_words * VMRegImpl::slots_per_word;
}

static void gen_new_frame(MacroAssembler* masm, bool deopt) {
//
// Common out the new frame generation for deopt and uncommon trap
//
  Register        G3pcs              = G3_scratch; // Array of new pcs (input)
  Register        Oreturn0           = O0;
  Register        Oreturn1           = O1;
  Register        O2UnrollBlock      = O2;
  Register        O3array            = O3;         // Array of frame sizes (input)
  Register        O4array_size       = O4;         // number of frames (input)
  Register        O7frame_size       = O7;         // size of the current frame (scratch)

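  // Allocate the new frame: the frame size is negated so that 'save' grows
  // the stack downward, then the frame's new pc is loaded into I7.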
  __ ld_ptr(O3array, 0, O7frame_size);
  __ sub(G0, O7frame_size, O7frame_size);
  __ save(SP, O7frame_size, SP);
  __ ld_ptr(G3pcs, 0, I7);                      // load frame's new pc

  #ifdef ASSERT
  // make sure that the frames are aligned properly
#ifndef _LP64
  __ btst(wordSize*2-1, SP);
  __ breakpoint_trap(Assembler::notZero, Assembler::ptr_cc);
#endif
  #endif

  // Deopt needs to pass some extra live values from frame to frame

  if (deopt) {
    __ mov(Oreturn0->after_save(), Oreturn0);
    __ mov(Oreturn1->after_save(), Oreturn1);
  }

  __ mov(O4array_size->after_save(), O4array_size);
  __ sub(O4array_size, 1, O4array_size);
  __ mov(O3array->after_save(), O3array);
  __ mov(O2UnrollBlock->after_save(), O2UnrollBlock);
  __ add(G3pcs, wordSize, G3pcs);               // point to next pc value

  #ifdef ASSERT
  // trash registers to show a clear pattern in backtraces
  __ set(0xDEAD0000, I0);
  __ add(I0,  2, I1);
  __ add(I0,  4, I2);
  __ add(I0,  6, I3);
  __ add(I0,  8, I4);
  // Don't touch I5 could have valuable savedSP
  __ set(0xDEADBEEF, L0);
  __ mov(L0, L1);
  __ mov(L0, L2);
  __ mov(L0, L3);
  __ mov(L0, L4);
  __ mov(L0, L5);

  // trash the return value as there is nothing to return yet
  __ set(0xDEAD0001, O7);
  #endif

  __ mov(SP, O5_savedSP);
}


static void make_new_frames(MacroAssembler* masm, bool deopt) {
  //
  // loop through the UnrollBlock info and create new frames
  //
  Register        G3pcs              = G3_scratch;
  Register        Oreturn0           = O0;
  Register        Oreturn1           = O1;
  Register        O2UnrollBlock      = O2;
  Register        O3array            = O3;
  Register        O4array_size       = O4;
  Label           loop;

  // Before we make new frames, check to see if stack is available.
  // Do this after the caller's return address is on top of stack
  if (UseStackBanging) {
    // Get total frame size for interpreted frames
    __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes(), O4);
    __ bang_stack_size(O4, O3, G3_scratch);
  }

  __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes(), O4array_size);
  __ ld_ptr(O2UnrollBlock, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes(), G3pcs);
  __ ld_ptr(O2UnrollBlock, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes(), O3array);

  // Adjust old interpreter frame to make space for new frame's extra java locals
  //
  // We capture the original sp for the transition frame only because it is needed in
  // order to properly calculate interpreter_sp_adjustment. Even though in real life
  // every interpreter frame captures a savedSP it is only needed at the transition
  // (fortunately). If we had to have it correct everywhere then we would need to
  // be told the sp_adjustment for each frame we create. If the frame size array
  // were to have twice the frame count entries then we could have pairs [sp_adjustment, frame_size]
  // for each frame we create and keep up the illusion every where.
  //

  __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes(), O7);
  __ mov(SP, O5_savedSP);       // remember initial sender's original sp before adjustment
  __ sub(SP, O7, SP);

#ifdef ASSERT
  // make sure that there is at least one entry in the array
  __ tst(O4array_size);
  __ breakpoint_trap(Assembler::zero, Assembler::icc);
#endif

  // Now push the new interpreter frames
  __ bind(loop);

  // allocate a new frame, filling the registers

  gen_new_frame(masm, deopt);        // allocate an interpreter frame

  __ cmp_zero_and_br(Assembler::notZero, O4array_size, loop);
  __ delayed()->add(O3array, wordSize, O3array);
  __ ld_ptr(G3pcs, 0, O7);                      // load final frame new pc

}

//------------------------------generate_deopt_blob----------------------------
// Ought to generate an ideal graph & compile, but here's some SPARC ASM
// instead.
void SharedRuntime::generate_deopt_blob() {
  // allocate space for the code
  ResourceMark rm;
  // setup code generation tools
  int pad = VerifyThread ? 512 : 0;// Extra slop space for more verify code
  if (UseStackBanging) {
    pad += StackShadowPages*16 + 32;
  }
#ifdef _LP64
  CodeBuffer buffer("deopt_blob", 2100+pad, 512);
#else
  // Measured 8/7/03 at 1212 in 32bit debug build (no VerifyThread)
  // Measured 8/7/03 at 1396 in 32bit debug build (VerifyThread)
  CodeBuffer buffer("deopt_blob", 1600+pad, 512);
#endif /* _LP64 */
  MacroAssembler* masm               = new MacroAssembler(&buffer);
  FloatRegister   Freturn0           = F0;
  Register        Greturn1           = G1;
  Register        Oreturn0           = O0;
  Register        Oreturn1           = O1;
  Register        O2UnrollBlock      = O2;
  Register        L0deopt_mode       = L0;
  Register        G4deopt_mode       = G4_scratch;
  int             frame_size_words;
  Address         saved_Freturn0_addr(FP, -sizeof(double) + STACK_BIAS);
#if !defined(_LP64) && defined(COMPILER2)
  Address         saved_Greturn1_addr(FP, -sizeof(double) -sizeof(jlong) + STACK_BIAS);
#endif
  Label           cont;

  OopMapSet *oop_maps = new OopMapSet();

  //
  // This is the entry point for code which is returning to a de-optimized
  // frame.
  // The steps taken by this frame are as follows:
  //   - push a dummy "register_save" and save the return values (O0, O1, F0/F1, G1)
  //     and all potentially live registers (at a pollpoint many registers can be live).
  //
  //   - call the C routine: Deoptimization::fetch_unroll_info (this function
  //     returns information about the number and size of interpreter frames
  //     which are equivalent to the frame which is being deoptimized)
  //   - deallocate the unpack frame, restoring only results values. Other
  //     volatile registers will now be captured in the vframeArray as needed.
  //   - deallocate the deoptimization frame
  //   - in a loop using the information returned in the previous step
  //     push new interpreter frames (take care to propagate the return
  //     values through each new frame pushed)
  //   - create a dummy "unpack_frame" and save the return values (O0, O1, F0)
  //   - call the C routine: Deoptimization::unpack_frames (this function
  //     lays out values on the interpreter frame which was just created)
  //   - deallocate the dummy unpack_frame
  //   - ensure that all the return values are correctly set and then do
  //     a return to the interpreter entry point
  //
  // Refer to the following methods for more information:
  //   - Deoptimization::fetch_unroll_info
  //   - Deoptimization::unpack_frames

  OopMap* map = NULL;

  int start = __ offset();

  // restore G2, the trampoline destroyed it
  __ get_thread();

  // On entry we have been called by the deoptimized nmethod with a call that
  // replaced the original call (or safepoint polling location) so the deoptimizing
  // pc is now in O7. Return values are still in the expected places

  map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
  __ ba(cont);
  __ delayed()->mov(Deoptimization::Unpack_deopt, L0deopt_mode);

  int exception_offset = __ offset() - start;

  // restore G2, the trampoline destroyed it
  __ get_thread();

  // On entry we have been jumped to by the exception handler (or exception_blob
  // for server).  O0 contains the exception oop and O7 contains the original
  // exception pc.  So if we push a frame here it will look to the
  // stack walking code (fetch_unroll_info) just like a normal call so
  // state will be extracted normally.

  // save exception oop in JavaThread and fall through into the
  // exception_in_tls case since they are handled in same way except
  // for where the pending exception is kept.
  __ st_ptr(Oexception, G2_thread, JavaThread::exception_oop_offset());

  //
  // Vanilla deoptimization with an exception pending in exception_oop
  //
  int exception_in_tls_offset = __ offset() - start;

  // No need to update oop_map  as each call to save_live_registers will produce identical oopmap
  (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_words);

  // Restore G2_thread
  __ get_thread();

#ifdef ASSERT
  {
    // verify that there is really an exception oop in exception_oop
    Label has_exception;
    __ ld_ptr(G2_thread, JavaThread::exception_oop_offset(), Oexception);
    __ br_notnull_short(Oexception, Assembler::pt, has_exception);
    __ stop("no exception in thread");
    __ bind(has_exception);

    // verify that there is no pending exception
    Label no_pending_exception;
    Address exception_addr(G2_thread, Thread::pending_exception_offset());
    __ ld_ptr(exception_addr, Oexception);
    __ br_null_short(Oexception, Assembler::pt, no_pending_exception);
    __ stop("must not have pending exception here");
    __ bind(no_pending_exception);
  }
#endif

  __ ba(cont);
  __ delayed()->mov(Deoptimization::Unpack_exception, L0deopt_mode);

  //
  // Reexecute entry, similar to c2 uncommon trap
  //
  int reexecute_offset = __ offset() - start;

  // No need to update oop_map  as each call to save_live_registers will produce identical oopmap
  (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_words);

  __ mov(Deoptimization::Unpack_reexecute, L0deopt_mode);

  __ bind(cont);

  __ set_last_Java_frame(SP, noreg);

  // do the call by hand so we can get the oopmap

  __ mov(G2_thread, L7_thread_cache);
  __ call(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info), relocInfo::runtime_call_type);
  __ delayed()->mov(G2_thread, O0);

  // Set an oopmap for the call site this describes all our saved volatile registers

  oop_maps->add_gc_map( __ offset()-start, map);

  __ mov(L7_thread_cache, G2_thread);

  __ reset_last_Java_frame();

  // NOTE: we know that only O0/O1 will be reloaded by restore_result_registers
  // so this move will survive

  __ mov(L0deopt_mode, G4deopt_mode);

  __ mov(O0, O2UnrollBlock->after_save());

  RegisterSaver::restore_result_registers(masm);

  Label noException;
  __ cmp_and_br_short(G4deopt_mode, Deoptimization::Unpack_exception, Assembler::notEqual, Assembler::pt, noException);

  // Move the pending exception from exception_oop to Oexception so
  // the pending exception will be picked up by the interpreter.
  __ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), Oexception);
  __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_oop_offset()));
  __ bind(noException);

  // deallocate the deoptimization frame taking care to preserve the return values
  __ mov(Oreturn0,     Oreturn0->after_save());
  __ mov(Oreturn1,     Oreturn1->after_save());
  __ mov(O2UnrollBlock, O2UnrollBlock->after_save());
  __ restore();

  // Allocate new interpreter frame(s) and possible c2i adapter frame

  make_new_frames(masm, true);

  // push a dummy "unpack_frame" taking care of float return values and
  // call Deoptimization::unpack_frames to have the unpacker layout
  // information in the interpreter frames just created and then return
  // to the interpreter entry point
  __ save(SP, -frame_size_words*wordSize, SP);
  __ stf(FloatRegisterImpl::D, Freturn0, saved_Freturn0_addr);
#if !defined(_LP64)
#if defined(COMPILER2)
  // 32-bit 1-register longs return longs in G1
  __ stx(Greturn1, saved_Greturn1_addr);
#endif
  __ set_last_Java_frame(SP, noreg);
  __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, G4deopt_mode);
#else
  // LP64 uses g4 in set_last_Java_frame
  __ mov(G4deopt_mode, O1);
  __ set_last_Java_frame(SP, G0);
  __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, O1);
#endif
  __ reset_last_Java_frame();
  __ ldf(FloatRegisterImpl::D, saved_Freturn0_addr, Freturn0);

#if !defined(_LP64) && defined(COMPILER2)
  // In 32 bit, C2 returns longs in G1 so restore the saved G1 into
  // I0/I1 if the return value is long.
  Label not_long;
  __ cmp_and_br_short(O0, T_LONG, Assembler::notEqual, Assembler::pt, not_long);
  __ ldd(saved_Greturn1_addr, I0);
  __ bind(not_long);
#endif
  __ ret();
  __ delayed()->restore();

  masm->flush();
  _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_words);
  _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
}

#ifdef COMPILER2

//------------------------------generate_uncommon_trap_blob--------------------
// Ought to generate an ideal graph & compile, but here's some SPARC ASM
// instead.
void SharedRuntime::generate_uncommon_trap_blob() {
  // allocate space for the code
  ResourceMark rm;
  // setup code generation tools
  int pad = VerifyThread ? 512 : 0;
  if (UseStackBanging) {
    pad += StackShadowPages*16 + 32;
  }
#ifdef _LP64
  CodeBuffer buffer("uncommon_trap_blob", 2700+pad, 512);
#else
  // Measured 8/7/03 at 660 in 32bit debug build (no VerifyThread)
  // Measured 8/7/03 at 1028 in 32bit debug build (VerifyThread)
  CodeBuffer buffer("uncommon_trap_blob", 2000+pad, 512);
#endif
  MacroAssembler* masm               = new MacroAssembler(&buffer);
  Register        O2UnrollBlock      = O2;
  Register        O2klass_index      = O2;

  //
  // This is the entry point for all traps the compiler takes when it thinks
  // it cannot handle further execution of compilation code. The frame is
  // deoptimized in these cases and converted into interpreter frames for
  // execution
  // The steps taken by this frame are as follows:
  //   - push a fake "unpack_frame"
  //   - call the C routine Deoptimization::uncommon_trap (this function
  //     packs the current compiled frame into vframe arrays and returns
  //     information about the number and size of interpreter frames which
  //     are equivalent to the frame which is being deoptimized)
  //   - deallocate the "unpack_frame"
  //   - deallocate the deoptimization frame
  //   - in a loop using the information returned in the previous step
  //     push interpreter frames;
  //   - create a dummy "unpack_frame"
  //   - call the C routine: Deoptimization::unpack_frames (this function
  //     lays out values on the interpreter frame which was just created)
  //   - deallocate the dummy unpack_frame
  //   - return to the interpreter entry point
  //
  //  Refer to the following methods for more information:
  //   - Deoptimization::uncommon_trap
  //   - Deoptimization::unpack_frame

  // the unloaded class index is in O0 (first parameter to this blob)

  // push a dummy "unpack_frame"
  // and call Deoptimization::uncommon_trap to pack the compiled frame into
  // vframe array and return the UnrollBlock information
  __ save_frame(0);
  __ set_last_Java_frame(SP, noreg);
  __ mov(I0, O2klass_index);
  __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap), G2_thread, O2klass_index);
  __ reset_last_Java_frame();
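  // uncommon_trap returns the UnrollBlock* in O0; move it into I2 (the caller's O2) so it survives the restore.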
  __ mov(O0, O2UnrollBlock->after_save());
  __ restore();

  // deallocate the deoptimized frame taking care to preserve the return values
  __ mov(O2UnrollBlock, O2UnrollBlock->after_save());
  __ restore();

  // Allocate new interpreter frame(s) and possible c2i adapter frame

  make_new_frames(masm, false);

  // push a dummy "unpack_frame" taking care of float return values and
  // call Deoptimization::unpack_frames to have the unpacker layout
  // information in the interpreter frames just created and then return
  // to the interpreter entry point
  __ save_frame(0);
  __ set_last_Java_frame(SP, noreg);
  __ mov(Deoptimization::Unpack_uncommon_trap, O3); // indicate it is the uncommon trap case
  __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, O3);
  __ reset_last_Java_frame();
  __ ret();
  __ delayed()->restore();

  masm->flush();
  _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, NULL, __ total_frame_size_in_bytes(0)/wordSize);
}

#endif // COMPILER2

//------------------------------generate_handler_blob-------------------
//
// Generate a special Compile2Runtime blob that saves all registers, and sets
// up an OopMap.
//
// This blob is jumped to (via a breakpoint and the signal handler) from a
// safepoint in compiled code.  On entry to this blob, O7 contains the
// address in the original nmethod at which we should resume normal execution.
// Thus, this blob looks like a subroutine which must preserve lots of
// registers and return normally.  Note that O7 is never register-allocated,
// so it is guaranteed to be free here.
//

// The hardest part of what this blob must do is to save the 64-bit %o
// registers in the 32-bit build.  A simple 'save' turns the %o's to %i's and
// an interrupt will chop off their heads.  Making space in the caller's frame
// first will let us save the 64-bit %o's before save'ing, but we cannot hand
// the adjusted FP off to the GC stack-crawler: this will modify the caller's
// SP and mess up HIS OopMaps.  So we first adjust the caller's SP, then save
// the 64-bit %o's, then do a save, then fixup the caller's SP (our FP).
// Tricky, tricky, tricky...

SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, bool cause_return) {
  assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");

  // allocate space for the code
  ResourceMark rm;
  // setup code generation tools
  // Measured 8/7/03 at 896 in 32bit debug build (no VerifyThread)
  // Measured 8/7/03 at 1080 in 32bit debug build (VerifyThread)
  // even larger with TraceJumps
  int pad = TraceJumps ? 512 : 0;
  CodeBuffer buffer("handler_blob", 1600 + pad, 512);
  MacroAssembler* masm                = new MacroAssembler(&buffer);
  int             frame_size_words;
  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map = NULL;

  int start = __ offset();

  // If this causes a return before the processing, then do a "restore"
  if (cause_return) {
    __ restore();
  } else {
    // Make it look like we were called via the poll
    // so that frame constructor always sees a valid return address
    __ ld_ptr(G2_thread, in_bytes(JavaThread::saved_exception_pc_offset()), O7);
    __ sub(O7, frame::pc_return_offset, O7);
  }

  map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);

  // setup last_Java_sp (blows G4)
  __ set_last_Java_frame(SP, noreg);

  // call into the runtime to handle illegal instructions exception
  // Do not use call_VM_leaf, because we need to make a GC map at this call site.
  __ mov(G2_thread, O0);
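  // Cache the thread register in L7 so restore_thread() can recover G2_thread after the call.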
  __ save_thread(L7_thread_cache);
  __ call(call_ptr);
  __ delayed()->nop();

  // Set an oopmap for the call site.
  // We need this not only for callee-saved registers, but also for volatile
  // registers that the compiler might be keeping live across a safepoint.

  oop_maps->add_gc_map( __ offset() - start, map);

  __ restore_thread(L7_thread_cache);
  // clear last_Java_sp
  __ reset_last_Java_frame();

  // Check for exceptions
  Label pending;

  __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O1);
  __ br_notnull_short(O1, Assembler::pn, pending);

  RegisterSaver::restore_live_registers(masm);

  // We are back to the original state on entry and ready to go.

  __ retl();
  __ delayed()->nop();

  // Pending exception after the safepoint

  __ bind(pending);

  RegisterSaver::restore_live_registers(masm);

  // We are back to the original state on entry.

  // Tail-call forward_exception_entry, with the issuing PC in O7,
  // so it looks like the original nmethod called forward_exception_entry.
  __ set((intptr_t)StubRoutines::forward_exception_entry(), O0);
  __ JMP(O0, 0);
  __ delayed()->nop();

  // -------------
  // make sure all code is generated
  masm->flush();

  // return exception blob
  return SafepointBlob::create(&buffer, oop_maps, frame_size_words);
}

//
// generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
//
// Generate a stub that calls into vm to find out the proper destination
// of a java call. All the argument registers are live at this point
// but since this is generic code we don't know what they are and the caller
// must do any gc of the args.
//
RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
  assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");

  // allocate space for the code
  ResourceMark rm;
  // setup code generation tools
  // Measured 8/7/03 at 896 in 32bit debug build (no VerifyThread)
  // Measured 8/7/03 at 1080 in 32bit debug build (VerifyThread)
  // even larger with TraceJumps
  int pad = TraceJumps ? 512 : 0;
  CodeBuffer buffer(name, 1600 + pad, 512);
  MacroAssembler* masm                = new MacroAssembler(&buffer);
  int             frame_size_words;
  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map = NULL;

  int start = __ offset();

  map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);

  int frame_complete = __ offset();

  // setup last_Java_sp (blows G4)
  __ set_last_Java_frame(SP, noreg);

  // call into the runtime to find the proper destination of the call
  // Do not use call_VM_leaf, because we need to make a GC map at this call site.
  __ mov(G2_thread, O0);
  __ save_thread(L7_thread_cache);
  __ call(destination, relocInfo::runtime_call_type);
  __ delayed()->nop();

  // O0 contains the address we are going to jump to assuming no exception got installed

  // Set an oopmap for the call site.
  // We need this not only for callee-saved registers, but also for volatile
  // registers that the compiler might be keeping live across a safepoint.

  oop_maps->add_gc_map( __ offset() - start, map);

  __ restore_thread(L7_thread_cache);
  // clear last_Java_sp
  __ reset_last_Java_frame();

  // Check for exceptions
  Label pending;

  __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O1);
  __ br_notnull_short(O1, Assembler::pn, pending);

  // get the returned Method*

  __ get_vm_result_2(G5_method);
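  // Overwrite the saved copy of G5 in the register save area so that
  // restore_live_registers() reloads the resolved Method* into G5.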
  __ stx(G5_method, SP, RegisterSaver::G5_offset()+STACK_BIAS);

  // O0 is where we want to jump, overwrite G3 which is saved and scratch

  __ stx(O0, SP, RegisterSaver::G3_offset()+STACK_BIAS);

  RegisterSaver::restore_live_registers(masm);

  // We are back to the original state on entry and ready to go.

  __ JMP(G3, 0);
  __ delayed()->nop();

  // Pending exception after the safepoint

  __ bind(pending);

  RegisterSaver::restore_live_registers(masm);

  // We are back to the original state on entry.

  // Tail-call forward_exception_entry, with the issuing PC in O7,
  // so it looks like the original nmethod called forward_exception_entry.
  __ set((intptr_t)StubRoutines::forward_exception_entry(), O0);
  __ JMP(O0, 0);
  __ delayed()->nop();

  // -------------
  // make sure all code is generated
  masm->flush();

  // return the blob
  // frame_size_words or bytes??
  return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true);
}