/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/resourceArea.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#endif // INCLUDE_ALL_GCS

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#define STOP(error) stop(error)
#else
#define BLOCK_COMMENT(str) block_comment(str)
#define STOP(error) block_comment(error); stop(error)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
// Implementation of AddressLiteral

AddressLiteral::AddressLiteral(address target, relocInfo::relocType rtype) {
  _is_lval = false;
  _target = target;
  switch (rtype) {
  case relocInfo::oop_type:
  case relocInfo::metadata_type:
    // Oops are a special case. Normally they would be their own section
    // but in cases like icBuffer they are literals in the code stream that
    // we don't have a section for. We use none so that we get a literal address
    // which is always patchable.
    break;
  case relocInfo::external_word_type:
    _rspec = external_word_Relocation::spec(target);
    break;
  case relocInfo::internal_word_type:
    _rspec = internal_word_Relocation::spec(target);
    break;
  case relocInfo::opt_virtual_call_type:
    _rspec = opt_virtual_call_Relocation::spec();
    break;
  case relocInfo::static_call_type:
    _rspec = static_call_Relocation::spec();
    break;
  case relocInfo::runtime_call_type:
    _rspec = runtime_call_Relocation::spec();
    break;
  case relocInfo::poll_type:
  case relocInfo::poll_return_type:
    _rspec = Relocation::spec_simple(rtype);
    break;
  case relocInfo::none:
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

// Implementation of Address

#ifdef _LP64

Address Address::make_array(ArrayAddress adr) {
  // Not implementable on 64bit machines
  // Should have been handled higher up the call chain.
  ShouldNotReachHere();
  return Address();
}

// exceedingly dangerous constructor
Address::Address(int disp, address loc, relocInfo::relocType rtype) {
  _base  = noreg;
  _index = noreg;
  _scale = no_scale;
  _disp  = disp;
  switch (rtype) {
    case relocInfo::external_word_type:
      _rspec = external_word_Relocation::spec(loc);
      break;
    case relocInfo::internal_word_type:
      _rspec = internal_word_Relocation::spec(loc);
      break;
    case relocInfo::runtime_call_type:
      // HMM
      _rspec = runtime_call_Relocation::spec();
      break;
    case relocInfo::poll_type:
    case relocInfo::poll_return_type:
      _rspec = Relocation::spec_simple(rtype);
      break;
    case relocInfo::none:
      break;
    default:
      ShouldNotReachHere();
  }
}
#else // LP64

Address Address::make_array(ArrayAddress adr) {
  AddressLiteral base = adr.base();
  Address index = adr.index();
  assert(index._disp == 0, "must not have disp"); // maybe it can?
  Address array(index._base, index._index, index._scale, (intptr_t) base.target());
  array._rspec = base._rspec;
  return array;
}

// exceedingly dangerous constructor
Address::Address(address loc, RelocationHolder spec) {
  _base  = noreg;
  _index = noreg;
  _scale = no_scale;
  _disp  = (intptr_t) loc;
  _rspec = spec;
}

#endif // _LP64



// Convert the raw encoding form into the form expected by the constructor for
// Address.  An index of 4 (rsp) corresponds to having no index, so convert
// that to noreg for the Address constructor.
Address Address::make_raw(int base, int index, int scale, int disp, relocInfo::relocType disp_reloc) {
  RelocationHolder rspec;
  if (disp_reloc != relocInfo::none) {
    rspec = Relocation::spec_simple(disp_reloc);
  }
  bool valid_index = index != rsp->encoding();
  if (valid_index) {
    Address madr(as_Register(base), as_Register(index), (Address::ScaleFactor)scale, in_ByteSize(disp));
    madr._rspec = rspec;
    return madr;
  } else {
    Address madr(as_Register(base), noreg, Address::no_scale, in_ByteSize(disp));
    madr._rspec = rspec;
    return madr;
  }
}

// Implementation of Assembler

int AbstractAssembler::code_fill_byte() {
  return (u_char)'\xF4'; // hlt
}

// make this go away someday
void Assembler::emit_data(jint data, relocInfo::relocType rtype, int format) {
  if (rtype == relocInfo::none)
        emit_int32(data);
  else  emit_data(data, Relocation::spec_simple(rtype), format);
}

void Assembler::emit_data(jint data, RelocationHolder const& rspec, int format) {
  assert(imm_operand == 0, "default format must be immediate in this file");
  assert(inst_mark() != NULL, "must be inside InstructionMark");
  if (rspec.type() !=  relocInfo::none) {
    #ifdef ASSERT
      check_relocation(rspec, format);
    #endif
    // Do not use AbstractAssembler::relocate, which is not intended for
    // embedded words.  Instead, relocate to the enclosing instruction.

    // hack. call32 is too wide for mask so use disp32
    if (format == call32_operand)
      code_section()->relocate(inst_mark(), rspec, disp32_operand);
    else
      code_section()->relocate(inst_mark(), rspec, format);
  }
  emit_int32(data);
}

static int encode(Register r) {
  int enc = r->encoding();
  if (enc >= 8) {
    enc -= 8;
  }
  return enc;
}

void Assembler::emit_arith_b(int op1, int op2, Register dst, int imm8) {
  assert(dst->has_byte_register(), "must have byte register");
  assert(isByte(op1) && isByte(op2), "wrong opcode");
  assert(isByte(imm8), "not a byte");
  assert((op1 & 0x01) == 0, "should be 8bit operation");
  emit_int8(op1);
  emit_int8(op2 | encode(dst));
  emit_int8(imm8);
}


void Assembler::emit_arith(int op1, int op2, Register dst, int32_t imm32) {
  assert(isByte(op1) && isByte(op2), "wrong opcode");
  assert((op1 & 0x01) == 1, "should be 32bit operation");
  assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
  if (is8bit(imm32)) {
    emit_int8(op1 | 0x02); // set sign bit
    emit_int8(op2 | encode(dst));
    emit_int8(imm32 & 0xFF);
  } else {
    emit_int8(op1);
    emit_int8(op2 | encode(dst));
    emit_int32(imm32);
  }
}
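// Worked example (sketch): addl(rbx, 16) arrives here as
// emit_arith(0x81, 0xC0, rbx, 16); since 16 fits in 8 bits the short form is
// taken and the bytes 0x83 0xC3 0x10 are emitted, i.e. "addl $0x10, %ebx".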

// Force generation of a 4 byte immediate value even if it fits into 8bit
void Assembler::emit_arith_imm32(int op1, int op2, Register dst, int32_t imm32) {
  assert(isByte(op1) && isByte(op2), "wrong opcode");
  assert((op1 & 0x01) == 1, "should be 32bit operation");
  assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
  emit_int8(op1);
  emit_int8(op2 | encode(dst));
  emit_int32(imm32);
}

// immediate-to-memory forms
void Assembler::emit_arith_operand(int op1, Register rm, Address adr, int32_t imm32) {
  assert((op1 & 0x01) == 1, "should be 32bit operation");
  assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
  if (is8bit(imm32)) {
    emit_int8(op1 | 0x02); // set sign bit
    emit_operand(rm, adr, 1);
    emit_int8(imm32 & 0xFF);
  } else {
    emit_int8(op1);
    emit_operand(rm, adr, 4);
    emit_int32(imm32);
  }
}
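// Note: for these immediate-to-memory forms the Register argument only
// supplies the ModRM reg field, which acts as an opcode extension rather than
// naming a real register; e.g. addl(Address, imm32) passes rax (/0) and
// adcl(Address, imm32) passes rdx (/2).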


void Assembler::emit_arith(int op1, int op2, Register dst, Register src) {
  assert(isByte(op1) && isByte(op2), "wrong opcode");
  emit_int8(op1);
  emit_int8(op2 | encode(dst) << 3 | encode(src));
}


void Assembler::emit_operand(Register reg, Register base, Register index,
                             Address::ScaleFactor scale, int disp,
                             RelocationHolder const& rspec,
                             int rip_relative_correction) {
  relocInfo::relocType rtype = (relocInfo::relocType) rspec.type();

  // Encode the registers as needed in the fields they are used in

  int regenc = encode(reg) << 3;
  int indexenc = index->is_valid() ? encode(index) << 3 : 0;
  int baseenc = base->is_valid() ? encode(base) : 0;

  if (base->is_valid()) {
    if (index->is_valid()) {
      assert(scale != Address::no_scale, "inconsistent address");
      // [base + index*scale + disp]
      if (disp == 0 && rtype == relocInfo::none  &&
          base != rbp LP64_ONLY(&& base != r13)) {
        // [base + index*scale]
        // [00 reg 100][ss index base]
        assert(index != rsp, "illegal addressing mode");
        emit_int8(0x04 | regenc);
        emit_int8(scale << 6 | indexenc | baseenc);
      } else if (is8bit(disp) && rtype == relocInfo::none) {
        // [base + index*scale + imm8]
        // [01 reg 100][ss index base] imm8
        assert(index != rsp, "illegal addressing mode");
        emit_int8(0x44 | regenc);
        emit_int8(scale << 6 | indexenc | baseenc);
        emit_int8(disp & 0xFF);
      } else {
        // [base + index*scale + disp32]
        // [10 reg 100][ss index base] disp32
        assert(index != rsp, "illegal addressing mode");
        emit_int8(0x84 | regenc);
        emit_int8(scale << 6 | indexenc | baseenc);
        emit_data(disp, rspec, disp32_operand);
      }
    } else if (base == rsp LP64_ONLY(|| base == r12)) {
      // [rsp + disp]
      if (disp == 0 && rtype == relocInfo::none) {
        // [rsp]
        // [00 reg 100][00 100 100]
        emit_int8(0x04 | regenc);
        emit_int8(0x24);
      } else if (is8bit(disp) && rtype == relocInfo::none) {
        // [rsp + imm8]
        // [01 reg 100][00 100 100] disp8
        emit_int8(0x44 | regenc);
        emit_int8(0x24);
        emit_int8(disp & 0xFF);
      } else {
        // [rsp + imm32]
        // [10 reg 100][00 100 100] disp32
        emit_int8(0x84 | regenc);
        emit_int8(0x24);
        emit_data(disp, rspec, disp32_operand);
      }
    } else {
      // [base + disp]
      assert(base != rsp LP64_ONLY(&& base != r12), "illegal addressing mode");
      if (disp == 0 && rtype == relocInfo::none &&
          base != rbp LP64_ONLY(&& base != r13)) {
        // [base]
        // [00 reg base]
        emit_int8(0x00 | regenc | baseenc);
      } else if (is8bit(disp) && rtype == relocInfo::none) {
        // [base + disp8]
        // [01 reg base] disp8
        emit_int8(0x40 | regenc | baseenc);
        emit_int8(disp & 0xFF);
      } else {
        // [base + disp32]
        // [10 reg base] disp32
        emit_int8(0x80 | regenc | baseenc);
        emit_data(disp, rspec, disp32_operand);
      }
    }
  } else {
    if (index->is_valid()) {
      assert(scale != Address::no_scale, "inconsistent address");
      // [index*scale + disp]
      // [00 reg 100][ss index 101] disp32
      assert(index != rsp, "illegal addressing mode");
      emit_int8(0x04 | regenc);
      emit_int8(scale << 6 | indexenc | 0x05);
      emit_data(disp, rspec, disp32_operand);
    } else if (rtype != relocInfo::none ) {
      // [disp] (64bit) RIP-RELATIVE (32bit) abs
      // [00 000 101] disp32

      emit_int8(0x05 | regenc);
      // Note that the RIP-rel. correction applies to the generated
      // disp field, but _not_ to the target address in the rspec.

      // disp was created by converting the target address minus the pc
      // at the start of the instruction. That needs more correction here.
      // intptr_t disp = target - next_ip;
      assert(inst_mark() != NULL, "must be inside InstructionMark");
      address next_ip = pc() + sizeof(int32_t) + rip_relative_correction;
      int64_t adjusted = disp;
      // Do rip-rel adjustment for 64bit
      LP64_ONLY(adjusted -=  (next_ip - inst_mark()));
      assert(is_simm32(adjusted),
             "must be 32bit offset (RIP relative address)");
      emit_data((int32_t) adjusted, rspec, disp32_operand);

    } else {
      // 32bit never did this, did everything as the rip-rel/disp code above
      // [disp] ABSOLUTE
      // [00 reg 100][00 100 101] disp32
      emit_int8(0x04 | regenc);
      emit_int8(0x25);
      emit_data(disp, rspec, disp32_operand);
    }
  }
}
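// Encoding example (sketch): for reg = rax and the operand [rsp + 8] the code
// above takes the "[rsp + disp]" branch with an 8-bit displacement and emits
// ModRM 0x44, SIB 0x24, disp8 0x08, i.e. the familiar "44 24 08" sequence.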

void Assembler::emit_operand(XMMRegister reg, Register base, Register index,
                             Address::ScaleFactor scale, int disp,
                             RelocationHolder const& rspec) {
  emit_operand((Register)reg, base, index, scale, disp, rspec);
}

// Secret local extension to Assembler::WhichOperand:
#define end_pc_operand (_WhichOperand_limit)

address Assembler::locate_operand(address inst, WhichOperand which) {
  // Decode the given instruction, and return the address of
  // an embedded 32-bit operand word.

  // If "which" is disp32_operand, selects the displacement portion
  // of an effective address specifier.
  // If "which" is imm64_operand, selects the trailing immediate constant.
  // If "which" is call32_operand, selects the displacement of a call or jump.
  // Caller is responsible for ensuring that there is such an operand,
  // and that it is 32/64 bits wide.

  // If "which" is end_pc_operand, find the end of the instruction.

  address ip = inst;
  bool is_64bit = false;

  debug_only(bool has_disp32 = false);
  int tail_size = 0; // other random bytes (#32, #16, etc.) at end of insn

  again_after_prefix:
  switch (0xFF & *ip++) {

  // These convenience macros generate groups of "case" labels for the switch.
#define REP4(x) (x)+0: case (x)+1: case (x)+2: case (x)+3
#define REP8(x) (x)+0: case (x)+1: case (x)+2: case (x)+3: \
             case (x)+4: case (x)+5: case (x)+6: case (x)+7
#define REP16(x) REP8((x)+0): \
              case REP8((x)+8)

  case CS_segment:
  case SS_segment:
  case DS_segment:
  case ES_segment:
  case FS_segment:
  case GS_segment:
    // Seems dubious
    LP64_ONLY(assert(false, "shouldn't have that prefix"));
    assert(ip == inst+1, "only one prefix allowed");
    goto again_after_prefix;

  case 0x67:
  case REX:
  case REX_B:
  case REX_X:
  case REX_XB:
  case REX_R:
  case REX_RB:
  case REX_RX:
  case REX_RXB:
    NOT_LP64(assert(false, "64bit prefixes"));
    goto again_after_prefix;

  case REX_W:
  case REX_WB:
  case REX_WX:
  case REX_WXB:
  case REX_WR:
  case REX_WRB:
  case REX_WRX:
  case REX_WRXB:
    NOT_LP64(assert(false, "64bit prefixes"));
    is_64bit = true;
    goto again_after_prefix;

  case 0xFF: // pushq a; decl a; incl a; call a; jmp a
  case 0x88: // movb a, r
  case 0x89: // movl a, r
  case 0x8A: // movb r, a
  case 0x8B: // movl r, a
  case 0x8F: // popl a
    debug_only(has_disp32 = true);
    break;

  case 0x68: // pushq #32
    if (which == end_pc_operand) {
      return ip + 4;
    }
    assert(which == imm_operand && !is_64bit, "pushl has no disp32 or 64bit immediate");
    return ip;                  // not produced by emit_operand

  case 0x66: // movw ... (size prefix)
    again_after_size_prefix2:
    switch (0xFF & *ip++) {
    case REX:
    case REX_B:
    case REX_X:
    case REX_XB:
    case REX_R:
    case REX_RB:
    case REX_RX:
    case REX_RXB:
    case REX_W:
    case REX_WB:
    case REX_WX:
    case REX_WXB:
    case REX_WR:
    case REX_WRB:
    case REX_WRX:
    case REX_WRXB:
      NOT_LP64(assert(false, "64bit prefix found"));
      goto again_after_size_prefix2;
    case 0x8B: // movw r, a
    case 0x89: // movw a, r
      debug_only(has_disp32 = true);
      break;
    case 0xC7: // movw a, #16
      debug_only(has_disp32 = true);
      tail_size = 2;  // the imm16
      break;
    case 0x0F: // several SSE/SSE2 variants
      ip--;    // reparse the 0x0F
      goto again_after_prefix;
    default:
      ShouldNotReachHere();
    }
    break;

  case REP8(0xB8): // movl/q r, #32/#64(oop?)
    if (which == end_pc_operand)  return ip + (is_64bit ? 8 : 4);
    // these asserts are somewhat nonsensical
#ifndef _LP64
    assert(which == imm_operand || which == disp32_operand,
           err_msg("which %d is_64_bit %d ip " INTPTR_FORMAT, which, is_64bit, ip));
#else
    assert((which == call32_operand || which == imm_operand) && is_64bit ||
           which == narrow_oop_operand && !is_64bit,
           err_msg("which %d is_64_bit %d ip " INTPTR_FORMAT, which, is_64bit, ip));
#endif // _LP64
    return ip;

  case 0x69: // imul r, a, #32
  case 0xC7: // movl a, #32(oop?)
    tail_size = 4;
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  case 0x0F: // movx..., etc.
    switch (0xFF & *ip++) {
    case 0x3A: // pcmpestri
      tail_size = 1;
    case 0x38: // ptest, pmovzxbw
      ip++; // skip opcode
      debug_only(has_disp32 = true); // has both kinds of operands!
      break;

    case 0x70: // pshufd r, r/a, #8
      debug_only(has_disp32 = true); // has both kinds of operands!
    case 0x73: // psrldq r, #8
      tail_size = 1;
      break;

    case 0x12: // movlps
    case 0x28: // movaps
    case 0x2E: // ucomiss
    case 0x2F: // comiss
    case 0x54: // andps
    case 0x55: // andnps
    case 0x56: // orps
    case 0x57: // xorps
    case 0x6E: // movd
    case 0x7E: // movd
    case 0xAE: // ldmxcsr, stmxcsr, fxrstor, fxsave, clflush
      debug_only(has_disp32 = true);
      break;

    case 0xAD: // shrd r, a, %cl
    case 0xAF: // imul r, a
    case 0xBE: // movsbl r, a (movsxb)
    case 0xBF: // movswl r, a (movsxw)
    case 0xB6: // movzbl r, a (movzxb)
    case 0xB7: // movzwl r, a (movzxw)
    case REP16(0x40): // cmovl cc, r, a
    case 0xB0: // cmpxchgb
    case 0xB1: // cmpxchg
    case 0xC1: // xaddl
    case 0xC7: // cmpxchg8
    case REP16(0x90): // setcc a
      debug_only(has_disp32 = true);
      // fall out of the switch to decode the address
      break;

    case 0xC4: // pinsrw r, a, #8
      debug_only(has_disp32 = true);
    case 0xC5: // pextrw r, r, #8
      tail_size = 1;  // the imm8
      break;

    case 0xAC: // shrd r, a, #8
      debug_only(has_disp32 = true);
      tail_size = 1;  // the imm8
      break;

    case REP16(0x80): // jcc rdisp32
      if (which == end_pc_operand)  return ip + 4;
      assert(which == call32_operand, "jcc has no disp32 or imm");
      return ip;
    default:
      ShouldNotReachHere();
    }
    break;

  case 0x81: // addl a, #32; addl r, #32
    // also: orl, adcl, sbbl, andl, subl, xorl, cmpl
    // on 32bit in the case of cmpl, the imm might be an oop
    tail_size = 4;
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  case 0x83: // addl a, #8; addl r, #8
    // also: orl, adcl, sbbl, andl, subl, xorl, cmpl
    debug_only(has_disp32 = true); // has both kinds of operands!
    tail_size = 1;
    break;

  case 0x9B:
    switch (0xFF & *ip++) {
    case 0xD9: // fnstcw a
      debug_only(has_disp32 = true);
      break;
    default:
      ShouldNotReachHere();
    }
    break;

  case REP4(0x00): // addb a, r; addl a, r; addb r, a; addl r, a
  case REP4(0x10): // adc...
  case REP4(0x20): // and...
  case REP4(0x30): // xor...
  case REP4(0x08): // or...
  case REP4(0x18): // sbb...
  case REP4(0x28): // sub...
  case 0xF7: // mull a
  case 0x8D: // lea r, a
  case 0x87: // xchg r, a
  case REP4(0x38): // cmp...
  case 0x85: // test r, a
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  case 0xC1: // sal a, #8; sar a, #8; shl a, #8; shr a, #8
  case 0xC6: // movb a, #8
  case 0x80: // cmpb a, #8
  case 0x6B: // imul r, a, #8
    debug_only(has_disp32 = true); // has both kinds of operands!
    tail_size = 1; // the imm8
    break;

  case 0xC4: // VEX_3bytes
  case 0xC5: // VEX_2bytes
    assert((UseAVX > 0), "shouldn't have VEX prefix");
    assert(ip == inst+1, "no prefixes allowed");
    // C4 and C5 are also used as opcodes for PINSRW and PEXTRW instructions
    // but they have prefix 0x0F and processed when 0x0F processed above.
    //
    // In 32-bit mode the VEX first byte C4 and C5 alias onto LDS and LES
    // instructions (these instructions are not supported in 64-bit mode).
    // To distinguish them bits [7:6] are set in the VEX second byte since
    // ModRM byte can not be of the form 11xxxxxx in 32-bit mode. To set
    // those VEX bits REX and vvvv bits are inverted.
    //
    // Fortunately C2 doesn't generate these instructions so we don't need
    // to check for them in product version.

    // Check second byte
    NOT_LP64(assert((0xC0 & *ip) == 0xC0, "shouldn't have LDS and LES instructions"));

    // First byte
    if ((0xFF & *inst) == VEX_3bytes) {
      ip++; // third byte
      is_64bit = ((VEX_W & *ip) == VEX_W);
    }
    ip++; // opcode
    // To find the end of instruction (which == end_pc_operand).
    switch (0xFF & *ip) {
    case 0x61: // pcmpestri r, r/a, #8
    case 0x70: // pshufd r, r/a, #8
    case 0x73: // psrldq r, #8
      tail_size = 1;  // the imm8
      break;
    default:
      break;
    }
    ip++; // skip opcode
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  case 0xD1: // sal a, 1; sar a, 1; shl a, 1; shr a, 1
  case 0xD3: // sal a, %cl; sar a, %cl; shl a, %cl; shr a, %cl
  case 0xD9: // fld_s a; fst_s a; fstp_s a; fldcw a
  case 0xDD: // fld_d a; fst_d a; fstp_d a
  case 0xDB: // fild_s a; fistp_s a; fld_x a; fstp_x a
  case 0xDF: // fild_d a; fistp_d a
  case 0xD8: // fadd_s a; fsubr_s a; fmul_s a; fdivr_s a; fcomp_s a
  case 0xDC: // fadd_d a; fsubr_d a; fmul_d a; fdivr_d a; fcomp_d a
  case 0xDE: // faddp_d a; fsubrp_d a; fmulp_d a; fdivrp_d a; fcompp_d a
    debug_only(has_disp32 = true);
    break;

  case 0xE8: // call rdisp32
  case 0xE9: // jmp  rdisp32
    if (which == end_pc_operand)  return ip + 4;
    assert(which == call32_operand, "call has no disp32 or imm");
    return ip;

  case 0xF0:                    // Lock
    assert(os::is_MP(), "only on MP");
    goto again_after_prefix;

  case 0xF3:                    // For SSE
  case 0xF2:                    // For SSE2
    switch (0xFF & *ip++) {
    case REX:
    case REX_B:
    case REX_X:
    case REX_XB:
    case REX_R:
    case REX_RB:
    case REX_RX:
    case REX_RXB:
    case REX_W:
    case REX_WB:
    case REX_WX:
    case REX_WXB:
    case REX_WR:
    case REX_WRB:
    case REX_WRX:
    case REX_WRXB:
      NOT_LP64(assert(false, "found 64bit prefix"));
      ip++;
    default:
      ip++;
    }
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  default:
    ShouldNotReachHere();

#undef REP8
#undef REP16
  }

  assert(which != call32_operand, "instruction is not a call, jmp, or jcc");
#ifdef _LP64
  assert(which != imm_operand, "instruction is not a movq reg, imm64");
#else
  // assert(which != imm_operand || has_imm32, "instruction has no imm32 field");
  assert(which != imm_operand || has_disp32, "instruction has no imm32 field");
#endif // LP64
  assert(which != disp32_operand || has_disp32, "instruction has no disp32 field");

  // parse the output of emit_operand
  int op2 = 0xFF & *ip++;
  int base = op2 & 0x07;
  int op3 = -1;
  const int b100 = 4;
  const int b101 = 5;
  if (base == b100 && (op2 >> 6) != 3) {
    op3 = 0xFF & *ip++;
    base = op3 & 0x07;   // refetch the base
  }
  // now ip points at the disp (if any)

  switch (op2 >> 6) {
  case 0:
    // [00 reg  100][ss index base]
    // [00 reg  100][00   100  esp]
    // [00 reg base]
    // [00 reg  100][ss index  101][disp32]
    // [00 reg  101]               [disp32]

    if (base == b101) {
      if (which == disp32_operand)
        return ip;              // caller wants the disp32
      ip += 4;                  // skip the disp32
    }
    break;

  case 1:
    // [01 reg  100][ss index base][disp8]
    // [01 reg  100][00   100  esp][disp8]
    // [01 reg base]               [disp8]
    ip += 1;                    // skip the disp8
    break;

  case 2:
    // [10 reg  100][ss index base][disp32]
    // [10 reg  100][00   100  esp][disp32]
    // [10 reg base]               [disp32]
    if (which == disp32_operand)
      return ip;                // caller wants the disp32
    ip += 4;                    // skip the disp32
    break;

  case 3:
    // [11 reg base]  (not a memory addressing mode)
    break;
  }

  if (which == end_pc_operand) {
    return ip + tail_size;
  }

#ifdef _LP64
  assert(which == narrow_oop_operand && !is_64bit, "instruction is not a movl adr, imm32");
#else
  assert(which == imm_operand, "instruction has only an imm field");
#endif // LP64
  return ip;
}

address Assembler::locate_next_instruction(address inst) {
  // Secretly share code with locate_operand:
  return locate_operand(inst, end_pc_operand);
}


#ifdef ASSERT
void Assembler::check_relocation(RelocationHolder const& rspec, int format) {
  address inst = inst_mark();
  assert(inst != NULL && inst < pc(), "must point to beginning of instruction");
  address opnd;

  Relocation* r = rspec.reloc();
  if (r->type() == relocInfo::none) {
    return;
  } else if (r->is_call() || format == call32_operand) {
    // assert(format == imm32_operand, "cannot specify a nonzero format");
    opnd = locate_operand(inst, call32_operand);
  } else if (r->is_data()) {
    assert(format == imm_operand || format == disp32_operand
           LP64_ONLY(|| format == narrow_oop_operand), "format ok");
    opnd = locate_operand(inst, (WhichOperand)format);
  } else {
    assert(format == imm_operand, "cannot specify a format");
    return;
  }
  assert(opnd == pc(), "must put operand where relocs can find it");
}
#endif // ASSERT

void Assembler::emit_operand32(Register reg, Address adr) {
  assert(reg->encoding() < 8, "no extended registers");
  assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers");
  emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp,
               adr._rspec);
}

void Assembler::emit_operand(Register reg, Address adr,
                             int rip_relative_correction) {
  emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp,
               adr._rspec,
               rip_relative_correction);
}

void Assembler::emit_operand(XMMRegister reg, Address adr) {
  emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp,
               adr._rspec);
}

// MMX operations
void Assembler::emit_operand(MMXRegister reg, Address adr) {
  assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers");
  emit_operand((Register)reg, adr._base, adr._index, adr._scale, adr._disp, adr._rspec);
}

// work around gcc (3.2.1-7a) bug
void Assembler::emit_operand(Address adr, MMXRegister reg) {
  assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers");
  emit_operand((Register)reg, adr._base, adr._index, adr._scale, adr._disp, adr._rspec);
}


void Assembler::emit_farith(int b1, int b2, int i) {
  assert(isByte(b1) && isByte(b2), "wrong opcode");
  assert(0 <= i &&  i < 8, "illegal stack offset");
  emit_int8(b1);
  emit_int8(b2 + i);
}


// Now the Assembler instructions (identical for 32/64 bits)

void Assembler::adcl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand(0x81, rdx, dst, imm32);
}

void Assembler::adcl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_int8(0x11);
  emit_operand(src, dst);
}

void Assembler::adcl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xD0, dst, imm32);
}

void Assembler::adcl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x13);
  emit_operand(dst, src);
}

void Assembler::adcl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x13, 0xC0, dst, src);
}

void Assembler::addl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand(0x81, rax, dst, imm32);
}

void Assembler::addl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_int8(0x01);
  emit_operand(src, dst);
}

void Assembler::addl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xC0, dst, imm32);
}

void Assembler::addl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x03);
  emit_operand(dst, src);
}

void Assembler::addl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x03, 0xC0, dst, src);
}

void Assembler::addr_nop_4() {
  assert(UseAddressNop, "no CPU support");
  // 4 bytes: NOP DWORD PTR [EAX+0]
  emit_int8(0x0F);
  emit_int8(0x1F);
  emit_int8(0x40); // emit_rm(cbuf, 0x1, EAX_enc, EAX_enc);
  emit_int8(0);    // 8-bits offset (1 byte)
}

void Assembler::addr_nop_5() {
  assert(UseAddressNop, "no CPU support");
  // 5 bytes: NOP DWORD PTR [EAX+EAX*0+0] 8-bits offset
  emit_int8(0x0F);
  emit_int8(0x1F);
  emit_int8(0x44); // emit_rm(cbuf, 0x1, EAX_enc, 0x4);
  emit_int8(0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc);
  emit_int8(0);    // 8-bits offset (1 byte)
}

void Assembler::addr_nop_7() {
  assert(UseAddressNop, "no CPU support");
  // 7 bytes: NOP DWORD PTR [EAX+0] 32-bits offset
  emit_int8(0x0F);
  emit_int8(0x1F);
  emit_int8((unsigned char)0x80);
                   // emit_rm(cbuf, 0x2, EAX_enc, EAX_enc);
  emit_int32(0);   // 32-bits offset (4 bytes)
}

void Assembler::addr_nop_8() {
  assert(UseAddressNop, "no CPU support");
  // 8 bytes: NOP DWORD PTR [EAX+EAX*0+0] 32-bits offset
  emit_int8(0x0F);
  emit_int8(0x1F);
  emit_int8((unsigned char)0x84);
                   // emit_rm(cbuf, 0x2, EAX_enc, 0x4);
  emit_int8(0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc);
  emit_int32(0);   // 32-bits offset (4 bytes)
}

void Assembler::addsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x58, dst, src, VEX_SIMD_F2);
}

void Assembler::addsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x58, dst, src, VEX_SIMD_F2);
}

void Assembler::addss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x58, dst, src, VEX_SIMD_F3);
}

void Assembler::addss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x58, dst, src, VEX_SIMD_F3);
}

void Assembler::aesdec(XMMRegister dst, Address src) {
  assert(VM_Version::supports_aes(), "");
  InstructionMark im(this);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  emit_int8((unsigned char)0xDE);
  emit_operand(dst, src);
}

void Assembler::aesdec(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_aes(), "");
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  emit_int8((unsigned char)0xDE);
  emit_int8(0xC0 | encode);
}

void Assembler::aesdeclast(XMMRegister dst, Address src) {
  assert(VM_Version::supports_aes(), "");
  InstructionMark im(this);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  emit_int8((unsigned char)0xDF);
  emit_operand(dst, src);
}

void Assembler::aesdeclast(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_aes(), "");
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  emit_int8((unsigned char)0xDF);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::aesenc(XMMRegister dst, Address src) {
  assert(VM_Version::supports_aes(), "");
  InstructionMark im(this);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  emit_int8((unsigned char)0xDC);
  emit_operand(dst, src);
}

void Assembler::aesenc(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_aes(), "");
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  emit_int8((unsigned char)0xDC);
  emit_int8(0xC0 | encode);
}

void Assembler::aesenclast(XMMRegister dst, Address src) {
  assert(VM_Version::supports_aes(), "");
  InstructionMark im(this);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  emit_int8((unsigned char)0xDD);
  emit_operand(dst, src);
}

void Assembler::aesenclast(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_aes(), "");
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  emit_int8((unsigned char)0xDD);
  emit_int8((unsigned char)(0xC0 | encode));
}


void Assembler::andl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0x81);
  emit_operand(rsp, dst, 4);
  emit_int32(imm32);
}

void Assembler::andl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xE0, dst, imm32);
}

void Assembler::andl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x23);
  emit_operand(dst, src);
}

void Assembler::andl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x23, 0xC0, dst, src);
}

void Assembler::andnl(Register dst, Register src1, Register src2) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  int encode = vex_prefix_0F38_and_encode(dst, src1, src2);
  emit_int8((unsigned char)0xF2);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::andnl(Register dst, Register src1, Address src2) {
  InstructionMark im(this);
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  vex_prefix_0F38(dst, src1, src2);
  emit_int8((unsigned char)0xF2);
  emit_operand(dst, src2);
}

void Assembler::bsfl(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBC);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::bsrl(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBD);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::bswapl(Register reg) { // bswap
  int encode = prefix_and_encode(reg->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)(0xC8 | encode));
}

void Assembler::blsil(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  int encode = vex_prefix_0F38_and_encode(rbx, dst, src);
  emit_int8((unsigned char)0xF3);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::blsil(Register dst, Address src) {
  InstructionMark im(this);
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  vex_prefix_0F38(rbx, dst, src);
  emit_int8((unsigned char)0xF3);
  emit_operand(rbx, src);
}

void Assembler::blsmskl(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  int encode = vex_prefix_0F38_and_encode(rdx, dst, src);
  emit_int8((unsigned char)0xF3);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::blsmskl(Register dst, Address src) {
  InstructionMark im(this);
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  vex_prefix_0F38(rdx, dst, src);
  emit_int8((unsigned char)0xF3);
  emit_operand(rdx, src);
}

void Assembler::blsrl(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  int encode = vex_prefix_0F38_and_encode(rcx, dst, src);
  emit_int8((unsigned char)0xF3);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::blsrl(Register dst, Address src) {
  InstructionMark im(this);
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  vex_prefix_0F38(rcx, dst, src);
  emit_int8((unsigned char)0xF3);
  emit_operand(rcx, src);
}

void Assembler::call(Label& L, relocInfo::relocType rtype) {
  // suspect disp32 is always good
  int operand = LP64_ONLY(disp32_operand) NOT_LP64(imm_operand);

  if (L.is_bound()) {
    const int long_size = 5;
    int offs = (int)( target(L) - pc() );
    assert(offs <= 0, "assembler error");
    InstructionMark im(this);
    // 1110 1000 #32-bit disp
    emit_int8((unsigned char)0xE8);
    emit_data(offs - long_size, rtype, operand);
  } else {
    InstructionMark im(this);
    // 1110 1000 #32-bit disp
    L.add_patch_at(code(), locator());

    emit_int8((unsigned char)0xE8);
    emit_data(int(0), rtype, operand);
  }
}

void Assembler::call(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xFF);
  emit_int8((unsigned char)(0xD0 | encode));
}


void Assembler::call(Address adr) {
  InstructionMark im(this);
  prefix(adr);
  emit_int8((unsigned char)0xFF);
  emit_operand(rdx, adr);
}

void Assembler::call_literal(address entry, RelocationHolder const& rspec) {
  assert(entry != NULL, "call most probably wrong");
  InstructionMark im(this);
  emit_int8((unsigned char)0xE8);
  intptr_t disp = entry - (pc() + sizeof(int32_t));
  assert(is_simm32(disp), "must be 32bit offset (call2)");
  // Technically, should use call32_operand, but this format is
  // implied by the fact that we're emitting a call instruction.

  int operand = LP64_ONLY(disp32_operand) NOT_LP64(call32_operand);
  emit_data((int) disp, rspec, operand);
}

void Assembler::cdql() {
  emit_int8((unsigned char)0x99);
}

void Assembler::cld() {
  emit_int8((unsigned char)0xFC);
}

void Assembler::cmovl(Condition cc, Register dst, Register src) {
  NOT_LP64(guarantee(VM_Version::supports_cmov(), "illegal instruction"));
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8(0x40 | cc);
  emit_int8((unsigned char)(0xC0 | encode));
}


void Assembler::cmovl(Condition cc, Register dst, Address src) {
  NOT_LP64(guarantee(VM_Version::supports_cmov(), "illegal instruction"));
  prefix(src, dst);
  emit_int8(0x0F);
  emit_int8(0x40 | cc);
  emit_operand(dst, src);
}

void Assembler::cmpb(Address dst, int imm8) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0x80);
  emit_operand(rdi, dst, 1);
  emit_int8(imm8);
}

void Assembler::cmpl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0x81);
  emit_operand(rdi, dst, 4);
  emit_int32(imm32);
}

void Assembler::cmpl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xF8, dst, imm32);
}

void Assembler::cmpl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x3B, 0xC0, dst, src);
}


void Assembler::cmpl(Register dst, Address  src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8((unsigned char)0x3B);
  emit_operand(dst, src);
}

void Assembler::cmpw(Address dst, int imm16) {
  InstructionMark im(this);
  assert(!dst.base_needs_rex() && !dst.index_needs_rex(), "no extended registers");
  emit_int8(0x66);
  emit_int8((unsigned char)0x81);
  emit_operand(rdi, dst, 2);
  emit_int16(imm16);
}

// The 32-bit cmpxchg compares the value at adr with the contents of rax,
// and stores reg into adr if so; otherwise, the value at adr is loaded into rax,.
// The ZF is set if the compared values were equal, and cleared otherwise.
void Assembler::cmpxchgl(Register reg, Address adr) { // cmpxchg
  InstructionMark im(this);
  prefix(adr, reg);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xB1);
  emit_operand(reg, adr);
}
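// Typical compare-and-swap usage (sketch, with caller-supplied new_value,
// field_addr and retry label): the expected value is loaded into rax first,
// then something like
//   lock();
//   cmpxchgl(new_value, field_addr);   // ZF is set iff the swap succeeded
//   jcc(Assembler::notEqual, retry);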

void Assembler::comisd(XMMRegister dst, Address src) {
  // NOTE: dbx seems to decode this as comiss even though the
  // 0x66 is there. Strangely ucomisd comes out correct
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith_nonds(0x2F, dst, src, VEX_SIMD_66);
}

void Assembler::comisd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith_nonds(0x2F, dst, src, VEX_SIMD_66);
}

void Assembler::comiss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith_nonds(0x2F, dst, src, VEX_SIMD_NONE);
}

void Assembler::comiss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith_nonds(0x2F, dst, src, VEX_SIMD_NONE);
}

void Assembler::cpuid() {
  emit_int8(0x0F);
  emit_int8((unsigned char)0xA2);
}

void Assembler::cvtdq2pd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith_nonds(0xE6, dst, src, VEX_SIMD_F3);
}

void Assembler::cvtdq2ps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith_nonds(0x5B, dst, src, VEX_SIMD_NONE);
}

void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x5A, dst, src, VEX_SIMD_F2);
}

void Assembler::cvtsd2ss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x5A, dst, src, VEX_SIMD_F2);
}

void Assembler::cvtsi2sdl(XMMRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2);
  emit_int8(0x2A);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::cvtsi2sdl(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x2A, dst, src, VEX_SIMD_F2);
}

void Assembler::cvtsi2ssl(XMMRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3);
  emit_int8(0x2A);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::cvtsi2ssl(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x2A, dst, src, VEX_SIMD_F3);
}

void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x5A, dst, src, VEX_SIMD_F3);
}

void Assembler::cvtss2sd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x5A, dst, src, VEX_SIMD_F3);
}


void Assembler::cvttsd2sil(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  int encode = simd_prefix_and_encode(dst, src, VEX_SIMD_F2);
  emit_int8(0x2C);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::cvttss2sil(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  int encode = simd_prefix_and_encode(dst, src, VEX_SIMD_F3);
  emit_int8(0x2C);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::decl(Address dst) {
  // Don't use it directly. Use MacroAssembler::decrement() instead.
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0xFF);
  emit_operand(rcx, dst);
}

void Assembler::divsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x5E, dst, src, VEX_SIMD_F2);
}

void Assembler::divsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x5E, dst, src, VEX_SIMD_F2);
}

void Assembler::divss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x5E, dst, src, VEX_SIMD_F3);
}

void Assembler::divss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x5E, dst, src, VEX_SIMD_F3);
}

void Assembler::emms() {
  NOT_LP64(assert(VM_Version::supports_mmx(), ""));
  emit_int8(0x0F);
  emit_int8(0x77);
}

void Assembler::hlt() {
  emit_int8((unsigned char)0xF4);
}

void Assembler::idivl(Register src) {
  int encode = prefix_and_encode(src->encoding());
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xF8 | encode));
}

void Assembler::divl(Register src) { // Unsigned
  int encode = prefix_and_encode(src->encoding());
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xF0 | encode));
}

void Assembler::imull(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAF);
  emit_int8((unsigned char)(0xC0 | encode));
}


void Assembler::imull(Register dst, Register src, int value) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  if (is8bit(value)) {
    emit_int8(0x6B);
    emit_int8((unsigned char)(0xC0 | encode));
    emit_int8(value & 0xFF);
  } else {
    emit_int8(0x69);
    emit_int8((unsigned char)(0xC0 | encode));
    emit_int32(value);
  }
}

void Assembler::imull(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x0F);
  emit_int8((unsigned char) 0xAF);
  emit_operand(dst, src);
}


void Assembler::incl(Address dst) {
  // Don't use it directly. Use MacroAssembler::increment() instead.
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0xFF);
  emit_operand(rax, dst);
}

void Assembler::jcc(Condition cc, Label& L, bool maybe_short) {
  InstructionMark im(this);
  assert((0 <= cc) && (cc < 16), "illegal cc");
  if (L.is_bound()) {
    address dst = target(L);
    assert(dst != NULL, "jcc most probably wrong");

    const int short_size = 2;
    const int long_size = 6;
    intptr_t offs = (intptr_t)dst - (intptr_t)pc();
    if (maybe_short && is8bit(offs - short_size)) {
      // 0111 tttn #8-bit disp
      emit_int8(0x70 | cc);
      emit_int8((offs - short_size) & 0xFF);
    } else {
      // 0000 1111 1000 tttn #32-bit disp
      assert(is_simm32(offs - long_size),
             "must be 32bit offset (call4)");
      emit_int8(0x0F);
      emit_int8((unsigned char)(0x80 | cc));
      emit_int32(offs - long_size);
    }
  } else {
    // Note: could eliminate cond. jumps to this jump if condition
    //       is the same however, seems to be rather unlikely case.
    // Note: use jccb() if label to be bound is very close to get
    //       an 8-bit displacement
    L.add_patch_at(code(), locator());
    emit_int8(0x0F);
    emit_int8((unsigned char)(0x80 | cc));
    emit_int32(0);
  }
}

void Assembler::jccb(Condition cc, Label& L) {
  if (L.is_bound()) {
    const int short_size = 2;
    address entry = target(L);
#ifdef ASSERT
    intptr_t dist = (intptr_t)entry - ((intptr_t)pc() + short_size);
    intptr_t delta = short_branch_delta();
    if (delta != 0) {
      dist += (dist < 0 ? (-delta) :delta);
    }
    assert(is8bit(dist), "Displacement too large for a short jmp");
#endif
    intptr_t offs = (intptr_t)entry - (intptr_t)pc();
    // 0111 tttn #8-bit disp
    emit_int8(0x70 | cc);
    emit_int8((offs - short_size) & 0xFF);
  } else {
    InstructionMark im(this);
    L.add_patch_at(code(), locator());
    emit_int8(0x70 | cc);
    emit_int8(0);
  }
}

void Assembler::jmp(Address adr) {
  InstructionMark im(this);
  prefix(adr);
  emit_int8((unsigned char)0xFF);
  emit_operand(rsp, adr);
}

void Assembler::jmp(Label& L, bool maybe_short) {
  if (L.is_bound()) {
    address entry = target(L);
    assert(entry != NULL, "jmp most probably wrong");
    InstructionMark im(this);
    const int short_size = 2;
    const int long_size = 5;
    intptr_t offs = entry - pc();
    if (maybe_short && is8bit(offs - short_size)) {
      emit_int8((unsigned char)0xEB);
      emit_int8((offs - short_size) & 0xFF);
1560
    } else {
1561
      emit_int8((unsigned char)0xE9);
1562
      emit_int32(offs - long_size);
1563 1564 1565 1566 1567 1568 1569 1570
    }
  } else {
    // By default, forward jumps are always 32-bit displacements, since
    // we can't yet know where the label will be bound.  If you're sure that
    // the forward jump will not run beyond 256 bytes, use jmpb to
    // force an 8-bit displacement.
    InstructionMark im(this);
    L.add_patch_at(code(), locator());
1571
    emit_int8((unsigned char)0xE9);
1572
    emit_int32(0);
1573
  }
D
duke 已提交
1574 1575
}

void Assembler::jmp(Register entry) {
  int encode = prefix_and_encode(entry->encoding());
  emit_int8((unsigned char)0xFF);
  emit_int8((unsigned char)(0xE0 | encode));
}

void Assembler::jmp_literal(address dest, RelocationHolder const& rspec) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xE9);
  assert(dest != NULL, "must have a target");
  intptr_t disp = dest - (pc() + sizeof(int32_t));
  assert(is_simm32(disp), "must be 32bit offset (jmp)");
  emit_data(disp, rspec.reloc(), call32_operand);
}

void Assembler::jmpb(Label& L) {
  if (L.is_bound()) {
    const int short_size = 2;
    address entry = target(L);
    assert(entry != NULL, "jmp most probably wrong");
#ifdef ASSERT
    intptr_t dist = (intptr_t)entry - ((intptr_t)pc() + short_size);
    intptr_t delta = short_branch_delta();
    if (delta != 0) {
      dist += (dist < 0 ? (-delta) :delta);
    }
    assert(is8bit(dist), "Displacement too large for a short jmp");
#endif
    intptr_t offs = entry - pc();
    emit_int8((unsigned char)0xEB);
    emit_int8((offs - short_size) & 0xFF);
  } else {
    InstructionMark im(this);
    L.add_patch_at(code(), locator());
    emit_int8((unsigned char)0xEB);
    emit_int8(0);
  }
}

void Assembler::ldmxcsr( Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  prefix(src);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAE);
  emit_operand(as_Register(2), src);
}

void Assembler::leal(Register dst, Address src) {
  InstructionMark im(this);
#ifdef _LP64
  emit_int8(0x67); // addr32
  prefix(src, dst);
#endif // LP64
  emit_int8((unsigned char)0x8D);
  emit_operand(dst, src);
}

void Assembler::lfence() {
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAE);
  emit_int8((unsigned char)0xE8);
}

void Assembler::lock() {
  emit_int8((unsigned char)0xF0);
}

void Assembler::lzcntl(Register dst, Register src) {
  assert(VM_Version::supports_lzcnt(), "encoding is treated as BSR");
  emit_int8((unsigned char)0xF3);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBD);
  emit_int8((unsigned char)(0xC0 | encode));
}

// Emit mfence instruction
void Assembler::mfence() {
  NOT_LP64(assert(VM_Version::supports_sse2(), "unsupported");)
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAE);
  emit_int8((unsigned char)0xF0);
}

void Assembler::mov(Register dst, Register src) {
  LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
}

void Assembler::movapd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith_nonds(0x28, dst, src, VEX_SIMD_66);
}

void Assembler::movaps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith_nonds(0x28, dst, src, VEX_SIMD_NONE);
}

void Assembler::movlhps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  int encode = simd_prefix_and_encode(dst, src, src, VEX_SIMD_NONE);
  emit_int8(0x16);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::movb(Register dst, Address src) {
  NOT_LP64(assert(dst->has_byte_register(), "must have byte register"));
  InstructionMark im(this);
  prefix(src, dst, true);
  emit_int8((unsigned char)0x8A);
  emit_operand(dst, src);
}


void Assembler::movb(Address dst, int imm8) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0xC6);
  emit_operand(rax, dst, 1);
  emit_int8(imm8);
}


void Assembler::movb(Address dst, Register src) {
  assert(src->has_byte_register(), "must have byte register");
  InstructionMark im(this);
  prefix(dst, src, true);
  emit_int8((unsigned char)0x88);
  emit_operand(src, dst);
}

void Assembler::movdl(XMMRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  int encode = simd_prefix_and_encode(dst, src, VEX_SIMD_66);
  emit_int8(0x6E);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::movdl(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  // swap src/dst to get correct prefix
  int encode = simd_prefix_and_encode(src, dst, VEX_SIMD_66);
  emit_int8(0x7E);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::movdl(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  simd_prefix(dst, src, VEX_SIMD_66);
  emit_int8(0x6E);
  emit_operand(dst, src);
}

void Assembler::movdl(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  simd_prefix(dst, src, VEX_SIMD_66);
  emit_int8(0x7E);
  emit_operand(src, dst);
}

void Assembler::movdqa(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith_nonds(0x6F, dst, src, VEX_SIMD_66);
}

void Assembler::movdqa(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith_nonds(0x6F, dst, src, VEX_SIMD_66);
}

void Assembler::movdqu(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith_nonds(0x6F, dst, src, VEX_SIMD_F3);
}

void Assembler::movdqu(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith_nonds(0x6F, dst, src, VEX_SIMD_F3);
}

void Assembler::movdqu(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  simd_prefix(dst, src, VEX_SIMD_F3);
  emit_int8(0x7F);
  emit_operand(src, dst);
}

// Move Unaligned 256bit Vector
void Assembler::vmovdqu(XMMRegister dst, XMMRegister src) {
  assert(UseAVX > 0, "");
  bool vector256 = true;
  int encode = vex_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F3, vector256);
  emit_int8(0x6F);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::vmovdqu(XMMRegister dst, Address src) {
  assert(UseAVX > 0, "");
  InstructionMark im(this);
  bool vector256 = true;
  vex_prefix(dst, xnoreg, src, VEX_SIMD_F3, vector256);
  emit_int8(0x6F);
  emit_operand(dst, src);
}

void Assembler::vmovdqu(Address dst, XMMRegister src) {
  assert(UseAVX > 0, "");
  InstructionMark im(this);
  bool vector256 = true;
  // swap src<->dst for encoding
  assert(src != xnoreg, "sanity");
  vex_prefix(src, xnoreg, dst, VEX_SIMD_F3, vector256);
  emit_int8(0x7F);
  emit_operand(src, dst);
}

// Uses zero extension on 64bit

void Assembler::movl(Register dst, int32_t imm32) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)(0xB8 | encode));
  emit_int32(imm32);
}

void Assembler::movl(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8((unsigned char)0x8B);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::movl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8((unsigned char)0x8B);
  emit_operand(dst, src);
}

void Assembler::movl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0xC7);
  emit_operand(rax, dst, 4);
  emit_int32(imm32);
}

void Assembler::movl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_int8((unsigned char)0x89);
  emit_operand(src, dst);
}

// New CPUs require the use of movsd and movss to avoid a partial register
// stall when loading from memory. But for old Opteron use movlpd instead
// of movsd. The selection is done in MacroAssembler::movdbl() and movflt().
void Assembler::movlpd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x12, dst, src, VEX_SIMD_66);
}
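
// A minimal sketch (illustrative, not the actual MacroAssembler code) of
// how that selection could look, assuming an UseXmmLoadAndClearUpper-style
// flag is available:
//   void MacroAssembler::movdbl(XMMRegister dst, Address src) {
//     if (UseXmmLoadAndClearUpper) { movsd(dst, src);  }  // newer CPUs
//     else                         { movlpd(dst, src); }  // old Opteron
//   }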

void Assembler::movq( MMXRegister dst, Address src ) {
  assert( VM_Version::supports_mmx(), "" );
  emit_int8(0x0F);
  emit_int8(0x6F);
  emit_operand(dst, src);
}

void Assembler::movq( Address dst, MMXRegister src ) {
  assert( VM_Version::supports_mmx(), "" );
  emit_int8(0x0F);
  emit_int8(0x7F);
  // workaround gcc (3.2.1-7a) bug
  // In that version of gcc with only an emit_operand(MMX, Address)
  // gcc will tail jump and try and reverse the parameters completely
  // obliterating dst in the process. By having a version available
  // that doesn't need to swap the args at the tail jump the bug is
  // avoided.
  emit_operand(dst, src);
}

void Assembler::movq(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  simd_prefix(dst, src, VEX_SIMD_F3);
  emit_int8(0x7E);
  emit_operand(dst, src);
}

void Assembler::movq(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  simd_prefix(dst, src, VEX_SIMD_66);
  emit_int8((unsigned char)0xD6);
  emit_operand(src, dst);
}

void Assembler::movsbl(Register dst, Address src) { // movsxb
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBE);
  emit_operand(dst, src);
}

void Assembler::movsbl(Register dst, Register src) { // movsxb
  NOT_LP64(assert(src->has_byte_register(), "must have byte register"));
  int encode = prefix_and_encode(dst->encoding(), src->encoding(), true);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBE);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::movsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x10, dst, src, VEX_SIMD_F2);
}

1897 1898
void Assembler::movsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
1899
  emit_simd_arith_nonds(0x10, dst, src, VEX_SIMD_F2);
D
duke 已提交
1900 1901
}

1902 1903
void Assembler::movsd(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
D
duke 已提交
1904
  InstructionMark im(this);
K
kvn 已提交
1905
  simd_prefix(dst, src, VEX_SIMD_F2);
1906
  emit_int8(0x11);
1907
  emit_operand(src, dst);
D
duke 已提交
1908 1909
}

1910 1911
void Assembler::movss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
1912
  emit_simd_arith(0x10, dst, src, VEX_SIMD_F3);
D
duke 已提交
1913 1914
}

1915 1916
void Assembler::movss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
1917
  emit_simd_arith_nonds(0x10, dst, src, VEX_SIMD_F3);
D
duke 已提交
1918 1919
}

1920 1921 1922
void Assembler::movss(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
K
kvn 已提交
1923
  simd_prefix(dst, src, VEX_SIMD_F3);
1924
  emit_int8(0x11);
1925
  emit_operand(src, dst);
D
duke 已提交
1926 1927
}

1928
void Assembler::movswl(Register dst, Address src) { // movsxw
D
duke 已提交
1929
  InstructionMark im(this);
1930
  prefix(src, dst);
1931 1932
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBF);
D
duke 已提交
1933 1934 1935
  emit_operand(dst, src);
}

1936
void Assembler::movswl(Register dst, Register src) { // movsxw
D
duke 已提交
1937
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
1938 1939 1940
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBF);
  emit_int8((unsigned char)(0xC0 | encode));
D
duke 已提交
1941 1942
}

1943 1944
void Assembler::movw(Address dst, int imm16) {
  InstructionMark im(this);
D
duke 已提交
1945

1946
  emit_int8(0x66); // switch to 16-bit mode
1947
  prefix(dst);
1948
  emit_int8((unsigned char)0xC7);
1949
  emit_operand(rax, dst, 2);
1950
  emit_int16(imm16);
D
duke 已提交
1951 1952
}

1953
void Assembler::movw(Register dst, Address src) {
D
duke 已提交
1954
  InstructionMark im(this);
1955
  emit_int8(0x66);
1956
  prefix(src, dst);
1957
  emit_int8((unsigned char)0x8B);
1958
  emit_operand(dst, src);
D
duke 已提交
1959 1960
}

1961 1962
void Assembler::movw(Address dst, Register src) {
  InstructionMark im(this);
1963
  emit_int8(0x66);
1964
  prefix(dst, src);
1965
  emit_int8((unsigned char)0x89);
1966
  emit_operand(src, dst);
D
duke 已提交
1967 1968
}

1969
void Assembler::movzbl(Register dst, Address src) { // movzxb
D
duke 已提交
1970
  InstructionMark im(this);
1971
  prefix(src, dst);
1972 1973
  emit_int8(0x0F);
  emit_int8((unsigned char)0xB6);
1974
  emit_operand(dst, src);
D
duke 已提交
1975 1976
}

1977 1978 1979
void Assembler::movzbl(Register dst, Register src) { // movzxb
  NOT_LP64(assert(src->has_byte_register(), "must have byte register"));
  int encode = prefix_and_encode(dst->encoding(), src->encoding(), true);
1980 1981 1982
  emit_int8(0x0F);
  emit_int8((unsigned char)0xB6);
  emit_int8(0xC0 | encode);
D
duke 已提交
1983 1984
}

1985 1986 1987
void Assembler::movzwl(Register dst, Address src) { // movzxw
  InstructionMark im(this);
  prefix(src, dst);
1988 1989
  emit_int8(0x0F);
  emit_int8((unsigned char)0xB7);
1990
  emit_operand(dst, src);
D
duke 已提交
1991 1992
}

1993
void Assembler::movzwl(Register dst, Register src) { // movzxw
D
duke 已提交
1994
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
1995 1996 1997
  emit_int8(0x0F);
  emit_int8((unsigned char)0xB7);
  emit_int8(0xC0 | encode);
D
duke 已提交
1998 1999 2000 2001 2002
}

void Assembler::mull(Address src) {
  InstructionMark im(this);
  prefix(src);
2003
  emit_int8((unsigned char)0xF7);
D
duke 已提交
2004 2005 2006 2007 2008
  emit_operand(rsp, src);
}

void Assembler::mull(Register src) {
  int encode = prefix_and_encode(src->encoding());
2009 2010
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xE0 | encode));
D
duke 已提交
2011 2012
}

2013 2014
void Assembler::mulsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2015
  emit_simd_arith(0x59, dst, src, VEX_SIMD_F2);
D
duke 已提交
2016 2017
}

2018 2019
void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2020
  emit_simd_arith(0x59, dst, src, VEX_SIMD_F2);
D
duke 已提交
2021 2022
}

2023 2024
void Assembler::mulss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
2025
  emit_simd_arith(0x59, dst, src, VEX_SIMD_F3);
D
duke 已提交
2026 2027
}

2028 2029
void Assembler::mulss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
2030
  emit_simd_arith(0x59, dst, src, VEX_SIMD_F3);
D
duke 已提交
2031 2032
}

2033
void Assembler::negl(Register dst) {
D
duke 已提交
2034
  int encode = prefix_and_encode(dst->encoding());
2035 2036
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xD8 | encode));
D
duke 已提交
2037 2038
}

2039 2040 2041 2042 2043 2044 2045
void Assembler::nop(int i) {
#ifdef ASSERT
  assert(i > 0, " ");
  // The fancy nops aren't currently recognized by debuggers making it a
  // pain to disassemble code while debugging. If asserts are on clearly
  // speed is not an issue so simply use the single byte traditional nop
  // to do alignment.
D
duke 已提交
2046

2047
  for (; i > 0 ; i--) emit_int8((unsigned char)0x90);
2048
  return;
D
duke 已提交
2049

2050
#endif // ASSERT
D
duke 已提交
2051

  if (UseAddressNop && VM_Version::is_intel()) {
    //
    // Using multi-bytes nops "0x0F 0x1F [address]" for Intel
    //  1: 0x90
    //  2: 0x66 0x90
    //  3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding)
    //  4: 0x0F 0x1F 0x40 0x00
    //  5: 0x0F 0x1F 0x44 0x00 0x00
    //  6: 0x66 0x0F 0x1F 0x44 0x00 0x00
    //  7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
    //  8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    //  9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
D
duke 已提交
2066

2067
    // The rest coding is Intel specific - don't use consecutive address nops
D
duke 已提交
2068

2069 2070 2071 2072
    // 12: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
    // 13: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
    // 14: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
    // 15: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
D
duke 已提交
2073

2074 2075 2076
    while(i >= 15) {
      // For Intel don't generate consecutive address nops (mix with regular nops)
      i -= 15;
2077 2078 2079
      emit_int8(0x66);   // size prefix
      emit_int8(0x66);   // size prefix
      emit_int8(0x66);   // size prefix
2080
      addr_nop_8();
2081 2082 2083 2084 2085
      emit_int8(0x66);   // size prefix
      emit_int8(0x66);   // size prefix
      emit_int8(0x66);   // size prefix
      emit_int8((unsigned char)0x90);
                         // nop
2086 2087 2088
    }
    switch (i) {
      case 14:
2089
        emit_int8(0x66); // size prefix
2090
      case 13:
2091
        emit_int8(0x66); // size prefix
2092 2093
      case 12:
        addr_nop_8();
2094 2095 2096 2097 2098
        emit_int8(0x66); // size prefix
        emit_int8(0x66); // size prefix
        emit_int8(0x66); // size prefix
        emit_int8((unsigned char)0x90);
                         // nop
2099 2100
        break;
      case 11:
2101
        emit_int8(0x66); // size prefix
2102
      case 10:
2103
        emit_int8(0x66); // size prefix
2104
      case 9:
2105
        emit_int8(0x66); // size prefix
2106 2107 2108 2109 2110 2111 2112
      case 8:
        addr_nop_8();
        break;
      case 7:
        addr_nop_7();
        break;
      case 6:
2113
        emit_int8(0x66); // size prefix
2114 2115 2116 2117 2118 2119 2120 2121
      case 5:
        addr_nop_5();
        break;
      case 4:
        addr_nop_4();
        break;
      case 3:
        // Don't use "0x0F 0x1F 0x00" - need patching safe padding
2122
        emit_int8(0x66); // size prefix
2123
      case 2:
2124
        emit_int8(0x66); // size prefix
2125
      case 1:
2126 2127
        emit_int8((unsigned char)0x90);
                         // nop
        break;
      default:
        assert(i == 0, " ");
    }
    return;
  }
  if (UseAddressNop && VM_Version::is_amd()) {
    //
    // Using multi-bytes nops "0x0F 0x1F [address]" for AMD.
    //  1: 0x90
    //  2: 0x66 0x90
    //  3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding)
    //  4: 0x0F 0x1F 0x40 0x00
    //  5: 0x0F 0x1F 0x44 0x00 0x00
    //  6: 0x66 0x0F 0x1F 0x44 0x00 0x00
    //  7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
    //  8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    //  9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00

    // The rest coding is AMD specific - use consecutive address nops

    // 12: 0x66 0x0F 0x1F 0x44 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00
    // 13: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00
    // 14: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
    // 15: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
    // 16: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    //     Size prefixes (0x66) are added for larger sizes

    while(i >= 22) {
      i -= 11;
2160 2161 2162
      emit_int8(0x66); // size prefix
      emit_int8(0x66); // size prefix
      emit_int8(0x66); // size prefix
2163 2164 2165 2166 2167 2168
      addr_nop_8();
    }
    // Generate first nop for size between 21-12
    switch (i) {
      case 21:
        i -= 1;
2169
        emit_int8(0x66); // size prefix
2170 2171 2172
      case 20:
      case 19:
        i -= 1;
2173
        emit_int8(0x66); // size prefix
2174 2175 2176
      case 18:
      case 17:
        i -= 1;
2177
        emit_int8(0x66); // size prefix
      case 16:
      case 15:
        i -= 8;
        addr_nop_8();
        break;
      case 14:
      case 13:
        i -= 7;
        addr_nop_7();
        break;
      case 12:
        i -= 6;
2190
        emit_int8(0x66); // size prefix
        addr_nop_5();
        break;
      default:
        assert(i < 12, " ");
    }

    // Generate second nop for size between 11-1
    switch (i) {
      case 11:
2200
        emit_int8(0x66); // size prefix
2201
      case 10:
2202
        emit_int8(0x66); // size prefix
2203
      case 9:
2204
        emit_int8(0x66); // size prefix
      case 8:
        addr_nop_8();
        break;
      case 7:
        addr_nop_7();
        break;
      case 6:
2212
        emit_int8(0x66); // size prefix
      case 5:
        addr_nop_5();
        break;
      case 4:
        addr_nop_4();
        break;
      case 3:
        // Don't use "0x0F 0x1F 0x00" - need patching safe padding
2221
        emit_int8(0x66); // size prefix
2222
      case 2:
2223
        emit_int8(0x66); // size prefix
2224
      case 1:
2225 2226
        emit_int8((unsigned char)0x90);
                         // nop
        break;
      default:
        assert(i == 0, " ");
    }
    return;
  }

  // Using nops with size prefixes "0x66 0x90".
  // From AMD Optimization Guide:
  //  1: 0x90
  //  2: 0x66 0x90
  //  3: 0x66 0x66 0x90
  //  4: 0x66 0x66 0x66 0x90
  //  5: 0x66 0x66 0x90 0x66 0x90
  //  6: 0x66 0x66 0x90 0x66 0x66 0x90
  //  7: 0x66 0x66 0x66 0x90 0x66 0x66 0x90
  //  8: 0x66 0x66 0x66 0x90 0x66 0x66 0x66 0x90
  //  9: 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90
  // 10: 0x66 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90
  //
  while(i > 12) {
    i -= 4;
2249 2250 2251 2252 2253
    emit_int8(0x66); // size prefix
    emit_int8(0x66);
    emit_int8(0x66);
    emit_int8((unsigned char)0x90);
                     // nop
2254 2255 2256 2257 2258
  }
  // 1 - 12 nops
  if(i > 8) {
    if(i > 9) {
      i -= 1;
2259
      emit_int8(0x66);
2260 2261
    }
    i -= 3;
2262 2263 2264
    emit_int8(0x66);
    emit_int8(0x66);
    emit_int8((unsigned char)0x90);
2265 2266 2267 2268 2269
  }
  // 1 - 8 nops
  if(i > 4) {
    if(i > 6) {
      i -= 1;
2270
      emit_int8(0x66);
2271 2272
    }
    i -= 3;
2273 2274 2275
    emit_int8(0x66);
    emit_int8(0x66);
    emit_int8((unsigned char)0x90);
2276 2277 2278
  }
  switch (i) {
    case 4:
2279
      emit_int8(0x66);
2280
    case 3:
2281
      emit_int8(0x66);
2282
    case 2:
2283
      emit_int8(0x66);
2284
    case 1:
2285
      emit_int8((unsigned char)0x90);
2286 2287 2288 2289
      break;
    default:
      assert(i == 0, " ");
  }
}
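
// Usage sketch (illustrative): nop(i) is typically used to pad the code
// stream out to an alignment boundary, along the lines of
//   int pad = round_to(offset(), 16) - offset();   // hypothetical call site
//   if (pad > 0) nop(pad);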

void Assembler::notl(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
2294 2295
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xD0 | encode));
D
duke 已提交
2296 2297
}

2298
void Assembler::orl(Address dst, int32_t imm32) {
D
duke 已提交
2299
  InstructionMark im(this);
2300
  prefix(dst);
2301
  emit_arith_operand(0x81, rcx, dst, imm32);
D
duke 已提交
2302 2303
}

2304 2305 2306
void Assembler::orl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xC8, dst, imm32);
D
duke 已提交
2307 2308
}

2309
void Assembler::orl(Register dst, Address src) {
D
duke 已提交
2310
  InstructionMark im(this);
2311
  prefix(src, dst);
2312
  emit_int8(0x0B);
D
duke 已提交
2313 2314 2315
  emit_operand(dst, src);
}

2316 2317 2318
void Assembler::orl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x0B, 0xC0, dst, src);
D
duke 已提交
2319 2320
}

K
kvn 已提交
2321 2322 2323
void Assembler::packuswb(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
2324
  emit_simd_arith(0x67, dst, src, VEX_SIMD_66);
K
kvn 已提交
2325 2326 2327 2328
}

void Assembler::packuswb(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2329
  emit_simd_arith(0x67, dst, src, VEX_SIMD_66);
K
kvn 已提交
2330
}
C
cfang 已提交
2331

2332 2333 2334 2335 2336 2337
void Assembler::vpackuswb(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0x67, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpermq(XMMRegister dst, XMMRegister src, int imm8, bool vector256) {
2338 2339 2340 2341 2342
  assert(VM_Version::supports_avx2(), "");
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, true, vector256);
  emit_int8(0x00);
  emit_int8(0xC0 | encode);
  emit_int8(imm8);
2343 2344
}

2345 2346 2347 2348 2349
void Assembler::pause() {
  emit_int8((unsigned char)0xF3);
  emit_int8((unsigned char)0x90);
}

C
cfang 已提交
2350 2351 2352
void Assembler::pcmpestri(XMMRegister dst, Address src, int imm8) {
  assert(VM_Version::supports_sse4_2(), "");
  InstructionMark im(this);
K
kvn 已提交
2353
  simd_prefix(dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A);
2354
  emit_int8(0x61);
C
cfang 已提交
2355
  emit_operand(dst, src);
2356
  emit_int8(imm8);
C
cfang 已提交
2357 2358 2359 2360
}

void Assembler::pcmpestri(XMMRegister dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sse4_2(), "");
2361
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_3A);
2362 2363 2364
  emit_int8(0x61);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
C
cfang 已提交
2365 2366
}

void Assembler::pextrd(Register dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, false);
  emit_int8(0x16);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
}

void Assembler::pextrq(Register dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, true);
  emit_int8(0x16);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
}

void Assembler::pinsrd(XMMRegister dst, Register src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, false);
  emit_int8(0x22);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
}

void Assembler::pinsrq(XMMRegister dst, Register src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, true);
  emit_int8(0x22);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
}

K
kvn 已提交
2399 2400 2401 2402
void Assembler::pmovzxbw(XMMRegister dst, Address src) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionMark im(this);
  simd_prefix(dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
2403
  emit_int8(0x30);
K
kvn 已提交
2404 2405 2406 2407 2408
  emit_operand(dst, src);
}

void Assembler::pmovzxbw(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse4_1(), "");
2409
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
2410 2411
  emit_int8(0x30);
  emit_int8((unsigned char)(0xC0 | encode));
K
kvn 已提交
2412 2413
}

2414 2415
// generic
void Assembler::pop(Register dst) {
D
duke 已提交
2416
  int encode = prefix_and_encode(dst->encoding());
2417
  emit_int8(0x58 | encode);
D
duke 已提交
2418 2419
}

2420 2421 2422
void Assembler::popcntl(Register dst, Address src) {
  assert(VM_Version::supports_popcnt(), "must support");
  InstructionMark im(this);
2423
  emit_int8((unsigned char)0xF3);
2424
  prefix(src, dst);
2425 2426
  emit_int8(0x0F);
  emit_int8((unsigned char)0xB8);
2427 2428 2429 2430 2431
  emit_operand(dst, src);
}

void Assembler::popcntl(Register dst, Register src) {
  assert(VM_Version::supports_popcnt(), "must support");
2432
  emit_int8((unsigned char)0xF3);
2433
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
2434 2435 2436
  emit_int8(0x0F);
  emit_int8((unsigned char)0xB8);
  emit_int8((unsigned char)(0xC0 | encode));
2437 2438
}

2439
void Assembler::popf() {
2440
  emit_int8((unsigned char)0x9D);
D
duke 已提交
2441 2442
}

R
roland 已提交
2443
#ifndef _LP64 // no 32bit push/pop on amd64
2444 2445 2446 2447
void Assembler::popl(Address dst) {
  // NOTE: this will adjust stack by 8byte on 64bits
  InstructionMark im(this);
  prefix(dst);
2448
  emit_int8((unsigned char)0x8F);
2449
  emit_operand(rax, dst);
D
duke 已提交
2450
}
R
roland 已提交
2451
#endif
D
duke 已提交
2452

2453 2454
void Assembler::prefetch_prefix(Address src) {
  prefix(src);
2455
  emit_int8(0x0F);
D
duke 已提交
2456 2457
}

2458
void Assembler::prefetchnta(Address src) {
2459
  NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
2460 2461
  InstructionMark im(this);
  prefetch_prefix(src);
2462
  emit_int8(0x18);
2463
  emit_operand(rax, src); // 0, src
D
duke 已提交
2464 2465
}

2466
void Assembler::prefetchr(Address src) {
2467
  assert(VM_Version::supports_3dnow_prefetch(), "must support");
2468 2469
  InstructionMark im(this);
  prefetch_prefix(src);
2470
  emit_int8(0x0D);
2471
  emit_operand(rax, src); // 0, src
D
duke 已提交
2472 2473
}

2474 2475 2476 2477
void Assembler::prefetcht0(Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
  InstructionMark im(this);
  prefetch_prefix(src);
2478
  emit_int8(0x18);
2479
  emit_operand(rcx, src); // 1, src
D
duke 已提交
2480 2481
}

2482 2483
void Assembler::prefetcht1(Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
D
duke 已提交
2484
  InstructionMark im(this);
2485
  prefetch_prefix(src);
2486
  emit_int8(0x18);
2487
  emit_operand(rdx, src); // 2, src
D
duke 已提交
2488 2489
}

2490 2491 2492 2493
void Assembler::prefetcht2(Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
  InstructionMark im(this);
  prefetch_prefix(src);
2494
  emit_int8(0x18);
2495
  emit_operand(rbx, src); // 3, src
D
duke 已提交
2496 2497
}

2498
void Assembler::prefetchw(Address src) {
2499
  assert(VM_Version::supports_3dnow_prefetch(), "must support");
D
duke 已提交
2500
  InstructionMark im(this);
2501
  prefetch_prefix(src);
2502
  emit_int8(0x0D);
2503
  emit_operand(rcx, src); // 1, src
D
duke 已提交
2504 2505
}

2506
void Assembler::prefix(Prefix p) {
2507
  emit_int8(p);
D
duke 已提交
2508 2509
}

2510 2511 2512
void Assembler::pshufb(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_ssse3(), "");
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
2513 2514
  emit_int8(0x00);
  emit_int8((unsigned char)(0xC0 | encode));
2515 2516 2517 2518 2519 2520
}

void Assembler::pshufb(XMMRegister dst, Address src) {
  assert(VM_Version::supports_ssse3(), "");
  InstructionMark im(this);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
2521
  emit_int8(0x00);
2522 2523 2524
  emit_operand(dst, src);
}

2525 2526 2527
void Assembler::pshufd(XMMRegister dst, XMMRegister src, int mode) {
  assert(isByte(mode), "invalid value");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2528
  emit_simd_arith_nonds(0x70, dst, src, VEX_SIMD_66);
2529
  emit_int8(mode & 0xFF);
2530

D
duke 已提交
2531 2532
}

2533 2534 2535
void Assembler::pshufd(XMMRegister dst, Address src, int mode) {
  assert(isByte(mode), "invalid value");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
K
kvn 已提交
2536
  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
D
duke 已提交
2537
  InstructionMark im(this);
K
kvn 已提交
2538
  simd_prefix(dst, src, VEX_SIMD_66);
2539
  emit_int8(0x70);
2540
  emit_operand(dst, src);
2541
  emit_int8(mode & 0xFF);
D
duke 已提交
2542 2543
}

2544 2545 2546
void Assembler::pshuflw(XMMRegister dst, XMMRegister src, int mode) {
  assert(isByte(mode), "invalid value");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2547
  emit_simd_arith_nonds(0x70, dst, src, VEX_SIMD_F2);
2548
  emit_int8(mode & 0xFF);
D
duke 已提交
2549 2550
}

2551 2552 2553
void Assembler::pshuflw(XMMRegister dst, Address src, int mode) {
  assert(isByte(mode), "invalid value");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
K
kvn 已提交
2554
  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
D
duke 已提交
2555
  InstructionMark im(this);
K
kvn 已提交
2556
  simd_prefix(dst, src, VEX_SIMD_F2);
2557
  emit_int8(0x70);
D
duke 已提交
2558
  emit_operand(dst, src);
2559
  emit_int8(mode & 0xFF);
D
duke 已提交
2560 2561
}

2562 2563 2564
void Assembler::psrldq(XMMRegister dst, int shift) {
  // Shift 128 bit value in xmm register by number of bytes.
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
K
kvn 已提交
2565
  int encode = simd_prefix_and_encode(xmm3, dst, dst, VEX_SIMD_66);
2566 2567 2568
  emit_int8(0x73);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift);
2569 2570
}

C
cfang 已提交
2571 2572
void Assembler::ptest(XMMRegister dst, Address src) {
  assert(VM_Version::supports_sse4_1(), "");
K
kvn 已提交
2573
  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
C
cfang 已提交
2574
  InstructionMark im(this);
K
kvn 已提交
2575
  simd_prefix(dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
2576
  emit_int8(0x17);
C
cfang 已提交
2577 2578 2579 2580 2581
  emit_operand(dst, src);
}

void Assembler::ptest(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse4_1(), "");
2582
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
2583 2584
  emit_int8(0x17);
  emit_int8((unsigned char)(0xC0 | encode));
C
cfang 已提交
2585 2586
}

2587 2588 2589 2590 2591 2592 2593
void Assembler::vptest(XMMRegister dst, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  bool vector256 = true;
  assert(dst != xnoreg, "sanity");
  int dst_enc = dst->encoding();
  // swap src<->dst for encoding
2594
  vex_prefix(src, 0, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, false, vector256);
  emit_int8(0x17);
  emit_operand(dst, src);
}

void Assembler::vptest(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  bool vector256 = true;
  int encode = vex_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, vector256, VEX_OPCODE_0F_38);
  emit_int8(0x17);
  emit_int8((unsigned char)(0xC0 | encode));
}

K
kvn 已提交
2607 2608 2609
void Assembler::punpcklbw(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
2610
  emit_simd_arith(0x60, dst, src, VEX_SIMD_66);
K
kvn 已提交
2611 2612
}

2613 2614
void Assembler::punpcklbw(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2615
  emit_simd_arith(0x60, dst, src, VEX_SIMD_66);
D
duke 已提交
2616 2617
}

K
kvn 已提交
2618 2619 2620
void Assembler::punpckldq(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
2621
  emit_simd_arith(0x62, dst, src, VEX_SIMD_66);
K
kvn 已提交
2622 2623 2624 2625
}

void Assembler::punpckldq(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2626
  emit_simd_arith(0x62, dst, src, VEX_SIMD_66);
K
kvn 已提交
2627 2628
}

K
kvn 已提交
2629 2630
void Assembler::punpcklqdq(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2631
  emit_simd_arith(0x6C, dst, src, VEX_SIMD_66);
K
kvn 已提交
2632 2633
}

2634 2635 2636
void Assembler::push(int32_t imm32) {
  // in 64bits we push 64bits onto the stack but only
  // take a 32bit immediate
2637
  emit_int8(0x68);
2638
  emit_int32(imm32);
D
duke 已提交
2639 2640
}

2641 2642 2643
void Assembler::push(Register src) {
  int encode = prefix_and_encode(src->encoding());

2644
  emit_int8(0x50 | encode);
D
duke 已提交
2645 2646
}

2647
void Assembler::pushf() {
2648
  emit_int8((unsigned char)0x9C);
D
duke 已提交
2649 2650
}

R
roland 已提交
2651
#ifndef _LP64 // no 32bit push/pop on amd64
2652 2653 2654 2655
void Assembler::pushl(Address src) {
  // Note this will push 64bit on 64bit
  InstructionMark im(this);
  prefix(src);
2656
  emit_int8((unsigned char)0xFF);
2657
  emit_operand(rsi, src);
D
duke 已提交
2658
}
R
roland 已提交
2659
#endif
D
duke 已提交
2660

2661 2662 2663 2664
void Assembler::rcll(Register dst, int imm8) {
  assert(isShiftCount(imm8), "illegal shift count");
  int encode = prefix_and_encode(dst->encoding());
  if (imm8 == 1) {
2665 2666
    emit_int8((unsigned char)0xD1);
    emit_int8((unsigned char)(0xD0 | encode));
2667
  } else {
2668 2669 2670
    emit_int8((unsigned char)0xC1);
    emit_int8((unsigned char)0xD0 | encode);
    emit_int8(imm8);
2671
  }
D
duke 已提交
2672 2673
}

2674 2675 2676 2677 2678
void Assembler::rdtsc() {
  emit_int8((unsigned char)0x0F);
  emit_int8((unsigned char)0x31);
}

2679 2680 2681
// copies data from [esi] to [edi] using rcx pointer sized words
// generic
void Assembler::rep_mov() {
2682
  emit_int8((unsigned char)0xF3);
2683 2684
  // MOVSQ
  LP64_ONLY(prefix(REX_W));
2685
  emit_int8((unsigned char)0xA5);
D
duke 已提交
2686 2687
}

2688 2689 2690 2691 2692 2693 2694
// sets rcx bytes at [edi] to the value in rax
void Assembler::rep_stosb() {
  emit_int8((unsigned char)0xF3); // REP
  LP64_ONLY(prefix(REX_W));
  emit_int8((unsigned char)0xAA); // STOSB
}

2695 2696
// sets rcx pointer sized words at [edi] to the value in rax
// generic
2697 2698 2699
void Assembler::rep_stos() {
  emit_int8((unsigned char)0xF3); // REP
  LP64_ONLY(prefix(REX_W));       // LP64:STOSQ, LP32:STOSD
  emit_int8((unsigned char)0xAB);
}
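
// Usage sketch (illustrative, assuming the usual '#define __ masm->'
// shorthand): clearing a block of memory with rdi = start, rcx = word
// count and rax = 0:
//   __ xorptr(rax, rax);   // value to store
//   __ rep_stos();         // stores rcx pointer-sized words of rax at [rdi]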

// scans rcx pointer sized words at [edi] for occurrence of rax,
// generic
void Assembler::repne_scan() { // repne_scan
2706
  emit_int8((unsigned char)0xF2);
2707 2708
  // SCASQ
  LP64_ONLY(prefix(REX_W));
2709
  emit_int8((unsigned char)0xAF);
2710 2711 2712 2713 2714 2715
}

#ifdef _LP64
// scans rcx 4 byte words at [edi] for occurrence of rax,
// generic
void Assembler::repne_scanl() { // repne_scan
2716
  emit_int8((unsigned char)0xF2);
2717
  // SCASL
2718
  emit_int8((unsigned char)0xAF);
2719 2720 2721 2722 2723
}
#endif

void Assembler::ret(int imm16) {
  if (imm16 == 0) {
2724
    emit_int8((unsigned char)0xC3);
2725
  } else {
2726
    emit_int8((unsigned char)0xC2);
2727
    emit_int16(imm16);
  }
}

void Assembler::sahf() {
#ifdef _LP64
  // Not supported in 64bit mode
  ShouldNotReachHere();
#endif
2736
  emit_int8((unsigned char)0x9E);
}

void Assembler::sarl(Register dst, int imm8) {
  int encode = prefix_and_encode(dst->encoding());
  assert(isShiftCount(imm8), "illegal shift count");
  if (imm8 == 1) {
2743 2744
    emit_int8((unsigned char)0xD1);
    emit_int8((unsigned char)(0xF8 | encode));
2745
  } else {
2746 2747 2748
    emit_int8((unsigned char)0xC1);
    emit_int8((unsigned char)(0xF8 | encode));
    emit_int8(imm8);
2749 2750 2751 2752 2753
  }
}

void Assembler::sarl(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
2754 2755
  emit_int8((unsigned char)0xD3);
  emit_int8((unsigned char)(0xF8 | encode));
}

void Assembler::sbbl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand(0x81, rbx, dst, imm32);
}

void Assembler::sbbl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xD8, dst, imm32);
}


void Assembler::sbbl(Register dst, Address src) {
D
duke 已提交
2771 2772
  InstructionMark im(this);
  prefix(src, dst);
2773
  emit_int8(0x1B);
D
duke 已提交
2774 2775 2776
  emit_operand(dst, src);
}

2777 2778 2779
void Assembler::sbbl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x1B, 0xC0, dst, src);
D
duke 已提交
2780 2781
}

2782 2783 2784
void Assembler::setb(Condition cc, Register dst) {
  assert(0 <= cc && cc < 16, "illegal cc");
  int encode = prefix_and_encode(dst->encoding(), true);
2785 2786 2787
  emit_int8(0x0F);
  emit_int8((unsigned char)0x90 | cc);
  emit_int8((unsigned char)(0xC0 | encode));
D
duke 已提交
2788 2789
}

2790 2791 2792 2793
void Assembler::shll(Register dst, int imm8) {
  assert(isShiftCount(imm8), "illegal shift count");
  int encode = prefix_and_encode(dst->encoding());
  if (imm8 == 1 ) {
2794 2795
    emit_int8((unsigned char)0xD1);
    emit_int8((unsigned char)(0xE0 | encode));
2796
  } else {
2797 2798 2799
    emit_int8((unsigned char)0xC1);
    emit_int8((unsigned char)(0xE0 | encode));
    emit_int8(imm8);
2800
  }
D
duke 已提交
2801 2802
}

2803 2804
void Assembler::shll(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
2805 2806
  emit_int8((unsigned char)0xD3);
  emit_int8((unsigned char)(0xE0 | encode));
2807 2808 2809 2810 2811
}

void Assembler::shrl(Register dst, int imm8) {
  assert(isShiftCount(imm8), "illegal shift count");
  int encode = prefix_and_encode(dst->encoding());
2812 2813 2814
  emit_int8((unsigned char)0xC1);
  emit_int8((unsigned char)(0xE8 | encode));
  emit_int8(imm8);
2815 2816 2817 2818
}

void Assembler::shrl(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
2819 2820
  emit_int8((unsigned char)0xD3);
  emit_int8((unsigned char)(0xE8 | encode));
2821 2822 2823 2824
}

// copies a single word from [esi] to [edi]
void Assembler::smovl() {
2825
  emit_int8((unsigned char)0xA5);
2826 2827 2828 2829
}

void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2830
  emit_simd_arith(0x51, dst, src, VEX_SIMD_F2);
D
duke 已提交
2831 2832
}

2833 2834
void Assembler::sqrtsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2835
  emit_simd_arith(0x51, dst, src, VEX_SIMD_F2);
2836 2837 2838
}

void Assembler::sqrtss(XMMRegister dst, XMMRegister src) {
K
kvn 已提交
2839
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
2840
  emit_simd_arith(0x51, dst, src, VEX_SIMD_F3);
2841 2842
}

2843
void Assembler::std() {
2844
  emit_int8((unsigned char)0xFD);
2845 2846
}

2847
void Assembler::sqrtss(XMMRegister dst, Address src) {
K
kvn 已提交
2848
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
2849
  emit_simd_arith(0x51, dst, src, VEX_SIMD_F3);
2850 2851
}

2852 2853 2854 2855
void Assembler::stmxcsr( Address dst) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  prefix(dst);
2856 2857
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAE);
2858
  emit_operand(as_Register(3), dst);
D
duke 已提交
2859 2860
}

2861 2862 2863
void Assembler::subl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
2864
  emit_arith_operand(0x81, rbp, dst, imm32);
2865 2866 2867 2868 2869
}

void Assembler::subl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
2870
  emit_int8(0x29);
2871 2872 2873
  emit_operand(src, dst);
}

2874 2875 2876 2877 2878
void Assembler::subl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xE8, dst, imm32);
}

2879 2880 2881 2882 2883 2884
// Force generation of a 4 byte immediate value even if it fits into 8bit
void Assembler::subl_imm32(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith_imm32(0x81, 0xE8, dst, imm32);
}

2885
void Assembler::subl(Register dst, Address src) {
D
duke 已提交
2886 2887
  InstructionMark im(this);
  prefix(src, dst);
2888
  emit_int8(0x2B);
D
duke 已提交
2889 2890 2891
  emit_operand(dst, src);
}

void Assembler::subl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x2B, 0xC0, dst, src);
}

void Assembler::subsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2899
  emit_simd_arith(0x5C, dst, src, VEX_SIMD_F2);
D
duke 已提交
2900 2901
}

2902 2903
void Assembler::subsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2904
  emit_simd_arith(0x5C, dst, src, VEX_SIMD_F2);
D
duke 已提交
2905 2906
}

2907 2908
void Assembler::subss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
2909
  emit_simd_arith(0x5C, dst, src, VEX_SIMD_F3);
D
duke 已提交
2910 2911
}

2912 2913
void Assembler::subss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
2914
  emit_simd_arith(0x5C, dst, src, VEX_SIMD_F3);
D
duke 已提交
2915 2916
}

2917 2918 2919 2920
void Assembler::testb(Register dst, int imm8) {
  NOT_LP64(assert(dst->has_byte_register(), "must have byte register"));
  (void) prefix_and_encode(dst->encoding(), true);
  emit_arith_b(0xF6, 0xC0, dst, imm8);
D
duke 已提交
2921 2922
}

void Assembler::testl(Register dst, int32_t imm32) {
  // not using emit_arith because test
  // doesn't support sign-extension of
  // 8bit operands
  int encode = dst->encoding();
  if (encode == 0) {
2929
    emit_int8((unsigned char)0xA9);
2930 2931
  } else {
    encode = prefix_and_encode(encode);
2932 2933
    emit_int8((unsigned char)0xF7);
    emit_int8((unsigned char)(0xC0 | encode));
2934
  }
2935
  emit_int32(imm32);
D
duke 已提交
2936 2937
}

2938 2939 2940 2941
void Assembler::testl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x85, 0xC0, dst, src);
}
D
duke 已提交
2942

2943 2944 2945
void Assembler::testl(Register dst, Address  src) {
  InstructionMark im(this);
  prefix(src, dst);
2946
  emit_int8((unsigned char)0x85);
2947
  emit_operand(dst, src);
D
duke 已提交
2948 2949
}

void Assembler::tzcntl(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "tzcnt instruction not supported");
  emit_int8((unsigned char)0xF3);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBC);
  emit_int8((unsigned char)0xC0 | encode);
}

void Assembler::tzcntq(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "tzcnt instruction not supported");
  emit_int8((unsigned char)0xF3);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBC);
  emit_int8((unsigned char)(0xC0 | encode));
}

2968 2969
void Assembler::ucomisd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2970
  emit_simd_arith_nonds(0x2E, dst, src, VEX_SIMD_66);
D
duke 已提交
2971 2972
}

2973 2974
void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2975
  emit_simd_arith_nonds(0x2E, dst, src, VEX_SIMD_66);
D
duke 已提交
2976 2977
}

2978 2979
void Assembler::ucomiss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
2980
  emit_simd_arith_nonds(0x2E, dst, src, VEX_SIMD_NONE);
2981 2982 2983 2984
}

void Assembler::ucomiss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
2985
  emit_simd_arith_nonds(0x2E, dst, src, VEX_SIMD_NONE);
2986 2987
}

2988 2989 2990 2991 2992
void Assembler::xabort(int8_t imm8) {
  emit_int8((unsigned char)0xC6);
  emit_int8((unsigned char)0xF8);
  emit_int8((unsigned char)(imm8 & 0xFF));
}
2993 2994 2995 2996

void Assembler::xaddl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
2997 2998
  emit_int8(0x0F);
  emit_int8((unsigned char)0xC1);
2999 3000 3001
  emit_operand(src, dst);
}

void Assembler::xbegin(Label& abort, relocInfo::relocType rtype) {
  InstructionMark im(this);
  relocate(rtype);
  if (abort.is_bound()) {
    address entry = target(abort);
    assert(entry != NULL, "abort entry NULL");
    intptr_t offset = entry - pc();
    emit_int8((unsigned char)0xC7);
    emit_int8((unsigned char)0xF8);
    emit_int32(offset - 6); // 2 opcode + 4 address
  } else {
    abort.add_patch_at(code(), locator());
    emit_int8((unsigned char)0xC7);
    emit_int8((unsigned char)0xF8);
    emit_int32(0);
  }
}

3020 3021 3022
void Assembler::xchgl(Register dst, Address src) { // xchg
  InstructionMark im(this);
  prefix(src, dst);
3023
  emit_int8((unsigned char)0x87);
3024 3025 3026 3027 3028
  emit_operand(dst, src);
}

void Assembler::xchgl(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
3029 3030
  emit_int8((unsigned char)0x87);
  emit_int8((unsigned char)(0xC0 | encode));
3031 3032
}

void Assembler::xend() {
  emit_int8((unsigned char)0x0F);
  emit_int8((unsigned char)0x01);
  emit_int8((unsigned char)0xD5);
}
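
// Usage sketch (illustrative, assuming the usual '#define __ masm->'
// shorthand) of an RTM transactional region built from xbegin()/xend():
//   Label abort;
//   __ xbegin(abort);   // falls through if the transaction starts
//   // ... speculative work ...
//   __ xend();          // commit
//   __ bind(abort);     // execution resumes here if the transaction aborts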

void Assembler::xgetbv() {
3040 3041 3042
  emit_int8(0x0F);
  emit_int8(0x01);
  emit_int8((unsigned char)0xD0);
3043 3044
}

void Assembler::xorl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xF0, dst, imm32);
}

void Assembler::xorl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
3053
  emit_int8(0x33);
  emit_operand(dst, src);
}

void Assembler::xorl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x33, 0xC0, dst, src);
}


// AVX 3-operands scalar floating-point arithmetic instructions
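// In the three-operand form dst receives (nds op src) without clobbering
// nds; for example vaddsd(xmm0, xmm1, xmm2) computes xmm0 = xmm1 + xmm2,
// with the upper bits of dst copied from nds.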

void Assembler::vaddsd(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
3067
  emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_F2, /* vector256 */ false);
3068 3069 3070 3071
}

void Assembler::vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
3072
  emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_F2, /* vector256 */ false);
3073 3074 3075 3076
}

void Assembler::vaddss(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
3077
  emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_F3, /* vector256 */ false);
3078 3079 3080 3081
}

void Assembler::vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
3082
  emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_F3, /* vector256 */ false);
3083 3084 3085 3086
}

void Assembler::vdivsd(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
3087
  emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_F2, /* vector256 */ false);
3088 3089 3090 3091
}

void Assembler::vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
3092
  emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_F2, /* vector256 */ false);
3093 3094 3095 3096
}

void Assembler::vdivss(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
3097
  emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_F3, /* vector256 */ false);
3098 3099 3100 3101
}

void Assembler::vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
3102
  emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_F3, /* vector256 */ false);
3103 3104 3105 3106
}

void Assembler::vmulsd(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
3107
  emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_F2, /* vector256 */ false);
3108 3109 3110 3111
}

void Assembler::vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
3112
  emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_F2, /* vector256 */ false);
3113 3114 3115
}

void Assembler::vmulss(XMMRegister dst, XMMRegister nds, Address src) {
3116 3117
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_F3, /* vector256 */ false);
3118 3119 3120 3121
}

void Assembler::vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
3122
  emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_F3, /* vector256 */ false);
3123 3124 3125 3126
}

void Assembler::vsubsd(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
3127
  emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_F2, /* vector256 */ false);
3128 3129 3130 3131
}

void Assembler::vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
3132
  emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_F2, /* vector256 */ false);
3133 3134 3135 3136
}

void Assembler::vsubss(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
3137
  emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_F3, /* vector256 */ false);
3138 3139 3140 3141
}

void Assembler::vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
3142
  emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_F3, /* vector256 */ false);
3143 3144
}

3145 3146 3147 3148 3149 3150 3151
//====================VECTOR ARITHMETIC=====================================

// Float-point vector arithmetic

void Assembler::addpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x58, dst, src, VEX_SIMD_66);
3152 3153
}

3154 3155 3156
void Assembler::addps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x58, dst, src, VEX_SIMD_NONE);
3157 3158
}

3159
void Assembler::vaddpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
3160
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vaddps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_NONE, vector256);
}

void Assembler::vaddpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vaddps(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_NONE, vector256);
}

void Assembler::subpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x5C, dst, src, VEX_SIMD_66);
}

void Assembler::subps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x5C, dst, src, VEX_SIMD_NONE);
}

void Assembler::vsubpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vsubps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_NONE, vector256);
}

void Assembler::vsubpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vsubps(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_NONE, vector256);
}

void Assembler::mulpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x59, dst, src, VEX_SIMD_66);
}

void Assembler::mulps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x59, dst, src, VEX_SIMD_NONE);
}

void Assembler::vmulpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vmulps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_NONE, vector256);
}

void Assembler::vmulpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vmulps(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_NONE, vector256);
}

void Assembler::divpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x5E, dst, src, VEX_SIMD_66);
}

void Assembler::divps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x5E, dst, src, VEX_SIMD_NONE);
}

void Assembler::vdivpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vdivps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_NONE, vector256);
}

void Assembler::vdivpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vdivps(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_NONE, vector256);
}

void Assembler::andpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x54, dst, src, VEX_SIMD_66);
}

void Assembler::andps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x54, dst, src, VEX_SIMD_NONE);
}

void Assembler::andps(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x54, dst, src, VEX_SIMD_NONE);
}

void Assembler::andpd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x54, dst, src, VEX_SIMD_66);
}

void Assembler::vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x54, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x54, dst, nds, src, VEX_SIMD_NONE, vector256);
}

void Assembler::vandpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x54, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vandps(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x54, dst, nds, src, VEX_SIMD_NONE, vector256);
}

void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x57, dst, src, VEX_SIMD_66);
}

void Assembler::xorps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x57, dst, src, VEX_SIMD_NONE);
}

void Assembler::xorpd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x57, dst, src, VEX_SIMD_66);
}

void Assembler::xorps(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x57, dst, src, VEX_SIMD_NONE);
}

void Assembler::vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x57, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x57, dst, nds, src, VEX_SIMD_NONE, vector256);
}

void Assembler::vxorpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x57, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vxorps(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x57, dst, nds, src, VEX_SIMD_NONE, vector256);
}


// Integer vector arithmetic
void Assembler::paddb(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xFC, dst, src, VEX_SIMD_66);
}

void Assembler::paddw(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xFD, dst, src, VEX_SIMD_66);
}

void Assembler::paddd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xFE, dst, src, VEX_SIMD_66);
}

void Assembler::paddq(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xD4, dst, src, VEX_SIMD_66);
}

void Assembler::vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xFC, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xFD, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xFE, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpaddq(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xD4, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpaddb(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xFC, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpaddw(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xFD, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpaddd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xFE, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpaddq(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xD4, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::psubb(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xF8, dst, src, VEX_SIMD_66);
}

void Assembler::psubw(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xF9, dst, src, VEX_SIMD_66);
}

void Assembler::psubd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xFA, dst, src, VEX_SIMD_66);
}

void Assembler::psubq(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xFB, dst, src, VEX_SIMD_66);
}

void Assembler::vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xF8, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xF9, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpsubd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xFA, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpsubq(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xFB, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpsubb(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xF8, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpsubw(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xF9, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpsubd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xFA, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpsubq(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xFB, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::pmullw(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xD5, dst, src, VEX_SIMD_66);
}

void Assembler::pmulld(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse4_1(), "");
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  emit_int8(0x40);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xD5, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, vector256, VEX_OPCODE_0F_38);
  emit_int8(0x40);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::vpmullw(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xD5, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpmulld(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  InstructionMark im(this);
  int dst_enc = dst->encoding();
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  vex_prefix(src, nds_enc, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, false, vector256);
  emit_int8(0x40);
  emit_operand(dst, src);
}

// Shift packed integers left by specified number of bits.
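// For the immediate-count forms below, the shift kind is an opcode extension carried
// in the ModRM reg field (/6 = shift left, /2 = logical right, /4 = arithmetic right),
// so the xmm6/xmm2/xmm4 arguments passed to the prefix helpers are pseudo-registers
// that select the extension, not real operands; the count itself is emitted as a
// trailing immediate byte.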
void Assembler::psllw(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  // XMM6 is for /6 encoding: 66 0F 71 /6 ib
  int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66);
  emit_int8(0x71);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
}

void Assembler::pslld(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  // XMM6 is for /6 encoding: 66 0F 72 /6 ib
  int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66);
  emit_int8(0x72);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
}

void Assembler::psllq(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  // XMM6 is for /6 encoding: 66 0F 73 /6 ib
  int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66);
  emit_int8(0x73);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
}

void Assembler::psllw(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xF1, dst, shift, VEX_SIMD_66);
}

void Assembler::pslld(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xF2, dst, shift, VEX_SIMD_66);
}

void Assembler::psllq(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xF3, dst, shift, VEX_SIMD_66);
}

void Assembler::vpsllw(XMMRegister dst, XMMRegister src, int shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  // XMM6 is for /6 encoding: 66 0F 71 /6 ib
  emit_vex_arith(0x71, xmm6, dst, src, VEX_SIMD_66, vector256);
  emit_int8(shift & 0xFF);
}

void Assembler::vpslld(XMMRegister dst, XMMRegister src, int shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  // XMM6 is for /6 encoding: 66 0F 72 /6 ib
  emit_vex_arith(0x72, xmm6, dst, src, VEX_SIMD_66, vector256);
  emit_int8(shift & 0xFF);
}

void Assembler::vpsllq(XMMRegister dst, XMMRegister src, int shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  // XMM6 is for /6 encoding: 66 0F 73 /6 ib
  emit_vex_arith(0x73, xmm6, dst, src, VEX_SIMD_66, vector256);
  emit_int8(shift & 0xFF);
}

void Assembler::vpsllw(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xF1, dst, src, shift, VEX_SIMD_66, vector256);
}

void Assembler::vpslld(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xF2, dst, src, shift, VEX_SIMD_66, vector256);
}

void Assembler::vpsllq(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xF3, dst, src, shift, VEX_SIMD_66, vector256);
}

// Shift packed integers logically right by specified number of bits.
void Assembler::psrlw(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  // XMM2 is for /2 encoding: 66 0F 71 /2 ib
  int encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66);
  emit_int8(0x71);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
}

void Assembler::psrld(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  // XMM2 is for /2 encoding: 66 0F 72 /2 ib
  int encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66);
3603 3604 3605
  emit_int8(0x72);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
3606 3607 3608 3609 3610 3611 3612 3613
}

void Assembler::psrlq(XMMRegister dst, int shift) {
  // Do not confuse it with the psrldq SSE2 instruction, which
  // shifts the 128-bit value in an xmm register by a number of bytes.
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  // XMM2 is for /2 encoding: 66 0F 73 /2 ib
  int encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66);
  emit_int8(0x73);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
}

void Assembler::psrlw(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xD1, dst, shift, VEX_SIMD_66);
}

void Assembler::psrld(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xD2, dst, shift, VEX_SIMD_66);
}

void Assembler::psrlq(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xD3, dst, shift, VEX_SIMD_66);
}

void Assembler::vpsrlw(XMMRegister dst, XMMRegister src, int shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  // XMM2 is for /2 encoding: 66 0F 71 /2 ib
  emit_vex_arith(0x71, xmm2, dst, src, VEX_SIMD_66, vector256);
  emit_int8(shift & 0xFF);
}

void Assembler::vpsrld(XMMRegister dst, XMMRegister src, int shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  // XMM2 is for /2 encoding: 66 0F 72 /2 ib
  emit_vex_arith(0x72, xmm2, dst, src, VEX_SIMD_66, vector256);
  emit_int8(shift & 0xFF);
}

void Assembler::vpsrlq(XMMRegister dst, XMMRegister src, int shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  // XMM2 is for /2 encoding: 66 0F 73 /2 ib
  emit_vex_arith(0x73, xmm2, dst, src, VEX_SIMD_66, vector256);
  emit_int8(shift & 0xFF);
}

void Assembler::vpsrlw(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xD1, dst, src, shift, VEX_SIMD_66, vector256);
}

void Assembler::vpsrld(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xD2, dst, src, shift, VEX_SIMD_66, vector256);
}

void Assembler::vpsrlq(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xD3, dst, src, shift, VEX_SIMD_66, vector256);
}

// Shift packed integers arithmetically right by specified number of bits.
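// Note: there is no packed 64-bit arithmetic right shift (psraq/vpsraq) in
// SSE/AVX/AVX2; it only appears with AVX-512, hence no quadword variant here.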
void Assembler::psraw(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  // XMM4 is for /4 encoding: 66 0F 71 /4 ib
  int encode = simd_prefix_and_encode(xmm4, dst, dst, VEX_SIMD_66);
  emit_int8(0x71);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
}

void Assembler::psrad(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  // XMM4 is for /4 encoding: 66 0F 72 /4 ib
  int encode = simd_prefix_and_encode(xmm4, dst, dst, VEX_SIMD_66);
  emit_int8(0x72);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
}

void Assembler::psraw(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xE1, dst, shift, VEX_SIMD_66);
}

void Assembler::psrad(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xE2, dst, shift, VEX_SIMD_66);
}

void Assembler::vpsraw(XMMRegister dst, XMMRegister src, int shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  // XMM4 is for /4 encoding: 66 0F 71 /4 ib
  emit_vex_arith(0x71, xmm4, dst, src, VEX_SIMD_66, vector256);
  emit_int8(shift & 0xFF);
}

void Assembler::vpsrad(XMMRegister dst, XMMRegister src, int shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  // XMM4 is for /4 encoding: 66 0F 72 /4 ib
  emit_vex_arith(0x72, xmm4, dst, src, VEX_SIMD_66, vector256);
  emit_int8(shift & 0xFF);
}

void Assembler::vpsraw(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xE1, dst, src, shift, VEX_SIMD_66, vector256);
}

void Assembler::vpsrad(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xE2, dst, src, shift, VEX_SIMD_66, vector256);
}


// AND packed integers
void Assembler::pand(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xDB, dst, src, VEX_SIMD_66);
}

void Assembler::vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xDB, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpand(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xDB, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::por(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xEB, dst, src, VEX_SIMD_66);
}

void Assembler::vpor(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xEB, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpor(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xEB, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::pxor(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xEF, dst, src, VEX_SIMD_66);
}

void Assembler::vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xEF, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpxor(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xEF, dst, nds, src, VEX_SIMD_66, vector256);
}


void Assembler::vinsertf128h(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  bool vector256 = true;
  int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, vector256, VEX_OPCODE_0F_3A);
  emit_int8(0x18);
  emit_int8((unsigned char)(0xC0 | encode));
  // 0x00 - insert into lower 128 bits
  // 0x01 - insert into upper 128 bits
  emit_int8(0x01);
}

void Assembler::vinsertf128h(XMMRegister dst, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  bool vector256 = true;
  assert(dst != xnoreg, "sanity");
  int dst_enc = dst->encoding();
  // swap src<->dst for encoding
  vex_prefix(src, dst_enc, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, false, vector256);
  emit_int8(0x18);
  emit_operand(dst, src);
  // 0x01 - insert into upper 128 bits
  emit_int8(0x01);
}

void Assembler::vextractf128h(Address dst, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  bool vector256 = true;
  assert(src != xnoreg, "sanity");
  int src_enc = src->encoding();
  vex_prefix(dst, 0, src_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, false, vector256);
  emit_int8(0x19);
  emit_operand(src, dst);
  // 0x01 - extract from upper 128 bits
  emit_int8(0x01);
}

void Assembler::vinserti128h(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx2(), "");
  bool vector256 = true;
  int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, vector256, VEX_OPCODE_0F_3A);
  emit_int8(0x38);
  emit_int8((unsigned char)(0xC0 | encode));
  // 0x00 - insert into lower 128 bits
  // 0x01 - insert into upper 128 bits
  emit_int8(0x01);
}

void Assembler::vinserti128h(XMMRegister dst, Address src) {
  assert(VM_Version::supports_avx2(), "");
  InstructionMark im(this);
  bool vector256 = true;
  assert(dst != xnoreg, "sanity");
  int dst_enc = dst->encoding();
  // swap src<->dst for encoding
  vex_prefix(src, dst_enc, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, false, vector256);
  emit_int8(0x38);
  emit_operand(dst, src);
  // 0x01 - insert into upper 128 bits
  emit_int8(0x01);
}

void Assembler::vextracti128h(Address dst, XMMRegister src) {
  assert(VM_Version::supports_avx2(), "");
  InstructionMark im(this);
  bool vector256 = true;
  assert(src != xnoreg, "sanity");
  int src_enc = src->encoding();
  vex_prefix(dst, 0, src_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, false, vector256);
  emit_int8(0x39);
  emit_operand(src, dst);
  // 0x01 - extract from upper 128 bits
  emit_int8(0x01);
}

// Duplicate 4-byte integer data from src into 8 locations in dest
void Assembler::vpbroadcastd(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_avx2(), "");
  bool vector256 = true;
  int encode = vex_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, vector256, VEX_OPCODE_0F_38);
  emit_int8(0x58);
  emit_int8((unsigned char)(0xC0 | encode));
}

// Carry-Less Multiplication Quadword
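// Bits 0 and 4 of 'mask' (the imm8) select which 64-bit half of each source operand
// participates in the carry-less multiply.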
void Assembler::vpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask) {
  assert(VM_Version::supports_avx() && VM_Version::supports_clmul(), "");
  bool vector256 = false;
  int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, vector256, VEX_OPCODE_0F_3A);
  emit_int8(0x44);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8((unsigned char)mask);
}

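// vzeroupper zeroes the upper 128 bits of all YMM registers.  It is emitted at
// AVX-to-SSE transitions to avoid the state-transition penalty that legacy SSE
// code pays when the upper YMM state is dirty.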
void Assembler::vzeroupper() {
  assert(VM_Version::supports_avx(), "");
  (void)vex_prefix_and_encode(xmm0, xmm0, xmm0, VEX_SIMD_NONE);
  emit_int8(0x77);
}


#ifndef _LP64
// 32bit only pieces of the assembler

void Assembler::cmp_literal32(Register src1, int32_t imm32, RelocationHolder const& rspec) {
  // NO PREFIX AS NEVER 64BIT
  InstructionMark im(this);
  emit_int8((unsigned char)0x81);
  emit_int8((unsigned char)(0xF8 | src1->encoding()));
  emit_data(imm32, rspec, 0);
}

void Assembler::cmp_literal32(Address src1, int32_t imm32, RelocationHolder const& rspec) {
  // NO PREFIX AS NEVER 64BIT (not even 32bit versions of 64bit regs)
  InstructionMark im(this);
  emit_int8((unsigned char)0x81);
  emit_operand(rdi, src1);
  emit_data(imm32, rspec, 0);
}

// The 64-bit cmpxchg (on a 32-bit platform) compares the value at adr with the contents
// of rdx:rax and, if they are equal, stores rcx:rbx into adr; otherwise, the value at adr
// is loaded into rdx:rax.  The ZF is set if the compared values were equal, and cleared otherwise.
void Assembler::cmpxchg8(Address adr) {
  InstructionMark im(this);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xC7);
  emit_operand(rcx, adr);
}

void Assembler::decl(Register dst) {
  // Don't use it directly. Use MacroAssembler::decrementl() instead.
  emit_int8(0x48 | dst->encoding());
}

#endif // _LP64

// 64bit typically doesn't use the x87 but needs to for the trig funcs

void Assembler::fabs() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xE1);
}

void Assembler::fadd(int i) {
  emit_farith(0xD8, 0xC0, i);
}

void Assembler::fadd_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rax, src);
}

void Assembler::fadd_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rax, src);
}

void Assembler::fadda(int i) {
  emit_farith(0xDC, 0xC0, i);
}

void Assembler::faddp(int i) {
  emit_farith(0xDE, 0xC0, i);
}

void Assembler::fchs() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xE0);
}

void Assembler::fcom(int i) {
  emit_farith(0xD8, 0xD0, i);
}

void Assembler::fcomp(int i) {
  emit_farith(0xD8, 0xD8, i);
}

void Assembler::fcomp_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rbx, src);
}

void Assembler::fcomp_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rbx, src);
}

void Assembler::fcompp() {
  emit_int8((unsigned char)0xDE);
  emit_int8((unsigned char)0xD9);
}

void Assembler::fcos() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xFF);
}

void Assembler::fdecstp() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xF6);
}

void Assembler::fdiv(int i) {
  emit_farith(0xD8, 0xF0, i);
}

void Assembler::fdiv_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rsi, src);
}

void Assembler::fdiv_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rsi, src);
}

void Assembler::fdiva(int i) {
  emit_farith(0xDC, 0xF8, i);
}

// Note: The Intel manual (Pentium Processor User's Manual, Vol.3, 1994)
//       is erroneous for some of the floating-point instructions below.

void Assembler::fdivp(int i) {
  emit_farith(0xDE, 0xF8, i);                    // ST(0) <- ST(0) / ST(1) and pop (Intel manual wrong)
}

void Assembler::fdivr(int i) {
  emit_farith(0xD8, 0xF8, i);
}

void Assembler::fdivr_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rdi, src);
}

void Assembler::fdivr_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rdi, src);
}

void Assembler::fdivra(int i) {
  emit_farith(0xDC, 0xF0, i);
}

void Assembler::fdivrp(int i) {
  emit_farith(0xDE, 0xF0, i);                    // ST(0) <- ST(1) / ST(0) and pop (Intel manual wrong)
}

void Assembler::ffree(int i) {
  emit_farith(0xDD, 0xC0, i);
}

void Assembler::fild_d(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDF);
  emit_operand32(rbp, adr);
}

void Assembler::fild_s(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDB);
  emit_operand32(rax, adr);
}

void Assembler::fincstp() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xF7);
}

void Assembler::finit() {
  emit_int8((unsigned char)0x9B);
  emit_int8((unsigned char)0xDB);
  emit_int8((unsigned char)0xE3);
}

void Assembler::fist_s(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDB);
  emit_operand32(rdx, adr);
}

void Assembler::fistp_d(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDF);
  emit_operand32(rdi, adr);
}

void Assembler::fistp_s(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDB);
  emit_operand32(rbx, adr);
}

void Assembler::fld1() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xE8);
}

void Assembler::fld_d(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDD);
  emit_operand32(rax, adr);
}

void Assembler::fld_s(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD9);
  emit_operand32(rax, adr);
}


void Assembler::fld_s(int index) {
  emit_farith(0xD9, 0xC0, index);
}

void Assembler::fld_x(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDB);
  emit_operand32(rbp, adr);
}

void Assembler::fldcw(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD9);
  emit_operand32(rbp, src);
}

void Assembler::fldenv(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD9);
  emit_operand32(rsp, src);
}

void Assembler::fldlg2() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xEC);
}

void Assembler::fldln2() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xED);
}

void Assembler::fldz() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xEE);
}

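// flog() computes ln(x) for x in ST(0): fldln2 pushes ln(2), fxch swaps it with x,
// and fyl2x leaves ST(1) * log2(ST(0)) = ln(2) * log2(x) = ln(x) on the stack.
// flog10() does the same with fldlg2 (log10(2)) to produce log10(x).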
void Assembler::flog() {
  fldln2();
  fxch();
  fyl2x();
}

void Assembler::flog10() {
  fldlg2();
  fxch();
  fyl2x();
}

void Assembler::fmul(int i) {
  emit_farith(0xD8, 0xC8, i);
}

void Assembler::fmul_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rcx, src);
}

void Assembler::fmul_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rcx, src);
}

void Assembler::fmula(int i) {
  emit_farith(0xDC, 0xC8, i);
}

void Assembler::fmulp(int i) {
  emit_farith(0xDE, 0xC8, i);
}

void Assembler::fnsave(Address dst) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDD);
  emit_operand32(rsi, dst);
}

void Assembler::fnstcw(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0x9B);
  emit_int8((unsigned char)0xD9);
  emit_operand32(rdi, src);
}

void Assembler::fnstsw_ax() {
  emit_int8((unsigned char)0xDF);
  emit_int8((unsigned char)0xE0);
}

void Assembler::fprem() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xF8);
}

void Assembler::fprem1() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xF5);
}

void Assembler::frstor(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDD);
  emit_operand32(rsp, src);
}

void Assembler::fsin() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xFE);
}

void Assembler::fsqrt() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xFA);
}

void Assembler::fst_d(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDD);
  emit_operand32(rdx, adr);
}

void Assembler::fst_s(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD9);
  emit_operand32(rdx, adr);
}

void Assembler::fstp_d(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDD);
  emit_operand32(rbx, adr);
}

void Assembler::fstp_d(int index) {
  emit_farith(0xDD, 0xD8, index);
}

void Assembler::fstp_s(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD9);
  emit_operand32(rbx, adr);
}

void Assembler::fstp_x(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDB);
  emit_operand32(rdi, adr);
}

void Assembler::fsub(int i) {
  emit_farith(0xD8, 0xE0, i);
}

void Assembler::fsub_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rsp, src);
}

void Assembler::fsub_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rsp, src);
}

void Assembler::fsuba(int i) {
  emit_farith(0xDC, 0xE8, i);
}

void Assembler::fsubp(int i) {
  emit_farith(0xDE, 0xE8, i);                    // ST(0) <- ST(0) - ST(1) and pop (Intel manual wrong)
}

void Assembler::fsubr(int i) {
  emit_farith(0xD8, 0xE8, i);
}

void Assembler::fsubr_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rbp, src);
}

void Assembler::fsubr_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rbp, src);
}

void Assembler::fsubra(int i) {
  emit_farith(0xDC, 0xE0, i);
}

void Assembler::fsubrp(int i) {
  emit_farith(0xDE, 0xE0, i);                    // ST(0) <- ST(1) - ST(0) and pop (Intel manual wrong)
}

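// fptan (D9 F2) computes tan(ST(0)) and then pushes 1.0; the following
// fstp st(0) (DD D8) pops that 1.0 so only the tangent is left on the stack.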
void Assembler::ftan() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xF2);
  emit_int8((unsigned char)0xDD);
  emit_int8((unsigned char)0xD8);
}

void Assembler::ftst() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xE4);
}

void Assembler::fucomi(int i) {
  // make sure the instruction is supported (introduced for P6, together with cmov)
  guarantee(VM_Version::supports_cmov(), "illegal instruction");
  emit_farith(0xDB, 0xE8, i);
}

void Assembler::fucomip(int i) {
  // make sure the instruction is supported (introduced for P6, together with cmov)
  guarantee(VM_Version::supports_cmov(), "illegal instruction");
  emit_farith(0xDF, 0xE8, i);
}

void Assembler::fwait() {
  emit_int8((unsigned char)0x9B);
}

void Assembler::fxch(int i) {
  emit_farith(0xD9, 0xC8, i);
}

void Assembler::fyl2x() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xF1);
}

void Assembler::frndint() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xFC);
}

void Assembler::f2xm1() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xF0);
}

void Assembler::fldl2e() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xEA);
}

// SSE SIMD prefix byte values corresponding to VexSimdPrefix encoding.
static int simd_pre[4] = { 0, 0x66, 0xF3, 0xF2 };
// SSE opcode second byte values (first is 0x0F) corresponding to VexOpcode encoding.
static int simd_opc[4] = { 0,    0, 0x38, 0x3A };

// Generate SSE legacy REX prefix and SIMD opcode based on VEX encoding.
void Assembler::rex_prefix(Address adr, XMMRegister xreg, VexSimdPrefix pre, VexOpcode opc, bool rex_w) {
  if (pre > 0) {
    emit_int8(simd_pre[pre]);
  }
  if (rex_w) {
    prefixq(adr, xreg);
  } else {
    prefix(adr, xreg);
  }
  if (opc > 0) {
    emit_int8(0x0F);
    int opc2 = simd_opc[opc];
    if (opc2 > 0) {
      emit_int8(opc2);
    }
  }
}

int Assembler::rex_prefix_and_encode(int dst_enc, int src_enc, VexSimdPrefix pre, VexOpcode opc, bool rex_w) {
  if (pre > 0) {
    emit_int8(simd_pre[pre]);
  }
  int encode = (rex_w) ? prefixq_and_encode(dst_enc, src_enc) :
                          prefix_and_encode(dst_enc, src_enc);
  if (opc > 0) {
    emit_int8(0x0F);
    int opc2 = simd_opc[opc];
    if (opc2 > 0) {
      emit_int8(opc2);
    }
  }
  return encode;
}


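// Emit the AVX VEX prefix.  The 3-byte form (0xC4) is required whenever REX.B, REX.X
// or VEX.W must be encoded, or when the opcode lives in the 0F 38 / 0F 3A maps;
// otherwise the shorter 2-byte form (0xC5) is used, carrying only the R bit, vvvv
// (the inverted nds register), the vector length bit and the SIMD prefix.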
void Assembler::vex_prefix(bool vex_r, bool vex_b, bool vex_x, bool vex_w, int nds_enc, VexSimdPrefix pre, VexOpcode opc, bool vector256) {
  if (vex_b || vex_x || vex_w || (opc == VEX_OPCODE_0F_38) || (opc == VEX_OPCODE_0F_3A)) {
    prefix(VEX_3bytes);

    int byte1 = (vex_r ? VEX_R : 0) | (vex_x ? VEX_X : 0) | (vex_b ? VEX_B : 0);
    byte1 = (~byte1) & 0xE0;
    byte1 |= opc;
    emit_int8(byte1);

    int byte2 = ((~nds_enc) & 0xf) << 3;
    byte2 |= (vex_w ? VEX_W : 0) | (vector256 ? 4 : 0) | pre;
    emit_int8(byte2);
  } else {
    prefix(VEX_2bytes);

    int byte1 = vex_r ? VEX_R : 0;
    byte1 = (~byte1) & 0x80;
    byte1 |= ((~nds_enc) & 0xf) << 3;
    byte1 |= (vector256 ? 4 : 0) | pre;
    emit_int8(byte1);
  }
}

void Assembler::vex_prefix(Address adr, int nds_enc, int xreg_enc, VexSimdPrefix pre, VexOpcode opc, bool vex_w, bool vector256){
  bool vex_r = (xreg_enc >= 8);
  bool vex_b = adr.base_needs_rex();
  bool vex_x = adr.index_needs_rex();
  vex_prefix(vex_r, vex_b, vex_x, vex_w, nds_enc, pre, opc, vector256);
}

int Assembler::vex_prefix_and_encode(int dst_enc, int nds_enc, int src_enc, VexSimdPrefix pre, VexOpcode opc, bool vex_w, bool vector256) {
  bool vex_r = (dst_enc >= 8);
  bool vex_b = (src_enc >= 8);
  bool vex_x = false;
  vex_prefix(vex_r, vex_b, vex_x, vex_w, nds_enc, pre, opc, vector256);
  return (((dst_enc & 7) << 3) | (src_enc & 7));
}


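// simd_prefix*() choose the encoding: when UseAVX > 0 even 128-bit SSE-style
// instructions are emitted with a VEX prefix, otherwise the legacy
// REX + 0F escape (plus optional 66/F3/F2 SIMD prefix) path is used.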
void Assembler::simd_prefix(XMMRegister xreg, XMMRegister nds, Address adr, VexSimdPrefix pre, VexOpcode opc, bool rex_w, bool vector256) {
  if (UseAVX > 0) {
    int xreg_enc = xreg->encoding();
    int  nds_enc = nds->is_valid() ? nds->encoding() : 0;
    vex_prefix(adr, nds_enc, xreg_enc, pre, opc, rex_w, vector256);
  } else {
    assert((nds == xreg) || (nds == xnoreg), "wrong sse encoding");
    rex_prefix(adr, xreg, pre, opc, rex_w);
  }
}

int Assembler::simd_prefix_and_encode(XMMRegister dst, XMMRegister nds, XMMRegister src, VexSimdPrefix pre, VexOpcode opc, bool rex_w, bool vector256) {
  int dst_enc = dst->encoding();
  int src_enc = src->encoding();
  if (UseAVX > 0) {
    int nds_enc = nds->is_valid() ? nds->encoding() : 0;
    return vex_prefix_and_encode(dst_enc, nds_enc, src_enc, pre, opc, rex_w, vector256);
  } else {
    assert((nds == dst) || (nds == src) || (nds == xnoreg), "wrong sse encoding");
    return rex_prefix_and_encode(dst_enc, src_enc, pre, opc, rex_w);
  }
}

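// Common emit helpers for the arithmetic instructions above: prefix, opcode byte,
// then either a ModRM register byte or a memory operand.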
void Assembler::emit_simd_arith(int opcode, XMMRegister dst, Address src, VexSimdPrefix pre) {
  InstructionMark im(this);
  simd_prefix(dst, dst, src, pre);
  emit_int8(opcode);
  emit_operand(dst, src);
}

void Assembler::emit_simd_arith(int opcode, XMMRegister dst, XMMRegister src, VexSimdPrefix pre) {
  int encode = simd_prefix_and_encode(dst, dst, src, pre);
  emit_int8(opcode);
  emit_int8((unsigned char)(0xC0 | encode));
}

// Versions with no second source register (non-destructive source).
void Assembler::emit_simd_arith_nonds(int opcode, XMMRegister dst, Address src, VexSimdPrefix pre) {
  InstructionMark im(this);
  simd_prefix(dst, xnoreg, src, pre);
  emit_int8(opcode);
  emit_operand(dst, src);
}

void Assembler::emit_simd_arith_nonds(int opcode, XMMRegister dst, XMMRegister src, VexSimdPrefix pre) {
  int encode = simd_prefix_and_encode(dst, xnoreg, src, pre);
  emit_int8(opcode);
  emit_int8((unsigned char)(0xC0 | encode));
}

// 3-operands AVX instructions
void Assembler::emit_vex_arith(int opcode, XMMRegister dst, XMMRegister nds,
                               Address src, VexSimdPrefix pre, bool vector256) {
  InstructionMark im(this);
  vex_prefix(dst, nds, src, pre, vector256);
  emit_int8(opcode);
  emit_operand(dst, src);
}

void Assembler::emit_vex_arith(int opcode, XMMRegister dst, XMMRegister nds,
                               XMMRegister src, VexSimdPrefix pre, bool vector256) {
  int encode = vex_prefix_and_encode(dst, nds, src, pre, vector256);
  emit_int8(opcode);
  emit_int8((unsigned char)(0xC0 | encode));
}

#ifndef _LP64

void Assembler::incl(Register dst) {
  // Don't use it directly. Use MacroAssembler::incrementl() instead.
  emit_int8(0x40 | dst->encoding());
}

void Assembler::lea(Register dst, Address src) {
  leal(dst, src);
}

void Assembler::mov_literal32(Address dst, int32_t imm32,  RelocationHolder const& rspec) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xC7);
  emit_operand(rax, dst);
  emit_data((int)imm32, rspec, 0);
}

void Assembler::mov_literal32(Register dst, int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)(0xB8 | encode));
  emit_data((int)imm32, rspec, 0);
}

void Assembler::popa() { // 32bit
  emit_int8(0x61);
}

void Assembler::push_literal32(int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  emit_int8(0x68);
  emit_data(imm32, rspec, 0);
}

void Assembler::pusha() { // 32bit
  emit_int8(0x60);
}

void Assembler::set_byte_if_not_zero(Register dst) {
  emit_int8(0x0F);
  emit_int8((unsigned char)0x95);
  emit_int8((unsigned char)(0xE0 | dst->encoding()));
}

void Assembler::shldl(Register dst, Register src) {
  emit_int8(0x0F);
  emit_int8((unsigned char)0xA5);
  emit_int8((unsigned char)(0xC0 | src->encoding() << 3 | dst->encoding()));
}

void Assembler::shrdl(Register dst, Register src) {
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAD);
  emit_int8((unsigned char)(0xC0 | src->encoding() << 3 | dst->encoding()));
}

#else // LP64

void Assembler::set_byte_if_not_zero(Register dst) {
  int enc = prefix_and_encode(dst->encoding(), true);
  emit_int8(0x0F);
  emit_int8((unsigned char)0x95);
  emit_int8((unsigned char)(0xE0 | enc));
}

// 64bit only pieces of the assembler
// This should only be used by 64bit instructions that can use rip-relative
// addressing; it cannot be used by instructions that want an immediate value.

bool Assembler::reachable(AddressLiteral adr) {
  int64_t disp;
  // None will force a 64bit literal to the code stream. Likely a placeholder
  // for something that will be patched later and we need to be certain it will
  // always be reachable.
  if (adr.reloc() == relocInfo::none) {
    return false;
  }
  if (adr.reloc() == relocInfo::internal_word_type) {
    // This should be rip relative and easily reachable.
    return true;
  }
  if (adr.reloc() == relocInfo::virtual_call_type ||
      adr.reloc() == relocInfo::opt_virtual_call_type ||
      adr.reloc() == relocInfo::static_call_type ||
      adr.reloc() == relocInfo::static_stub_type ) {
    // This should be rip relative within the code cache and easily
    // reachable until we get huge code caches. (At which point
    // ic code is going to have issues).
    return true;
  }
  if (adr.reloc() != relocInfo::external_word_type &&
      adr.reloc() != relocInfo::poll_return_type &&  // these are really external_word but need special
      adr.reloc() != relocInfo::poll_type &&         // relocs to identify them
      adr.reloc() != relocInfo::runtime_call_type ) {
    return false;
  }

  // Stress the correction code
  if (ForceUnreachable) {
    // Must be runtimecall reloc, see if it is in the codecache
    // Flipping stuff in the codecache to be unreachable causes issues
    // with things like inline caches where the additional instructions
    // are not handled.
    if (CodeCache::find_blob(adr._target) == NULL) {
      return false;
    }
  }
  // For external_word_type/runtime_call_type if it is reachable from where we
  // are now (possibly a temp buffer) and where we might end up
  // anywhere in the codeCache then we are always reachable.
  // This would have to change if we ever save/restore shared code
  // to be more pessimistic.
  disp = (int64_t)adr._target - ((int64_t)CodeCache::low_bound() + sizeof(int));
  if (!is_simm32(disp)) return false;
  disp = (int64_t)adr._target - ((int64_t)CodeCache::high_bound() + sizeof(int));
  if (!is_simm32(disp)) return false;

  disp = (int64_t)adr._target - ((int64_t)pc() + sizeof(int));

  // Because rip relative is a disp + address_of_next_instruction and we
  // don't know the value of address_of_next_instruction we apply a fudge factor
  // to make sure we will be ok no matter the size of the instruction we get placed into.
  // We don't have to fudge the checks above here because they are already worst case.

  // 12 == override/rex byte, opcode byte, rm byte, sib byte, a 4-byte disp , 4-byte literal
  // + 4 because better safe than sorry.
  const int fudge = 12 + 4;
  if (disp < 0) {
    disp -= fudge;
  } else {
    disp += fudge;
  }
  return is_simm32(disp);
}

// Check if the polling page is not reachable from the code cache using rip-relative
// addressing.
bool Assembler::is_polling_page_far() {
  intptr_t addr = (intptr_t)os::get_polling_page();
  return ForceUnreachable ||
         !is_simm32(addr - (intptr_t)CodeCache::low_bound()) ||
         !is_simm32(addr - (intptr_t)CodeCache::high_bound());
}

void Assembler::emit_data64(jlong data,
                            relocInfo::relocType rtype,
                            int format) {
  if (rtype == relocInfo::none) {
    emit_int64(data);
  } else {
    emit_data64(data, Relocation::spec_simple(rtype), format);
  }
}

void Assembler::emit_data64(jlong data,
                            RelocationHolder const& rspec,
                            int format) {
  assert(imm_operand == 0, "default format must be immediate in this file");
  assert(imm_operand == format, "must be immediate");
  assert(inst_mark() != NULL, "must be inside InstructionMark");
  // Do not use AbstractAssembler::relocate, which is not intended for
  // embedded words.  Instead, relocate to the enclosing instruction.
  code_section()->relocate(inst_mark(), rspec, format);
#ifdef ASSERT
  check_relocation(rspec, format);
#endif
  emit_int64(data);
}

int Assembler::prefix_and_encode(int reg_enc, bool byteinst) {
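  // Registers r8-r15 need REX.B and are reduced to their low three bits; for
  // byte instructions, encodings 4-7 (rsp, rbp, rsi, rdi) need a plain REX
  // prefix so they select SPL/BPL/SIL/DIL rather than AH/CH/DH/BH.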
  if (reg_enc >= 8) {
    prefix(REX_B);
    reg_enc -= 8;
  } else if (byteinst && reg_enc >= 4) {
    prefix(REX);
  }
  return reg_enc;
}

int Assembler::prefixq_and_encode(int reg_enc) {
  if (reg_enc < 8) {
    prefix(REX_W);
  } else {
    prefix(REX_WB);
    reg_enc -= 8;
  }
  return reg_enc;
}

int Assembler::prefix_and_encode(int dst_enc, int src_enc, bool byteinst) {
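  // dst lands in the ModRM reg field (extended by REX.R) and src in the ModRM
  // r/m field (extended by REX.B); the return value packs both 3-bit encodings
  // ready to be OR'ed into a ModRM byte.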
  if (dst_enc < 8) {
    if (src_enc >= 8) {
      prefix(REX_B);
      src_enc -= 8;
    } else if (byteinst && src_enc >= 4) {
      prefix(REX);
    }
  } else {
    if (src_enc < 8) {
      prefix(REX_R);
    } else {
      prefix(REX_RB);
      src_enc -= 8;
    }
    dst_enc -= 8;
  }
  return dst_enc << 3 | src_enc;
}

int Assembler::prefixq_and_encode(int dst_enc, int src_enc) {
  if (dst_enc < 8) {
    if (src_enc < 8) {
      prefix(REX_W);
    } else {
      prefix(REX_WB);
      src_enc -= 8;
    }
  } else {
    if (src_enc < 8) {
      prefix(REX_WR);
    } else {
      prefix(REX_WRB);
      src_enc -= 8;
    }
    dst_enc -= 8;
  }
  return dst_enc << 3 | src_enc;
}

void Assembler::prefix(Register reg) {
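  // Emits REX.B for r8-r15, used when the register is encoded in the ModRM
  // r/m field or in the low bits of the opcode itself.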
  if (reg->encoding() >= 8) {
    prefix(REX_B);
  }
}

void Assembler::prefix(Address adr) {
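  // A memory operand may need REX.X (extended index register) and/or REX.B
  // (extended base register); nothing is emitted when neither applies.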
  if (adr.base_needs_rex()) {
    if (adr.index_needs_rex()) {
      prefix(REX_XB);
    } else {
      prefix(REX_B);
    }
  } else {
    if (adr.index_needs_rex()) {
      prefix(REX_X);
    }
  }
}

void Assembler::prefixq(Address adr) {
  if (adr.base_needs_rex()) {
    if (adr.index_needs_rex()) {
      prefix(REX_WXB);
    } else {
      prefix(REX_WB);
    }
  } else {
    if (adr.index_needs_rex()) {
      prefix(REX_WX);
    } else {
      prefix(REX_W);
    }
  }
}


void Assembler::prefix(Address adr, Register reg, bool byteinst) {
  if (reg->encoding() < 8) {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_XB);
      } else {
        prefix(REX_B);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_X);
      } else if (byteinst && reg->encoding() >= 4 ) {
        prefix(REX);
      }
    }
  } else {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_RXB);
      } else {
        prefix(REX_RB);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_RX);
      } else {
        prefix(REX_R);
      }
    }
  }
}

void Assembler::prefixq(Address adr, Register src) {
  if (src->encoding() < 8) {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_WXB);
      } else {
        prefix(REX_WB);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_WX);
      } else {
        prefix(REX_W);
      }
    }
  } else {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_WRXB);
      } else {
        prefix(REX_WRB);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_WRX);
      } else {
        prefix(REX_WR);
      }
    }
  }
}

void Assembler::prefix(Address adr, XMMRegister reg) {
  if (reg->encoding() < 8) {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_XB);
      } else {
        prefix(REX_B);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_X);
      }
    }
  } else {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_RXB);
      } else {
        prefix(REX_RB);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_RX);
      } else {
        prefix(REX_R);
      }
    }
  }
}

void Assembler::prefixq(Address adr, XMMRegister src) {
  if (src->encoding() < 8) {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_WXB);
      } else {
        prefix(REX_WB);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_WX);
      } else {
        prefix(REX_W);
      }
    }
  } else {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_WRXB);
      } else {
        prefix(REX_WRB);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_WRX);
      } else {
        prefix(REX_WR);
      }
    }
  }
}

void Assembler::adcq(Register dst, int32_t imm32) {
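  // 0xD0 selects the /2 (ADC) opcode extension; emit_arith uses the
  // sign-extended imm8 form when imm32 fits in a byte.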
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xD0, dst, imm32);
}

void Assembler::adcq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x13);
  emit_operand(dst, src);
}

void Assembler::adcq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x13, 0xC0, dst, src);
}

void Assembler::addq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_arith_operand(0x81, rax, dst,imm32);
}

void Assembler::addq(Address dst, Register src) {
  InstructionMark im(this);
  prefixq(dst, src);
  emit_int8(0x01);
  emit_operand(src, dst);
}

void Assembler::addq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xC0, dst, imm32);
}

void Assembler::addq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x03);
  emit_operand(dst, src);
}

void Assembler::addq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x03, 0xC0, dst, src);
}

void Assembler::andq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_int8((unsigned char)0x81);
  emit_operand(rsp, dst, 4);
  emit_int32(imm32);
}

void Assembler::andq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xE0, dst, imm32);
}

void Assembler::andq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x23);
  emit_operand(dst, src);
}

void Assembler::andq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x23, 0xC0, dst, src);
}

void Assembler::andnq(Register dst, Register src1, Register src2) {
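  // ANDN (BMI1): dst = ~src1 & src2; src1 travels in VEX.vvvv, src2 in ModRM.r/m.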
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  int encode = vex_prefix_0F38_and_encode_q(dst, src1, src2);
  emit_int8((unsigned char)0xF2);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::andnq(Register dst, Register src1, Address src2) {
  InstructionMark im(this);
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  vex_prefix_0F38_q(dst, src1, src2);
  emit_int8((unsigned char)0xF2);
  emit_operand(dst, src2);
}

void Assembler::bsfq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBC);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::bsrq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBD);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::bswapq(Register reg) {
  int encode = prefixq_and_encode(reg->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)(0xC8 | encode));
}

void Assembler::blsiq(Register dst, Register src) {
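  // BLSI: dst = src & -src (isolate lowest set bit). rbx here only supplies the
  // /3 opcode-extension value for the ModRM reg field; the destination is
  // carried in VEX.vvvv.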
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  int encode = vex_prefix_0F38_and_encode_q(rbx, dst, src);
  emit_int8((unsigned char)0xF3);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::blsiq(Register dst, Address src) {
  InstructionMark im(this);
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  vex_prefix_0F38_q(rbx, dst, src);
  emit_int8((unsigned char)0xF3);
  emit_operand(rbx, src);
}

void Assembler::blsmskq(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  int encode = vex_prefix_0F38_and_encode_q(rdx, dst, src);
  emit_int8((unsigned char)0xF3);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::blsmskq(Register dst, Address src) {
  InstructionMark im(this);
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  vex_prefix_0F38_q(rdx, dst, src);
  emit_int8((unsigned char)0xF3);
  emit_operand(rdx, src);
}

void Assembler::blsrq(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  int encode = vex_prefix_0F38_and_encode_q(rcx, dst, src);
  emit_int8((unsigned char)0xF3);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::blsrq(Register dst, Address src) {
  InstructionMark im(this);
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  vex_prefix_0F38_q(rcx, dst, src);
  emit_int8((unsigned char)0xF3);
  emit_operand(rcx, src);
}

void Assembler::cdqq() {
  prefix(REX_W);
  emit_int8((unsigned char)0x99);
}

void Assembler::clflush(Address adr) {
  prefix(adr);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAE);
  emit_operand(rdi, adr);
}

void Assembler::cmovq(Condition cc, Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8(0x40 | cc);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::cmovq(Condition cc, Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x0F);
  emit_int8(0x40 | cc);
  emit_operand(dst, src);
}

void Assembler::cmpq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_int8((unsigned char)0x81);
  emit_operand(rdi, dst, 4);
  emit_int32(imm32);
}

void Assembler::cmpq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xF8, dst, imm32);
}

void Assembler::cmpq(Address dst, Register src) {
  InstructionMark im(this);
  prefixq(dst, src);
  // 0x39 is CMP r/m64, r64, so the flags reflect [dst] - src, matching the
  // operand order of this overload (0x3B would compute src - [dst] instead).
  emit_int8(0x39);
  emit_operand(src, dst);
}

void Assembler::cmpq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x3B, 0xC0, dst, src);
}

void Assembler::cmpq(Register dst, Address  src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x3B);
  emit_operand(dst, src);
}

void Assembler::cmpxchgq(Register reg, Address adr) {
  InstructionMark im(this);
  prefixq(adr, reg);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xB1);
  emit_operand(reg, adr);
}

void Assembler::cvtsi2sdq(XMMRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  int encode = simd_prefix_and_encode_q(dst, dst, src, VEX_SIMD_F2);
  emit_int8(0x2A);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::cvtsi2sdq(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  simd_prefix_q(dst, dst, src, VEX_SIMD_F2);
  emit_int8(0x2A);
  emit_operand(dst, src);
}

void Assembler::cvtsi2ssq(XMMRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  int encode = simd_prefix_and_encode_q(dst, dst, src, VEX_SIMD_F3);
  emit_int8(0x2A);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::cvtsi2ssq(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  simd_prefix_q(dst, dst, src, VEX_SIMD_F3);
  emit_int8(0x2A);
  emit_operand(dst, src);
}

void Assembler::cvttsd2siq(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  int encode = simd_prefix_and_encode_q(dst, src, VEX_SIMD_F2);
  emit_int8(0x2C);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::cvttss2siq(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  int encode = simd_prefix_and_encode_q(dst, src, VEX_SIMD_F3);
  emit_int8(0x2C);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::decl(Register dst) {
  // Don't use it directly. Use MacroAssembler::decrementl() instead.
  // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xFF);
  emit_int8((unsigned char)(0xC8 | encode));
}

void Assembler::decq(Register dst) {
  // Don't use it directly. Use MacroAssembler::decrementq() instead.
  // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)0xFF);
  emit_int8(0xC8 | encode);
}

void Assembler::decq(Address dst) {
  // Don't use it directly. Use MacroAssembler::decrementq() instead.
  InstructionMark im(this);
  prefixq(dst);
  emit_int8((unsigned char)0xFF);
  emit_operand(rcx, dst);
}

void Assembler::fxrstor(Address src) {
  prefixq(src);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAE);
  emit_operand(as_Register(1), src);
}

void Assembler::fxsave(Address dst) {
  prefixq(dst);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAE);
  emit_operand(as_Register(0), dst);
}

void Assembler::idivq(Register src) {
  int encode = prefixq_and_encode(src->encoding());
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xF8 | encode));
}

void Assembler::imulq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAF);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::imulq(Register dst, Register src, int value) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  if (is8bit(value)) {
    emit_int8(0x6B);
    emit_int8((unsigned char)(0xC0 | encode));
    emit_int8(value & 0xFF);
  } else {
    emit_int8(0x69);
    emit_int8((unsigned char)(0xC0 | encode));
    emit_int32(value);
  }
}

void Assembler::imulq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x0F);
  emit_int8((unsigned char) 0xAF);
  emit_operand(dst, src);
}

void Assembler::incl(Register dst) {
  // Don't use it directly. Use MacroAssembler::incrementl() instead.
  // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xFF);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::incq(Register dst) {
  // Don't use it directly. Use MacroAssembler::incrementq() instead.
  // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)0xFF);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::incq(Address dst) {
  // Don't use it directly. Use MacroAssembler::incrementq() instead.
  InstructionMark im(this);
  prefixq(dst);
  emit_int8((unsigned char)0xFF);
  emit_operand(rax, dst);
}

void Assembler::lea(Register dst, Address src) {
  leaq(dst, src);
}

void Assembler::leaq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8((unsigned char)0x8D);
  emit_operand(dst, src);
}

void Assembler::mov64(Register dst, int64_t imm64) {
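  // REX.W + (0xB8 | reg) followed by a full 8-byte immediate (the movabs form).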
  InstructionMark im(this);
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)(0xB8 | encode));
  emit_int64(imm64);
}

void Assembler::mov_literal64(Register dst, intptr_t imm64, RelocationHolder const& rspec) {
  InstructionMark im(this);
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8(0xB8 | encode);
  emit_data64(imm64, rspec);
}

void Assembler::mov_narrow_oop(Register dst, int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)(0xB8 | encode));
  emit_data((int)imm32, rspec, narrow_oop_operand);
}

void Assembler::mov_narrow_oop(Address dst, int32_t imm32,  RelocationHolder const& rspec) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0xC7);
  emit_operand(rax, dst, 4);
  emit_data((int)imm32, rspec, narrow_oop_operand);
}

void Assembler::cmp_narrow_oop(Register src1, int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  int encode = prefix_and_encode(src1->encoding());
  emit_int8((unsigned char)0x81);
  emit_int8((unsigned char)(0xF8 | encode));
  emit_data((int)imm32, rspec, narrow_oop_operand);
}

void Assembler::cmp_narrow_oop(Address src1, int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  prefix(src1);
  emit_int8((unsigned char)0x81);
  emit_operand(rax, src1, 4);
  emit_data((int)imm32, rspec, narrow_oop_operand);
}

void Assembler::lzcntq(Register dst, Register src) {
  assert(VM_Version::supports_lzcnt(), "encoding is treated as BSR");
  emit_int8((unsigned char)0xF3);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBD);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::movdq(XMMRegister dst, Register src) {
  // table D-1 says MMX/SSE2
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  int encode = simd_prefix_and_encode_q(dst, src, VEX_SIMD_66);
  emit_int8(0x6E);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::movdq(Register dst, XMMRegister src) {
  // table D-1 says MMX/SSE2
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  // swap src/dst to get correct prefix
  int encode = simd_prefix_and_encode_q(src, dst, VEX_SIMD_66);
  emit_int8(0x7E);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::movq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8((unsigned char)0x8B);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::movq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8((unsigned char)0x8B);
  emit_operand(dst, src);
}

void Assembler::movq(Address dst, Register src) {
  InstructionMark im(this);
  prefixq(dst, src);
  emit_int8((unsigned char)0x89);
  emit_operand(src, dst);
}

void Assembler::movsbq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBE);
  emit_operand(dst, src);
}

void Assembler::movsbq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBE);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::movslq(Register dst, int32_t imm32) {
  // dbx shows movslq(rcx, 3) as movq     $0x0000000049000000,(%rbx)
  // and movslq(r8, 3); as movl     $0x0000000048000000,(%rbx)
  // as a result we shouldn't use it until it has been tested at runtime...
  ShouldNotReachHere();
  InstructionMark im(this);
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)(0xC7 | encode));
  emit_int32(imm32);
}

void Assembler::movslq(Address dst, int32_t imm32) {
  assert(is_simm32(imm32), "lost bits");
  InstructionMark im(this);
  prefixq(dst);
  emit_int8((unsigned char)0xC7);
  emit_operand(rax, dst, 4);
  emit_int32(imm32);
}

void Assembler::movslq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x63);
  emit_operand(dst, src);
}

void Assembler::movslq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x63);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::movswq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBF);
  emit_operand(dst, src);
}

void Assembler::movswq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8((unsigned char)0x0F);
  emit_int8((unsigned char)0xBF);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::movzbq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8((unsigned char)0x0F);
  emit_int8((unsigned char)0xB6);
  emit_operand(dst, src);
}

void Assembler::movzbq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xB6);
  emit_int8(0xC0 | encode);
}

void Assembler::movzwq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8((unsigned char)0x0F);
  emit_int8((unsigned char)0xB7);
  emit_operand(dst, src);
}

void Assembler::movzwq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8((unsigned char)0x0F);
  emit_int8((unsigned char)0xB7);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::negq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xD8 | encode));
}

void Assembler::notq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xD0 | encode));
}

void Assembler::orq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_int8((unsigned char)0x81);
  emit_operand(rcx, dst, 4);
  emit_int32(imm32);
}

void Assembler::orq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xC8, dst, imm32);
}

void Assembler::orq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x0B);
  emit_operand(dst, src);
}

void Assembler::orq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x0B, 0xC0, dst, src);
}

void Assembler::popa() { // 64bit
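  // Reloads the sixteen-slot register block laid out by pusha(); the slot that
  // holds the saved rsp (11 * wordSize) is intentionally skipped.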
  movq(r15, Address(rsp, 0));
  movq(r14, Address(rsp, wordSize));
  movq(r13, Address(rsp, 2 * wordSize));
  movq(r12, Address(rsp, 3 * wordSize));
  movq(r11, Address(rsp, 4 * wordSize));
  movq(r10, Address(rsp, 5 * wordSize));
  movq(r9,  Address(rsp, 6 * wordSize));
  movq(r8,  Address(rsp, 7 * wordSize));
  movq(rdi, Address(rsp, 8 * wordSize));
  movq(rsi, Address(rsp, 9 * wordSize));
  movq(rbp, Address(rsp, 10 * wordSize));
  // skip rsp
  movq(rbx, Address(rsp, 12 * wordSize));
  movq(rdx, Address(rsp, 13 * wordSize));
  movq(rcx, Address(rsp, 14 * wordSize));
  movq(rax, Address(rsp, 15 * wordSize));

  addq(rsp, 16 * wordSize);
}

void Assembler::popcntq(Register dst, Address src) {
  assert(VM_Version::supports_popcnt(), "must support");
  InstructionMark im(this);
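  // The mandatory 0xF3 prefix must precede the REX prefix, so it is emitted
  // before prefixq().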
  emit_int8((unsigned char)0xF3);
  prefixq(src, dst);
  emit_int8((unsigned char)0x0F);
  emit_int8((unsigned char)0xB8);
  emit_operand(dst, src);
}

void Assembler::popcntq(Register dst, Register src) {
  assert(VM_Version::supports_popcnt(), "must support");
  emit_int8((unsigned char)0xF3);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8((unsigned char)0x0F);
  emit_int8((unsigned char)0xB8);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::popq(Address dst) {
  InstructionMark im(this);
  prefixq(dst);
  emit_int8((unsigned char)0x8F);
  emit_operand(rax, dst);
}

void Assembler::pusha() { // 64bit
  // we have to store original rsp.  ABI says that 128 bytes
  // below rsp are local scratch.
  movq(Address(rsp, -5 * wordSize), rsp);

  subq(rsp, 16 * wordSize);

  movq(Address(rsp, 15 * wordSize), rax);
  movq(Address(rsp, 14 * wordSize), rcx);
  movq(Address(rsp, 13 * wordSize), rdx);
  movq(Address(rsp, 12 * wordSize), rbx);
  // skip rsp
  movq(Address(rsp, 10 * wordSize), rbp);
  movq(Address(rsp, 9 * wordSize), rsi);
  movq(Address(rsp, 8 * wordSize), rdi);
  movq(Address(rsp, 7 * wordSize), r8);
  movq(Address(rsp, 6 * wordSize), r9);
  movq(Address(rsp, 5 * wordSize), r10);
  movq(Address(rsp, 4 * wordSize), r11);
  movq(Address(rsp, 3 * wordSize), r12);
  movq(Address(rsp, 2 * wordSize), r13);
  movq(Address(rsp, wordSize), r14);
  movq(Address(rsp, 0), r15);
}

void Assembler::pushq(Address src) {
  InstructionMark im(this);
  prefixq(src);
  emit_int8((unsigned char)0xFF);
  emit_operand(rsi, src);
}

void Assembler::rclq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
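  // 0xD1 /2 is the rotate-by-one form; 0xC1 /2 takes an explicit imm8 count.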
  int encode = prefixq_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_int8((unsigned char)0xD1);
    emit_int8((unsigned char)(0xD0 | encode));
  } else {
    emit_int8((unsigned char)0xC1);
    emit_int8((unsigned char)(0xD0 | encode));
    emit_int8(imm8);
  }
}
void Assembler::sarq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_int8((unsigned char)0xD1);
    emit_int8((unsigned char)(0xF8 | encode));
  } else {
    emit_int8((unsigned char)0xC1);
    emit_int8((unsigned char)(0xF8 | encode));
    emit_int8(imm8);
  }
}

void Assembler::sarq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)0xD3);
  emit_int8((unsigned char)(0xF8 | encode));
}

void Assembler::sbbq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_arith_operand(0x81, rbx, dst, imm32);
}

void Assembler::sbbq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xD8, dst, imm32);
}

void Assembler::sbbq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x1B);
  emit_operand(dst, src);
}

void Assembler::sbbq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x1B, 0xC0, dst, src);
}

void Assembler::shlq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_int8((unsigned char)0xD1);
    emit_int8((unsigned char)(0xE0 | encode));
  } else {
    emit_int8((unsigned char)0xC1);
    emit_int8((unsigned char)(0xE0 | encode));
    emit_int8(imm8);
  }
}

void Assembler::shlq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)0xD3);
  emit_int8((unsigned char)(0xE0 | encode));
}

void Assembler::shrq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)0xC1);
  emit_int8((unsigned char)(0xE8 | encode));
  emit_int8(imm8);
}

void Assembler::shrq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)0xD3);
  emit_int8(0xE8 | encode);
}

void Assembler::subq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_arith_operand(0x81, rbp, dst, imm32);
}

void Assembler::subq(Address dst, Register src) {
  InstructionMark im(this);
  prefixq(dst, src);
  emit_int8(0x29);
  emit_operand(src, dst);
}

void Assembler::subq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xE8, dst, imm32);
}

// Force generation of a 4 byte immediate value even if it fits into 8bit
void Assembler::subq_imm32(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith_imm32(0x81, 0xE8, dst, imm32);
}

void Assembler::subq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x2B);
  emit_operand(dst, src);
}

void Assembler::subq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x2B, 0xC0, dst, src);
}

void Assembler::testq(Register dst, int32_t imm32) {
  // not using emit_arith because test
  // doesn't support sign-extension of
  // 8bit operands
  int encode = dst->encoding();
  if (encode == 0) {
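    // rax has a dedicated short form, TEST rax, imm32 (0xA9); all other
    // registers use 0xF7 /0 with a ModRM byte.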
    prefix(REX_W);
    emit_int8((unsigned char)0xA9);
  } else {
    encode = prefixq_and_encode(encode);
    emit_int8((unsigned char)0xF7);
    emit_int8((unsigned char)(0xC0 | encode));
  }
  emit_int32(imm32);
}

void Assembler::testq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x85, 0xC0, dst, src);
}

void Assembler::xaddq(Address dst, Register src) {
  InstructionMark im(this);
  prefixq(dst, src);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xC1);
  emit_operand(src, dst);
}

void Assembler::xchgq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8((unsigned char)0x87);
  emit_operand(dst, src);
}

void Assembler::xchgq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8((unsigned char)0x87);
  emit_int8((unsigned char)(0xc0 | encode));
}

void Assembler::xorq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x33, 0xC0, dst, src);
}

void Assembler::xorq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x33);
  emit_operand(dst, src);
}

#endif // !LP64