/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/resourceArea.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#endif // INCLUDE_ALL_GCS

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#define STOP(error) stop(error)
#else
#define BLOCK_COMMENT(str) block_comment(str)
#define STOP(error) block_comment(error); stop(error)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
// Implementation of AddressLiteral

AddressLiteral::AddressLiteral(address target, relocInfo::relocType rtype) {
  _is_lval = false;
  _target = target;
  switch (rtype) {
  case relocInfo::oop_type:
  case relocInfo::metadata_type:
    // Oops are a special case. Normally they would be their own section
    // but in cases like icBuffer they are literals in the code stream that
    // we don't have a section for. We use none so that we get a literal address
    // which is always patchable.
    break;
  case relocInfo::external_word_type:
    _rspec = external_word_Relocation::spec(target);
    break;
  case relocInfo::internal_word_type:
    _rspec = internal_word_Relocation::spec(target);
    break;
  case relocInfo::opt_virtual_call_type:
    _rspec = opt_virtual_call_Relocation::spec();
    break;
  case relocInfo::static_call_type:
    _rspec = static_call_Relocation::spec();
    break;
  case relocInfo::runtime_call_type:
    _rspec = runtime_call_Relocation::spec();
    break;
  case relocInfo::poll_type:
  case relocInfo::poll_return_type:
    _rspec = Relocation::spec_simple(rtype);
    break;
  case relocInfo::none:
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

// Implementation of Address

#ifdef _LP64

Address Address::make_array(ArrayAddress adr) {
  // Not implementable on 64bit machines
  // Should have been handled higher up the call chain.
  ShouldNotReachHere();
  return Address();
}

// exceedingly dangerous constructor
Address::Address(int disp, address loc, relocInfo::relocType rtype) {
  _base  = noreg;
  _index = noreg;
  _scale = no_scale;
  _disp  = disp;
  switch (rtype) {
    case relocInfo::external_word_type:
      _rspec = external_word_Relocation::spec(loc);
      break;
    case relocInfo::internal_word_type:
      _rspec = internal_word_Relocation::spec(loc);
      break;
    case relocInfo::runtime_call_type:
      // HMM
      _rspec = runtime_call_Relocation::spec();
      break;
    case relocInfo::poll_type:
    case relocInfo::poll_return_type:
      _rspec = Relocation::spec_simple(rtype);
      break;
    case relocInfo::none:
      break;
    default:
      ShouldNotReachHere();
  }
}
#else // LP64

Address Address::make_array(ArrayAddress adr) {
  AddressLiteral base = adr.base();
  Address index = adr.index();
  assert(index._disp == 0, "must not have disp"); // maybe it can?
  Address array(index._base, index._index, index._scale, (intptr_t) base.target());
  array._rspec = base._rspec;
  return array;
}

// exceedingly dangerous constructor
Address::Address(address loc, RelocationHolder spec) {
  _base  = noreg;
  _index = noreg;
  _scale = no_scale;
  _disp  = (intptr_t) loc;
  _rspec = spec;
}

#endif // _LP64



// Convert the raw encoding form into the form expected by the constructor for
// Address.  An index of 4 (rsp) corresponds to having no index, so convert
// that to noreg for the Address constructor.
Address Address::make_raw(int base, int index, int scale, int disp, relocInfo::relocType disp_reloc) {
  RelocationHolder rspec;
  if (disp_reloc != relocInfo::none) {
    rspec = Relocation::spec_simple(disp_reloc);
  }
  bool valid_index = index != rsp->encoding();
  if (valid_index) {
    Address madr(as_Register(base), as_Register(index), (Address::ScaleFactor)scale, in_ByteSize(disp));
    madr._rspec = rspec;
    return madr;
  } else {
    Address madr(as_Register(base), noreg, Address::no_scale, in_ByteSize(disp));
    madr._rspec = rspec;
    return madr;
  }
}
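
// Illustrative example (a sketch, not normative): for the raw triple
// base = 0 (rax), index = 4, scale = 0, disp = 8 the index encoding 4 is rsp,
// so make_raw() drops the index and yields the same Address as
// Address(rax, noreg, Address::no_scale, in_ByteSize(8)).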

// Implementation of Assembler

int AbstractAssembler::code_fill_byte() {
  return (u_char)'\xF4'; // hlt
}

// make this go away someday
void Assembler::emit_data(jint data, relocInfo::relocType rtype, int format) {
  if (rtype == relocInfo::none)
        emit_int32(data);
  else  emit_data(data, Relocation::spec_simple(rtype), format);
}

void Assembler::emit_data(jint data, RelocationHolder const& rspec, int format) {
  assert(imm_operand == 0, "default format must be immediate in this file");
  assert(inst_mark() != NULL, "must be inside InstructionMark");
  if (rspec.type() !=  relocInfo::none) {
    #ifdef ASSERT
      check_relocation(rspec, format);
    #endif
    // Do not use AbstractAssembler::relocate, which is not intended for
    // embedded words.  Instead, relocate to the enclosing instruction.

    // hack. call32 is too wide for mask so use disp32
    if (format == call32_operand)
      code_section()->relocate(inst_mark(), rspec, disp32_operand);
    else
      code_section()->relocate(inst_mark(), rspec, format);
  }
  emit_int32(data);
}

static int encode(Register r) {
  int enc = r->encoding();
  if (enc >= 8) {
    enc -= 8;
  }
  return enc;
}

void Assembler::emit_arith_b(int op1, int op2, Register dst, int imm8) {
  assert(dst->has_byte_register(), "must have byte register");
  assert(isByte(op1) && isByte(op2), "wrong opcode");
  assert(isByte(imm8), "not a byte");
  assert((op1 & 0x01) == 0, "should be 8bit operation");
  emit_int8(op1);
  emit_int8(op2 | encode(dst));
  emit_int8(imm8);
}


void Assembler::emit_arith(int op1, int op2, Register dst, int32_t imm32) {
  assert(isByte(op1) && isByte(op2), "wrong opcode");
  assert((op1 & 0x01) == 1, "should be 32bit operation");
  assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
  if (is8bit(imm32)) {
    emit_int8(op1 | 0x02); // set sign bit
    emit_int8(op2 | encode(dst));
    emit_int8(imm32 & 0xFF);
  } else {
    emit_int8(op1);
    emit_int8(op2 | encode(dst));
    emit_int32(imm32);
  }
}
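
// Illustrative byte sequence: emit_arith(0x81, 0xC0, rcx, 8) takes the 8-bit
// immediate path and produces 0x83 0xC1 0x08 ("addl $8, %ecx"): opcode
// 0x81 | 0x02, ModRM 0xC0 | encode(rcx), then the imm8. An immediate that does
// not fit in 8 bits would instead produce 0x81 0xC1 followed by a full imm32.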

// Force generation of a 4 byte immediate value even if it fits into 8bit
void Assembler::emit_arith_imm32(int op1, int op2, Register dst, int32_t imm32) {
  assert(isByte(op1) && isByte(op2), "wrong opcode");
  assert((op1 & 0x01) == 1, "should be 32bit operation");
  assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
  emit_int8(op1);
  emit_int8(op2 | encode(dst));
  emit_int32(imm32);
}

// immediate-to-memory forms
void Assembler::emit_arith_operand(int op1, Register rm, Address adr, int32_t imm32) {
  assert((op1 & 0x01) == 1, "should be 32bit operation");
  assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
  if (is8bit(imm32)) {
    emit_int8(op1 | 0x02); // set sign bit
    emit_operand(rm, adr, 1);
    emit_int8(imm32 & 0xFF);
  } else {
    emit_int8(op1);
    emit_operand(rm, adr, 4);
    emit_int32(imm32);
  }
}
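
// Illustrative byte sequence (sketch): emit_arith_operand(0x81, rax, Address(rcx, 0), 8)
// produces 0x83 (sign-extended form), the ModRM byte 0x01 emitted by
// emit_operand() for [rcx] with reg field rax, and the imm8 0x08,
// i.e. "addl $8, (%rcx)".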


void Assembler::emit_arith(int op1, int op2, Register dst, Register src) {
  assert(isByte(op1) && isByte(op2), "wrong opcode");
  emit_int8(op1);
  emit_int8(op2 | encode(dst) << 3 | encode(src));
}


void Assembler::emit_operand(Register reg, Register base, Register index,
                             Address::ScaleFactor scale, int disp,
                             RelocationHolder const& rspec,
                             int rip_relative_correction) {
  relocInfo::relocType rtype = (relocInfo::relocType) rspec.type();

  // Encode the registers as needed in the fields they are used in

  int regenc = encode(reg) << 3;
  int indexenc = index->is_valid() ? encode(index) << 3 : 0;
  int baseenc = base->is_valid() ? encode(base) : 0;

  if (base->is_valid()) {
    if (index->is_valid()) {
      assert(scale != Address::no_scale, "inconsistent address");
      // [base + index*scale + disp]
      if (disp == 0 && rtype == relocInfo::none  &&
          base != rbp LP64_ONLY(&& base != r13)) {
        // [base + index*scale]
        // [00 reg 100][ss index base]
        assert(index != rsp, "illegal addressing mode");
        emit_int8(0x04 | regenc);
        emit_int8(scale << 6 | indexenc | baseenc);
      } else if (is8bit(disp) && rtype == relocInfo::none) {
        // [base + index*scale + imm8]
        // [01 reg 100][ss index base] imm8
        assert(index != rsp, "illegal addressing mode");
        emit_int8(0x44 | regenc);
        emit_int8(scale << 6 | indexenc | baseenc);
        emit_int8(disp & 0xFF);
      } else {
        // [base + index*scale + disp32]
        // [10 reg 100][ss index base] disp32
        assert(index != rsp, "illegal addressing mode");
        emit_int8(0x84 | regenc);
        emit_int8(scale << 6 | indexenc | baseenc);
        emit_data(disp, rspec, disp32_operand);
      }
    } else if (base == rsp LP64_ONLY(|| base == r12)) {
      // [rsp + disp]
      if (disp == 0 && rtype == relocInfo::none) {
        // [rsp]
        // [00 reg 100][00 100 100]
        emit_int8(0x04 | regenc);
        emit_int8(0x24);
      } else if (is8bit(disp) && rtype == relocInfo::none) {
        // [rsp + imm8]
        // [01 reg 100][00 100 100] disp8
        emit_int8(0x44 | regenc);
        emit_int8(0x24);
        emit_int8(disp & 0xFF);
      } else {
        // [rsp + imm32]
        // [10 reg 100][00 100 100] disp32
        emit_int8(0x84 | regenc);
        emit_int8(0x24);
        emit_data(disp, rspec, disp32_operand);
      }
    } else {
      // [base + disp]
      assert(base != rsp LP64_ONLY(&& base != r12), "illegal addressing mode");
      if (disp == 0 && rtype == relocInfo::none &&
          base != rbp LP64_ONLY(&& base != r13)) {
        // [base]
        // [00 reg base]
        emit_int8(0x00 | regenc | baseenc);
      } else if (is8bit(disp) && rtype == relocInfo::none) {
        // [base + disp8]
        // [01 reg base] disp8
        emit_int8(0x40 | regenc | baseenc);
        emit_int8(disp & 0xFF);
      } else {
        // [base + disp32]
        // [10 reg base] disp32
        emit_int8(0x80 | regenc | baseenc);
        emit_data(disp, rspec, disp32_operand);
      }
    }
  } else {
    if (index->is_valid()) {
      assert(scale != Address::no_scale, "inconsistent address");
      // [index*scale + disp]
      // [00 reg 100][ss index 101] disp32
      assert(index != rsp, "illegal addressing mode");
      emit_int8(0x04 | regenc);
      emit_int8(scale << 6 | indexenc | 0x05);
      emit_data(disp, rspec, disp32_operand);
    } else if (rtype != relocInfo::none ) {
      // [disp] (64bit) RIP-RELATIVE (32bit) abs
      // [00 000 101] disp32

      emit_int8(0x05 | regenc);
      // Note that the RIP-rel. correction applies to the generated
      // disp field, but _not_ to the target address in the rspec.

      // disp was created by converting the target address minus the pc
      // at the start of the instruction. That needs more correction here.
      // intptr_t disp = target - next_ip;
      assert(inst_mark() != NULL, "must be inside InstructionMark");
      address next_ip = pc() + sizeof(int32_t) + rip_relative_correction;
      int64_t adjusted = disp;
      // Do rip-rel adjustment for 64bit
      LP64_ONLY(adjusted -=  (next_ip - inst_mark()));
      assert(is_simm32(adjusted),
             "must be 32bit offset (RIP relative address)");
      emit_data((int32_t) adjusted, rspec, disp32_operand);

    } else {
      // 32bit never did this, did everything as the rip-rel/disp code above
      // [disp] ABSOLUTE
      // [00 reg 100][00 100 101] disp32
      emit_int8(0x04 | regenc);
      emit_int8(0x25);
      emit_data(disp, rspec, disp32_operand);
    }
  }
}
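
// Illustrative encoding for the [base + index*scale + disp8] path above
// (sketch): reg = rax, base = rcx, index = rdx, scale = times_4, disp = 16 and
// no relocation produce 0x44 (mod=01 reg=000 rm=100), the SIB byte 0x91
// (ss=10 index=010 base=001), and the disp8 0x10.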

void Assembler::emit_operand(XMMRegister reg, Register base, Register index,
                             Address::ScaleFactor scale, int disp,
                             RelocationHolder const& rspec) {
  emit_operand((Register)reg, base, index, scale, disp, rspec);
}

// Secret local extension to Assembler::WhichOperand:
#define end_pc_operand (_WhichOperand_limit)

address Assembler::locate_operand(address inst, WhichOperand which) {
  // Decode the given instruction, and return the address of
  // an embedded 32-bit operand word.

  // If "which" is disp32_operand, selects the displacement portion
  // of an effective address specifier.
  // If "which" is imm64_operand, selects the trailing immediate constant.
  // If "which" is call32_operand, selects the displacement of a call or jump.
  // Caller is responsible for ensuring that there is such an operand,
  // and that it is 32/64 bits wide.

  // If "which" is end_pc_operand, find the end of the instruction.

  address ip = inst;
  bool is_64bit = false;

  debug_only(bool has_disp32 = false);
  int tail_size = 0; // other random bytes (#32, #16, etc.) at end of insn

  again_after_prefix:
  switch (0xFF & *ip++) {

  // These convenience macros generate groups of "case" labels for the switch.
#define REP4(x) (x)+0: case (x)+1: case (x)+2: case (x)+3
#define REP8(x) (x)+0: case (x)+1: case (x)+2: case (x)+3: \
             case (x)+4: case (x)+5: case (x)+6: case (x)+7
#define REP16(x) REP8((x)+0): \
              case REP8((x)+8)

  case CS_segment:
  case SS_segment:
  case DS_segment:
  case ES_segment:
  case FS_segment:
  case GS_segment:
    // Seems dubious
    LP64_ONLY(assert(false, "shouldn't have that prefix"));
    assert(ip == inst+1, "only one prefix allowed");
    goto again_after_prefix;

  case 0x67:
  case REX:
  case REX_B:
  case REX_X:
  case REX_XB:
  case REX_R:
  case REX_RB:
  case REX_RX:
  case REX_RXB:
    NOT_LP64(assert(false, "64bit prefixes"));
    goto again_after_prefix;

  case REX_W:
  case REX_WB:
  case REX_WX:
  case REX_WXB:
  case REX_WR:
  case REX_WRB:
  case REX_WRX:
  case REX_WRXB:
    NOT_LP64(assert(false, "64bit prefixes"));
    is_64bit = true;
    goto again_after_prefix;

  case 0xFF: // pushq a; decl a; incl a; call a; jmp a
  case 0x88: // movb a, r
  case 0x89: // movl a, r
  case 0x8A: // movb r, a
  case 0x8B: // movl r, a
  case 0x8F: // popl a
    debug_only(has_disp32 = true);
    break;

  case 0x68: // pushq #32
    if (which == end_pc_operand) {
      return ip + 4;
    }
    assert(which == imm_operand && !is_64bit, "pushl has no disp32 or 64bit immediate");
    return ip;                  // not produced by emit_operand

  case 0x66: // movw ... (size prefix)
    again_after_size_prefix2:
    switch (0xFF & *ip++) {
    case REX:
    case REX_B:
    case REX_X:
    case REX_XB:
    case REX_R:
    case REX_RB:
    case REX_RX:
    case REX_RXB:
    case REX_W:
    case REX_WB:
    case REX_WX:
    case REX_WXB:
    case REX_WR:
    case REX_WRB:
    case REX_WRX:
    case REX_WRXB:
      NOT_LP64(assert(false, "64bit prefix found"));
      goto again_after_size_prefix2;
    case 0x8B: // movw r, a
    case 0x89: // movw a, r
      debug_only(has_disp32 = true);
      break;
    case 0xC7: // movw a, #16
      debug_only(has_disp32 = true);
      tail_size = 2;  // the imm16
      break;
    case 0x0F: // several SSE/SSE2 variants
      ip--;    // reparse the 0x0F
      goto again_after_prefix;
    default:
      ShouldNotReachHere();
    }
    break;

  case REP8(0xB8): // movl/q r, #32/#64(oop?)
    if (which == end_pc_operand)  return ip + (is_64bit ? 8 : 4);
    // these asserts are somewhat nonsensical
#ifndef _LP64
    assert(which == imm_operand || which == disp32_operand,
           err_msg("which %d is_64_bit %d ip " INTPTR_FORMAT, which, is_64bit, p2i(ip)));
#else
    assert((which == call32_operand || which == imm_operand) && is_64bit ||
           which == narrow_oop_operand && !is_64bit,
           err_msg("which %d is_64_bit %d ip " INTPTR_FORMAT, which, is_64bit, p2i(ip)));
#endif // _LP64
    return ip;

  case 0x69: // imul r, a, #32
  case 0xC7: // movl a, #32(oop?)
    tail_size = 4;
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  case 0x0F: // movx..., etc.
    switch (0xFF & *ip++) {
    case 0x3A: // pcmpestri
      tail_size = 1;
    case 0x38: // ptest, pmovzxbw
      ip++; // skip opcode
      debug_only(has_disp32 = true); // has both kinds of operands!
      break;

    case 0x70: // pshufd r, r/a, #8
      debug_only(has_disp32 = true); // has both kinds of operands!
    case 0x73: // psrldq r, #8
      tail_size = 1;
      break;

    case 0x12: // movlps
    case 0x28: // movaps
    case 0x2E: // ucomiss
    case 0x2F: // comiss
    case 0x54: // andps
    case 0x55: // andnps
    case 0x56: // orps
    case 0x57: // xorps
    case 0x6E: // movd
    case 0x7E: // movd
    case 0xAE: // ldmxcsr, stmxcsr, fxrstor, fxsave, clflush
      debug_only(has_disp32 = true);
      break;

    case 0xAD: // shrd r, a, %cl
    case 0xAF: // imul r, a
    case 0xBE: // movsbl r, a (movsxb)
    case 0xBF: // movswl r, a (movsxw)
    case 0xB6: // movzbl r, a (movzxb)
    case 0xB7: // movzwl r, a (movzxw)
    case REP16(0x40): // cmovl cc, r, a
    case 0xB0: // cmpxchgb
    case 0xB1: // cmpxchg
    case 0xC1: // xaddl
    case 0xC7: // cmpxchg8
    case REP16(0x90): // setcc a
      debug_only(has_disp32 = true);
      // fall out of the switch to decode the address
      break;

    case 0xC4: // pinsrw r, a, #8
      debug_only(has_disp32 = true);
    case 0xC5: // pextrw r, r, #8
      tail_size = 1;  // the imm8
      break;

    case 0xAC: // shrd r, a, #8
      debug_only(has_disp32 = true);
      tail_size = 1;  // the imm8
      break;

    case REP16(0x80): // jcc rdisp32
      if (which == end_pc_operand)  return ip + 4;
      assert(which == call32_operand, "jcc has no disp32 or imm");
      return ip;
    default:
      ShouldNotReachHere();
    }
    break;

  case 0x81: // addl a, #32; addl r, #32
    // also: orl, adcl, sbbl, andl, subl, xorl, cmpl
    // on 32bit in the case of cmpl, the imm might be an oop
    tail_size = 4;
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  case 0x83: // addl a, #8; addl r, #8
    // also: orl, adcl, sbbl, andl, subl, xorl, cmpl
    debug_only(has_disp32 = true); // has both kinds of operands!
    tail_size = 1;
    break;

  case 0x9B:
    switch (0xFF & *ip++) {
    case 0xD9: // fnstcw a
      debug_only(has_disp32 = true);
      break;
    default:
      ShouldNotReachHere();
    }
    break;

  case REP4(0x00): // addb a, r; addl a, r; addb r, a; addl r, a
  case REP4(0x10): // adc...
  case REP4(0x20): // and...
  case REP4(0x30): // xor...
  case REP4(0x08): // or...
  case REP4(0x18): // sbb...
  case REP4(0x28): // sub...
  case 0xF7: // mull a
  case 0x8D: // lea r, a
  case 0x87: // xchg r, a
  case REP4(0x38): // cmp...
  case 0x85: // test r, a
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  case 0xC1: // sal a, #8; sar a, #8; shl a, #8; shr a, #8
  case 0xC6: // movb a, #8
  case 0x80: // cmpb a, #8
  case 0x6B: // imul r, a, #8
    debug_only(has_disp32 = true); // has both kinds of operands!
    tail_size = 1; // the imm8
    break;

  case 0xC4: // VEX_3bytes
  case 0xC5: // VEX_2bytes
    assert((UseAVX > 0), "shouldn't have VEX prefix");
    assert(ip == inst+1, "no prefixes allowed");
    // C4 and C5 are also used as opcodes for PINSRW and PEXTRW instructions
    // but they have prefix 0x0F and processed when 0x0F processed above.
    //
    // In 32-bit mode the VEX first byte C4 and C5 alias onto LDS and LES
    // instructions (these instructions are not supported in 64-bit mode).
    // To distinguish them bits [7:6] are set in the VEX second byte since
    // ModRM byte can not be of the form 11xxxxxx in 32-bit mode. To set
    // those VEX bits REX and vvvv bits are inverted.
    //
    // Fortunately C2 doesn't generate these instructions so we don't need
    // to check for them in product version.

    // Check second byte
    NOT_LP64(assert((0xC0 & *ip) == 0xC0, "shouldn't have LDS and LES instructions"));

    // First byte
    if ((0xFF & *inst) == VEX_3bytes) {
      ip++; // third byte
      is_64bit = ((VEX_W & *ip) == VEX_W);
    }
    ip++; // opcode
    // To find the end of instruction (which == end_pc_operand).
    switch (0xFF & *ip) {
    case 0x61: // pcmpestri r, r/a, #8
    case 0x70: // pshufd r, r/a, #8
    case 0x73: // psrldq r, #8
      tail_size = 1;  // the imm8
      break;
    default:
      break;
    }
    ip++; // skip opcode
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  case 0xD1: // sal a, 1; sar a, 1; shl a, 1; shr a, 1
  case 0xD3: // sal a, %cl; sar a, %cl; shl a, %cl; shr a, %cl
  case 0xD9: // fld_s a; fst_s a; fstp_s a; fldcw a
  case 0xDD: // fld_d a; fst_d a; fstp_d a
  case 0xDB: // fild_s a; fistp_s a; fld_x a; fstp_x a
  case 0xDF: // fild_d a; fistp_d a
  case 0xD8: // fadd_s a; fsubr_s a; fmul_s a; fdivr_s a; fcomp_s a
  case 0xDC: // fadd_d a; fsubr_d a; fmul_d a; fdivr_d a; fcomp_d a
  case 0xDE: // faddp_d a; fsubrp_d a; fmulp_d a; fdivrp_d a; fcompp_d a
    debug_only(has_disp32 = true);
    break;

  case 0xE8: // call rdisp32
  case 0xE9: // jmp  rdisp32
    if (which == end_pc_operand)  return ip + 4;
    assert(which == call32_operand, "call has no disp32 or imm");
    return ip;

  case 0xF0:                    // Lock
    assert(os::is_MP(), "only on MP");
    goto again_after_prefix;

  case 0xF3:                    // For SSE
  case 0xF2:                    // For SSE2
    switch (0xFF & *ip++) {
    case REX:
    case REX_B:
    case REX_X:
    case REX_XB:
    case REX_R:
    case REX_RB:
    case REX_RX:
    case REX_RXB:
    case REX_W:
    case REX_WB:
    case REX_WX:
    case REX_WXB:
    case REX_WR:
    case REX_WRB:
    case REX_WRX:
    case REX_WRXB:
      NOT_LP64(assert(false, "found 64bit prefix"));
      ip++;
    default:
      ip++;
    }
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  default:
    ShouldNotReachHere();

#undef REP8
#undef REP16
  }

  assert(which != call32_operand, "instruction is not a call, jmp, or jcc");
#ifdef _LP64
  assert(which != imm_operand, "instruction is not a movq reg, imm64");
#else
  // assert(which != imm_operand || has_imm32, "instruction has no imm32 field");
  assert(which != imm_operand || has_disp32, "instruction has no imm32 field");
#endif // LP64
  assert(which != disp32_operand || has_disp32, "instruction has no disp32 field");

  // parse the output of emit_operand
  int op2 = 0xFF & *ip++;
  int base = op2 & 0x07;
  int op3 = -1;
  const int b100 = 4;
  const int b101 = 5;
  if (base == b100 && (op2 >> 6) != 3) {
    op3 = 0xFF & *ip++;
    base = op3 & 0x07;   // refetch the base
  }
  // now ip points at the disp (if any)

  switch (op2 >> 6) {
  case 0:
    // [00 reg  100][ss index base]
    // [00 reg  100][00   100  esp]
    // [00 reg base]
    // [00 reg  100][ss index  101][disp32]
    // [00 reg  101]               [disp32]

    if (base == b101) {
      if (which == disp32_operand)
        return ip;              // caller wants the disp32
      ip += 4;                  // skip the disp32
    }
    break;

  case 1:
    // [01 reg  100][ss index base][disp8]
    // [01 reg  100][00   100  esp][disp8]
    // [01 reg base]               [disp8]
    ip += 1;                    // skip the disp8
    break;

  case 2:
    // [10 reg  100][ss index base][disp32]
    // [10 reg  100][00   100  esp][disp32]
    // [10 reg base]               [disp32]
    if (which == disp32_operand)
      return ip;                // caller wants the disp32
    ip += 4;                    // skip the disp32
    break;

  case 3:
    // [11 reg base]  (not a memory addressing mode)
    break;
  }

  if (which == end_pc_operand) {
    return ip + tail_size;
  }

#ifdef _LP64
  assert(which == narrow_oop_operand && !is_64bit, "instruction is not a movl adr, imm32");
#else
  assert(which == imm_operand, "instruction has only an imm field");
#endif // LP64
  return ip;
}
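
// Illustrative walk-through (sketch): for the bytes 0x83 0xC1 0x08
// ("addl $8, %ecx") the 0x83 case records a one-byte immediate tail, the ModRM
// byte 0xC1 has mod == 3 so no displacement follows, and
// locate_operand(inst, end_pc_operand) returns inst + 3, the start of the next
// instruction.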

address Assembler::locate_next_instruction(address inst) {
  // Secretly share code with locate_operand:
  return locate_operand(inst, end_pc_operand);
}


#ifdef ASSERT
void Assembler::check_relocation(RelocationHolder const& rspec, int format) {
  address inst = inst_mark();
  assert(inst != NULL && inst < pc(), "must point to beginning of instruction");
  address opnd;

  Relocation* r = rspec.reloc();
  if (r->type() == relocInfo::none) {
    return;
  } else if (r->is_call() || format == call32_operand) {
    // assert(format == imm32_operand, "cannot specify a nonzero format");
    opnd = locate_operand(inst, call32_operand);
  } else if (r->is_data()) {
    assert(format == imm_operand || format == disp32_operand
           LP64_ONLY(|| format == narrow_oop_operand), "format ok");
    opnd = locate_operand(inst, (WhichOperand)format);
  } else {
    assert(format == imm_operand, "cannot specify a format");
    return;
  }
  assert(opnd == pc(), "must put operand where relocs can find it");
}
#endif // ASSERT

void Assembler::emit_operand32(Register reg, Address adr) {
  assert(reg->encoding() < 8, "no extended registers");
  assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers");
  emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp,
               adr._rspec);
}

void Assembler::emit_operand(Register reg, Address adr,
                             int rip_relative_correction) {
  emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp,
               adr._rspec,
               rip_relative_correction);
}

void Assembler::emit_operand(XMMRegister reg, Address adr) {
  emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp,
               adr._rspec);
}

// MMX operations
void Assembler::emit_operand(MMXRegister reg, Address adr) {
  assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers");
  emit_operand((Register)reg, adr._base, adr._index, adr._scale, adr._disp, adr._rspec);
}

// work around gcc (3.2.1-7a) bug
void Assembler::emit_operand(Address adr, MMXRegister reg) {
  assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers");
  emit_operand((Register)reg, adr._base, adr._index, adr._scale, adr._disp, adr._rspec);
}


void Assembler::emit_farith(int b1, int b2, int i) {
  assert(isByte(b1) && isByte(b2), "wrong opcode");
  assert(0 <= i &&  i < 8, "illegal stack offset");
  emit_int8(b1);
  emit_int8(b2 + i);
}


// Now the Assembler instructions (identical for 32/64 bits)

void Assembler::adcl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand(0x81, rdx, dst, imm32);
}

void Assembler::adcl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_int8(0x11);
  emit_operand(src, dst);
}

void Assembler::adcl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xD0, dst, imm32);
}

void Assembler::adcl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x13);
  emit_operand(dst, src);
}

void Assembler::adcl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x13, 0xC0, dst, src);
}

void Assembler::addl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand(0x81, rax, dst, imm32);
}

void Assembler::addl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_int8(0x01);
  emit_operand(src, dst);
}

void Assembler::addl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xC0, dst, imm32);
}

void Assembler::addl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x03);
  emit_operand(dst, src);
}

void Assembler::addl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x03, 0xC0, dst, src);
}

void Assembler::addr_nop_4() {
  assert(UseAddressNop, "no CPU support");
  // 4 bytes: NOP DWORD PTR [EAX+0]
  emit_int8(0x0F);
  emit_int8(0x1F);
  emit_int8(0x40); // emit_rm(cbuf, 0x1, EAX_enc, EAX_enc);
  emit_int8(0);    // 8-bits offset (1 byte)
}

void Assembler::addr_nop_5() {
  assert(UseAddressNop, "no CPU support");
  // 5 bytes: NOP DWORD PTR [EAX+EAX*0+0] 8-bits offset
  emit_int8(0x0F);
  emit_int8(0x1F);
  emit_int8(0x44); // emit_rm(cbuf, 0x1, EAX_enc, 0x4);
  emit_int8(0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc);
  emit_int8(0);    // 8-bits offset (1 byte)
}

void Assembler::addr_nop_7() {
  assert(UseAddressNop, "no CPU support");
  // 7 bytes: NOP DWORD PTR [EAX+0] 32-bits offset
  emit_int8(0x0F);
  emit_int8(0x1F);
  emit_int8((unsigned char)0x80);
                   // emit_rm(cbuf, 0x2, EAX_enc, EAX_enc);
  emit_int32(0);   // 32-bits offset (4 bytes)
}

void Assembler::addr_nop_8() {
  assert(UseAddressNop, "no CPU support");
  // 8 bytes: NOP DWORD PTR [EAX+EAX*0+0] 32-bits offset
  emit_int8(0x0F);
  emit_int8(0x1F);
  emit_int8((unsigned char)0x84);
                   // emit_rm(cbuf, 0x2, EAX_enc, 0x4);
  emit_int8(0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc);
  emit_int32(0);   // 32-bits offset (4 bytes)
}

void Assembler::addsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x58, dst, src, VEX_SIMD_F2);
}

void Assembler::addsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x58, dst, src, VEX_SIMD_F2);
}

void Assembler::addss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x58, dst, src, VEX_SIMD_F3);
}

void Assembler::addss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x58, dst, src, VEX_SIMD_F3);
}

void Assembler::aesdec(XMMRegister dst, Address src) {
  assert(VM_Version::supports_aes(), "");
  InstructionMark im(this);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  emit_int8((unsigned char)0xDE);
  emit_operand(dst, src);
}

void Assembler::aesdec(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_aes(), "");
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  emit_int8((unsigned char)0xDE);
  emit_int8(0xC0 | encode);
}

void Assembler::aesdeclast(XMMRegister dst, Address src) {
  assert(VM_Version::supports_aes(), "");
  InstructionMark im(this);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  emit_int8((unsigned char)0xDF);
  emit_operand(dst, src);
}

void Assembler::aesdeclast(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_aes(), "");
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  emit_int8((unsigned char)0xDF);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::aesenc(XMMRegister dst, Address src) {
  assert(VM_Version::supports_aes(), "");
  InstructionMark im(this);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  emit_int8((unsigned char)0xDC);
  emit_operand(dst, src);
}

void Assembler::aesenc(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_aes(), "");
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  emit_int8((unsigned char)0xDC);
  emit_int8(0xC0 | encode);
}

void Assembler::aesenclast(XMMRegister dst, Address src) {
  assert(VM_Version::supports_aes(), "");
  InstructionMark im(this);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  emit_int8((unsigned char)0xDD);
  emit_operand(dst, src);
}

void Assembler::aesenclast(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_aes(), "");
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  emit_int8((unsigned char)0xDD);
  emit_int8((unsigned char)(0xC0 | encode));
}


void Assembler::andl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0x81);
  emit_operand(rsp, dst, 4);
  emit_int32(imm32);
}

void Assembler::andl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xE0, dst, imm32);
}

void Assembler::andl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x23);
  emit_operand(dst, src);
}

void Assembler::andl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x23, 0xC0, dst, src);
}

void Assembler::andnl(Register dst, Register src1, Register src2) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  int encode = vex_prefix_0F38_and_encode(dst, src1, src2);
  emit_int8((unsigned char)0xF2);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::andnl(Register dst, Register src1, Address src2) {
  InstructionMark im(this);
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  vex_prefix_0F38(dst, src1, src2);
  emit_int8((unsigned char)0xF2);
  emit_operand(dst, src2);
}

void Assembler::bsfl(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBC);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::bsrl(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBD);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::bswapl(Register reg) { // bswap
  int encode = prefix_and_encode(reg->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)(0xC8 | encode));
}

void Assembler::blsil(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  int encode = vex_prefix_0F38_and_encode(rbx, dst, src);
  emit_int8((unsigned char)0xF3);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::blsil(Register dst, Address src) {
  InstructionMark im(this);
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  vex_prefix_0F38(rbx, dst, src);
  emit_int8((unsigned char)0xF3);
  emit_operand(rbx, src);
}

void Assembler::blsmskl(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  int encode = vex_prefix_0F38_and_encode(rdx, dst, src);
  emit_int8((unsigned char)0xF3);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::blsmskl(Register dst, Address src) {
  InstructionMark im(this);
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  vex_prefix_0F38(rdx, dst, src);
  emit_int8((unsigned char)0xF3);
  emit_operand(rdx, src);
}

void Assembler::blsrl(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  int encode = vex_prefix_0F38_and_encode(rcx, dst, src);
  emit_int8((unsigned char)0xF3);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::blsrl(Register dst, Address src) {
  InstructionMark im(this);
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  vex_prefix_0F38(rcx, dst, src);
  emit_int8((unsigned char)0xF3);
  emit_operand(rcx, src);
}

void Assembler::call(Label& L, relocInfo::relocType rtype) {
  // suspect disp32 is always good
  int operand = LP64_ONLY(disp32_operand) NOT_LP64(imm_operand);

  if (L.is_bound()) {
    const int long_size = 5;
    int offs = (int)( target(L) - pc() );
    assert(offs <= 0, "assembler error");
    InstructionMark im(this);
    // 1110 1000 #32-bit disp
    emit_int8((unsigned char)0xE8);
    emit_data(offs - long_size, rtype, operand);
  } else {
    InstructionMark im(this);
    // 1110 1000 #32-bit disp
    L.add_patch_at(code(), locator());

    emit_int8((unsigned char)0xE8);
    emit_data(int(0), rtype, operand);
  }
}

void Assembler::call(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xFF);
  emit_int8((unsigned char)(0xD0 | encode));
}


void Assembler::call(Address adr) {
  InstructionMark im(this);
  prefix(adr);
  emit_int8((unsigned char)0xFF);
  emit_operand(rdx, adr);
}

void Assembler::call_literal(address entry, RelocationHolder const& rspec) {
  assert(entry != NULL, "call most probably wrong");
  InstructionMark im(this);
  emit_int8((unsigned char)0xE8);
  intptr_t disp = entry - (pc() + sizeof(int32_t));
  assert(is_simm32(disp), "must be 32bit offset (call2)");
  // Technically, should use call32_operand, but this format is
  // implied by the fact that we're emitting a call instruction.

  int operand = LP64_ONLY(disp32_operand) NOT_LP64(call32_operand);
  emit_data((int) disp, rspec, operand);
}
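
// Illustrative displacement arithmetic (sketch): once the 0xE8 opcode byte is
// out, pc() points at the 4-byte displacement field, so a target 100 bytes past
// that field gets disp = entry - (pc() + 4) = 96; the processor adds the
// emitted displacement to the address of the following instruction at call time.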

void Assembler::cdql() {
  emit_int8((unsigned char)0x99);
}

void Assembler::cld() {
  emit_int8((unsigned char)0xFC);
}

void Assembler::cmovl(Condition cc, Register dst, Register src) {
  NOT_LP64(guarantee(VM_Version::supports_cmov(), "illegal instruction"));
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8(0x40 | cc);
  emit_int8((unsigned char)(0xC0 | encode));
}


void Assembler::cmovl(Condition cc, Register dst, Address src) {
  NOT_LP64(guarantee(VM_Version::supports_cmov(), "illegal instruction"));
  prefix(src, dst);
  emit_int8(0x0F);
  emit_int8(0x40 | cc);
  emit_operand(dst, src);
}

void Assembler::cmpb(Address dst, int imm8) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0x80);
  emit_operand(rdi, dst, 1);
  emit_int8(imm8);
}

void Assembler::cmpl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0x81);
  emit_operand(rdi, dst, 4);
  emit_int32(imm32);
}

void Assembler::cmpl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xF8, dst, imm32);
}

void Assembler::cmpl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x3B, 0xC0, dst, src);
}


void Assembler::cmpl(Register dst, Address  src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8((unsigned char)0x3B);
  emit_operand(dst, src);
}

void Assembler::cmpw(Address dst, int imm16) {
  InstructionMark im(this);
  assert(!dst.base_needs_rex() && !dst.index_needs_rex(), "no extended registers");
  emit_int8(0x66);
  emit_int8((unsigned char)0x81);
  emit_operand(rdi, dst, 2);
  emit_int16(imm16);
}

// The 32-bit cmpxchg compares the value at adr with the contents of rax,
// and stores reg into adr if so; otherwise, the value at adr is loaded into rax,.
// The ZF is set if the compared values were equal, and cleared otherwise.
void Assembler::cmpxchgl(Register reg, Address adr) { // cmpxchg
  InstructionMark im(this);
  prefix(adr, reg);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xB1);
  emit_operand(reg, adr);
}

void Assembler::comisd(XMMRegister dst, Address src) {
  // NOTE: dbx seems to decode this as comiss even though the
  // 0x66 is there. Strangely ucomisd comes out correct
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith_nonds(0x2F, dst, src, VEX_SIMD_66);
}

void Assembler::comisd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith_nonds(0x2F, dst, src, VEX_SIMD_66);
}

void Assembler::comiss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith_nonds(0x2F, dst, src, VEX_SIMD_NONE);
}

void Assembler::comiss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith_nonds(0x2F, dst, src, VEX_SIMD_NONE);
}

void Assembler::cpuid() {
  emit_int8(0x0F);
  emit_int8((unsigned char)0xA2);
}

void Assembler::cvtdq2pd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith_nonds(0xE6, dst, src, VEX_SIMD_F3);
}

void Assembler::cvtdq2ps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith_nonds(0x5B, dst, src, VEX_SIMD_NONE);
}

void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x5A, dst, src, VEX_SIMD_F2);
}

void Assembler::cvtsd2ss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x5A, dst, src, VEX_SIMD_F2);
}

void Assembler::cvtsi2sdl(XMMRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2);
  emit_int8(0x2A);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::cvtsi2sdl(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x2A, dst, src, VEX_SIMD_F2);
}

void Assembler::cvtsi2ssl(XMMRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3);
  emit_int8(0x2A);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::cvtsi2ssl(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x2A, dst, src, VEX_SIMD_F3);
}

void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x5A, dst, src, VEX_SIMD_F3);
}

void Assembler::cvtss2sd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x5A, dst, src, VEX_SIMD_F3);
}


void Assembler::cvttsd2sil(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  int encode = simd_prefix_and_encode(dst, src, VEX_SIMD_F2);
  emit_int8(0x2C);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::cvttss2sil(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  int encode = simd_prefix_and_encode(dst, src, VEX_SIMD_F3);
  emit_int8(0x2C);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::decl(Address dst) {
  // Don't use it directly. Use MacroAssembler::decrement() instead.
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0xFF);
  emit_operand(rcx, dst);
}

void Assembler::divsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x5E, dst, src, VEX_SIMD_F2);
}

void Assembler::divsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x5E, dst, src, VEX_SIMD_F2);
}

void Assembler::divss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x5E, dst, src, VEX_SIMD_F3);
}

void Assembler::divss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x5E, dst, src, VEX_SIMD_F3);
}

void Assembler::emms() {
  NOT_LP64(assert(VM_Version::supports_mmx(), ""));
  emit_int8(0x0F);
  emit_int8(0x77);
}

void Assembler::hlt() {
  emit_int8((unsigned char)0xF4);
}

void Assembler::idivl(Register src) {
  int encode = prefix_and_encode(src->encoding());
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xF8 | encode));
}

void Assembler::divl(Register src) { // Unsigned
  int encode = prefix_and_encode(src->encoding());
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xF0 | encode));
}

void Assembler::imull(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAF);
  emit_int8((unsigned char)(0xC0 | encode));
}


void Assembler::imull(Register dst, Register src, int value) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  if (is8bit(value)) {
    emit_int8(0x6B);
    emit_int8((unsigned char)(0xC0 | encode));
    emit_int8(value & 0xFF);
  } else {
    emit_int8(0x69);
    emit_int8((unsigned char)(0xC0 | encode));
    emit_int32(value);
  }
}

void Assembler::imull(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x0F);
  emit_int8((unsigned char) 0xAF);
  emit_operand(dst, src);
}


void Assembler::incl(Address dst) {
  // Don't use it directly. Use MacroAssembler::increment() instead.
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0xFF);
  emit_operand(rax, dst);
}

void Assembler::jcc(Condition cc, Label& L, bool maybe_short) {
  InstructionMark im(this);
  assert((0 <= cc) && (cc < 16), "illegal cc");
  if (L.is_bound()) {
    address dst = target(L);
    assert(dst != NULL, "jcc most probably wrong");

    const int short_size = 2;
    const int long_size = 6;
    intptr_t offs = (intptr_t)dst - (intptr_t)pc();
    if (maybe_short && is8bit(offs - short_size)) {
      // 0111 tttn #8-bit disp
      emit_int8(0x70 | cc);
      emit_int8((offs - short_size) & 0xFF);
    } else {
      // 0000 1111 1000 tttn #32-bit disp
      assert(is_simm32(offs - long_size),
             "must be 32bit offset (call4)");
      emit_int8(0x0F);
      emit_int8((unsigned char)(0x80 | cc));
      emit_int32(offs - long_size);
    }
  } else {
    // Note: could eliminate cond. jumps to this jump if condition
    //       is the same however, seems to be rather unlikely case.
    // Note: use jccb() if label to be bound is very close to get
    //       an 8-bit displacement
    L.add_patch_at(code(), locator());
    emit_int8(0x0F);
    emit_int8((unsigned char)(0x80 | cc));
    emit_int32(0);
  }
}
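
// Illustrative encodings (sketch): with a bound label 10 bytes behind the
// current pc, jcc(Assembler::equal, L) passes the is8bit() check and emits the
// short form 0x74 0xF4 (0x70 | cc, then (offs - 2) & 0xFF); a target outside
// the +/- 127 byte range falls through to the six-byte 0x0F 0x80|cc disp32 form.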

void Assembler::jccb(Condition cc, Label& L) {
  if (L.is_bound()) {
    const int short_size = 2;
    address entry = target(L);
#ifdef ASSERT
    intptr_t dist = (intptr_t)entry - ((intptr_t)pc() + short_size);
    intptr_t delta = short_branch_delta();
    if (delta != 0) {
      dist += (dist < 0 ? (-delta) :delta);
    }
    assert(is8bit(dist), "Displacement too large for a short jmp");
#endif
    intptr_t offs = (intptr_t)entry - (intptr_t)pc();
    // 0111 tttn #8-bit disp
    emit_int8(0x70 | cc);
    emit_int8((offs - short_size) & 0xFF);
  } else {
    InstructionMark im(this);
    L.add_patch_at(code(), locator());
    emit_int8(0x70 | cc);
    emit_int8(0);
  }
}

void Assembler::jmp(Address adr) {
  InstructionMark im(this);
  prefix(adr);
  emit_int8((unsigned char)0xFF);
  emit_operand(rsp, adr);
}

void Assembler::jmp(Label& L, bool maybe_short) {
  if (L.is_bound()) {
    address entry = target(L);
    assert(entry != NULL, "jmp most probably wrong");
    InstructionMark im(this);
    const int short_size = 2;
    const int long_size = 5;
    intptr_t offs = entry - pc();
    if (maybe_short && is8bit(offs - short_size)) {
      emit_int8((unsigned char)0xEB);
      emit_int8((offs - short_size) & 0xFF);
1560
    } else {
1561
      emit_int8((unsigned char)0xE9);
1562
      emit_int32(offs - long_size);
1563 1564 1565 1566 1567 1568 1569 1570
    }
  } else {
    // By default, forward jumps are always 32-bit displacements, since
    // we can't yet know where the label will be bound.  If you're sure that
    // the forward jump will not run beyond 256 bytes, use jmpb to
    // force an 8-bit displacement.
    InstructionMark im(this);
    L.add_patch_at(code(), locator());
1571
    emit_int8((unsigned char)0xE9);
1572
    emit_int32(0);
1573
  }
D
duke 已提交
1574 1575
}

void Assembler::jmp(Register entry) {
  int encode = prefix_and_encode(entry->encoding());
  emit_int8((unsigned char)0xFF);
  emit_int8((unsigned char)(0xE0 | encode));
}

void Assembler::jmp_literal(address dest, RelocationHolder const& rspec) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xE9);
  assert(dest != NULL, "must have a target");
  intptr_t disp = dest - (pc() + sizeof(int32_t));
  assert(is_simm32(disp), "must be 32bit offset (jmp)");
  emit_data(disp, rspec.reloc(), call32_operand);
}

void Assembler::jmpb(Label& L) {
  if (L.is_bound()) {
    const int short_size = 2;
    address entry = target(L);
    assert(entry != NULL, "jmp most probably wrong");
#ifdef ASSERT
    intptr_t dist = (intptr_t)entry - ((intptr_t)pc() + short_size);
    intptr_t delta = short_branch_delta();
    if (delta != 0) {
      dist += (dist < 0 ? (-delta) :delta);
    }
    assert(is8bit(dist), "Displacement too large for a short jmp");
#endif
    intptr_t offs = entry - pc();
    emit_int8((unsigned char)0xEB);
    emit_int8((offs - short_size) & 0xFF);
  } else {
    InstructionMark im(this);
    L.add_patch_at(code(), locator());
    emit_int8((unsigned char)0xEB);
    emit_int8(0);
  }
}

void Assembler::ldmxcsr( Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  prefix(src);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAE);
  emit_operand(as_Register(2), src);
}

void Assembler::leal(Register dst, Address src) {
  InstructionMark im(this);
#ifdef _LP64
  emit_int8(0x67); // addr32
  prefix(src, dst);
#endif // LP64
  emit_int8((unsigned char)0x8D);
  emit_operand(dst, src);
}

void Assembler::lfence() {
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAE);
  emit_int8((unsigned char)0xE8);
}

void Assembler::lock() {
  emit_int8((unsigned char)0xF0);
}

void Assembler::lzcntl(Register dst, Register src) {
  assert(VM_Version::supports_lzcnt(), "encoding is treated as BSR");
  emit_int8((unsigned char)0xF3);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBD);
  emit_int8((unsigned char)(0xC0 | encode));
}
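// Encoding note: lzcntl emits F3 0F BD /r. On hardware without LZCNT the
// mandatory F3 prefix is ignored and the bytes decode as BSR, which returns
// a bit index rather than a leading-zero count - hence the assert above.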

// Emit mfence instruction
void Assembler::mfence() {
  NOT_LP64(assert(VM_Version::supports_sse2(), "unsupported");)
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAE);
  emit_int8((unsigned char)0xF0);
}

void Assembler::mov(Register dst, Register src) {
  LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
}

void Assembler::movapd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith_nonds(0x28, dst, src, VEX_SIMD_66);
}

void Assembler::movaps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith_nonds(0x28, dst, src, VEX_SIMD_NONE);
}

void Assembler::movlhps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  int encode = simd_prefix_and_encode(dst, src, src, VEX_SIMD_NONE);
  emit_int8(0x16);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::movb(Register dst, Address src) {
  NOT_LP64(assert(dst->has_byte_register(), "must have byte register"));
  InstructionMark im(this);
  prefix(src, dst, true);
  emit_int8((unsigned char)0x8A);
  emit_operand(dst, src);
}


void Assembler::movb(Address dst, int imm8) {
  InstructionMark im(this);
   prefix(dst);
  emit_int8((unsigned char)0xC6);
  emit_operand(rax, dst, 1);
  emit_int8(imm8);
}


void Assembler::movb(Address dst, Register src) {
  assert(src->has_byte_register(), "must have byte register");
  InstructionMark im(this);
  prefix(dst, src, true);
  emit_int8((unsigned char)0x88);
  emit_operand(src, dst);
}

void Assembler::movdl(XMMRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  int encode = simd_prefix_and_encode(dst, src, VEX_SIMD_66);
  emit_int8(0x6E);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::movdl(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  // swap src/dst to get correct prefix
  int encode = simd_prefix_and_encode(src, dst, VEX_SIMD_66);
  emit_int8(0x7E);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::movdl(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  simd_prefix(dst, src, VEX_SIMD_66);
  emit_int8(0x6E);
  emit_operand(dst, src);
}

void Assembler::movdl(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  simd_prefix(dst, src, VEX_SIMD_66);
  emit_int8(0x7E);
  emit_operand(src, dst);
}

void Assembler::movdqa(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith_nonds(0x6F, dst, src, VEX_SIMD_66);
}

void Assembler::movdqa(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith_nonds(0x6F, dst, src, VEX_SIMD_66);
}

void Assembler::movdqu(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith_nonds(0x6F, dst, src, VEX_SIMD_F3);
}

void Assembler::movdqu(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith_nonds(0x6F, dst, src, VEX_SIMD_F3);
}

void Assembler::movdqu(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  simd_prefix(dst, src, VEX_SIMD_F3);
  emit_int8(0x7F);
  emit_operand(src, dst);
}

// Move Unaligned 256bit Vector
void Assembler::vmovdqu(XMMRegister dst, XMMRegister src) {
  assert(UseAVX > 0, "");
  bool vector256 = true;
  int encode = vex_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F3, vector256);
  emit_int8(0x6F);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::vmovdqu(XMMRegister dst, Address src) {
  assert(UseAVX > 0, "");
  InstructionMark im(this);
  bool vector256 = true;
  vex_prefix(dst, xnoreg, src, VEX_SIMD_F3, vector256);
  emit_int8(0x6F);
  emit_operand(dst, src);
}

void Assembler::vmovdqu(Address dst, XMMRegister src) {
  assert(UseAVX > 0, "");
  InstructionMark im(this);
  bool vector256 = true;
  // swap src<->dst for encoding
  assert(src != xnoreg, "sanity");
  vex_prefix(src, xnoreg, dst, VEX_SIMD_F3, vector256);
  emit_int8(0x7F);
  emit_operand(src, dst);
}

// Uses zero extension on 64bit

void Assembler::movl(Register dst, int32_t imm32) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)(0xB8 | encode));
  emit_int32(imm32);
}

void Assembler::movl(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8((unsigned char)0x8B);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::movl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8((unsigned char)0x8B);
  emit_operand(dst, src);
}

void Assembler::movl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0xC7);
  emit_operand(rax, dst, 4);
  emit_int32(imm32);
}

void Assembler::movl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_int8((unsigned char)0x89);
  emit_operand(src, dst);
}

// Newer CPUs require the use of movsd and movss to avoid a partial register stall
// when loading from memory. But for old Opterons, use movlpd instead of movsd.
// The selection is done in MacroAssembler::movdbl() and movflt().
void Assembler::movlpd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x12, dst, src, VEX_SIMD_66);
}
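// Illustrative selection sketch only (the real policy lives in
// MacroAssembler::movdbl()/movflt(); this merely restates the comment above):
//
//   if (/* running on an old Opteron */) {
//     movlpd(xmm0, src);   // 66 0F 12 - load low 64 bits
//   } else {
//     movsd(xmm0, src);    // F2 0F 10 - preferred on newer CPUs
//   }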

void Assembler::movq( MMXRegister dst, Address src ) {
  assert( VM_Version::supports_mmx(), "" );
  emit_int8(0x0F);
  emit_int8(0x6F);
  emit_operand(dst, src);
}

void Assembler::movq( Address dst, MMXRegister src ) {
  assert( VM_Version::supports_mmx(), "" );
  emit_int8(0x0F);
  emit_int8(0x7F);
  // workaround gcc (3.2.1-7a) bug
  // In that version of gcc, with only an emit_operand(MMX, Address) available,
  // gcc will tail jump and try to reverse the parameters, completely
  // obliterating dst in the process. By having a version available
  // that doesn't need to swap the args at the tail jump the bug is
  // avoided.
  emit_operand(dst, src);
}

void Assembler::movq(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  simd_prefix(dst, src, VEX_SIMD_F3);
  emit_int8(0x7E);
  emit_operand(dst, src);
}

void Assembler::movq(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  simd_prefix(dst, src, VEX_SIMD_66);
  emit_int8((unsigned char)0xD6);
  emit_operand(src, dst);
}

void Assembler::movsbl(Register dst, Address src) { // movsxb
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBE);
  emit_operand(dst, src);
}

void Assembler::movsbl(Register dst, Register src) { // movsxb
  NOT_LP64(assert(src->has_byte_register(), "must have byte register"));
  int encode = prefix_and_encode(dst->encoding(), src->encoding(), true);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBE);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::movsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x10, dst, src, VEX_SIMD_F2);
}

void Assembler::movsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith_nonds(0x10, dst, src, VEX_SIMD_F2);
}

void Assembler::movsd(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  simd_prefix(dst, src, VEX_SIMD_F2);
  emit_int8(0x11);
  emit_operand(src, dst);
}

void Assembler::movss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x10, dst, src, VEX_SIMD_F3);
}

void Assembler::movss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith_nonds(0x10, dst, src, VEX_SIMD_F3);
}

void Assembler::movss(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  simd_prefix(dst, src, VEX_SIMD_F3);
  emit_int8(0x11);
  emit_operand(src, dst);
}

void Assembler::movswl(Register dst, Address src) { // movsxw
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBF);
  emit_operand(dst, src);
}

void Assembler::movswl(Register dst, Register src) { // movsxw
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBF);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::movw(Address dst, int imm16) {
  InstructionMark im(this);

  emit_int8(0x66); // switch to 16-bit mode
  prefix(dst);
  emit_int8((unsigned char)0xC7);
  emit_operand(rax, dst, 2);
  emit_int16(imm16);
}

void Assembler::movw(Register dst, Address src) {
  InstructionMark im(this);
  emit_int8(0x66);
  prefix(src, dst);
  emit_int8((unsigned char)0x8B);
  emit_operand(dst, src);
}

void Assembler::movw(Address dst, Register src) {
  InstructionMark im(this);
  emit_int8(0x66);
  prefix(dst, src);
  emit_int8((unsigned char)0x89);
  emit_operand(src, dst);
}

void Assembler::movzbl(Register dst, Address src) { // movzxb
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xB6);
  emit_operand(dst, src);
}

void Assembler::movzbl(Register dst, Register src) { // movzxb
  NOT_LP64(assert(src->has_byte_register(), "must have byte register"));
  int encode = prefix_and_encode(dst->encoding(), src->encoding(), true);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xB6);
  emit_int8(0xC0 | encode);
}

void Assembler::movzwl(Register dst, Address src) { // movzxw
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xB7);
  emit_operand(dst, src);
}

void Assembler::movzwl(Register dst, Register src) { // movzxw
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xB7);
  emit_int8(0xC0 | encode);
}

void Assembler::mull(Address src) {
  InstructionMark im(this);
  prefix(src);
  emit_int8((unsigned char)0xF7);
  emit_operand(rsp, src);
}

void Assembler::mull(Register src) {
  int encode = prefix_and_encode(src->encoding());
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xE0 | encode));
}

void Assembler::mulsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x59, dst, src, VEX_SIMD_F2);
}

void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x59, dst, src, VEX_SIMD_F2);
}

void Assembler::mulss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x59, dst, src, VEX_SIMD_F3);
}

void Assembler::mulss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x59, dst, src, VEX_SIMD_F3);
}

void Assembler::negl(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xD8 | encode));
}

void Assembler::nop(int i) {
#ifdef ASSERT
  assert(i > 0, " ");
  // The fancy nops aren't currently recognized by debuggers, making it a
  // pain to disassemble code while debugging. If asserts are on, speed is
  // clearly not an issue, so simply use the single-byte traditional nop
  // to do alignment.

  for (; i > 0 ; i--) emit_int8((unsigned char)0x90);
  return;

#endif // ASSERT

  if (UseAddressNop && VM_Version::is_intel()) {
    //
    // Using multi-bytes nops "0x0F 0x1F [address]" for Intel
    //  1: 0x90
    //  2: 0x66 0x90
    //  3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding)
    //  4: 0x0F 0x1F 0x40 0x00
    //  5: 0x0F 0x1F 0x44 0x00 0x00
    //  6: 0x66 0x0F 0x1F 0x44 0x00 0x00
    //  7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
    //  8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    //  9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00

    // The remaining encoding is Intel specific - don't use consecutive address nops

    // 12: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
    // 13: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
    // 14: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
    // 15: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90

    while(i >= 15) {
      // For Intel don't generate consecutive address nops (mix with regular nops)
      i -= 15;
      emit_int8(0x66);   // size prefix
      emit_int8(0x66);   // size prefix
      emit_int8(0x66);   // size prefix
      addr_nop_8();
      emit_int8(0x66);   // size prefix
      emit_int8(0x66);   // size prefix
      emit_int8(0x66);   // size prefix
      emit_int8((unsigned char)0x90);
                         // nop
    }
    switch (i) {
      case 14:
        emit_int8(0x66); // size prefix
      case 13:
        emit_int8(0x66); // size prefix
      case 12:
        addr_nop_8();
        emit_int8(0x66); // size prefix
        emit_int8(0x66); // size prefix
        emit_int8(0x66); // size prefix
        emit_int8((unsigned char)0x90);
                         // nop
        break;
      case 11:
        emit_int8(0x66); // size prefix
      case 10:
        emit_int8(0x66); // size prefix
      case 9:
        emit_int8(0x66); // size prefix
      case 8:
        addr_nop_8();
        break;
      case 7:
        addr_nop_7();
        break;
      case 6:
        emit_int8(0x66); // size prefix
      case 5:
        addr_nop_5();
        break;
      case 4:
        addr_nop_4();
        break;
      case 3:
        // Don't use "0x0F 0x1F 0x00" - need patching safe padding
        emit_int8(0x66); // size prefix
      case 2:
        emit_int8(0x66); // size prefix
      case 1:
        emit_int8((unsigned char)0x90);
                         // nop
        break;
      default:
        assert(i == 0, " ");
    }
    return;
  }
  if (UseAddressNop && VM_Version::is_amd()) {
    //
    // Using multi-bytes nops "0x0F 0x1F [address]" for AMD.
    //  1: 0x90
    //  2: 0x66 0x90
    //  3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding)
    //  4: 0x0F 0x1F 0x40 0x00
    //  5: 0x0F 0x1F 0x44 0x00 0x00
    //  6: 0x66 0x0F 0x1F 0x44 0x00 0x00
    //  7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
    //  8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    //  9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00

    // The remaining encoding is AMD specific - use consecutive address nops

    // 12: 0x66 0x0F 0x1F 0x44 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00
    // 13: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00
    // 14: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
    // 15: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
    // 16: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    //     Size prefixes (0x66) are added for larger sizes

    while(i >= 22) {
      i -= 11;
      emit_int8(0x66); // size prefix
      emit_int8(0x66); // size prefix
      emit_int8(0x66); // size prefix
      addr_nop_8();
    }
    // Generate first nop for size between 21-12
    switch (i) {
      case 21:
        i -= 1;
        emit_int8(0x66); // size prefix
      case 20:
      case 19:
        i -= 1;
        emit_int8(0x66); // size prefix
      case 18:
      case 17:
        i -= 1;
        emit_int8(0x66); // size prefix
      case 16:
      case 15:
        i -= 8;
        addr_nop_8();
        break;
      case 14:
      case 13:
        i -= 7;
        addr_nop_7();
        break;
      case 12:
        i -= 6;
        emit_int8(0x66); // size prefix
        addr_nop_5();
        break;
      default:
        assert(i < 12, " ");
    }

    // Generate second nop for size between 11-1
    switch (i) {
      case 11:
        emit_int8(0x66); // size prefix
      case 10:
        emit_int8(0x66); // size prefix
      case 9:
        emit_int8(0x66); // size prefix
      case 8:
        addr_nop_8();
        break;
      case 7:
        addr_nop_7();
        break;
      case 6:
        emit_int8(0x66); // size prefix
      case 5:
        addr_nop_5();
        break;
      case 4:
        addr_nop_4();
        break;
      case 3:
        // Don't use "0x0F 0x1F 0x00" - need patching safe padding
        emit_int8(0x66); // size prefix
      case 2:
        emit_int8(0x66); // size prefix
      case 1:
        emit_int8((unsigned char)0x90);
                         // nop
        break;
      default:
        assert(i == 0, " ");
    }
    return;
  }

  // Using nops with size prefixes "0x66 0x90".
  // From AMD Optimization Guide:
  //  1: 0x90
  //  2: 0x66 0x90
  //  3: 0x66 0x66 0x90
  //  4: 0x66 0x66 0x66 0x90
  //  5: 0x66 0x66 0x90 0x66 0x90
  //  6: 0x66 0x66 0x90 0x66 0x66 0x90
  //  7: 0x66 0x66 0x66 0x90 0x66 0x66 0x90
  //  8: 0x66 0x66 0x66 0x90 0x66 0x66 0x66 0x90
  //  9: 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90
  // 10: 0x66 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90
  //
  while(i > 12) {
    i -= 4;
    emit_int8(0x66); // size prefix
    emit_int8(0x66);
    emit_int8(0x66);
    emit_int8((unsigned char)0x90);
                     // nop
  }
  // 1 - 12 nops
  if(i > 8) {
    if(i > 9) {
      i -= 1;
      emit_int8(0x66);
    }
    i -= 3;
    emit_int8(0x66);
    emit_int8(0x66);
    emit_int8((unsigned char)0x90);
  }
  // 1 - 8 nops
  if(i > 4) {
    if(i > 6) {
      i -= 1;
      emit_int8(0x66);
    }
    i -= 3;
    emit_int8(0x66);
    emit_int8(0x66);
    emit_int8((unsigned char)0x90);
  }
  switch (i) {
    case 4:
      emit_int8(0x66);
    case 3:
      emit_int8(0x66);
    case 2:
      emit_int8(0x66);
    case 1:
      emit_int8((unsigned char)0x90);
      break;
    default:
      assert(i == 0, " ");
  }
}
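// Example (taken from the tables above): with UseAddressNop on an Intel CPU,
// nop(5) emits the single 5-byte address nop 0x0F 0x1F 0x44 0x00 0x00
// instead of five 0x90 bytes.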

void Assembler::notl(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xD0 | encode));
}

void Assembler::orl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand(0x81, rcx, dst, imm32);
}

void Assembler::orl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xC8, dst, imm32);
}

void Assembler::orl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x0B);
  emit_operand(dst, src);
}

void Assembler::orl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x0B, 0xC0, dst, src);
}

void Assembler::orl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_int8(0x09);
  emit_operand(src, dst);
}

void Assembler::packuswb(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
  emit_simd_arith(0x67, dst, src, VEX_SIMD_66);
}

void Assembler::packuswb(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x67, dst, src, VEX_SIMD_66);
}

void Assembler::vpackuswb(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0x67, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpermq(XMMRegister dst, XMMRegister src, int imm8, bool vector256) {
  assert(VM_Version::supports_avx2(), "");
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, true, vector256);
  emit_int8(0x00);
  emit_int8(0xC0 | encode);
  emit_int8(imm8);
}

void Assembler::pause() {
  emit_int8((unsigned char)0xF3);
  emit_int8((unsigned char)0x90);
}

void Assembler::pcmpestri(XMMRegister dst, Address src, int imm8) {
  assert(VM_Version::supports_sse4_2(), "");
  InstructionMark im(this);
  simd_prefix(dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A);
  emit_int8(0x61);
  emit_operand(dst, src);
  emit_int8(imm8);
}

void Assembler::pcmpestri(XMMRegister dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sse4_2(), "");
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_3A);
  emit_int8(0x61);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
}

void Assembler::pextrd(Register dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, false);
  emit_int8(0x16);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
}

void Assembler::pextrq(Register dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, true);
  emit_int8(0x16);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
}

void Assembler::pinsrd(XMMRegister dst, Register src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, false);
  emit_int8(0x22);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
}

void Assembler::pinsrq(XMMRegister dst, Register src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, true);
  emit_int8(0x22);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
}

void Assembler::pmovzxbw(XMMRegister dst, Address src) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionMark im(this);
  simd_prefix(dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  emit_int8(0x30);
  emit_operand(dst, src);
}

void Assembler::pmovzxbw(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse4_1(), "");
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  emit_int8(0x30);
  emit_int8((unsigned char)(0xC0 | encode));
}

// generic
void Assembler::pop(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8(0x58 | encode);
}

void Assembler::popcntl(Register dst, Address src) {
  assert(VM_Version::supports_popcnt(), "must support");
  InstructionMark im(this);
  emit_int8((unsigned char)0xF3);
  prefix(src, dst);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xB8);
  emit_operand(dst, src);
}

void Assembler::popcntl(Register dst, Register src) {
  assert(VM_Version::supports_popcnt(), "must support");
  emit_int8((unsigned char)0xF3);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xB8);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::popf() {
  emit_int8((unsigned char)0x9D);
}

#ifndef _LP64 // no 32bit push/pop on amd64
void Assembler::popl(Address dst) {
  // NOTE: this will adjust stack by 8byte on 64bits
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0x8F);
  emit_operand(rax, dst);
}
#endif

void Assembler::prefetch_prefix(Address src) {
  prefix(src);
  emit_int8(0x0F);
}

void Assembler::prefetchnta(Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
  InstructionMark im(this);
  prefetch_prefix(src);
  emit_int8(0x18);
  emit_operand(rax, src); // 0, src
}

void Assembler::prefetchr(Address src) {
  assert(VM_Version::supports_3dnow_prefetch(), "must support");
  InstructionMark im(this);
  prefetch_prefix(src);
  emit_int8(0x0D);
  emit_operand(rax, src); // 0, src
}

void Assembler::prefetcht0(Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
  InstructionMark im(this);
  prefetch_prefix(src);
  emit_int8(0x18);
  emit_operand(rcx, src); // 1, src
}

void Assembler::prefetcht1(Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
  InstructionMark im(this);
  prefetch_prefix(src);
  emit_int8(0x18);
  emit_operand(rdx, src); // 2, src
}

void Assembler::prefetcht2(Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
  InstructionMark im(this);
  prefetch_prefix(src);
  emit_int8(0x18);
  emit_operand(rbx, src); // 3, src
}

void Assembler::prefetchw(Address src) {
  assert(VM_Version::supports_3dnow_prefetch(), "must support");
  InstructionMark im(this);
  prefetch_prefix(src);
  emit_int8(0x0D);
  emit_operand(rcx, src); // 1, src
}

void Assembler::prefix(Prefix p) {
  emit_int8(p);
}

void Assembler::pshufb(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_ssse3(), "");
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  emit_int8(0x00);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::pshufb(XMMRegister dst, Address src) {
  assert(VM_Version::supports_ssse3(), "");
  InstructionMark im(this);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  emit_int8(0x00);
  emit_operand(dst, src);
}

void Assembler::pshufd(XMMRegister dst, XMMRegister src, int mode) {
  assert(isByte(mode), "invalid value");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith_nonds(0x70, dst, src, VEX_SIMD_66);
  emit_int8(mode & 0xFF);

}

void Assembler::pshufd(XMMRegister dst, Address src, int mode) {
  assert(isByte(mode), "invalid value");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
  InstructionMark im(this);
  simd_prefix(dst, src, VEX_SIMD_66);
  emit_int8(0x70);
  emit_operand(dst, src);
  emit_int8(mode & 0xFF);
}

void Assembler::pshuflw(XMMRegister dst, XMMRegister src, int mode) {
  assert(isByte(mode), "invalid value");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith_nonds(0x70, dst, src, VEX_SIMD_F2);
  emit_int8(mode & 0xFF);
}

void Assembler::pshuflw(XMMRegister dst, Address src, int mode) {
  assert(isByte(mode), "invalid value");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
  InstructionMark im(this);
  simd_prefix(dst, src, VEX_SIMD_F2);
  emit_int8(0x70);
  emit_operand(dst, src);
  emit_int8(mode & 0xFF);
}

void Assembler::psrldq(XMMRegister dst, int shift) {
  // Shift right 128 bit value in xmm register by number of bytes.
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  int encode = simd_prefix_and_encode(xmm3, dst, dst, VEX_SIMD_66);
  emit_int8(0x73);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift);
}

void Assembler::pslldq(XMMRegister dst, int shift) {
  // Shift left 128 bit value in xmm register by number of bytes.
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  int encode = simd_prefix_and_encode(xmm7, dst, dst, VEX_SIMD_66);
  emit_int8(0x73);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift);
}

void Assembler::ptest(XMMRegister dst, Address src) {
  assert(VM_Version::supports_sse4_1(), "");
  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
  InstructionMark im(this);
  simd_prefix(dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  emit_int8(0x17);
  emit_operand(dst, src);
}

void Assembler::ptest(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse4_1(), "");
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  emit_int8(0x17);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::vptest(XMMRegister dst, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  bool vector256 = true;
  assert(dst != xnoreg, "sanity");
  int dst_enc = dst->encoding();
  // swap src<->dst for encoding
  vex_prefix(src, 0, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, false, vector256);
  emit_int8(0x17);
  emit_operand(dst, src);
}

void Assembler::vptest(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  bool vector256 = true;
  int encode = vex_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, vector256, VEX_OPCODE_0F_38);
  emit_int8(0x17);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::punpcklbw(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
  emit_simd_arith(0x60, dst, src, VEX_SIMD_66);
}

void Assembler::punpcklbw(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x60, dst, src, VEX_SIMD_66);
}

void Assembler::punpckldq(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
  emit_simd_arith(0x62, dst, src, VEX_SIMD_66);
}

void Assembler::punpckldq(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x62, dst, src, VEX_SIMD_66);
}

void Assembler::punpcklqdq(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x6C, dst, src, VEX_SIMD_66);
}

void Assembler::push(int32_t imm32) {
  // in 64bits we push 64bits onto the stack but only
  // take a 32bit immediate
  emit_int8(0x68);
  emit_int32(imm32);
}

void Assembler::push(Register src) {
  int encode = prefix_and_encode(src->encoding());

  emit_int8(0x50 | encode);
}

void Assembler::pushf() {
  emit_int8((unsigned char)0x9C);
}

#ifndef _LP64 // no 32bit push/pop on amd64
void Assembler::pushl(Address src) {
  // Note this will push 64bit on 64bit
  InstructionMark im(this);
  prefix(src);
  emit_int8((unsigned char)0xFF);
  emit_operand(rsi, src);
}
#endif

void Assembler::rcll(Register dst, int imm8) {
  assert(isShiftCount(imm8), "illegal shift count");
  int encode = prefix_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_int8((unsigned char)0xD1);
    emit_int8((unsigned char)(0xD0 | encode));
  } else {
    emit_int8((unsigned char)0xC1);
    emit_int8((unsigned char)0xD0 | encode);
    emit_int8(imm8);
  }
}

void Assembler::rdtsc() {
  emit_int8((unsigned char)0x0F);
  emit_int8((unsigned char)0x31);
}

// copies data from [esi] to [edi] using rcx pointer sized words
// generic
void Assembler::rep_mov() {
  emit_int8((unsigned char)0xF3);
  // MOVSQ
  LP64_ONLY(prefix(REX_W));
  emit_int8((unsigned char)0xA5);
}

// sets rcx bytes with rax, value at [edi]
void Assembler::rep_stosb() {
  emit_int8((unsigned char)0xF3); // REP
  LP64_ONLY(prefix(REX_W));
  emit_int8((unsigned char)0xAA); // STOSB
}

// sets rcx pointer sized words with rax, value at [edi]
// generic
void Assembler::rep_stos() {
  emit_int8((unsigned char)0xF3); // REP
  LP64_ONLY(prefix(REX_W));       // LP64:STOSQ, LP32:STOSD
  emit_int8((unsigned char)0xAB);
}
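// For reference, the byte sequences emitted above are:
//   rep_stosb: F3 (REX.W) AA   - REP STOSB
//   rep_stos:  F3 (REX.W) AB   - REP STOSQ on LP64; without REX.W it is REP STOSD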

// scans rcx pointer sized words at [edi] for occurrence of rax,
// generic
void Assembler::repne_scan() { // repne_scan
  emit_int8((unsigned char)0xF2);
  // SCASQ
  LP64_ONLY(prefix(REX_W));
  emit_int8((unsigned char)0xAF);
}

#ifdef _LP64
// scans rcx 4 byte words at [edi] for occurrence of rax,
// generic
void Assembler::repne_scanl() { // repne_scan
  emit_int8((unsigned char)0xF2);
  // SCASL
  emit_int8((unsigned char)0xAF);
}
#endif

void Assembler::ret(int imm16) {
  if (imm16 == 0) {
    emit_int8((unsigned char)0xC3);
  } else {
    emit_int8((unsigned char)0xC2);
    emit_int16(imm16);
  }
}

void Assembler::sahf() {
#ifdef _LP64
  // Not supported in 64bit mode
  ShouldNotReachHere();
#endif
  emit_int8((unsigned char)0x9E);
}

void Assembler::sarl(Register dst, int imm8) {
  int encode = prefix_and_encode(dst->encoding());
  assert(isShiftCount(imm8), "illegal shift count");
  if (imm8 == 1) {
    emit_int8((unsigned char)0xD1);
    emit_int8((unsigned char)(0xF8 | encode));
  } else {
    emit_int8((unsigned char)0xC1);
    emit_int8((unsigned char)(0xF8 | encode));
    emit_int8(imm8);
  }
}

void Assembler::sarl(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xD3);
  emit_int8((unsigned char)(0xF8 | encode));
}

void Assembler::sbbl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand(0x81, rbx, dst, imm32);
}

void Assembler::sbbl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xD8, dst, imm32);
}


void Assembler::sbbl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x1B);
  emit_operand(dst, src);
}

void Assembler::sbbl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x1B, 0xC0, dst, src);
}

void Assembler::setb(Condition cc, Register dst) {
  assert(0 <= cc && cc < 16, "illegal cc");
  int encode = prefix_and_encode(dst->encoding(), true);
  emit_int8(0x0F);
  emit_int8((unsigned char)0x90 | cc);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::shll(Register dst, int imm8) {
  assert(isShiftCount(imm8), "illegal shift count");
  int encode = prefix_and_encode(dst->encoding());
  if (imm8 == 1 ) {
    emit_int8((unsigned char)0xD1);
    emit_int8((unsigned char)(0xE0 | encode));
  } else {
    emit_int8((unsigned char)0xC1);
    emit_int8((unsigned char)(0xE0 | encode));
    emit_int8(imm8);
  }
}

void Assembler::shll(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xD3);
  emit_int8((unsigned char)(0xE0 | encode));
}

void Assembler::shrl(Register dst, int imm8) {
  assert(isShiftCount(imm8), "illegal shift count");
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xC1);
  emit_int8((unsigned char)(0xE8 | encode));
  emit_int8(imm8);
}

void Assembler::shrl(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xD3);
  emit_int8((unsigned char)(0xE8 | encode));
}

// copies a single word from [esi] to [edi]
void Assembler::smovl() {
  emit_int8((unsigned char)0xA5);
}

void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x51, dst, src, VEX_SIMD_F2);
}

void Assembler::sqrtsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x51, dst, src, VEX_SIMD_F2);
}

void Assembler::sqrtss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x51, dst, src, VEX_SIMD_F3);
}

void Assembler::std() {
  emit_int8((unsigned char)0xFD);
}

void Assembler::sqrtss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x51, dst, src, VEX_SIMD_F3);
}

void Assembler::stmxcsr( Address dst) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  prefix(dst);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAE);
  emit_operand(as_Register(3), dst);
}

void Assembler::subl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand(0x81, rbp, dst, imm32);
}

void Assembler::subl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_int8(0x29);
  emit_operand(src, dst);
}

void Assembler::subl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xE8, dst, imm32);
}

// Force generation of a 4 byte immediate value even if it fits into 8bit
void Assembler::subl_imm32(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith_imm32(0x81, 0xE8, dst, imm32);
}

void Assembler::subl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x2B);
  emit_operand(dst, src);
}

void Assembler::subl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x2B, 0xC0, dst, src);
}

void Assembler::subsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x5C, dst, src, VEX_SIMD_F2);
}

void Assembler::subsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x5C, dst, src, VEX_SIMD_F2);
}

void Assembler::subss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x5C, dst, src, VEX_SIMD_F3);
}

void Assembler::subss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x5C, dst, src, VEX_SIMD_F3);
}

void Assembler::testb(Register dst, int imm8) {
  NOT_LP64(assert(dst->has_byte_register(), "must have byte register"));
  (void) prefix_and_encode(dst->encoding(), true);
  emit_arith_b(0xF6, 0xC0, dst, imm8);
}

void Assembler::testl(Register dst, int32_t imm32) {
  // not using emit_arith because test
  // doesn't support sign-extension of
  // 8bit operands
  int encode = dst->encoding();
  if (encode == 0) {
    emit_int8((unsigned char)0xA9);
  } else {
    encode = prefix_and_encode(encode);
    emit_int8((unsigned char)0xF7);
    emit_int8((unsigned char)(0xC0 | encode));
  }
  emit_int32(imm32);
}
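// Illustrative encoding (derived from the code above): testl(rax, 0x100)
// uses the short rax form and emits A9 00 01 00 00; for any other register
// it emits F7 /0 followed by the 32-bit immediate.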

void Assembler::testl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x85, 0xC0, dst, src);
}

void Assembler::testl(Register dst, Address  src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8((unsigned char)0x85);
  emit_operand(dst, src);
}

void Assembler::tzcntl(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "tzcnt instruction not supported");
  emit_int8((unsigned char)0xF3);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBC);
  emit_int8((unsigned char)0xC0 | encode);
}

void Assembler::tzcntq(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "tzcnt instruction not supported");
  emit_int8((unsigned char)0xF3);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBC);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::ucomisd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith_nonds(0x2E, dst, src, VEX_SIMD_66);
}

void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith_nonds(0x2E, dst, src, VEX_SIMD_66);
}

void Assembler::ucomiss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith_nonds(0x2E, dst, src, VEX_SIMD_NONE);
}

void Assembler::ucomiss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith_nonds(0x2E, dst, src, VEX_SIMD_NONE);
}

void Assembler::xabort(int8_t imm8) {
  emit_int8((unsigned char)0xC6);
  emit_int8((unsigned char)0xF8);
  emit_int8((unsigned char)(imm8 & 0xFF));
}

void Assembler::xaddl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xC1);
  emit_operand(src, dst);
}

void Assembler::xbegin(Label& abort, relocInfo::relocType rtype) {
  InstructionMark im(this);
  relocate(rtype);
  if (abort.is_bound()) {
    address entry = target(abort);
    assert(entry != NULL, "abort entry NULL");
    intptr_t offset = entry - pc();
    emit_int8((unsigned char)0xC7);
    emit_int8((unsigned char)0xF8);
    emit_int32(offset - 6); // 2 opcode + 4 address
  } else {
    abort.add_patch_at(code(), locator());
    emit_int8((unsigned char)0xC7);
    emit_int8((unsigned char)0xF8);
    emit_int32(0);
  }
}

void Assembler::xchgl(Register dst, Address src) { // xchg
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8((unsigned char)0x87);
  emit_operand(dst, src);
}

void Assembler::xchgl(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8((unsigned char)0x87);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::xend() {
  emit_int8((unsigned char)0x0F);
  emit_int8((unsigned char)0x01);
  emit_int8((unsigned char)0xD5);
}

void Assembler::xgetbv() {
  emit_int8(0x0F);
  emit_int8(0x01);
  emit_int8((unsigned char)0xD0);
}

void Assembler::xorl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xF0, dst, imm32);
}

void Assembler::xorl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x33);
  emit_operand(dst, src);
}

void Assembler::xorl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x33, 0xC0, dst, src);
}


// AVX 3-operand scalar floating-point arithmetic instructions

void Assembler::vaddsd(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_F2, /* vector256 */ false);
}

void Assembler::vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_F2, /* vector256 */ false);
}

void Assembler::vaddss(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_F3, /* vector256 */ false);
}

void Assembler::vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_F3, /* vector256 */ false);
}

void Assembler::vdivsd(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_F2, /* vector256 */ false);
}

void Assembler::vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_F2, /* vector256 */ false);
}

void Assembler::vdivss(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_F3, /* vector256 */ false);
}

void Assembler::vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_F3, /* vector256 */ false);
}

void Assembler::vmulsd(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_F2, /* vector256 */ false);
}

void Assembler::vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_F2, /* vector256 */ false);
}

void Assembler::vmulss(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_F3, /* vector256 */ false);
}

void Assembler::vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_F3, /* vector256 */ false);
}

void Assembler::vsubsd(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_F2, /* vector256 */ false);
}

void Assembler::vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_F2, /* vector256 */ false);
}

void Assembler::vsubss(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_F3, /* vector256 */ false);
}

void Assembler::vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_F3, /* vector256 */ false);
}

3161 3162 3163 3164 3165 3166 3167
//====================VECTOR ARITHMETIC=====================================

// Floating-point vector arithmetic

void Assembler::addpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x58, dst, src, VEX_SIMD_66);
}

void Assembler::addps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x58, dst, src, VEX_SIMD_NONE);
}

void Assembler::vaddpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vaddps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_NONE, vector256);
}

void Assembler::vaddpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vaddps(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_NONE, vector256);
}

void Assembler::subpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x5C, dst, src, VEX_SIMD_66);
}

void Assembler::subps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x5C, dst, src, VEX_SIMD_NONE);
}

void Assembler::vsubpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vsubps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_NONE, vector256);
}

void Assembler::vsubpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vsubps(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_NONE, vector256);
}

void Assembler::mulpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x59, dst, src, VEX_SIMD_66);
}

void Assembler::mulps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x59, dst, src, VEX_SIMD_NONE);
}

void Assembler::vmulpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vmulps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_NONE, vector256);
}

void Assembler::vmulpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vmulps(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_NONE, vector256);
}

void Assembler::divpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x5E, dst, src, VEX_SIMD_66);
}

void Assembler::divps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x5E, dst, src, VEX_SIMD_NONE);
}

void Assembler::vdivpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vdivps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_NONE, vector256);
}

void Assembler::vdivpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vdivps(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_NONE, vector256);
}

void Assembler::andpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x54, dst, src, VEX_SIMD_66);
}

void Assembler::andps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x54, dst, src, VEX_SIMD_NONE);
}

void Assembler::andps(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x54, dst, src, VEX_SIMD_NONE);
}

void Assembler::andpd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x54, dst, src, VEX_SIMD_66);
}

void Assembler::vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x54, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x54, dst, nds, src, VEX_SIMD_NONE, vector256);
}

void Assembler::vandpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x54, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vandps(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x54, dst, nds, src, VEX_SIMD_NONE, vector256);
}

void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x57, dst, src, VEX_SIMD_66);
}

void Assembler::xorps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x57, dst, src, VEX_SIMD_NONE);
}

void Assembler::xorpd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x57, dst, src, VEX_SIMD_66);
}

void Assembler::xorps(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x57, dst, src, VEX_SIMD_NONE);
}

void Assembler::vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x57, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x57, dst, nds, src, VEX_SIMD_NONE, vector256);
}

void Assembler::vxorpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x57, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vxorps(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x57, dst, nds, src, VEX_SIMD_NONE, vector256);
}


// Integer vector arithmetic
void Assembler::paddb(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xFC, dst, src, VEX_SIMD_66);
}

void Assembler::paddw(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xFD, dst, src, VEX_SIMD_66);
}

void Assembler::paddd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xFE, dst, src, VEX_SIMD_66);
}

void Assembler::paddq(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xD4, dst, src, VEX_SIMD_66);
}

void Assembler::vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xFC, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xFD, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xFE, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpaddq(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xD4, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpaddb(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xFC, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpaddw(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xFD, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpaddd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xFE, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpaddq(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xD4, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::psubb(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xF8, dst, src, VEX_SIMD_66);
}

void Assembler::psubw(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xF9, dst, src, VEX_SIMD_66);
}

void Assembler::psubd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xFA, dst, src, VEX_SIMD_66);
}

void Assembler::psubq(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xFB, dst, src, VEX_SIMD_66);
}

void Assembler::vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xF8, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xF9, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpsubd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xFA, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpsubq(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xFB, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpsubb(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xF8, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpsubw(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xF9, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpsubd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xFA, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpsubq(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xFB, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::pmullw(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xD5, dst, src, VEX_SIMD_66);
}

void Assembler::pmulld(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse4_1(), "");
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  emit_int8(0x40);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xD5, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, vector256, VEX_OPCODE_0F_38);
  emit_int8(0x40);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::vpmullw(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xD5, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpmulld(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
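  // Note: pmulld is encoded in the 0F 38 opcode map, so for the memory form the VEX prefix
  // and operand bytes are emitted explicitly here rather than through emit_vex_arith(),
  // which uses the default 0F map.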
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  InstructionMark im(this);
  int dst_enc = dst->encoding();
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  vex_prefix(src, nds_enc, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, false, vector256);
  emit_int8(0x40);
  emit_operand(dst, src);
}

// Shift packed integers left by specified number of bits.
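// The immediate forms below encode the shift count as an imm8 and put the opcode extension in
// the ModRM reg field (/6 for left shifts), which is why xmm6 is passed as the register operand.
// Rough example of the expected bytes: psllw(xmm1, 3) -> 66 0F 71 F1 03, i.e. 'psllw xmm1, 3'.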
void Assembler::psllw(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  // XMM6 is for /6 encoding: 66 0F 71 /6 ib
  int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66);
  emit_int8(0x71);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
}

void Assembler::pslld(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  // XMM6 is for /6 encoding: 66 0F 72 /6 ib
  int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66);
  emit_int8(0x72);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
}

void Assembler::psllq(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  // XMM6 is for /6 encoding: 66 0F 73 /6 ib
  int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66);
  emit_int8(0x73);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
}

void Assembler::psllw(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xF1, dst, shift, VEX_SIMD_66);
}

void Assembler::pslld(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xF2, dst, shift, VEX_SIMD_66);
}

void Assembler::psllq(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xF3, dst, shift, VEX_SIMD_66);
}

void Assembler::vpsllw(XMMRegister dst, XMMRegister src, int shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  // XMM6 is for /6 encoding: 66 0F 71 /6 ib
  emit_vex_arith(0x71, xmm6, dst, src, VEX_SIMD_66, vector256);
  emit_int8(shift & 0xFF);
}

void Assembler::vpslld(XMMRegister dst, XMMRegister src, int shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  // XMM6 is for /6 encoding: 66 0F 72 /6 ib
  emit_vex_arith(0x72, xmm6, dst, src, VEX_SIMD_66, vector256);
  emit_int8(shift & 0xFF);
}

void Assembler::vpsllq(XMMRegister dst, XMMRegister src, int shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  // XMM6 is for /6 encoding: 66 0F 73 /6 ib
  emit_vex_arith(0x73, xmm6, dst, src, VEX_SIMD_66, vector256);
  emit_int8(shift & 0xFF);
}

void Assembler::vpsllw(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xF1, dst, src, shift, VEX_SIMD_66, vector256);
}

void Assembler::vpslld(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xF2, dst, src, shift, VEX_SIMD_66, vector256);
}

void Assembler::vpsllq(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xF3, dst, src, shift, VEX_SIMD_66, vector256);
}

// Shift packed integers logically right by specified number of bits.
void Assembler::psrlw(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  // XMM2 is for /2 encoding: 66 0F 71 /2 ib
  int encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66);
  emit_int8(0x71);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
}

void Assembler::psrld(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  // XMM2 is for /2 encoding: 66 0F 72 /2 ib
  int encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66);
  emit_int8(0x72);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
}

void Assembler::psrlq(XMMRegister dst, int shift) {
  // Do not confuse it with the psrldq SSE2 instruction, which
  // shifts a 128-bit value in an xmm register by a number of bytes.
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  // XMM2 is for /2 encoding: 66 0F 73 /2 ib
  int encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66);
  emit_int8(0x73);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
}

void Assembler::psrlw(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xD1, dst, shift, VEX_SIMD_66);
}

void Assembler::psrld(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xD2, dst, shift, VEX_SIMD_66);
}

void Assembler::psrlq(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xD3, dst, shift, VEX_SIMD_66);
}

void Assembler::vpsrlw(XMMRegister dst, XMMRegister src, int shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  // XMM2 is for /2 encoding: 66 0F 71 /2 ib
  emit_vex_arith(0x71, xmm2, dst, src, VEX_SIMD_66, vector256);
  emit_int8(shift & 0xFF);
}

void Assembler::vpsrld(XMMRegister dst, XMMRegister src, int shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  // XMM2 is for /2 encoding: 66 0F 72 /2 ib
  emit_vex_arith(0x72, xmm2, dst, src, VEX_SIMD_66, vector256);
  emit_int8(shift & 0xFF);
}

void Assembler::vpsrlq(XMMRegister dst, XMMRegister src, int shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  // XMM2 is for /2 encoding: 66 0F 73 /2 ib
  emit_vex_arith(0x73, xmm2, dst, src, VEX_SIMD_66, vector256);
  emit_int8(shift & 0xFF);
}

void Assembler::vpsrlw(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xD1, dst, src, shift, VEX_SIMD_66, vector256);
}

void Assembler::vpsrld(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xD2, dst, src, shift, VEX_SIMD_66, vector256);
}

void Assembler::vpsrlq(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xD3, dst, src, shift, VEX_SIMD_66, vector256);
}

// Shift packed integers arithmetically right by specified number of bits.
void Assembler::psraw(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  // XMM4 is for /4 encoding: 66 0F 71 /4 ib
  int encode = simd_prefix_and_encode(xmm4, dst, dst, VEX_SIMD_66);
  emit_int8(0x71);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
}

void Assembler::psrad(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  // XMM4 is for /4 encoding: 66 0F 72 /4 ib
  int encode = simd_prefix_and_encode(xmm4, dst, dst, VEX_SIMD_66);
  emit_int8(0x72);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
}

void Assembler::psraw(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xE1, dst, shift, VEX_SIMD_66);
}

void Assembler::psrad(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xE2, dst, shift, VEX_SIMD_66);
}

void Assembler::vpsraw(XMMRegister dst, XMMRegister src, int shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  // XMM4 is for /4 encoding: 66 0F 71 /4 ib
  emit_vex_arith(0x71, xmm4, dst, src, VEX_SIMD_66, vector256);
  emit_int8(shift & 0xFF);
}

void Assembler::vpsrad(XMMRegister dst, XMMRegister src, int shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  // XMM4 is for /4 encoding: 66 0F 72 /4 ib
  emit_vex_arith(0x72, xmm4, dst, src, VEX_SIMD_66, vector256);
  emit_int8(shift & 0xFF);
}

void Assembler::vpsraw(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xE1, dst, src, shift, VEX_SIMD_66, vector256);
}

void Assembler::vpsrad(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xE2, dst, src, shift, VEX_SIMD_66, vector256);
}


// AND packed integers
void Assembler::pand(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xDB, dst, src, VEX_SIMD_66);
}

void Assembler::vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xDB, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpand(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xDB, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::por(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xEB, dst, src, VEX_SIMD_66);
}

void Assembler::vpor(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xEB, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpor(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xEB, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::pxor(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xEF, dst, src, VEX_SIMD_66);
}

void Assembler::vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xEF, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpxor(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xEF, dst, nds, src, VEX_SIMD_66, vector256);
}


void Assembler::vinsertf128h(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  bool vector256 = true;
  int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, vector256, VEX_OPCODE_0F_3A);
  emit_int8(0x18);
  emit_int8((unsigned char)(0xC0 | encode));
  // 0x00 - insert into lower 128 bits
  // 0x01 - insert into upper 128 bits
  emit_int8(0x01);
}

void Assembler::vinsertf128h(XMMRegister dst, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  bool vector256 = true;
  assert(dst != xnoreg, "sanity");
  int dst_enc = dst->encoding();
  // swap src<->dst for encoding
  vex_prefix(src, dst_enc, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, false, vector256);
  emit_int8(0x18);
  emit_operand(dst, src);
  // 0x01 - insert into upper 128 bits
  emit_int8(0x01);
}

void Assembler::vextractf128h(Address dst, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  bool vector256 = true;
  assert(src != xnoreg, "sanity");
  int src_enc = src->encoding();
  vex_prefix(dst, 0, src_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, false, vector256);
  emit_int8(0x19);
  emit_operand(src, dst);
  // 0x01 - extract from upper 128 bits
  emit_int8(0x01);
}

void Assembler::vinserti128h(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx2(), "");
  bool vector256 = true;
  int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, vector256, VEX_OPCODE_0F_3A);
  emit_int8(0x38);
  emit_int8((unsigned char)(0xC0 | encode));
  // 0x00 - insert into lower 128 bits
  // 0x01 - insert into upper 128 bits
  emit_int8(0x01);
}

void Assembler::vinserti128h(XMMRegister dst, Address src) {
  assert(VM_Version::supports_avx2(), "");
  InstructionMark im(this);
  bool vector256 = true;
  assert(dst != xnoreg, "sanity");
  int dst_enc = dst->encoding();
  // swap src<->dst for encoding
  vex_prefix(src, dst_enc, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, false, vector256);
  emit_int8(0x38);
  emit_operand(dst, src);
  // 0x01 - insert into upper 128 bits
  emit_int8(0x01);
}

void Assembler::vextracti128h(Address dst, XMMRegister src) {
  assert(VM_Version::supports_avx2(), "");
  InstructionMark im(this);
  bool vector256 = true;
  assert(src != xnoreg, "sanity");
  int src_enc = src->encoding();
  vex_prefix(dst, 0, src_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, false, vector256);
  emit_int8(0x39);
  emit_operand(src, dst);
  // 0x01 - extract from upper 128 bits
  emit_int8(0x01);
}

// Duplicate 4-byte integer data from src into 8 locations in dest
void Assembler::vpbroadcastd(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_avx2(), "");
  bool vector256 = true;
  int encode = vex_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, vector256, VEX_OPCODE_0F_38);
  emit_int8(0x58);
  emit_int8((unsigned char)(0xC0 | encode));
}

// Carry-Less Multiplication Quadword
void Assembler::pclmulqdq(XMMRegister dst, XMMRegister src, int mask) {
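  // 'mask' is the imm8 quadword selector: one bit picks the quadword of each source,
  // e.g. 0x00 multiplies the two low quadwords and 0x11 the two high quadwords.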
  assert(VM_Version::supports_clmul(), "");
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A);
  emit_int8(0x44);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8((unsigned char)mask);
}

// Carry-Less Multiplication Quadword
void Assembler::vpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask) {
  assert(VM_Version::supports_avx() && VM_Version::supports_clmul(), "");
  bool vector256 = false;
  int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, vector256, VEX_OPCODE_0F_3A);
  emit_int8(0x44);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8((unsigned char)mask);
}

void Assembler::vzeroupper() {
  assert(VM_Version::supports_avx(), "");
  (void)vex_prefix_and_encode(xmm0, xmm0, xmm0, VEX_SIMD_NONE);
  emit_int8(0x77);
}


#ifndef _LP64
// 32bit only pieces of the assembler

void Assembler::cmp_literal32(Register src1, int32_t imm32, RelocationHolder const& rspec) {
  // NO PREFIX AS NEVER 64BIT
  InstructionMark im(this);
  emit_int8((unsigned char)0x81);
  emit_int8((unsigned char)(0xF8 | src1->encoding()));
  emit_data(imm32, rspec, 0);
}

void Assembler::cmp_literal32(Address src1, int32_t imm32, RelocationHolder const& rspec) {
  // NO PREFIX AS NEVER 64BIT (not even 32bit versions of 64bit regs)
  InstructionMark im(this);
  emit_int8((unsigned char)0x81);
  emit_operand(rdi, src1);
  emit_data(imm32, rspec, 0);
}

// The 64-bit (32bit platform) cmpxchg compares the value at adr with the contents of rdx:rax,
// and stores rcx:rbx into adr if they are equal; otherwise, the value at adr is loaded
// into rdx:rax.  The ZF is set if the compared values were equal, and cleared otherwise.
void Assembler::cmpxchg8(Address adr) {
  InstructionMark im(this);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xC7);
  emit_operand(rcx, adr);
}

void Assembler::decl(Register dst) {
  // Don't use it directly. Use MacroAssembler::decrementl() instead.
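  // (0x48 | reg) is the short one-byte 'dec r32' form, which only exists in 32-bit mode;
  // in 64-bit mode the bytes 0x48-0x4F are REX prefixes, hence the #ifndef _LP64 above.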
  emit_int8(0x48 | dst->encoding());
}

#endif // _LP64

// 64bit typically doesn't use the x87 but needs to for the trig funcs

void Assembler::fabs() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xE1);
}

void Assembler::fadd(int i) {
  emit_farith(0xD8, 0xC0, i);
}

void Assembler::fadd_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rax, src);
}

void Assembler::fadd_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rax, src);
}

void Assembler::fadda(int i) {
  emit_farith(0xDC, 0xC0, i);
}

void Assembler::faddp(int i) {
  emit_farith(0xDE, 0xC0, i);
}

void Assembler::fchs() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xE0);
}

void Assembler::fcom(int i) {
  emit_farith(0xD8, 0xD0, i);
}

void Assembler::fcomp(int i) {
  emit_farith(0xD8, 0xD8, i);
}

void Assembler::fcomp_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rbx, src);
}

void Assembler::fcomp_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rbx, src);
}

void Assembler::fcompp() {
  emit_int8((unsigned char)0xDE);
  emit_int8((unsigned char)0xD9);
}

void Assembler::fcos() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xFF);
}

void Assembler::fdecstp() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xF6);
}

void Assembler::fdiv(int i) {
  emit_farith(0xD8, 0xF0, i);
}

void Assembler::fdiv_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rsi, src);
}

void Assembler::fdiv_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rsi, src);
}

void Assembler::fdiva(int i) {
  emit_farith(0xDC, 0xF8, i);
}

// Note: The Intel manual (Pentium Processor User's Manual, Vol.3, 1994)
//       is erroneous for some of the floating-point instructions below.

void Assembler::fdivp(int i) {
  emit_farith(0xDE, 0xF8, i);                    // ST(0) <- ST(0) / ST(1) and pop (Intel manual wrong)
}

void Assembler::fdivr(int i) {
  emit_farith(0xD8, 0xF8, i);
}

void Assembler::fdivr_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rdi, src);
}

void Assembler::fdivr_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rdi, src);
}

void Assembler::fdivra(int i) {
  emit_farith(0xDC, 0xF0, i);
}

void Assembler::fdivrp(int i) {
  emit_farith(0xDE, 0xF0, i);                    // ST(0) <- ST(1) / ST(0) and pop (Intel manual wrong)
}

void Assembler::ffree(int i) {
  emit_farith(0xDD, 0xC0, i);
}

void Assembler::fild_d(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDF);
  emit_operand32(rbp, adr);
}

void Assembler::fild_s(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDB);
  emit_operand32(rax, adr);
}

void Assembler::fincstp() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xF7);
}

void Assembler::finit() {
  emit_int8((unsigned char)0x9B);
  emit_int8((unsigned char)0xDB);
  emit_int8((unsigned char)0xE3);
}

void Assembler::fist_s(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDB);
  emit_operand32(rdx, adr);
}

void Assembler::fistp_d(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDF);
  emit_operand32(rdi, adr);
}

void Assembler::fistp_s(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDB);
  emit_operand32(rbx, adr);
}

void Assembler::fld1() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xE8);
}

void Assembler::fld_d(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDD);
  emit_operand32(rax, adr);
}

void Assembler::fld_s(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD9);
  emit_operand32(rax, adr);
}


void Assembler::fld_s(int index) {
  emit_farith(0xD9, 0xC0, index);
}

void Assembler::fld_x(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDB);
  emit_operand32(rbp, adr);
}

void Assembler::fldcw(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD9);
  emit_operand32(rbp, src);
}

void Assembler::fldenv(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD9);
  emit_operand32(rsp, src);
}

void Assembler::fldlg2() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xEC);
}

void Assembler::fldln2() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xED);
}

void Assembler::fldz() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xEE);
}

void Assembler::flog() {
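  // ln(x): fyl2x computes ST(1) * log2(ST(0)) and pops, so pushing ln(2) first
  // yields ln(2) * log2(x) = ln(x). flog10() below uses log10(2) the same way.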
  fldln2();
  fxch();
  fyl2x();
}

void Assembler::flog10() {
  fldlg2();
  fxch();
  fyl2x();
}

void Assembler::fmul(int i) {
  emit_farith(0xD8, 0xC8, i);
}

void Assembler::fmul_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rcx, src);
}

void Assembler::fmul_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rcx, src);
}

void Assembler::fmula(int i) {
  emit_farith(0xDC, 0xC8, i);
}

void Assembler::fmulp(int i) {
  emit_farith(0xDE, 0xC8, i);
}

void Assembler::fnsave(Address dst) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDD);
  emit_operand32(rsi, dst);
}

void Assembler::fnstcw(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0x9B);
  emit_int8((unsigned char)0xD9);
  emit_operand32(rdi, src);
}

void Assembler::fnstsw_ax() {
  emit_int8((unsigned char)0xDF);
  emit_int8((unsigned char)0xE0);
}

void Assembler::fprem() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xF8);
}

void Assembler::fprem1() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xF5);
}

void Assembler::frstor(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDD);
  emit_operand32(rsp, src);
}

void Assembler::fsin() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xFE);
}

void Assembler::fsqrt() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xFA);
}

void Assembler::fst_d(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDD);
  emit_operand32(rdx, adr);
}

void Assembler::fst_s(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD9);
  emit_operand32(rdx, adr);
}

void Assembler::fstp_d(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDD);
  emit_operand32(rbx, adr);
}

void Assembler::fstp_d(int index) {
  emit_farith(0xDD, 0xD8, index);
}

void Assembler::fstp_s(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD9);
  emit_operand32(rbx, adr);
}

void Assembler::fstp_x(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDB);
  emit_operand32(rdi, adr);
}

void Assembler::fsub(int i) {
  emit_farith(0xD8, 0xE0, i);
}

void Assembler::fsub_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rsp, src);
}

void Assembler::fsub_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rsp, src);
}

void Assembler::fsuba(int i) {
  emit_farith(0xDC, 0xE8, i);
}

void Assembler::fsubp(int i) {
  emit_farith(0xDE, 0xE8, i);                    // ST(0) <- ST(0) - ST(1) and pop (Intel manual wrong)
}

void Assembler::fsubr(int i) {
  emit_farith(0xD8, 0xE8, i);
}

void Assembler::fsubr_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rbp, src);
}

void Assembler::fsubr_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rbp, src);
}

void Assembler::fsubra(int i) {
  emit_farith(0xDC, 0xE0, i);
}

void Assembler::fsubrp(int i) {
  emit_farith(0xDE, 0xE0, i);                    // ST(0) <- ST(1) - ST(0) and pop (Intel manual wrong)
}

void Assembler::ftan() {
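  // fptan replaces ST(0) with tan(ST(0)) and then pushes 1.0; the trailing
  // 'fstp st(0)' discards that 1.0 so only the tangent is left on the stack.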
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xF2);
  emit_int8((unsigned char)0xDD);
  emit_int8((unsigned char)0xD8);
}

void Assembler::ftst() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xE4);
}

void Assembler::fucomi(int i) {
  // make sure the instruction is supported (introduced for P6, together with cmov)
  guarantee(VM_Version::supports_cmov(), "illegal instruction");
  emit_farith(0xDB, 0xE8, i);
}

void Assembler::fucomip(int i) {
  // make sure the instruction is supported (introduced for P6, together with cmov)
  guarantee(VM_Version::supports_cmov(), "illegal instruction");
  emit_farith(0xDF, 0xE8, i);
}

void Assembler::fwait() {
  emit_int8((unsigned char)0x9B);
}

void Assembler::fxch(int i) {
  emit_farith(0xD9, 0xC8, i);
}

void Assembler::fyl2x() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xF1);
}

void Assembler::frndint() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xFC);
}

void Assembler::f2xm1() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xF0);
}

void Assembler::fldl2e() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xEA);
}

// SSE SIMD prefix byte values corresponding to VexSimdPrefix encoding.
static int simd_pre[4] = { 0, 0x66, 0xF3, 0xF2 };
// SSE opcode second byte values (first is 0x0F) corresponding to VexOpcode encoding.
static int simd_opc[4] = { 0,    0, 0x38, 0x3A };
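// Example: an instruction selected as VEX_SIMD_66 / VEX_OPCODE_0F_38 is emitted in legacy SSE
// form as 66 [REX] 0F 38 <opcode>, using the two tables above.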

// Generate SSE legacy REX prefix and SIMD opcode based on VEX encoding.
void Assembler::rex_prefix(Address adr, XMMRegister xreg, VexSimdPrefix pre, VexOpcode opc, bool rex_w) {
  if (pre > 0) {
    emit_int8(simd_pre[pre]);
  }
  if (rex_w) {
    prefixq(adr, xreg);
  } else {
    prefix(adr, xreg);
  }
  if (opc > 0) {
    emit_int8(0x0F);
    int opc2 = simd_opc[opc];
    if (opc2 > 0) {
      emit_int8(opc2);
    }
  }
}

int Assembler::rex_prefix_and_encode(int dst_enc, int src_enc, VexSimdPrefix pre, VexOpcode opc, bool rex_w) {
  if (pre > 0) {
    emit_int8(simd_pre[pre]);
  }
  int encode = (rex_w) ? prefixq_and_encode(dst_enc, src_enc) :
                          prefix_and_encode(dst_enc, src_enc);
  if (opc > 0) {
    emit_int8(0x0F);
    int opc2 = simd_opc[opc];
    if (opc2 > 0) {
      emit_int8(opc2);
    }
  }
  return encode;
}


void Assembler::vex_prefix(bool vex_r, bool vex_b, bool vex_x, bool vex_w, int nds_enc, VexSimdPrefix pre, VexOpcode opc, bool vector256) {
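  // VEX prefix layout (Intel SDM Vol. 2):
  //   3-byte form: C4 [~R ~X ~B m-mmmm] [W vvvv L pp]
  //   2-byte form: C5 [~R vvvv L pp]
  // R/X/B extend the ModRM/SIB register fields (stored inverted), vvvv is the inverted
  // encoding of the extra source register (nds), L selects 128/256-bit vectors and pp
  // encodes the implied SIMD prefix (none/66/F3/F2).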
  if (vex_b || vex_x || vex_w || (opc == VEX_OPCODE_0F_38) || (opc == VEX_OPCODE_0F_3A)) {
    prefix(VEX_3bytes);

    int byte1 = (vex_r ? VEX_R : 0) | (vex_x ? VEX_X : 0) | (vex_b ? VEX_B : 0);
    byte1 = (~byte1) & 0xE0;
    byte1 |= opc;
    emit_int8(byte1);

    int byte2 = ((~nds_enc) & 0xf) << 3;
    byte2 |= (vex_w ? VEX_W : 0) | (vector256 ? 4 : 0) | pre;
    emit_int8(byte2);
  } else {
    prefix(VEX_2bytes);

    int byte1 = vex_r ? VEX_R : 0;
    byte1 = (~byte1) & 0x80;
    byte1 |= ((~nds_enc) & 0xf) << 3;
    byte1 |= (vector256 ? 4 : 0) | pre;
    emit_int8(byte1);
  }
}

void Assembler::vex_prefix(Address adr, int nds_enc, int xreg_enc, VexSimdPrefix pre, VexOpcode opc, bool vex_w, bool vector256){
  bool vex_r = (xreg_enc >= 8);
  bool vex_b = adr.base_needs_rex();
  bool vex_x = adr.index_needs_rex();
  vex_prefix(vex_r, vex_b, vex_x, vex_w, nds_enc, pre, opc, vector256);
}

int Assembler::vex_prefix_and_encode(int dst_enc, int nds_enc, int src_enc, VexSimdPrefix pre, VexOpcode opc, bool vex_w, bool vector256) {
  bool vex_r = (dst_enc >= 8);
  bool vex_b = (src_enc >= 8);
  bool vex_x = false;
  vex_prefix(vex_r, vex_b, vex_x, vex_w, nds_enc, pre, opc, vector256);
  return (((dst_enc & 7) << 3) | (src_enc & 7));
}


void Assembler::simd_prefix(XMMRegister xreg, XMMRegister nds, Address adr, VexSimdPrefix pre, VexOpcode opc, bool rex_w, bool vector256) {
  if (UseAVX > 0) {
    int xreg_enc = xreg->encoding();
    int  nds_enc = nds->is_valid() ? nds->encoding() : 0;
    vex_prefix(adr, nds_enc, xreg_enc, pre, opc, rex_w, vector256);
  } else {
    assert((nds == xreg) || (nds == xnoreg), "wrong sse encoding");
    rex_prefix(adr, xreg, pre, opc, rex_w);
  }
}

int Assembler::simd_prefix_and_encode(XMMRegister dst, XMMRegister nds, XMMRegister src, VexSimdPrefix pre, VexOpcode opc, bool rex_w, bool vector256) {
  int dst_enc = dst->encoding();
  int src_enc = src->encoding();
  if (UseAVX > 0) {
    int nds_enc = nds->is_valid() ? nds->encoding() : 0;
    return vex_prefix_and_encode(dst_enc, nds_enc, src_enc, pre, opc, rex_w, vector256);
  } else {
    assert((nds == dst) || (nds == src) || (nds == xnoreg), "wrong sse encoding");
    return rex_prefix_and_encode(dst_enc, src_enc, pre, opc, rex_w);
  }
}

void Assembler::emit_simd_arith(int opcode, XMMRegister dst, Address src, VexSimdPrefix pre) {
  InstructionMark im(this);
  simd_prefix(dst, dst, src, pre);
  emit_int8(opcode);
  emit_operand(dst, src);
}

void Assembler::emit_simd_arith(int opcode, XMMRegister dst, XMMRegister src, VexSimdPrefix pre) {
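  // Legacy (non-AVX) sketch: addpd(xmm0, xmm1) reaches this helper with opcode 0x58 and
  // VEX_SIMD_66 and, with UseAVX == 0, should come out as 66 0F 58 C1 ('addpd xmm0, xmm1').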
  int encode = simd_prefix_and_encode(dst, dst, src, pre);
  emit_int8(opcode);
  emit_int8((unsigned char)(0xC0 | encode));
}

// Versions with no second source register (non-destructive source).
void Assembler::emit_simd_arith_nonds(int opcode, XMMRegister dst, Address src, VexSimdPrefix pre) {
  InstructionMark im(this);
  simd_prefix(dst, xnoreg, src, pre);
  emit_int8(opcode);
  emit_operand(dst, src);
}

void Assembler::emit_simd_arith_nonds(int opcode, XMMRegister dst, XMMRegister src, VexSimdPrefix pre) {
  int encode = simd_prefix_and_encode(dst, xnoreg, src, pre);
4496 4497
  emit_int8(opcode);
  emit_int8((unsigned char)(0xC0 | encode));
4498 4499 4500 4501 4502 4503 4504
}

// 3-operands AVX instructions
void Assembler::emit_vex_arith(int opcode, XMMRegister dst, XMMRegister nds,
                               Address src, VexSimdPrefix pre, bool vector256) {
  InstructionMark im(this);
  vex_prefix(dst, nds, src, pre, vector256);
  emit_int8(opcode);
  emit_operand(dst, src);
}

void Assembler::emit_vex_arith(int opcode, XMMRegister dst, XMMRegister nds,
                               XMMRegister src, VexSimdPrefix pre, bool vector256) {
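  // AVX sketch: vaddsd(xmm0, xmm1, xmm2) reaches this helper with opcode 0x58 and VEX_SIMD_F2
  // and, with no extended registers involved, should come out as C5 F3 58 C2 (2-byte VEX form).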
  int encode = vex_prefix_and_encode(dst, nds, src, pre, vector256);
  emit_int8(opcode);
  emit_int8((unsigned char)(0xC0 | encode));
}

#ifndef _LP64

void Assembler::incl(Register dst) {
  // Don't use it directly. Use MacroAssembler::incrementl() instead.
  emit_int8(0x40 | dst->encoding());
}

void Assembler::lea(Register dst, Address src) {
  leal(dst, src);
}

void Assembler::mov_literal32(Address dst, int32_t imm32,  RelocationHolder const& rspec) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xC7);
  emit_operand(rax, dst);
  emit_data((int)imm32, rspec, 0);
}

void Assembler::mov_literal32(Register dst, int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)(0xB8 | encode));
  emit_data((int)imm32, rspec, 0);
}

void Assembler::popa() { // 32bit
  emit_int8(0x61);
}

void Assembler::push_literal32(int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  emit_int8(0x68);
  emit_data(imm32, rspec, 0);
}

void Assembler::pusha() { // 32bit
  emit_int8(0x60);
}

void Assembler::set_byte_if_not_zero(Register dst) {
  emit_int8(0x0F);
  emit_int8((unsigned char)0x95);
  emit_int8((unsigned char)(0xE0 | dst->encoding()));
}

void Assembler::shldl(Register dst, Register src) {
  emit_int8(0x0F);
  emit_int8((unsigned char)0xA5);
  emit_int8((unsigned char)(0xC0 | src->encoding() << 3 | dst->encoding()));
}

void Assembler::shrdl(Register dst, Register src) {
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAD);
  emit_int8((unsigned char)(0xC0 | src->encoding() << 3 | dst->encoding()));
}

#else // LP64

void Assembler::set_byte_if_not_zero(Register dst) {
  int enc = prefix_and_encode(dst->encoding(), true);
  emit_int8(0x0F);
  emit_int8((unsigned char)0x95);
  emit_int8((unsigned char)(0xE0 | enc));
}

// 64bit only pieces of the assembler
// This should only be used by 64bit instructions that can use rip-relative
// it cannot be used by instructions that want an immediate value.

bool Assembler::reachable(AddressLiteral adr) {
  int64_t disp;
  // None will force a 64bit literal to the code stream. Likely a placeholder
  // for something that will be patched later and we need to be certain it will
  // always be reachable.
  if (adr.reloc() == relocInfo::none) {
    return false;
  }
  if (adr.reloc() == relocInfo::internal_word_type) {
    // This should be rip relative and easily reachable.
    return true;
  }
  if (adr.reloc() == relocInfo::virtual_call_type ||
      adr.reloc() == relocInfo::opt_virtual_call_type ||
      adr.reloc() == relocInfo::static_call_type ||
      adr.reloc() == relocInfo::static_stub_type ) {
    // This should be rip relative within the code cache and easily
    // reachable until we get huge code caches. (At which point
    // ic code is going to have issues).
    return true;
  }
  if (adr.reloc() != relocInfo::external_word_type &&
      adr.reloc() != relocInfo::poll_return_type &&  // these are really external_word but need special
      adr.reloc() != relocInfo::poll_type &&         // relocs to identify them
      adr.reloc() != relocInfo::runtime_call_type ) {
    return false;
  }

  // Stress the correction code
  if (ForceUnreachable) {
    // Must be runtimecall reloc, see if it is in the codecache
    // Flipping stuff in the codecache to be unreachable causes issues
    // with things like inline caches where the additional instructions
    // are not handled.
    if (CodeCache::find_blob(adr._target) == NULL) {
      return false;
    }
  }
  // For external_word_type/runtime_call_type if it is reachable from where we
  // are now (possibly a temp buffer) and where we might end up
  // anywhere in the codeCache then we are always reachable.
  // This would have to be more pessimistic if we ever
  // save/restore shared code.
  disp = (int64_t)adr._target - ((int64_t)CodeCache::low_bound() + sizeof(int));
  if (!is_simm32(disp)) return false;
  disp = (int64_t)adr._target - ((int64_t)CodeCache::high_bound() + sizeof(int));
  if (!is_simm32(disp)) return false;

  disp = (int64_t)adr._target - ((int64_t)pc() + sizeof(int));

  // Because a rip-relative target is disp + address_of_next_instruction, and we
  // don't yet know the value of address_of_next_instruction, we apply a fudge factor
  // to make sure we will be ok no matter what size of instruction this displacement ends up in.
  // We don't have to fudge the checks above because they are already worst case.

  // 12 == override/rex byte, opcode byte, rm byte, sib byte, a 4-byte disp, 4-byte literal,
  // + 4 because better safe than sorry.
  const int fudge = 12 + 4;
  if (disp < 0) {
    disp -= fudge;
  } else {
    disp += fudge;
  }
  return is_simm32(disp);
}
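// Note: callers generally pair reachable() with a scratch-register fallback, roughly
// (a sketch only, not the exact MacroAssembler code):
//   if (reachable(adr)) { /* use a rip-relative form directly */ }
//   else                { /* materialize the 64-bit address in a scratch register first */ }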

// Returns true if the polling page is not reachable from the code cache using rip-relative
// addressing.
bool Assembler::is_polling_page_far() {
  intptr_t addr = (intptr_t)os::get_polling_page();
  return ForceUnreachable ||
         !is_simm32(addr - (intptr_t)CodeCache::low_bound()) ||
         !is_simm32(addr - (intptr_t)CodeCache::high_bound());
}
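// The polling page lives in its own reservation outside the code cache, so whether a
// rip-relative access to it fits in a signed 32-bit displacement depends on where the
// code cache happened to be placed; callers can use this to choose between a short
// rip-relative poll and first loading the page address into a register.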

void Assembler::emit_data64(jlong data,
                            relocInfo::relocType rtype,
                            int format) {
  if (rtype == relocInfo::none) {
    emit_int64(data);
  } else {
    emit_data64(data, Relocation::spec_simple(rtype), format);
  }
}

void Assembler::emit_data64(jlong data,
                            RelocationHolder const& rspec,
                            int format) {
  assert(imm_operand == 0, "default format must be immediate in this file");
  assert(imm_operand == format, "must be immediate");
  assert(inst_mark() != NULL, "must be inside InstructionMark");
  // Do not use AbstractAssembler::relocate, which is not intended for
  // embedded words.  Instead, relocate to the enclosing instruction.
  code_section()->relocate(inst_mark(), rspec, format);
#ifdef ASSERT
  check_relocation(rspec, format);
#endif
  emit_int64(data);
}

int Assembler::prefix_and_encode(int reg_enc, bool byteinst) {
  if (reg_enc >= 8) {
    prefix(REX_B);
    reg_enc -= 8;
  } else if (byteinst && reg_enc >= 4) {
    prefix(REX);
  }
  return reg_enc;
}
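// REX.B extends the 3-bit register field so r8-r15 (encodings 8-15) are reachable. The
// byteinst case needs a bare REX prefix even for encodings 4-7 because without any REX
// prefix those byte-register encodings select AH/CH/DH/BH instead of SPL/BPL/SIL/DIL.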

int Assembler::prefixq_and_encode(int reg_enc) {
  if (reg_enc < 8) {
    prefix(REX_W);
  } else {
    prefix(REX_WB);
    reg_enc -= 8;
  }
  return reg_enc;
}

int Assembler::prefix_and_encode(int dst_enc, int src_enc, bool byteinst) {
  if (dst_enc < 8) {
    if (src_enc >= 8) {
      prefix(REX_B);
      src_enc -= 8;
    } else if (byteinst && src_enc >= 4) {
      prefix(REX);
    }
  } else {
    if (src_enc < 8) {
      prefix(REX_R);
    } else {
      prefix(REX_RB);
      src_enc -= 8;
    }
    dst_enc -= 8;
  }
  return dst_enc << 3 | src_enc;
}

int Assembler::prefixq_and_encode(int dst_enc, int src_enc) {
  if (dst_enc < 8) {
    if (src_enc < 8) {
      prefix(REX_W);
    } else {
      prefix(REX_WB);
      src_enc -= 8;
    }
  } else {
    if (src_enc < 8) {
      prefix(REX_WR);
    } else {
      prefix(REX_WRB);
      src_enc -= 8;
    }
    dst_enc -= 8;
  }
  return dst_enc << 3 | src_enc;
}

void Assembler::prefix(Register reg) {
  if (reg->encoding() >= 8) {
    prefix(REX_B);
  }
}

void Assembler::prefix(Address adr) {
  if (adr.base_needs_rex()) {
    if (adr.index_needs_rex()) {
      prefix(REX_XB);
    } else {
      prefix(REX_B);
    }
  } else {
    if (adr.index_needs_rex()) {
      prefix(REX_X);
    }
  }
}

void Assembler::prefixq(Address adr) {
  if (adr.base_needs_rex()) {
    if (adr.index_needs_rex()) {
      prefix(REX_WXB);
    } else {
      prefix(REX_WB);
    }
  } else {
    if (adr.index_needs_rex()) {
      prefix(REX_WX);
    } else {
      prefix(REX_W);
    }
  }
}
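// For memory operands the REX bits used here are: W = 64-bit operand size, X = extension
// of the SIB index register, B = extension of the base register. R (extension of the
// ModRM reg field) is only added by the overloads that also take a register operand.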


void Assembler::prefix(Address adr, Register reg, bool byteinst) {
  if (reg->encoding() < 8) {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_XB);
      } else {
        prefix(REX_B);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_X);
      } else if (byteinst && reg->encoding() >= 4 ) {
        prefix(REX);
      }
    }
  } else {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_RXB);
      } else {
        prefix(REX_RB);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_RX);
      } else {
        prefix(REX_R);
      }
    }
  }
}

void Assembler::prefixq(Address adr, Register src) {
  if (src->encoding() < 8) {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_WXB);
      } else {
        prefix(REX_WB);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_WX);
      } else {
        prefix(REX_W);
      }
    }
  } else {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_WRXB);
      } else {
        prefix(REX_WRB);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_WRX);
      } else {
        prefix(REX_WR);
      }
    }
  }
}

void Assembler::prefix(Address adr, XMMRegister reg) {
  if (reg->encoding() < 8) {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_XB);
      } else {
        prefix(REX_B);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_X);
      }
    }
  } else {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_RXB);
      } else {
        prefix(REX_RB);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_RX);
      } else {
        prefix(REX_R);
      }
    }
  }
}

void Assembler::prefixq(Address adr, XMMRegister src) {
  if (src->encoding() < 8) {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_WXB);
      } else {
        prefix(REX_WB);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_WX);
      } else {
        prefix(REX_W);
      }
    }
  } else {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_WRXB);
      } else {
        prefix(REX_WRB);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_WRX);
      } else {
        prefix(REX_WR);
      }
    }
  }
}

void Assembler::adcq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xD0, dst, imm32);
}

void Assembler::adcq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x13);
  emit_operand(dst, src);
}

void Assembler::adcq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x13, 0xC0, dst, src);
}

void Assembler::addq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_arith_operand(0x81, rax, dst, imm32);
}

void Assembler::addq(Address dst, Register src) {
  InstructionMark im(this);
  prefixq(dst, src);
  emit_int8(0x01);
  emit_operand(src, dst);
}

void Assembler::addq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xC0, dst, imm32);
}

void Assembler::addq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x03);
  emit_operand(dst, src);
}

void Assembler::addq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x03, 0xC0, dst, src);
}
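// The register/immediate forms above defer the encoding to emit_arith()/emit_arith_operand()
// (defined earlier in this file), which are expected to pick the sign-extended 8-bit
// immediate form when the value fits in a byte and the full 32-bit immediate form otherwise.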

void Assembler::adcxq(Register dst, Register src) {
  //assert(VM_Version::supports_adx(), "adx instructions not supported");
  emit_int8((unsigned char)0x66);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8(0x38);
  emit_int8((unsigned char)0xF6);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::adoxq(Register dst, Register src) {
  //assert(VM_Version::supports_adx(), "adx instructions not supported");
  emit_int8((unsigned char)0xF3);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8(0x38);
  emit_int8((unsigned char)0xF6);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::andq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_int8((unsigned char)0x81);
  emit_operand(rsp, dst, 4);
  emit_int32(imm32);
}
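// Note: rsp is not an operand here; emit_operand() only uses its encoding (4) as the ModRM
// reg-field opcode extension, i.e. the /4 that selects AND within the 0x81 immediate group.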

void Assembler::andq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xE0, dst, imm32);
}

void Assembler::andq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x23);
  emit_operand(dst, src);
}

void Assembler::andq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x23, 0xC0, dst, src);
}

void Assembler::andnq(Register dst, Register src1, Register src2) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  int encode = vex_prefix_0F38_and_encode_q(dst, src1, src2);
  emit_int8((unsigned char)0xF2);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::andnq(Register dst, Register src1, Address src2) {
  InstructionMark im(this);
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  vex_prefix_0F38_q(dst, src1, src2);
  emit_int8((unsigned char)0xF2);
  emit_operand(dst, src2);
}

void Assembler::bsfq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBC);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::bsrq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBD);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::bswapq(Register reg) {
  int encode = prefixq_and_encode(reg->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)(0xC8 | encode));
}

void Assembler::blsiq(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  int encode = vex_prefix_0F38_and_encode_q(rbx, dst, src);
  emit_int8((unsigned char)0xF3);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::blsiq(Register dst, Address src) {
  InstructionMark im(this);
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  vex_prefix_0F38_q(rbx, dst, src);
  emit_int8((unsigned char)0xF3);
  emit_operand(rbx, src);
}

void Assembler::blsmskq(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  int encode = vex_prefix_0F38_and_encode_q(rdx, dst, src);
  emit_int8((unsigned char)0xF3);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::blsmskq(Register dst, Address src) {
  InstructionMark im(this);
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  vex_prefix_0F38_q(rdx, dst, src);
  emit_int8((unsigned char)0xF3);
  emit_operand(rdx, src);
}

void Assembler::blsrq(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  int encode = vex_prefix_0F38_and_encode_q(rcx, dst, src);
  emit_int8((unsigned char)0xF3);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::blsrq(Register dst, Address src) {
  InstructionMark im(this);
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  vex_prefix_0F38_q(rcx, dst, src);
  emit_int8((unsigned char)0xF3);
  emit_operand(rcx, src);
}

void Assembler::cdqq() {
  prefix(REX_W);
  emit_int8((unsigned char)0x99);
}
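// REX.W 0x99 is CQO: it sign-extends RAX into RDX:RAX, which is how idivq() below expects
// its 128-bit dividend to be set up.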

void Assembler::clflush(Address adr) {
  prefix(adr);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAE);
  emit_operand(rdi, adr);
}

void Assembler::cmovq(Condition cc, Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8(0x40 | cc);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::cmovq(Condition cc, Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x0F);
  emit_int8(0x40 | cc);
  emit_operand(dst, src);
}

void Assembler::cmpq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_int8((unsigned char)0x81);
  emit_operand(rdi, dst, 4);
  emit_int32(imm32);
}

void Assembler::cmpq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xF8, dst, imm32);
}

void Assembler::cmpq(Address dst, Register src) {
  InstructionMark im(this);
  prefixq(dst, src);
  emit_int8(0x3B);
  emit_operand(src, dst);
}

void Assembler::cmpq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x3B, 0xC0, dst, src);
}

void Assembler::cmpq(Register dst, Address  src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x3B);
  emit_operand(dst, src);
}

void Assembler::cmpxchgq(Register reg, Address adr) {
  InstructionMark im(this);
  prefixq(adr, reg);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xB1);
  emit_operand(reg, adr);
}

void Assembler::cvtsi2sdq(XMMRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  int encode = simd_prefix_and_encode_q(dst, dst, src, VEX_SIMD_F2);
  emit_int8(0x2A);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::cvtsi2sdq(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  simd_prefix_q(dst, dst, src, VEX_SIMD_F2);
  emit_int8(0x2A);
  emit_operand(dst, src);
}

void Assembler::cvtsi2ssq(XMMRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  int encode = simd_prefix_and_encode_q(dst, dst, src, VEX_SIMD_F3);
  emit_int8(0x2A);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::cvtsi2ssq(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  simd_prefix_q(dst, dst, src, VEX_SIMD_F3);
  emit_int8(0x2A);
  emit_operand(dst, src);
}

void Assembler::cvttsd2siq(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  int encode = simd_prefix_and_encode_q(dst, src, VEX_SIMD_F2);
  emit_int8(0x2C);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::cvttss2siq(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  int encode = simd_prefix_and_encode_q(dst, src, VEX_SIMD_F3);
  emit_int8(0x2C);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::decl(Register dst) {
  // Don't use it directly. Use MacroAssembler::decrementl() instead.
  // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xFF);
  emit_int8((unsigned char)(0xC8 | encode));
}

void Assembler::decq(Register dst) {
  // Don't use it directly. Use MacroAssembler::decrementq() instead.
  // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)0xFF);
  emit_int8((unsigned char)(0xC8 | encode));
}

void Assembler::decq(Address dst) {
  // Don't use it directly. Use MacroAssembler::decrementq() instead.
  InstructionMark im(this);
  prefixq(dst);
  emit_int8((unsigned char)0xFF);
  emit_operand(rcx, dst);
}

void Assembler::fxrstor(Address src) {
  prefixq(src);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAE);
  emit_operand(as_Register(1), src);
}

void Assembler::fxsave(Address dst) {
  prefixq(dst);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAE);
  emit_operand(as_Register(0), dst);
}

void Assembler::idivq(Register src) {
  int encode = prefixq_and_encode(src->encoding());
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xF8 | encode));
}

void Assembler::imulq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAF);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::imulq(Register dst, Register src, int value) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  if (is8bit(value)) {
    emit_int8(0x6B);
    emit_int8((unsigned char)(0xC0 | encode));
    emit_int8(value & 0xFF);
  } else {
    emit_int8(0x69);
    emit_int8((unsigned char)(0xC0 | encode));
    emit_int32(value);
  }
}

void Assembler::imulq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x0F);
  emit_int8((unsigned char) 0xAF);
  emit_operand(dst, src);
}

void Assembler::incl(Register dst) {
  // Don't use it directly. Use MacroAssembler::incrementl() instead.
  // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xFF);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::incq(Register dst) {
  // Don't use it directly. Use MacroAssembler::incrementq() instead.
  // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)0xFF);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::incq(Address dst) {
  // Don't use it directly. Use MacroAssembler::incrementq() instead.
  InstructionMark im(this);
  prefixq(dst);
  emit_int8((unsigned char)0xFF);
  emit_operand(rax, dst);
}

void Assembler::lea(Register dst, Address src) {
  leaq(dst, src);
}

void Assembler::leaq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8((unsigned char)0x8D);
  emit_operand(dst, src);
}

void Assembler::mov64(Register dst, int64_t imm64) {
  InstructionMark im(this);
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)(0xB8 | encode));
  emit_int64(imm64);
}

void Assembler::mov_literal64(Register dst, intptr_t imm64, RelocationHolder const& rspec) {
  InstructionMark im(this);
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)(0xB8 | encode));
  emit_data64(imm64, rspec);
}

void Assembler::mov_narrow_oop(Register dst, int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)(0xB8 | encode));
  emit_data((int)imm32, rspec, narrow_oop_operand);
}

void Assembler::mov_narrow_oop(Address dst, int32_t imm32,  RelocationHolder const& rspec) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0xC7);
  emit_operand(rax, dst, 4);
  emit_data((int)imm32, rspec, narrow_oop_operand);
}

void Assembler::cmp_narrow_oop(Register src1, int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  int encode = prefix_and_encode(src1->encoding());
  emit_int8((unsigned char)0x81);
  emit_int8((unsigned char)(0xF8 | encode));
  emit_data((int)imm32, rspec, narrow_oop_operand);
}

void Assembler::cmp_narrow_oop(Address src1, int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  prefix(src1);
  emit_int8((unsigned char)0x81);
  emit_operand(rax, src1, 4);
  emit_data((int)imm32, rspec, narrow_oop_operand);
}

void Assembler::lzcntq(Register dst, Register src) {
  assert(VM_Version::supports_lzcnt(), "encoding is treated as BSR");
  emit_int8((unsigned char)0xF3);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBD);
  emit_int8((unsigned char)(0xC0 | encode));
}
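// LZCNT is encoded as an F3-prefixed BSR; on CPUs without the LZCNT feature the prefix is
// ignored and the instruction silently behaves as BSR, which is what the assert message
// above warns about.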

void Assembler::movdq(XMMRegister dst, Register src) {
  // table D-1 says MMX/SSE2
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  int encode = simd_prefix_and_encode_q(dst, src, VEX_SIMD_66);
  emit_int8(0x6E);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::movdq(Register dst, XMMRegister src) {
  // table D-1 says MMX/SSE2
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  // swap src/dst to get correct prefix
  int encode = simd_prefix_and_encode_q(src, dst, VEX_SIMD_66);
  emit_int8(0x7E);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::movq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8((unsigned char)0x8B);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::movq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8((unsigned char)0x8B);
  emit_operand(dst, src);
}

void Assembler::movq(Address dst, Register src) {
  InstructionMark im(this);
  prefixq(dst, src);
  emit_int8((unsigned char)0x89);
  emit_operand(src, dst);
}

void Assembler::movsbq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBE);
  emit_operand(dst, src);
}

void Assembler::movsbq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBE);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::movslq(Register dst, int32_t imm32) {
  // dbx shows movslq(rcx, 3) as movq     $0x0000000049000000,(%rbx)
  // and movslq(r8, 3); as movl     $0x0000000048000000,(%rbx)
  // as a result we shouldn't use this until it has been tested at runtime...
  ShouldNotReachHere();
  InstructionMark im(this);
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)(0xC7 | encode));
  emit_int32(imm32);
}

void Assembler::movslq(Address dst, int32_t imm32) {
  assert(is_simm32(imm32), "lost bits");
  InstructionMark im(this);
  prefixq(dst);
  emit_int8((unsigned char)0xC7);
  emit_operand(rax, dst, 4);
  emit_int32(imm32);
}

void Assembler::movslq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x63);
  emit_operand(dst, src);
}

void Assembler::movslq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x63);
  emit_int8((unsigned char)(0xC0 | encode));
}
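// 0x63 with REX.W is MOVSXD: it sign-extends the low 32 bits of the source into the full
// 64-bit destination register.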

void Assembler::movswq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBF);
  emit_operand(dst, src);
}

void Assembler::movswq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8((unsigned char)0x0F);
  emit_int8((unsigned char)0xBF);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::movzbq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8((unsigned char)0x0F);
  emit_int8((unsigned char)0xB6);
  emit_operand(dst, src);
}

void Assembler::movzbq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xB6);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::movzwq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8((unsigned char)0x0F);
  emit_int8((unsigned char)0xB7);
  emit_operand(dst, src);
}

void Assembler::movzwq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8((unsigned char)0x0F);
  emit_int8((unsigned char)0xB7);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::mulq(Address src) {
  InstructionMark im(this);
  prefixq(src);
  emit_int8((unsigned char)0xF7);
  emit_operand(rsp, src);
}

void Assembler::mulq(Register src) {
  int encode = prefixq_and_encode(src->encoding());
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xE0 | encode));
}

void Assembler::mulxq(Register dst1, Register dst2, Register src) {
  assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
  int encode = vex_prefix_and_encode(dst1->encoding(), dst2->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_38, true, false);
  emit_int8((unsigned char)0xF6);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::negq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xD8 | encode));
}

void Assembler::notq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xD0 | encode));
}

void Assembler::orq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_int8((unsigned char)0x81);
  emit_operand(rcx, dst, 4);
  emit_int32(imm32);
}

void Assembler::orq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xC8, dst, imm32);
}

void Assembler::orq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x0B);
  emit_operand(dst, src);
}

void Assembler::orq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x0B, 0xC0, dst, src);
}

void Assembler::popa() { // 64bit
  movq(r15, Address(rsp, 0));
  movq(r14, Address(rsp, wordSize));
  movq(r13, Address(rsp, 2 * wordSize));
  movq(r12, Address(rsp, 3 * wordSize));
  movq(r11, Address(rsp, 4 * wordSize));
  movq(r10, Address(rsp, 5 * wordSize));
  movq(r9,  Address(rsp, 6 * wordSize));
  movq(r8,  Address(rsp, 7 * wordSize));
  movq(rdi, Address(rsp, 8 * wordSize));
  movq(rsi, Address(rsp, 9 * wordSize));
  movq(rbp, Address(rsp, 10 * wordSize));
  // skip rsp
  movq(rbx, Address(rsp, 12 * wordSize));
  movq(rdx, Address(rsp, 13 * wordSize));
  movq(rcx, Address(rsp, 14 * wordSize));
  movq(rax, Address(rsp, 15 * wordSize));

  addq(rsp, 16 * wordSize);
}
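// This is the counterpart of pusha() below: r15 was stored at the lowest address and rax at
// the highest, and the rsp slot is skipped because the trailing addq() already restores the
// original stack pointer.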

void Assembler::popcntq(Register dst, Address src) {
  assert(VM_Version::supports_popcnt(), "must support");
  InstructionMark im(this);
  emit_int8((unsigned char)0xF3);
  prefixq(src, dst);
  emit_int8((unsigned char)0x0F);
  emit_int8((unsigned char)0xB8);
  emit_operand(dst, src);
}

void Assembler::popcntq(Register dst, Register src) {
  assert(VM_Version::supports_popcnt(), "must support");
  emit_int8((unsigned char)0xF3);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8((unsigned char)0x0F);
  emit_int8((unsigned char)0xB8);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::popq(Address dst) {
  InstructionMark im(this);
  prefixq(dst);
  emit_int8((unsigned char)0x8F);
  emit_operand(rax, dst);
}

void Assembler::pusha() { // 64bit
  // we have to store original rsp.  ABI says that 128 bytes
  // below rsp are local scratch.
  movq(Address(rsp, -5 * wordSize), rsp);

  subq(rsp, 16 * wordSize);

  movq(Address(rsp, 15 * wordSize), rax);
  movq(Address(rsp, 14 * wordSize), rcx);
  movq(Address(rsp, 13 * wordSize), rdx);
  movq(Address(rsp, 12 * wordSize), rbx);
  // skip rsp
  movq(Address(rsp, 10 * wordSize), rbp);
  movq(Address(rsp, 9 * wordSize), rsi);
  movq(Address(rsp, 8 * wordSize), rdi);
  movq(Address(rsp, 7 * wordSize), r8);
  movq(Address(rsp, 6 * wordSize), r9);
  movq(Address(rsp, 5 * wordSize), r10);
  movq(Address(rsp, 4 * wordSize), r11);
  movq(Address(rsp, 3 * wordSize), r12);
  movq(Address(rsp, 2 * wordSize), r13);
  movq(Address(rsp, wordSize), r14);
  movq(Address(rsp, 0), r15);
}
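// The store of rsp at -5 * wordSize lands in slot 11 of the frame created by the following
// subq (old_rsp - 5 * wordSize == new_rsp + 11 * wordSize), i.e. exactly the slot that is
// skipped here and in popa() above. Writing below rsp before adjusting it is safe because
// the x86-64 ABI treats the 128 bytes below rsp (the red zone) as scratch.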

void Assembler::pushq(Address src) {
  InstructionMark im(this);
  prefixq(src);
  emit_int8((unsigned char)0xFF);
  emit_operand(rsi, src);
}

void Assembler::rclq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_int8((unsigned char)0xD1);
    emit_int8((unsigned char)(0xD0 | encode));
  } else {
    emit_int8((unsigned char)0xC1);
    emit_int8((unsigned char)(0xD0 | encode));
    emit_int8(imm8);
  }
}

void Assembler::rcrq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_int8((unsigned char)0xD1);
    emit_int8((unsigned char)(0xD8 | encode));
  } else {
    emit_int8((unsigned char)0xC1);
    emit_int8((unsigned char)(0xD8 | encode));
    emit_int8(imm8);
  }
}

void Assembler::rorq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_int8((unsigned char)0xD1);
    emit_int8((unsigned char)(0xC8 | encode));
  } else {
    emit_int8((unsigned char)0xC1);
    emit_int8((unsigned char)(0xC8 | encode));
    emit_int8(imm8);
  }
}

void Assembler::rorxq(Register dst, Register src, int imm8) {
  assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_3A, true, false);
  emit_int8((unsigned char)0xF0);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
}

void Assembler::sarq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_int8((unsigned char)0xD1);
    emit_int8((unsigned char)(0xF8 | encode));
  } else {
    emit_int8((unsigned char)0xC1);
    emit_int8((unsigned char)(0xF8 | encode));
    emit_int8(imm8);
  }
}

void Assembler::sarq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)0xD3);
  emit_int8((unsigned char)(0xF8 | encode));
}

void Assembler::sbbq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_arith_operand(0x81, rbx, dst, imm32);
}

void Assembler::sbbq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xD8, dst, imm32);
}

void Assembler::sbbq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x1B);
  emit_operand(dst, src);
}

void Assembler::sbbq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x1B, 0xC0, dst, src);
}

void Assembler::shlq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_int8((unsigned char)0xD1);
    emit_int8((unsigned char)(0xE0 | encode));
  } else {
    emit_int8((unsigned char)0xC1);
    emit_int8((unsigned char)(0xE0 | encode));
    emit_int8(imm8);
  }
}

void Assembler::shlq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)0xD3);
  emit_int8((unsigned char)(0xE0 | encode));
}

void Assembler::shrq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)0xC1);
  emit_int8((unsigned char)(0xE8 | encode));
  emit_int8(imm8);
}

void Assembler::shrq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)0xD3);
  emit_int8((unsigned char)(0xE8 | encode));
}

void Assembler::subq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_arith_operand(0x81, rbp, dst, imm32);
}

void Assembler::subq(Address dst, Register src) {
  InstructionMark im(this);
  prefixq(dst, src);
  emit_int8(0x29);
  emit_operand(src, dst);
}

void Assembler::subq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xE8, dst, imm32);
}

// Force generation of a 4 byte immediate value even if it fits into 8bit
void Assembler::subq_imm32(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith_imm32(0x81, 0xE8, dst, imm32);
}

void Assembler::subq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x2B);
  emit_operand(dst, src);
}

void Assembler::subq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x2B, 0xC0, dst, src);
}

void Assembler::testq(Register dst, int32_t imm32) {
  // not using emit_arith because test
  // doesn't support sign-extension of
  // 8bit operands
  int encode = dst->encoding();
  if (encode == 0) {
    prefix(REX_W);
    emit_int8((unsigned char)0xA9);
  } else {
    encode = prefixq_and_encode(encode);
    emit_int8((unsigned char)0xF7);
    emit_int8((unsigned char)(0xC0 | encode));
  }
  emit_int32(imm32);
}
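// When the destination is rax (encoding 0) the short form 0xA9 (TEST rAX, imm32) is used,
// saving the ModRM byte; any other register needs the general 0xF7 /0 form.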

void Assembler::testq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x85, 0xC0, dst, src);
}

void Assembler::xaddq(Address dst, Register src) {
  InstructionMark im(this);
  prefixq(dst, src);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xC1);
  emit_operand(src, dst);
}

void Assembler::xchgq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8((unsigned char)0x87);
  emit_operand(dst, src);
}

void Assembler::xchgq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8((unsigned char)0x87);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::xorq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x33, 0xC0, dst, src);
}

void Assembler::xorq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x33);
  emit_operand(dst, src);
}

#endif // !LP64