/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/resourceArea.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#endif // INCLUDE_ALL_GCS

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#define STOP(error) stop(error)
#else
#define BLOCK_COMMENT(str) block_comment(str)
#define STOP(error) block_comment(error); stop(error)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
// Implementation of AddressLiteral

AddressLiteral::AddressLiteral(address target, relocInfo::relocType rtype) {
  _is_lval = false;
  _target = target;
  switch (rtype) {
  case relocInfo::oop_type:
  case relocInfo::metadata_type:
    // Oops are a special case. Normally they would be their own section
    // but in cases like icBuffer they are literals in the code stream that
    // we don't have a section for. We use none so that we get a literal address
    // which is always patchable.
    break;
  case relocInfo::external_word_type:
    _rspec = external_word_Relocation::spec(target);
    break;
  case relocInfo::internal_word_type:
    _rspec = internal_word_Relocation::spec(target);
    break;
  case relocInfo::opt_virtual_call_type:
    _rspec = opt_virtual_call_Relocation::spec();
    break;
  case relocInfo::static_call_type:
    _rspec = static_call_Relocation::spec();
    break;
  case relocInfo::runtime_call_type:
    _rspec = runtime_call_Relocation::spec();
    break;
  case relocInfo::poll_type:
  case relocInfo::poll_return_type:
    _rspec = Relocation::spec_simple(rtype);
    break;
  case relocInfo::none:
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

// Implementation of Address

#ifdef _LP64

Address Address::make_array(ArrayAddress adr) {
  // Not implementable on 64bit machines
  // Should have been handled higher up the call chain.
  ShouldNotReachHere();
  return Address();
}

// exceedingly dangerous constructor
Address::Address(int disp, address loc, relocInfo::relocType rtype) {
  _base  = noreg;
  _index = noreg;
  _scale = no_scale;
  _disp  = disp;
  switch (rtype) {
    case relocInfo::external_word_type:
      _rspec = external_word_Relocation::spec(loc);
      break;
    case relocInfo::internal_word_type:
      _rspec = internal_word_Relocation::spec(loc);
      break;
    case relocInfo::runtime_call_type:
      // HMM
      _rspec = runtime_call_Relocation::spec();
      break;
    case relocInfo::poll_type:
    case relocInfo::poll_return_type:
      _rspec = Relocation::spec_simple(rtype);
      break;
    case relocInfo::none:
      break;
    default:
      ShouldNotReachHere();
  }
}
#else // LP64

Address Address::make_array(ArrayAddress adr) {
  AddressLiteral base = adr.base();
  Address index = adr.index();
  assert(index._disp == 0, "must not have disp"); // maybe it can?
  Address array(index._base, index._index, index._scale, (intptr_t) base.target());
  array._rspec = base._rspec;
  return array;
}

// exceedingly dangerous constructor
Address::Address(address loc, RelocationHolder spec) {
  _base  = noreg;
  _index = noreg;
  _scale = no_scale;
  _disp  = (intptr_t) loc;
  _rspec = spec;
}

#endif // _LP64



// Convert the raw encoding form into the form expected by the constructor for
// Address.  An index of 4 (rsp) corresponds to having no index, so convert
// that to noreg for the Address constructor.
Address Address::make_raw(int base, int index, int scale, int disp, relocInfo::relocType disp_reloc) {
  RelocationHolder rspec;
  if (disp_reloc != relocInfo::none) {
    rspec = Relocation::spec_simple(disp_reloc);
  }
  bool valid_index = index != rsp->encoding();
  if (valid_index) {
    Address madr(as_Register(base), as_Register(index), (Address::ScaleFactor)scale, in_ByteSize(disp));
    madr._rspec = rspec;
    return madr;
  } else {
    Address madr(as_Register(base), noreg, Address::no_scale, in_ByteSize(disp));
    madr._rspec = rspec;
    return madr;
  }
}

// Implementation of Assembler

int AbstractAssembler::code_fill_byte() {
  return (u_char)'\xF4'; // hlt
}

// make this go away someday
void Assembler::emit_data(jint data, relocInfo::relocType rtype, int format) {
  if (rtype == relocInfo::none)
        emit_int32(data);
  else  emit_data(data, Relocation::spec_simple(rtype), format);
}

void Assembler::emit_data(jint data, RelocationHolder const& rspec, int format) {
  assert(imm_operand == 0, "default format must be immediate in this file");
  assert(inst_mark() != NULL, "must be inside InstructionMark");
  if (rspec.type() !=  relocInfo::none) {
    #ifdef ASSERT
      check_relocation(rspec, format);
    #endif
    // Do not use AbstractAssembler::relocate, which is not intended for
    // embedded words.  Instead, relocate to the enclosing instruction.

    // hack. call32 is too wide for mask so use disp32
    if (format == call32_operand)
      code_section()->relocate(inst_mark(), rspec, disp32_operand);
    else
      code_section()->relocate(inst_mark(), rspec, format);
  }
  emit_int32(data);
}

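// Strip a register number down to the 3-bit field used in ModRM/SIB bytes;
// for the extended registers (r8-r15) the fourth bit is carried by a REX
// prefix that is emitted separately.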
static int encode(Register r) {
  int enc = r->encoding();
  if (enc >= 8) {
    enc -= 8;
  }
  return enc;
}

void Assembler::emit_arith_b(int op1, int op2, Register dst, int imm8) {
  assert(dst->has_byte_register(), "must have byte register");
  assert(isByte(op1) && isByte(op2), "wrong opcode");
  assert(isByte(imm8), "not a byte");
  assert((op1 & 0x01) == 0, "should be 8bit operation");
  emit_int8(op1);
  emit_int8(op2 | encode(dst));
  emit_int8(imm8);
}


void Assembler::emit_arith(int op1, int op2, Register dst, int32_t imm32) {
  assert(isByte(op1) && isByte(op2), "wrong opcode");
  assert((op1 & 0x01) == 1, "should be 32bit operation");
  assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
  if (is8bit(imm32)) {
    emit_int8(op1 | 0x02); // set sign bit
    emit_int8(op2 | encode(dst));
    emit_int8(imm32 & 0xFF);
  } else {
    emit_int8(op1);
    emit_int8(op2 | encode(dst));
    emit_int32(imm32);
  }
}

// Force generation of a 4 byte immediate value even if it fits into 8bit
void Assembler::emit_arith_imm32(int op1, int op2, Register dst, int32_t imm32) {
  assert(isByte(op1) && isByte(op2), "wrong opcode");
  assert((op1 & 0x01) == 1, "should be 32bit operation");
  assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
  emit_int8(op1);
  emit_int8(op2 | encode(dst));
  emit_int32(imm32);
}

// immediate-to-memory forms
void Assembler::emit_arith_operand(int op1, Register rm, Address adr, int32_t imm32) {
  assert((op1 & 0x01) == 1, "should be 32bit operation");
  assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
  if (is8bit(imm32)) {
    emit_int8(op1 | 0x02); // set sign bit
    emit_operand(rm, adr, 1);
    emit_int8(imm32 & 0xFF);
  } else {
    emit_int8(op1);
    emit_operand(rm, adr, 4);
    emit_int32(imm32);
  }
}


void Assembler::emit_arith(int op1, int op2, Register dst, Register src) {
  assert(isByte(op1) && isByte(op2), "wrong opcode");
  emit_int8(op1);
  emit_int8(op2 | encode(dst) << 3 | encode(src));
}


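// Emit the ModRM byte, optional SIB byte and displacement for a memory
// operand, picking the shortest displacement form (none, disp8 or disp32)
// that the address and its relocation allow.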
void Assembler::emit_operand(Register reg, Register base, Register index,
                             Address::ScaleFactor scale, int disp,
                             RelocationHolder const& rspec,
                             int rip_relative_correction) {
  relocInfo::relocType rtype = (relocInfo::relocType) rspec.type();

  // Encode the registers as needed in the fields they are used in

  int regenc = encode(reg) << 3;
  int indexenc = index->is_valid() ? encode(index) << 3 : 0;
  int baseenc = base->is_valid() ? encode(base) : 0;

  if (base->is_valid()) {
    if (index->is_valid()) {
      assert(scale != Address::no_scale, "inconsistent address");
      // [base + index*scale + disp]
      if (disp == 0 && rtype == relocInfo::none  &&
          base != rbp LP64_ONLY(&& base != r13)) {
        // [base + index*scale]
        // [00 reg 100][ss index base]
        assert(index != rsp, "illegal addressing mode");
        emit_int8(0x04 | regenc);
        emit_int8(scale << 6 | indexenc | baseenc);
      } else if (is8bit(disp) && rtype == relocInfo::none) {
        // [base + index*scale + imm8]
        // [01 reg 100][ss index base] imm8
        assert(index != rsp, "illegal addressing mode");
        emit_int8(0x44 | regenc);
        emit_int8(scale << 6 | indexenc | baseenc);
        emit_int8(disp & 0xFF);
      } else {
        // [base + index*scale + disp32]
        // [10 reg 100][ss index base] disp32
        assert(index != rsp, "illegal addressing mode");
        emit_int8(0x84 | regenc);
        emit_int8(scale << 6 | indexenc | baseenc);
        emit_data(disp, rspec, disp32_operand);
      }
    } else if (base == rsp LP64_ONLY(|| base == r12)) {
      // [rsp + disp]
      if (disp == 0 && rtype == relocInfo::none) {
        // [rsp]
        // [00 reg 100][00 100 100]
        emit_int8(0x04 | regenc);
        emit_int8(0x24);
      } else if (is8bit(disp) && rtype == relocInfo::none) {
        // [rsp + imm8]
        // [01 reg 100][00 100 100] disp8
        emit_int8(0x44 | regenc);
        emit_int8(0x24);
        emit_int8(disp & 0xFF);
      } else {
        // [rsp + imm32]
        // [10 reg 100][00 100 100] disp32
        emit_int8(0x84 | regenc);
        emit_int8(0x24);
        emit_data(disp, rspec, disp32_operand);
      }
    } else {
      // [base + disp]
      assert(base != rsp LP64_ONLY(&& base != r12), "illegal addressing mode");
      if (disp == 0 && rtype == relocInfo::none &&
          base != rbp LP64_ONLY(&& base != r13)) {
        // [base]
        // [00 reg base]
        emit_int8(0x00 | regenc | baseenc);
      } else if (is8bit(disp) && rtype == relocInfo::none) {
        // [base + disp8]
        // [01 reg base] disp8
        emit_int8(0x40 | regenc | baseenc);
        emit_int8(disp & 0xFF);
      } else {
        // [base + disp32]
        // [10 reg base] disp32
        emit_int8(0x80 | regenc | baseenc);
        emit_data(disp, rspec, disp32_operand);
      }
    }
  } else {
    if (index->is_valid()) {
      assert(scale != Address::no_scale, "inconsistent address");
      // [index*scale + disp]
      // [00 reg 100][ss index 101] disp32
      assert(index != rsp, "illegal addressing mode");
      emit_int8(0x04 | regenc);
      emit_int8(scale << 6 | indexenc | 0x05);
      emit_data(disp, rspec, disp32_operand);
    } else if (rtype != relocInfo::none ) {
      // [disp]  (64bit: RIP-relative, 32bit: absolute)
      // [00 000 101] disp32

      emit_int8(0x05 | regenc);
      // Note that the RIP-rel. correction applies to the generated
      // disp field, but _not_ to the target address in the rspec.

      // disp was created by converting the target address minus the pc
      // at the start of the instruction. That needs more correction here.
      // intptr_t disp = target - next_ip;
      assert(inst_mark() != NULL, "must be inside InstructionMark");
      address next_ip = pc() + sizeof(int32_t) + rip_relative_correction;
      int64_t adjusted = disp;
      // Do rip-rel adjustment for 64bit
      LP64_ONLY(adjusted -=  (next_ip - inst_mark()));
      assert(is_simm32(adjusted),
             "must be 32bit offset (RIP relative address)");
      emit_data((int32_t) adjusted, rspec, disp32_operand);

    } else {
      // 32bit never did this; it did everything as the rip-rel/disp code above
      // [disp] ABSOLUTE
      // [00 reg 100][00 100 101] disp32
      emit_int8(0x04 | regenc);
      emit_int8(0x25);
      emit_data(disp, rspec, disp32_operand);
    }
  }
}

void Assembler::emit_operand(XMMRegister reg, Register base, Register index,
                             Address::ScaleFactor scale, int disp,
                             RelocationHolder const& rspec) {
  emit_operand((Register)reg, base, index, scale, disp, rspec);
}

// Secret local extension to Assembler::WhichOperand:
#define end_pc_operand (_WhichOperand_limit)

address Assembler::locate_operand(address inst, WhichOperand which) {
  // Decode the given instruction, and return the address of
  // an embedded 32-bit operand word.

  // If "which" is disp32_operand, selects the displacement portion
  // of an effective address specifier.
  // If "which" is imm64_operand, selects the trailing immediate constant.
  // If "which" is call32_operand, selects the displacement of a call or jump.
  // Caller is responsible for ensuring that there is such an operand,
  // and that it is 32/64 bits wide.

  // If "which" is end_pc_operand, find the end of the instruction.

  address ip = inst;
  bool is_64bit = false;

  debug_only(bool has_disp32 = false);
  int tail_size = 0; // other random bytes (#32, #16, etc.) at end of insn

  again_after_prefix:
  switch (0xFF & *ip++) {

  // These convenience macros generate groups of "case" labels for the switch.
#define REP4(x) (x)+0: case (x)+1: case (x)+2: case (x)+3
#define REP8(x) (x)+0: case (x)+1: case (x)+2: case (x)+3: \
             case (x)+4: case (x)+5: case (x)+6: case (x)+7
#define REP16(x) REP8((x)+0): \
              case REP8((x)+8)

  case CS_segment:
  case SS_segment:
  case DS_segment:
  case ES_segment:
  case FS_segment:
  case GS_segment:
    // Seems dubious
    LP64_ONLY(assert(false, "shouldn't have that prefix"));
    assert(ip == inst+1, "only one prefix allowed");
    goto again_after_prefix;

  case 0x67:
  case REX:
  case REX_B:
  case REX_X:
  case REX_XB:
  case REX_R:
  case REX_RB:
  case REX_RX:
  case REX_RXB:
    NOT_LP64(assert(false, "64bit prefixes"));
    goto again_after_prefix;

  case REX_W:
  case REX_WB:
  case REX_WX:
  case REX_WXB:
  case REX_WR:
  case REX_WRB:
  case REX_WRX:
  case REX_WRXB:
    NOT_LP64(assert(false, "64bit prefixes"));
    is_64bit = true;
    goto again_after_prefix;

  case 0xFF: // pushq a; decl a; incl a; call a; jmp a
  case 0x88: // movb a, r
  case 0x89: // movl a, r
  case 0x8A: // movb r, a
  case 0x8B: // movl r, a
  case 0x8F: // popl a
473
    debug_only(has_disp32 = true);
D
duke 已提交
474 475 476 477 478 479
    break;

  case 0x68: // pushq #32
    if (which == end_pc_operand) {
      return ip + 4;
    }
480 481
    assert(which == imm_operand && !is_64bit, "pushl has no disp32 or 64bit immediate");
    return ip;                  // not produced by emit_operand
D
duke 已提交
482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501

  case 0x66: // movw ... (size prefix)
    again_after_size_prefix2:
    switch (0xFF & *ip++) {
    case REX:
    case REX_B:
    case REX_X:
    case REX_XB:
    case REX_R:
    case REX_RB:
    case REX_RX:
    case REX_RXB:
    case REX_W:
    case REX_WB:
    case REX_WX:
    case REX_WXB:
    case REX_WR:
    case REX_WRB:
    case REX_WRX:
    case REX_WRXB:
502
      NOT_LP64(assert(false, "64bit prefix found"));
D
duke 已提交
503 504 505
      goto again_after_size_prefix2;
    case 0x8B: // movw r, a
    case 0x89: // movw a, r
506
      debug_only(has_disp32 = true);
D
duke 已提交
507 508
      break;
    case 0xC7: // movw a, #16
509
      debug_only(has_disp32 = true);
D
duke 已提交
510 511 512 513 514 515 516 517 518 519 520 521
      tail_size = 2;  // the imm16
      break;
    case 0x0F: // several SSE/SSE2 variants
      ip--;    // reparse the 0x0F
      goto again_after_prefix;
    default:
      ShouldNotReachHere();
    }
    break;

  case REP8(0xB8): // movl/q r, #32/#64(oop?)
    if (which == end_pc_operand)  return ip + (is_64bit ? 8 : 4);
522 523
    // these asserts are somewhat nonsensical
#ifndef _LP64
524 525
    assert(which == imm_operand || which == disp32_operand,
           err_msg("which %d is_64_bit %d ip " INTPTR_FORMAT, which, is_64bit, ip));
526 527
#else
    assert((which == call32_operand || which == imm_operand) && is_64bit ||
528 529
           which == narrow_oop_operand && !is_64bit,
           err_msg("which %d is_64_bit %d ip " INTPTR_FORMAT, which, is_64bit, ip));
530
#endif // _LP64
D
duke 已提交
531 532 533 534 535 536 537 538 539 540
    return ip;

  case 0x69: // imul r, a, #32
  case 0xC7: // movl a, #32(oop?)
    tail_size = 4;
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  case 0x0F: // movx..., etc.
    switch (0xFF & *ip++) {
    case 0x3A: // pcmpestri
      tail_size = 1;
    case 0x38: // ptest, pmovzxbw
      ip++; // skip opcode
      debug_only(has_disp32 = true); // has both kinds of operands!
      break;

    case 0x70: // pshufd r, r/a, #8
      debug_only(has_disp32 = true); // has both kinds of operands!
    case 0x73: // psrldq r, #8
      tail_size = 1;
      break;

    case 0x12: // movlps
    case 0x28: // movaps
    case 0x2E: // ucomiss
    case 0x2F: // comiss
    case 0x54: // andps
    case 0x55: // andnps
    case 0x56: // orps
    case 0x57: // xorps
    case 0x6E: // movd
    case 0x7E: // movd
    case 0xAE: // ldmxcsr, stmxcsr, fxrstor, fxsave, clflush
      debug_only(has_disp32 = true);
      break;

    case 0xAD: // shrd r, a, %cl
    case 0xAF: // imul r, a
570 571 572 573
    case 0xBE: // movsbl r, a (movsxb)
    case 0xBF: // movswl r, a (movsxw)
    case 0xB6: // movzbl r, a (movzxb)
    case 0xB7: // movzwl r, a (movzxw)
D
duke 已提交
574 575 576 577 578 579 580 581 582
    case REP16(0x40): // cmovl cc, r, a
    case 0xB0: // cmpxchgb
    case 0xB1: // cmpxchg
    case 0xC1: // xaddl
    case 0xC7: // cmpxchg8
    case REP16(0x90): // setcc a
      debug_only(has_disp32 = true);
      // fall out of the switch to decode the address
      break;
583

K
kvn 已提交
584 585 586 587 588 589
    case 0xC4: // pinsrw r, a, #8
      debug_only(has_disp32 = true);
    case 0xC5: // pextrw r, r, #8
      tail_size = 1;  // the imm8
      break;

    case 0xAC: // shrd r, a, #8
      debug_only(has_disp32 = true);
      tail_size = 1;  // the imm8
      break;

    case REP16(0x80): // jcc rdisp32
      if (which == end_pc_operand)  return ip + 4;
      assert(which == call32_operand, "jcc has no disp32 or imm");
      return ip;
    default:
      ShouldNotReachHere();
    }
    break;

  case 0x81: // addl a, #32; addl r, #32
    // also: orl, adcl, sbbl, andl, subl, xorl, cmpl
    // on 32bit in the case of cmpl, the imm might be an oop
    tail_size = 4;
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  case 0x83: // addl a, #8; addl r, #8
    // also: orl, adcl, sbbl, andl, subl, xorl, cmpl
    debug_only(has_disp32 = true); // has both kinds of operands!
    tail_size = 1;
    break;

  case 0x9B:
    switch (0xFF & *ip++) {
    case 0xD9: // fnstcw a
      debug_only(has_disp32 = true);
      break;
    default:
      ShouldNotReachHere();
    }
    break;

  case REP4(0x00): // addb a, r; addl a, r; addb r, a; addl r, a
  case REP4(0x10): // adc...
  case REP4(0x20): // and...
  case REP4(0x30): // xor...
  case REP4(0x08): // or...
  case REP4(0x18): // sbb...
  case REP4(0x28): // sub...
  case 0xF7: // mull a
  case 0x8D: // lea r, a
  case 0x87: // xchg r, a
  case REP4(0x38): // cmp...
  case 0x85: // test r, a
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  case 0xC1: // sal a, #8; sar a, #8; shl a, #8; shr a, #8
  case 0xC6: // movb a, #8
  case 0x80: // cmpb a, #8
  case 0x6B: // imul r, a, #8
    debug_only(has_disp32 = true); // has both kinds of operands!
    tail_size = 1; // the imm8
    break;

  case 0xC4: // VEX_3bytes
  case 0xC5: // VEX_2bytes
    assert((UseAVX > 0), "shouldn't have VEX prefix");
    assert(ip == inst+1, "no prefixes allowed");
    // C4 and C5 are also used as opcodes for PINSRW and PEXTRW instructions
    // but they have prefix 0x0F and processed when 0x0F processed above.
    //
    // In 32-bit mode the VEX first byte C4 and C5 alias onto LDS and LES
    // instructions (these instructions are not supported in 64-bit mode).
    // To distinguish them bits [7:6] are set in the VEX second byte since
    // ModRM byte can not be of the form 11xxxxxx in 32-bit mode. To set
    // those VEX bits REX and vvvv bits are inverted.
    //
    // Fortunately C2 doesn't generate these instructions so we don't need
    // to check for them in product version.

    // Check second byte
    NOT_LP64(assert((0xC0 & *ip) == 0xC0, "shouldn't have LDS and LES instructions"));

    // First byte
    if ((0xFF & *inst) == VEX_3bytes) {
      ip++; // third byte
      is_64bit = ((VEX_W & *ip) == VEX_W);
    }
    ip++; // opcode
    // To find the end of instruction (which == end_pc_operand).
    switch (0xFF & *ip) {
    case 0x61: // pcmpestri r, r/a, #8
    case 0x70: // pshufd r, r/a, #8
    case 0x73: // psrldq r, #8
      tail_size = 1;  // the imm8
      break;
    default:
      break;
    }
    ip++; // skip opcode
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  case 0xD1: // sal a, 1; sar a, 1; shl a, 1; shr a, 1
  case 0xD3: // sal a, %cl; sar a, %cl; shl a, %cl; shr a, %cl
  case 0xD9: // fld_s a; fst_s a; fstp_s a; fldcw a
  case 0xDD: // fld_d a; fst_d a; fstp_d a
  case 0xDB: // fild_s a; fistp_s a; fld_x a; fstp_x a
  case 0xDF: // fild_d a; fistp_d a
  case 0xD8: // fadd_s a; fsubr_s a; fmul_s a; fdivr_s a; fcomp_s a
  case 0xDC: // fadd_d a; fsubr_d a; fmul_d a; fdivr_d a; fcomp_d a
  case 0xDE: // faddp_d a; fsubrp_d a; fmulp_d a; fdivrp_d a; fcompp_d a
    debug_only(has_disp32 = true);
    break;

  case 0xE8: // call rdisp32
  case 0xE9: // jmp  rdisp32
    if (which == end_pc_operand)  return ip + 4;
    assert(which == call32_operand, "call has no disp32 or imm");
    return ip;

  case 0xF0:                    // Lock
    assert(os::is_MP(), "only on MP");
    goto again_after_prefix;

  case 0xF3:                    // For SSE
  case 0xF2:                    // For SSE2
    switch (0xFF & *ip++) {
    case REX:
    case REX_B:
    case REX_X:
    case REX_XB:
    case REX_R:
    case REX_RB:
    case REX_RX:
    case REX_RXB:
    case REX_W:
    case REX_WB:
    case REX_WX:
    case REX_WXB:
    case REX_WR:
    case REX_WRB:
    case REX_WRX:
    case REX_WRXB:
      NOT_LP64(assert(false, "found 64bit prefix"));
      ip++;
    default:
      ip++;
    }
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  default:
    ShouldNotReachHere();

#undef REP8
#undef REP16
  }

  assert(which != call32_operand, "instruction is not a call, jmp, or jcc");
#ifdef _LP64
  assert(which != imm_operand, "instruction is not a movq reg, imm64");
#else
  // assert(which != imm_operand || has_imm32, "instruction has no imm32 field");
  assert(which != imm_operand || has_disp32, "instruction has no imm32 field");
#endif // LP64
  assert(which != disp32_operand || has_disp32, "instruction has no disp32 field");

  // parse the output of emit_operand
  int op2 = 0xFF & *ip++;
  int base = op2 & 0x07;
  int op3 = -1;
  const int b100 = 4;
  const int b101 = 5;
  if (base == b100 && (op2 >> 6) != 3) {
    op3 = 0xFF & *ip++;
    base = op3 & 0x07;   // refetch the base
  }
  // now ip points at the disp (if any)

  switch (op2 >> 6) {
  case 0:
    // [00 reg  100][ss index base]
    // [00 reg  100][00   100  esp]
    // [00 reg base]
    // [00 reg  100][ss index  101][disp32]
    // [00 reg  101]               [disp32]

    if (base == b101) {
      if (which == disp32_operand)
        return ip;              // caller wants the disp32
      ip += 4;                  // skip the disp32
    }
    break;

  case 1:
    // [01 reg  100][ss index base][disp8]
    // [01 reg  100][00   100  esp][disp8]
    // [01 reg base]               [disp8]
    ip += 1;                    // skip the disp8
    break;

  case 2:
    // [10 reg  100][ss index base][disp32]
    // [10 reg  100][00   100  esp][disp32]
    // [10 reg base]               [disp32]
    if (which == disp32_operand)
      return ip;                // caller wants the disp32
    ip += 4;                    // skip the disp32
    break;

  case 3:
    // [11 reg base]  (not a memory addressing mode)
    break;
  }

  if (which == end_pc_operand) {
    return ip + tail_size;
  }

#ifdef _LP64
  assert(which == narrow_oop_operand && !is_64bit, "instruction is not a movl adr, imm32");
#else
  assert(which == imm_operand, "instruction has only an imm field");
#endif // LP64
  return ip;
}

address Assembler::locate_next_instruction(address inst) {
  // Secretly share code with locate_operand:
  return locate_operand(inst, end_pc_operand);
}


#ifdef ASSERT
void Assembler::check_relocation(RelocationHolder const& rspec, int format) {
  address inst = inst_mark();
  assert(inst != NULL && inst < pc(), "must point to beginning of instruction");
  address opnd;

  Relocation* r = rspec.reloc();
  if (r->type() == relocInfo::none) {
    return;
  } else if (r->is_call() || format == call32_operand) {
    // assert(format == imm32_operand, "cannot specify a nonzero format");
    opnd = locate_operand(inst, call32_operand);
  } else if (r->is_data()) {
    assert(format == imm_operand || format == disp32_operand
           LP64_ONLY(|| format == narrow_oop_operand), "format ok");
    opnd = locate_operand(inst, (WhichOperand)format);
  } else {
    assert(format == imm_operand, "cannot specify a format");
    return;
  }
  assert(opnd == pc(), "must put operand where relocs can find it");
}
#endif // ASSERT

void Assembler::emit_operand32(Register reg, Address adr) {
  assert(reg->encoding() < 8, "no extended registers");
  assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers");
  emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp,
               adr._rspec);
}

void Assembler::emit_operand(Register reg, Address adr,
                             int rip_relative_correction) {
  emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp,
               adr._rspec,
               rip_relative_correction);
}

void Assembler::emit_operand(XMMRegister reg, Address adr) {
  emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp,
               adr._rspec);
}

// MMX operations
void Assembler::emit_operand(MMXRegister reg, Address adr) {
  assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers");
  emit_operand((Register)reg, adr._base, adr._index, adr._scale, adr._disp, adr._rspec);
}

// work around gcc (3.2.1-7a) bug
void Assembler::emit_operand(Address adr, MMXRegister reg) {
  assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers");
  emit_operand((Register)reg, adr._base, adr._index, adr._scale, adr._disp, adr._rspec);
}


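// x87 arithmetic on a stack slot: the second opcode byte carries ST(i)
// in its low three bits.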
void Assembler::emit_farith(int b1, int b2, int i) {
  assert(isByte(b1) && isByte(b2), "wrong opcode");
  assert(0 <= i &&  i < 8, "illegal stack offset");
  emit_int8(b1);
  emit_int8(b2 + i);
}


// Now the Assembler instructions (identical for 32/64 bits)

void Assembler::adcl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand(0x81, rdx, dst, imm32);
}

void Assembler::adcl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_int8(0x11);
  emit_operand(src, dst);
}

void Assembler::adcl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xD0, dst, imm32);
}

void Assembler::adcl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x13);
  emit_operand(dst, src);
}

void Assembler::adcl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x13, 0xC0, dst, src);
}

void Assembler::addl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand(0x81, rax, dst, imm32);
}

void Assembler::addl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_int8(0x01);
  emit_operand(src, dst);
}

void Assembler::addl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xC0, dst, imm32);
}

void Assembler::addl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x03);
  emit_operand(dst, src);
}

void Assembler::addl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x03, 0xC0, dst, src);
}

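// The addr_nop_* routines emit the multi-byte 0F 1F NOP forms, so code can
// be padded to an alignment boundary with a single instruction.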
void Assembler::addr_nop_4() {
  assert(UseAddressNop, "no CPU support");
  // 4 bytes: NOP DWORD PTR [EAX+0]
  emit_int8(0x0F);
  emit_int8(0x1F);
  emit_int8(0x40); // emit_rm(cbuf, 0x1, EAX_enc, EAX_enc);
  emit_int8(0);    // 8-bits offset (1 byte)
}

void Assembler::addr_nop_5() {
  assert(UseAddressNop, "no CPU support");
  // 5 bytes: NOP DWORD PTR [EAX+EAX*0+0] 8-bits offset
  emit_int8(0x0F);
  emit_int8(0x1F);
  emit_int8(0x44); // emit_rm(cbuf, 0x1, EAX_enc, 0x4);
  emit_int8(0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc);
  emit_int8(0);    // 8-bits offset (1 byte)
}

void Assembler::addr_nop_7() {
  assert(UseAddressNop, "no CPU support");
  // 7 bytes: NOP DWORD PTR [EAX+0] 32-bits offset
  emit_int8(0x0F);
  emit_int8(0x1F);
  emit_int8((unsigned char)0x80);
                   // emit_rm(cbuf, 0x2, EAX_enc, EAX_enc);
  emit_int32(0);   // 32-bits offset (4 bytes)
}

void Assembler::addr_nop_8() {
  assert(UseAddressNop, "no CPU support");
  // 8 bytes: NOP DWORD PTR [EAX+EAX*0+0] 32-bits offset
  emit_int8(0x0F);
  emit_int8(0x1F);
  emit_int8((unsigned char)0x84);
                   // emit_rm(cbuf, 0x2, EAX_enc, 0x4);
  emit_int8(0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc);
  emit_int32(0);   // 32-bits offset (4 bytes)
}

void Assembler::addsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x58, dst, src, VEX_SIMD_F2);
}

void Assembler::addsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x58, dst, src, VEX_SIMD_F2);
}

void Assembler::addss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x58, dst, src, VEX_SIMD_F3);
}

void Assembler::addss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x58, dst, src, VEX_SIMD_F3);
}

void Assembler::aesdec(XMMRegister dst, Address src) {
  assert(VM_Version::supports_aes(), "");
  InstructionMark im(this);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  emit_int8((unsigned char)0xDE);
  emit_operand(dst, src);
}

void Assembler::aesdec(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_aes(), "");
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  emit_int8((unsigned char)0xDE);
  emit_int8(0xC0 | encode);
}

void Assembler::aesdeclast(XMMRegister dst, Address src) {
  assert(VM_Version::supports_aes(), "");
  InstructionMark im(this);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  emit_int8((unsigned char)0xDF);
  emit_operand(dst, src);
}

void Assembler::aesdeclast(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_aes(), "");
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  emit_int8((unsigned char)0xDF);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::aesenc(XMMRegister dst, Address src) {
  assert(VM_Version::supports_aes(), "");
  InstructionMark im(this);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  emit_int8((unsigned char)0xDC);
  emit_operand(dst, src);
}

void Assembler::aesenc(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_aes(), "");
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  emit_int8((unsigned char)0xDC);
  emit_int8(0xC0 | encode);
}

void Assembler::aesenclast(XMMRegister dst, Address src) {
  assert(VM_Version::supports_aes(), "");
  InstructionMark im(this);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  emit_int8((unsigned char)0xDD);
  emit_operand(dst, src);
}

void Assembler::aesenclast(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_aes(), "");
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  emit_int8((unsigned char)0xDD);
  emit_int8((unsigned char)(0xC0 | encode));
}


void Assembler::andl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0x81);
  emit_operand(rsp, dst, 4);
  emit_int32(imm32);
}

void Assembler::andl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xE0, dst, imm32);
}

void Assembler::andl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x23);
  emit_operand(dst, src);
}

void Assembler::andl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x23, 0xC0, dst, src);
}

void Assembler::bsfl(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBC);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::bsrl(Register dst, Register src) {
  assert(!VM_Version::supports_lzcnt(), "encoding is treated as LZCNT");
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBD);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::bswapl(Register reg) { // bswap
  int encode = prefix_and_encode(reg->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)(0xC8 | encode));
}

void Assembler::call(Label& L, relocInfo::relocType rtype) {
  // suspect disp32 is always good
  int operand = LP64_ONLY(disp32_operand) NOT_LP64(imm_operand);

  if (L.is_bound()) {
    const int long_size = 5;
    int offs = (int)( target(L) - pc() );
    assert(offs <= 0, "assembler error");
    InstructionMark im(this);
    // 1110 1000 #32-bit disp
    emit_int8((unsigned char)0xE8);
    emit_data(offs - long_size, rtype, operand);
  } else {
    InstructionMark im(this);
    // 1110 1000 #32-bit disp
    L.add_patch_at(code(), locator());

    emit_int8((unsigned char)0xE8);
    emit_data(int(0), rtype, operand);
  }
}

void Assembler::call(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xFF);
  emit_int8((unsigned char)(0xD0 | encode));
}


void Assembler::call(Address adr) {
  InstructionMark im(this);
  prefix(adr);
  emit_int8((unsigned char)0xFF);
  emit_operand(rdx, adr);
}

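// Direct call to an absolute address: emits 0xE8 with a disp32 measured
// from the end of this 5-byte instruction.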
void Assembler::call_literal(address entry, RelocationHolder const& rspec) {
  assert(entry != NULL, "call most probably wrong");
  InstructionMark im(this);
  emit_int8((unsigned char)0xE8);
  intptr_t disp = entry - (pc() + sizeof(int32_t));
  assert(is_simm32(disp), "must be 32bit offset (call2)");
  // Technically, should use call32_operand, but this format is
  // implied by the fact that we're emitting a call instruction.

  int operand = LP64_ONLY(disp32_operand) NOT_LP64(call32_operand);
  emit_data((int) disp, rspec, operand);
}

void Assembler::cdql() {
  emit_int8((unsigned char)0x99);
}

void Assembler::cld() {
  emit_int8((unsigned char)0xFC);
}

void Assembler::cmovl(Condition cc, Register dst, Register src) {
  NOT_LP64(guarantee(VM_Version::supports_cmov(), "illegal instruction"));
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8(0x40 | cc);
  emit_int8((unsigned char)(0xC0 | encode));
}


void Assembler::cmovl(Condition cc, Register dst, Address src) {
  NOT_LP64(guarantee(VM_Version::supports_cmov(), "illegal instruction"));
  prefix(src, dst);
  emit_int8(0x0F);
  emit_int8(0x40 | cc);
  emit_operand(dst, src);
}

void Assembler::cmpb(Address dst, int imm8) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0x80);
  emit_operand(rdi, dst, 1);
  emit_int8(imm8);
}

void Assembler::cmpl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0x81);
  emit_operand(rdi, dst, 4);
  emit_int32(imm32);
}

void Assembler::cmpl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xF8, dst, imm32);
}

void Assembler::cmpl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x3B, 0xC0, dst, src);
}


void Assembler::cmpl(Register dst, Address  src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8((unsigned char)0x3B);
  emit_operand(dst, src);
}

void Assembler::cmpw(Address dst, int imm16) {
  InstructionMark im(this);
  assert(!dst.base_needs_rex() && !dst.index_needs_rex(), "no extended registers");
  emit_int8(0x66);
  emit_int8((unsigned char)0x81);
  emit_operand(rdi, dst, 2);
  emit_int16(imm16);
}

// The 32-bit cmpxchg compares the value at adr with the contents of rax,
// and stores reg into adr if so; otherwise, the value at adr is loaded into rax.
// The ZF is set if the compared values were equal, and cleared otherwise.
void Assembler::cmpxchgl(Register reg, Address adr) { // cmpxchg
  InstructionMark im(this);
  prefix(adr, reg);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xB1);
  emit_operand(reg, adr);
}

void Assembler::comisd(XMMRegister dst, Address src) {
  // NOTE: dbx seems to decode this as comiss even though the
  // 0x66 is there. Strangely ucomisd comes out correct
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith_nonds(0x2F, dst, src, VEX_SIMD_66);
}

void Assembler::comisd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith_nonds(0x2F, dst, src, VEX_SIMD_66);
}

void Assembler::comiss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith_nonds(0x2F, dst, src, VEX_SIMD_NONE);
}

void Assembler::comiss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith_nonds(0x2F, dst, src, VEX_SIMD_NONE);
}

void Assembler::cpuid() {
  emit_int8(0x0F);
  emit_int8((unsigned char)0xA2);
}

void Assembler::cvtdq2pd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith_nonds(0xE6, dst, src, VEX_SIMD_F3);
}

void Assembler::cvtdq2ps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith_nonds(0x5B, dst, src, VEX_SIMD_NONE);
}

void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x5A, dst, src, VEX_SIMD_F2);
}

void Assembler::cvtsd2ss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x5A, dst, src, VEX_SIMD_F2);
}

void Assembler::cvtsi2sdl(XMMRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2);
  emit_int8(0x2A);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::cvtsi2sdl(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x2A, dst, src, VEX_SIMD_F2);
}

void Assembler::cvtsi2ssl(XMMRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3);
  emit_int8(0x2A);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::cvtsi2ssl(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x2A, dst, src, VEX_SIMD_F3);
}

void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x5A, dst, src, VEX_SIMD_F3);
}

void Assembler::cvtss2sd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x5A, dst, src, VEX_SIMD_F3);
}


void Assembler::cvttsd2sil(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  int encode = simd_prefix_and_encode(dst, src, VEX_SIMD_F2);
  emit_int8(0x2C);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::cvttss2sil(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  int encode = simd_prefix_and_encode(dst, src, VEX_SIMD_F3);
  emit_int8(0x2C);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::decl(Address dst) {
  // Don't use it directly. Use MacroAssembler::decrement() instead.
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0xFF);
  emit_operand(rcx, dst);
}

void Assembler::divsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x5E, dst, src, VEX_SIMD_F2);
}

void Assembler::divsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x5E, dst, src, VEX_SIMD_F2);
}

void Assembler::divss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x5E, dst, src, VEX_SIMD_F3);
}

void Assembler::divss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x5E, dst, src, VEX_SIMD_F3);
}

void Assembler::emms() {
  NOT_LP64(assert(VM_Version::supports_mmx(), ""));
  emit_int8(0x0F);
  emit_int8(0x77);
}

void Assembler::hlt() {
  emit_int8((unsigned char)0xF4);
}

void Assembler::idivl(Register src) {
  int encode = prefix_and_encode(src->encoding());
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xF8 | encode));
}

void Assembler::divl(Register src) { // Unsigned
  int encode = prefix_and_encode(src->encoding());
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xF0 | encode));
}

void Assembler::imull(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAF);
  emit_int8((unsigned char)(0xC0 | encode));
}


void Assembler::imull(Register dst, Register src, int value) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  if (is8bit(value)) {
    emit_int8(0x6B);
    emit_int8((unsigned char)(0xC0 | encode));
    emit_int8(value & 0xFF);
  } else {
    emit_int8(0x69);
    emit_int8((unsigned char)(0xC0 | encode));
    emit_int32(value);
  }
}

void Assembler::imull(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x0F);
  emit_int8((unsigned char) 0xAF);
  emit_operand(dst, src);
}


void Assembler::incl(Address dst) {
  // Don't use it directly. Use MacroAssembler::increment() instead.
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0xFF);
  emit_operand(rax, dst);
}

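// Conditional jump to a label: uses the 2-byte short form when the target
// is bound, in range, and maybe_short allows it; otherwise the 6-byte
// 0F 8x form with a 32-bit displacement.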
void Assembler::jcc(Condition cc, Label& L, bool maybe_short) {
  InstructionMark im(this);
  assert((0 <= cc) && (cc < 16), "illegal cc");
  if (L.is_bound()) {
    address dst = target(L);
    assert(dst != NULL, "jcc most probably wrong");

    const int short_size = 2;
    const int long_size = 6;
    intptr_t offs = (intptr_t)dst - (intptr_t)pc();
    if (maybe_short && is8bit(offs - short_size)) {
      // 0111 tttn #8-bit disp
      emit_int8(0x70 | cc);
      emit_int8((offs - short_size) & 0xFF);
    } else {
      // 0000 1111 1000 tttn #32-bit disp
      assert(is_simm32(offs - long_size),
             "must be 32bit offset (call4)");
      emit_int8(0x0F);
      emit_int8((unsigned char)(0x80 | cc));
      emit_int32(offs - long_size);
    }
  } else {
    // Note: could eliminate cond. jumps to this jump if condition
    //       is the same however, seems to be rather unlikely case.
    // Note: use jccb() if label to be bound is very close to get
    //       an 8-bit displacement
    L.add_patch_at(code(), locator());
    emit_int8(0x0F);
    emit_int8((unsigned char)(0x80 | cc));
    emit_int32(0);
  }
}

void Assembler::jccb(Condition cc, Label& L) {
  if (L.is_bound()) {
    const int short_size = 2;
    address entry = target(L);
#ifdef ASSERT
    intptr_t dist = (intptr_t)entry - ((intptr_t)pc() + short_size);
    intptr_t delta = short_branch_delta();
    if (delta != 0) {
      dist += (dist < 0 ? (-delta) :delta);
    }
    assert(is8bit(dist), "Displacement too large for a short jmp");
#endif
    intptr_t offs = (intptr_t)entry - (intptr_t)pc();
    // 0111 tttn #8-bit disp
    emit_int8(0x70 | cc);
    emit_int8((offs - short_size) & 0xFF);
  } else {
    InstructionMark im(this);
    L.add_patch_at(code(), locator());
    emit_int8(0x70 | cc);
    emit_int8(0);
  }
}

void Assembler::jmp(Address adr) {
  InstructionMark im(this);
  prefix(adr);
  emit_int8((unsigned char)0xFF);
  emit_operand(rsp, adr);
}

void Assembler::jmp(Label& L, bool maybe_short) {
  if (L.is_bound()) {
    address entry = target(L);
    assert(entry != NULL, "jmp most probably wrong");
    InstructionMark im(this);
    const int short_size = 2;
    const int long_size = 5;
    intptr_t offs = entry - pc();
    if (maybe_short && is8bit(offs - short_size)) {
      emit_int8((unsigned char)0xEB);
      emit_int8((offs - short_size) & 0xFF);
    } else {
      emit_int8((unsigned char)0xE9);
      emit_int32(offs - long_size);
    }
  } else {
    // By default, forward jumps are always 32-bit displacements, since
    // we can't yet know where the label will be bound.  If you're sure that
    // the forward jump will not run beyond 256 bytes, use jmpb to
    // force an 8-bit displacement.
    InstructionMark im(this);
    L.add_patch_at(code(), locator());
    emit_int8((unsigned char)0xE9);
    emit_int32(0);
  }
}

void Assembler::jmp(Register entry) {
  int encode = prefix_and_encode(entry->encoding());
  emit_int8((unsigned char)0xFF);
  emit_int8((unsigned char)(0xE0 | encode));
}

void Assembler::jmp_literal(address dest, RelocationHolder const& rspec) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xE9);
  assert(dest != NULL, "must have a target");
  intptr_t disp = dest - (pc() + sizeof(int32_t));
  assert(is_simm32(disp), "must be 32bit offset (jmp)");
  emit_data(disp, rspec.reloc(), call32_operand);
}

void Assembler::jmpb(Label& L) {
  if (L.is_bound()) {
    const int short_size = 2;
    address entry = target(L);
    assert(entry != NULL, "jmp most probably wrong");
#ifdef ASSERT
    intptr_t dist = (intptr_t)entry - ((intptr_t)pc() + short_size);
    intptr_t delta = short_branch_delta();
    if (delta != 0) {
      dist += (dist < 0 ? (-delta) :delta);
    }
    assert(is8bit(dist), "Displacement too large for a short jmp");
#endif
    intptr_t offs = entry - pc();
    emit_int8((unsigned char)0xEB);
    emit_int8((offs - short_size) & 0xFF);
  } else {
    InstructionMark im(this);
    L.add_patch_at(code(), locator());
    emit_int8((unsigned char)0xEB);
    emit_int8(0);
  }
}

void Assembler::ldmxcsr( Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  prefix(src);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAE);
  emit_operand(as_Register(2), src);
}

void Assembler::leal(Register dst, Address src) {
D
duke 已提交
1566
  InstructionMark im(this);
1567
#ifdef _LP64
1568
  emit_int8(0x67); // addr32
1569 1570
  prefix(src, dst);
#endif // LP64
1571
  emit_int8((unsigned char)0x8D);
1572
  emit_operand(dst, src);
D
duke 已提交
1573 1574
}

void Assembler::lfence() {
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAE);
  emit_int8((unsigned char)0xE8);
}

void Assembler::lock() {
  emit_int8((unsigned char)0xF0);
}

void Assembler::lzcntl(Register dst, Register src) {
  assert(VM_Version::supports_lzcnt(), "encoding is treated as BSR");
  emit_int8((unsigned char)0xF3);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBD);
  emit_int8((unsigned char)(0xC0 | encode));
}
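// Illustrative note: on CPUs without LZCNT support the 0xF3 prefix is ignored
// and the bytes above decode as BSR, which returns a bit index rather than a
// leading-zero count; that is what the assert above warns about.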

// Emit mfence instruction
void Assembler::mfence() {
  NOT_LP64(assert(VM_Version::supports_sse2(), "unsupported");)
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAE);
  emit_int8((unsigned char)0xF0);
}

void Assembler::mov(Register dst, Register src) {
  LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
}

void Assembler::movapd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith_nonds(0x28, dst, src, VEX_SIMD_66);
}

void Assembler::movaps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith_nonds(0x28, dst, src, VEX_SIMD_NONE);
}

void Assembler::movlhps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  int encode = simd_prefix_and_encode(dst, src, src, VEX_SIMD_NONE);
  emit_int8(0x16);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::movb(Register dst, Address src) {
  NOT_LP64(assert(dst->has_byte_register(), "must have byte register"));
  InstructionMark im(this);
  prefix(src, dst, true);
  emit_int8((unsigned char)0x8A);
  emit_operand(dst, src);
}


void Assembler::movb(Address dst, int imm8) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0xC6);
  emit_operand(rax, dst, 1);
  emit_int8(imm8);
}


void Assembler::movb(Address dst, Register src) {
  assert(src->has_byte_register(), "must have byte register");
  InstructionMark im(this);
  prefix(dst, src, true);
  emit_int8((unsigned char)0x88);
  emit_operand(src, dst);
}

void Assembler::movdl(XMMRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  int encode = simd_prefix_and_encode(dst, src, VEX_SIMD_66);
  emit_int8(0x6E);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::movdl(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  // swap src/dst to get correct prefix
  int encode = simd_prefix_and_encode(src, dst, VEX_SIMD_66);
  emit_int8(0x7E);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::movdl(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  simd_prefix(dst, src, VEX_SIMD_66);
  emit_int8(0x6E);
  emit_operand(dst, src);
}

void Assembler::movdl(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  simd_prefix(dst, src, VEX_SIMD_66);
  emit_int8(0x7E);
  emit_operand(src, dst);
}

void Assembler::movdqa(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith_nonds(0x6F, dst, src, VEX_SIMD_66);
}

void Assembler::movdqa(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith_nonds(0x6F, dst, src, VEX_SIMD_66);
}

void Assembler::movdqu(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith_nonds(0x6F, dst, src, VEX_SIMD_F3);
}

void Assembler::movdqu(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith_nonds(0x6F, dst, src, VEX_SIMD_F3);
}

void Assembler::movdqu(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  simd_prefix(dst, src, VEX_SIMD_F3);
  emit_int8(0x7F);
  emit_operand(src, dst);
}

// Move Unaligned 256bit Vector
void Assembler::vmovdqu(XMMRegister dst, XMMRegister src) {
  assert(UseAVX, "");
  bool vector256 = true;
  int encode = vex_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F3, vector256);
  emit_int8(0x6F);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::vmovdqu(XMMRegister dst, Address src) {
  assert(UseAVX, "");
  InstructionMark im(this);
  bool vector256 = true;
  vex_prefix(dst, xnoreg, src, VEX_SIMD_F3, vector256);
  emit_int8(0x6F);
  emit_operand(dst, src);
}

void Assembler::vmovdqu(Address dst, XMMRegister src) {
  assert(UseAVX, "");
  InstructionMark im(this);
  bool vector256 = true;
  // swap src<->dst for encoding
  assert(src != xnoreg, "sanity");
  vex_prefix(src, xnoreg, dst, VEX_SIMD_F3, vector256);
  emit_int8(0x7F);
  emit_operand(src, dst);
}

// Uses zero extension on 64bit

void Assembler::movl(Register dst, int32_t imm32) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)(0xB8 | encode));
  emit_int32(imm32);
}

void Assembler::movl(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8((unsigned char)0x8B);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::movl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8((unsigned char)0x8B);
  emit_operand(dst, src);
}

void Assembler::movl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0xC7);
  emit_operand(rax, dst, 4);
  emit_int32(imm32);
}

void Assembler::movl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_int8((unsigned char)0x89);
  emit_operand(src, dst);
}

// Newer CPUs require the use of movsd and movss to avoid partial register stalls
// when loading from memory. But for old Opteron use movlpd instead of movsd.
// The selection is done in MacroAssembler::movdbl() and movflt().
void Assembler::movlpd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x12, dst, src, VEX_SIMD_66);
}

void Assembler::movq( MMXRegister dst, Address src ) {
  assert( VM_Version::supports_mmx(), "" );
  emit_int8(0x0F);
  emit_int8(0x6F);
  emit_operand(dst, src);
}

void Assembler::movq( Address dst, MMXRegister src ) {
  assert( VM_Version::supports_mmx(), "" );
  emit_int8(0x0F);
  emit_int8(0x7F);
  // workaround gcc (3.2.1-7a) bug
  // In that version of gcc with only an emit_operand(MMX, Address)
  // gcc will tail jump and try to reverse the parameters, completely
  // obliterating dst in the process. By having a version available
  // that doesn't need to swap the args at the tail jump the bug is
  // avoided.
  emit_operand(dst, src);
}

void Assembler::movq(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  simd_prefix(dst, src, VEX_SIMD_F3);
  emit_int8(0x7E);
  emit_operand(dst, src);
}

void Assembler::movq(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  simd_prefix(dst, src, VEX_SIMD_66);
  emit_int8((unsigned char)0xD6);
  emit_operand(src, dst);
}

void Assembler::movsbl(Register dst, Address src) { // movsxb
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBE);
  emit_operand(dst, src);
}

void Assembler::movsbl(Register dst, Register src) { // movsxb
  NOT_LP64(assert(src->has_byte_register(), "must have byte register"));
  int encode = prefix_and_encode(dst->encoding(), src->encoding(), true);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBE);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::movsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x10, dst, src, VEX_SIMD_F2);
}

void Assembler::movsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith_nonds(0x10, dst, src, VEX_SIMD_F2);
}

void Assembler::movsd(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  simd_prefix(dst, src, VEX_SIMD_F2);
  emit_int8(0x11);
  emit_operand(src, dst);
}

void Assembler::movss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x10, dst, src, VEX_SIMD_F3);
}

void Assembler::movss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith_nonds(0x10, dst, src, VEX_SIMD_F3);
}

void Assembler::movss(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  simd_prefix(dst, src, VEX_SIMD_F3);
  emit_int8(0x11);
  emit_operand(src, dst);
}

void Assembler::movswl(Register dst, Address src) { // movsxw
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBF);
  emit_operand(dst, src);
}

void Assembler::movswl(Register dst, Register src) { // movsxw
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBF);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::movw(Address dst, int imm16) {
  InstructionMark im(this);

  emit_int8(0x66); // switch to 16-bit mode
  prefix(dst);
  emit_int8((unsigned char)0xC7);
  emit_operand(rax, dst, 2);
  emit_int16(imm16);
}
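// For reference (illustrative): the 0x66 operand-size prefix switches the
// following instruction to 16-bit operands, which is why each of the movw
// variants emits it before the usual MOV opcode.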

void Assembler::movw(Register dst, Address src) {
  InstructionMark im(this);
  emit_int8(0x66);
  prefix(src, dst);
  emit_int8((unsigned char)0x8B);
  emit_operand(dst, src);
}

void Assembler::movw(Address dst, Register src) {
  InstructionMark im(this);
  emit_int8(0x66);
  prefix(dst, src);
  emit_int8((unsigned char)0x89);
  emit_operand(src, dst);
}

void Assembler::movzbl(Register dst, Address src) { // movzxb
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xB6);
  emit_operand(dst, src);
}

void Assembler::movzbl(Register dst, Register src) { // movzxb
  NOT_LP64(assert(src->has_byte_register(), "must have byte register"));
  int encode = prefix_and_encode(dst->encoding(), src->encoding(), true);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xB6);
  emit_int8(0xC0 | encode);
}

void Assembler::movzwl(Register dst, Address src) { // movzxw
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xB7);
  emit_operand(dst, src);
}

void Assembler::movzwl(Register dst, Register src) { // movzxw
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xB7);
  emit_int8(0xC0 | encode);
}

void Assembler::mull(Address src) {
  InstructionMark im(this);
  prefix(src);
  emit_int8((unsigned char)0xF7);
  emit_operand(rsp, src);
}

void Assembler::mull(Register src) {
  int encode = prefix_and_encode(src->encoding());
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xE0 | encode));
}

void Assembler::mulsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x59, dst, src, VEX_SIMD_F2);
}

void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x59, dst, src, VEX_SIMD_F2);
}

void Assembler::mulss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x59, dst, src, VEX_SIMD_F3);
}

void Assembler::mulss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x59, dst, src, VEX_SIMD_F3);
}

void Assembler::negl(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xD8 | encode));
}

void Assembler::nop(int i) {
#ifdef ASSERT
  assert(i > 0, " ");
  // The fancy nops aren't currently recognized by debuggers making it a
  // pain to disassemble code while debugging. If asserts are on clearly
  // speed is not an issue so simply use the single byte traditional nop
  // to do alignment.

  for (; i > 0 ; i--) emit_int8((unsigned char)0x90);
  return;

#endif // ASSERT

  if (UseAddressNop && VM_Version::is_intel()) {
    //
    // Using multi-bytes nops "0x0F 0x1F [address]" for Intel
    //  1: 0x90
    //  2: 0x66 0x90
    //  3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding)
    //  4: 0x0F 0x1F 0x40 0x00
    //  5: 0x0F 0x1F 0x44 0x00 0x00
    //  6: 0x66 0x0F 0x1F 0x44 0x00 0x00
    //  7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
    //  8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    //  9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00

    // The rest of the coding is Intel specific - don't use consecutive address nops

    // 12: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
    // 13: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
    // 14: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
    // 15: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90

    while(i >= 15) {
      // For Intel don't generate consecutive address nops (mix with regular nops)
      i -= 15;
      emit_int8(0x66);   // size prefix
      emit_int8(0x66);   // size prefix
      emit_int8(0x66);   // size prefix
      addr_nop_8();
      emit_int8(0x66);   // size prefix
      emit_int8(0x66);   // size prefix
      emit_int8(0x66);   // size prefix
      emit_int8((unsigned char)0x90);
                         // nop
    }
    switch (i) {
      case 14:
        emit_int8(0x66); // size prefix
      case 13:
        emit_int8(0x66); // size prefix
      case 12:
        addr_nop_8();
        emit_int8(0x66); // size prefix
        emit_int8(0x66); // size prefix
        emit_int8(0x66); // size prefix
        emit_int8((unsigned char)0x90);
                         // nop
        break;
      case 11:
        emit_int8(0x66); // size prefix
      case 10:
        emit_int8(0x66); // size prefix
      case 9:
        emit_int8(0x66); // size prefix
      case 8:
        addr_nop_8();
        break;
      case 7:
        addr_nop_7();
        break;
      case 6:
        emit_int8(0x66); // size prefix
      case 5:
        addr_nop_5();
        break;
      case 4:
        addr_nop_4();
        break;
      case 3:
        // Don't use "0x0F 0x1F 0x00" - need patching safe padding
        emit_int8(0x66); // size prefix
      case 2:
        emit_int8(0x66); // size prefix
      case 1:
        emit_int8((unsigned char)0x90);
                         // nop
        break;
      default:
        assert(i == 0, " ");
    }
    return;
  }
  if (UseAddressNop && VM_Version::is_amd()) {
    //
    // Using multi-bytes nops "0x0F 0x1F [address]" for AMD.
    //  1: 0x90
    //  2: 0x66 0x90
    //  3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding)
    //  4: 0x0F 0x1F 0x40 0x00
    //  5: 0x0F 0x1F 0x44 0x00 0x00
    //  6: 0x66 0x0F 0x1F 0x44 0x00 0x00
    //  7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
    //  8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    //  9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00

    // The rest coding is AMD specific - use consecutive address nops

    // 12: 0x66 0x0F 0x1F 0x44 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00
    // 13: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00
    // 14: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
    // 15: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
    // 16: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    //     Size prefixes (0x66) are added for larger sizes

    while(i >= 22) {
      i -= 11;
      emit_int8(0x66); // size prefix
      emit_int8(0x66); // size prefix
      emit_int8(0x66); // size prefix
      addr_nop_8();
    }
    // Generate first nop for size between 21-12
    switch (i) {
      case 21:
        i -= 1;
        emit_int8(0x66); // size prefix
      case 20:
      case 19:
        i -= 1;
        emit_int8(0x66); // size prefix
      case 18:
      case 17:
        i -= 1;
        emit_int8(0x66); // size prefix
      case 16:
      case 15:
        i -= 8;
        addr_nop_8();
        break;
      case 14:
      case 13:
        i -= 7;
        addr_nop_7();
        break;
      case 12:
        i -= 6;
        emit_int8(0x66); // size prefix
        addr_nop_5();
        break;
      default:
        assert(i < 12, " ");
    }

    // Generate second nop for size between 11-1
    switch (i) {
      case 11:
        emit_int8(0x66); // size prefix
      case 10:
        emit_int8(0x66); // size prefix
      case 9:
        emit_int8(0x66); // size prefix
      case 8:
        addr_nop_8();
        break;
      case 7:
        addr_nop_7();
        break;
      case 6:
        emit_int8(0x66); // size prefix
      case 5:
        addr_nop_5();
        break;
      case 4:
        addr_nop_4();
        break;
      case 3:
        // Don't use "0x0F 0x1F 0x00" - need patching safe padding
        emit_int8(0x66); // size prefix
      case 2:
        emit_int8(0x66); // size prefix
      case 1:
        emit_int8((unsigned char)0x90);
                         // nop
        break;
      default:
        assert(i == 0, " ");
    }
    return;
  }

  // Using nops with size prefixes "0x66 0x90".
  // From AMD Optimization Guide:
  //  1: 0x90
  //  2: 0x66 0x90
  //  3: 0x66 0x66 0x90
  //  4: 0x66 0x66 0x66 0x90
  //  5: 0x66 0x66 0x90 0x66 0x90
  //  6: 0x66 0x66 0x90 0x66 0x66 0x90
  //  7: 0x66 0x66 0x66 0x90 0x66 0x66 0x90
  //  8: 0x66 0x66 0x66 0x90 0x66 0x66 0x66 0x90
  //  9: 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90
  // 10: 0x66 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90
  //
  while(i > 12) {
    i -= 4;
    emit_int8(0x66); // size prefix
    emit_int8(0x66);
    emit_int8(0x66);
    emit_int8((unsigned char)0x90);
                     // nop
  }
  // 1 - 12 nops
  if(i > 8) {
    if(i > 9) {
      i -= 1;
      emit_int8(0x66);
    }
    i -= 3;
    emit_int8(0x66);
    emit_int8(0x66);
    emit_int8((unsigned char)0x90);
  }
  // 1 - 8 nops
  if(i > 4) {
    if(i > 6) {
      i -= 1;
      emit_int8(0x66);
    }
    i -= 3;
    emit_int8(0x66);
    emit_int8(0x66);
    emit_int8((unsigned char)0x90);
  }
  switch (i) {
    case 4:
      emit_int8(0x66);
    case 3:
      emit_int8(0x66);
    case 2:
      emit_int8(0x66);
    case 1:
      emit_int8((unsigned char)0x90);
      break;
    default:
      assert(i == 0, " ");
  }
}
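// Illustrative example: with UseAddressNop enabled, nop(5) emits the single
// 5-byte form 0x0F 0x1F 0x44 0x00 0x00 from the tables above rather than five
// 0x90 bytes, so the padding decodes as one instruction.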

void Assembler::notl(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xD0 | encode));
}

void Assembler::orl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand(0x81, rcx, dst, imm32);
}

void Assembler::orl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xC8, dst, imm32);
}

void Assembler::orl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x0B);
  emit_operand(dst, src);
}

void Assembler::orl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x0B, 0xC0, dst, src);
}

void Assembler::packuswb(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
  emit_simd_arith(0x67, dst, src, VEX_SIMD_66);
}

void Assembler::packuswb(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x67, dst, src, VEX_SIMD_66);
}

void Assembler::vpackuswb(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0x67, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpermq(XMMRegister dst, XMMRegister src, int imm8, bool vector256) {
  assert(VM_Version::supports_avx2(), "");
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, true, vector256);
  emit_int8(0x00);
  emit_int8(0xC0 | encode);
  emit_int8(imm8);
}

void Assembler::pcmpestri(XMMRegister dst, Address src, int imm8) {
  assert(VM_Version::supports_sse4_2(), "");
  InstructionMark im(this);
  simd_prefix(dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A);
  emit_int8(0x61);
  emit_operand(dst, src);
  emit_int8(imm8);
}

void Assembler::pcmpestri(XMMRegister dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sse4_2(), "");
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_3A);
  emit_int8(0x61);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
}

void Assembler::pextrd(Register dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, false);
  emit_int8(0x16);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
}

void Assembler::pextrq(Register dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, true);
  emit_int8(0x16);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
}

void Assembler::pinsrd(XMMRegister dst, Register src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, false);
  emit_int8(0x22);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
}

void Assembler::pinsrq(XMMRegister dst, Register src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, true);
  emit_int8(0x22);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
}

void Assembler::pmovzxbw(XMMRegister dst, Address src) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionMark im(this);
  simd_prefix(dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  emit_int8(0x30);
  emit_operand(dst, src);
}

void Assembler::pmovzxbw(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse4_1(), "");
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  emit_int8(0x30);
  emit_int8((unsigned char)(0xC0 | encode));
}

// generic
void Assembler::pop(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8(0x58 | encode);
}

void Assembler::popcntl(Register dst, Address src) {
  assert(VM_Version::supports_popcnt(), "must support");
  InstructionMark im(this);
  emit_int8((unsigned char)0xF3);
  prefix(src, dst);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xB8);
  emit_operand(dst, src);
}

void Assembler::popcntl(Register dst, Register src) {
  assert(VM_Version::supports_popcnt(), "must support");
  emit_int8((unsigned char)0xF3);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xB8);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::popf() {
  emit_int8((unsigned char)0x9D);
}

#ifndef _LP64 // no 32bit push/pop on amd64
void Assembler::popl(Address dst) {
  // NOTE: this will adjust stack by 8byte on 64bits
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0x8F);
  emit_operand(rax, dst);
}
#endif

void Assembler::prefetch_prefix(Address src) {
  prefix(src);
  emit_int8(0x0F);
}

void Assembler::prefetchnta(Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
  InstructionMark im(this);
  prefetch_prefix(src);
  emit_int8(0x18);
  emit_operand(rax, src); // 0, src
}

void Assembler::prefetchr(Address src) {
  assert(VM_Version::supports_3dnow_prefetch(), "must support");
  InstructionMark im(this);
  prefetch_prefix(src);
  emit_int8(0x0D);
  emit_operand(rax, src); // 0, src
}

void Assembler::prefetcht0(Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
  InstructionMark im(this);
  prefetch_prefix(src);
  emit_int8(0x18);
  emit_operand(rcx, src); // 1, src
}

void Assembler::prefetcht1(Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
  InstructionMark im(this);
  prefetch_prefix(src);
  emit_int8(0x18);
  emit_operand(rdx, src); // 2, src
}

void Assembler::prefetcht2(Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
  InstructionMark im(this);
  prefetch_prefix(src);
  emit_int8(0x18);
  emit_operand(rbx, src); // 3, src
}

void Assembler::prefetchw(Address src) {
  assert(VM_Version::supports_3dnow_prefetch(), "must support");
  InstructionMark im(this);
  prefetch_prefix(src);
  emit_int8(0x0D);
  emit_operand(rcx, src); // 1, src
}

void Assembler::prefix(Prefix p) {
  emit_int8(p);
}

void Assembler::pshufb(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_ssse3(), "");
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  emit_int8(0x00);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::pshufb(XMMRegister dst, Address src) {
  assert(VM_Version::supports_ssse3(), "");
  InstructionMark im(this);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  emit_int8(0x00);
  emit_operand(dst, src);
}
void Assembler::pshufd(XMMRegister dst, XMMRegister src, int mode) {
  assert(isByte(mode), "invalid value");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith_nonds(0x70, dst, src, VEX_SIMD_66);
  emit_int8(mode & 0xFF);
}

void Assembler::pshufd(XMMRegister dst, Address src, int mode) {
  assert(isByte(mode), "invalid value");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
  InstructionMark im(this);
  simd_prefix(dst, src, VEX_SIMD_66);
  emit_int8(0x70);
  emit_operand(dst, src);
  emit_int8(mode & 0xFF);
}

void Assembler::pshuflw(XMMRegister dst, XMMRegister src, int mode) {
  assert(isByte(mode), "invalid value");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith_nonds(0x70, dst, src, VEX_SIMD_F2);
  emit_int8(mode & 0xFF);
}

void Assembler::pshuflw(XMMRegister dst, Address src, int mode) {
  assert(isByte(mode), "invalid value");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
  InstructionMark im(this);
  simd_prefix(dst, src, VEX_SIMD_F2);
  emit_int8(0x70);
  emit_operand(dst, src);
  emit_int8(mode & 0xFF);
}

void Assembler::psrldq(XMMRegister dst, int shift) {
  // Shift 128 bit value in xmm register by number of bytes.
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  int encode = simd_prefix_and_encode(xmm3, dst, dst, VEX_SIMD_66);
  emit_int8(0x73);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift);
}
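// For reference (illustrative): PSRLDQ is encoded as 66 0F 73 /3 ib, so xmm3
// above is not a data operand; its register number 3 supplies the opcode
// extension in the ModRM reg field, and the immediate is the shift in bytes.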

void Assembler::ptest(XMMRegister dst, Address src) {
  assert(VM_Version::supports_sse4_1(), "");
  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
  InstructionMark im(this);
  simd_prefix(dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  emit_int8(0x17);
  emit_operand(dst, src);
}

void Assembler::ptest(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse4_1(), "");
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  emit_int8(0x17);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::vptest(XMMRegister dst, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  bool vector256 = true;
  assert(dst != xnoreg, "sanity");
  int dst_enc = dst->encoding();
  // swap src<->dst for encoding
  vex_prefix(src, 0, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, false, vector256);
  emit_int8(0x17);
  emit_operand(dst, src);
}

void Assembler::vptest(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  bool vector256 = true;
  int encode = vex_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, vector256, VEX_OPCODE_0F_38);
  emit_int8(0x17);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::punpcklbw(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
  emit_simd_arith(0x60, dst, src, VEX_SIMD_66);
}

void Assembler::punpcklbw(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x60, dst, src, VEX_SIMD_66);
}

void Assembler::punpckldq(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
  emit_simd_arith(0x62, dst, src, VEX_SIMD_66);
}

void Assembler::punpckldq(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x62, dst, src, VEX_SIMD_66);
}

void Assembler::punpcklqdq(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x6C, dst, src, VEX_SIMD_66);
}

void Assembler::push(int32_t imm32) {
  // in 64bits we push 64bits onto the stack but only
  // take a 32bit immediate
  emit_int8(0x68);
  emit_int32(imm32);
}

void Assembler::push(Register src) {
  int encode = prefix_and_encode(src->encoding());

  emit_int8(0x50 | encode);
}

void Assembler::pushf() {
  emit_int8((unsigned char)0x9C);
}

#ifndef _LP64 // no 32bit push/pop on amd64
void Assembler::pushl(Address src) {
  // Note this will push 64bit on 64bit
  InstructionMark im(this);
  prefix(src);
  emit_int8((unsigned char)0xFF);
  emit_operand(rsi, src);
}
#endif

void Assembler::rcll(Register dst, int imm8) {
  assert(isShiftCount(imm8), "illegal shift count");
  int encode = prefix_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_int8((unsigned char)0xD1);
    emit_int8((unsigned char)(0xD0 | encode));
  } else {
    emit_int8((unsigned char)0xC1);
    emit_int8((unsigned char)0xD0 | encode);
    emit_int8(imm8);
  }
}

// copies data from [esi] to [edi] using rcx pointer sized words
// generic
void Assembler::rep_mov() {
  emit_int8((unsigned char)0xF3);
  // MOVSQ
  LP64_ONLY(prefix(REX_W));
  emit_int8((unsigned char)0xA5);
}

// sets rcx bytes with rax, value at [edi]
void Assembler::rep_stosb() {
  emit_int8((unsigned char)0xF3); // REP
  LP64_ONLY(prefix(REX_W));
  emit_int8((unsigned char)0xAA); // STOSB
}

// sets rcx pointer sized words with rax, value at [edi]
// generic
void Assembler::rep_stos() {
  emit_int8((unsigned char)0xF3); // REP
  LP64_ONLY(prefix(REX_W));       // LP64:STOSQ, LP32:STOSD
  emit_int8((unsigned char)0xAB);
}
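// Illustrative example: on LP64 the sequence above assembles to F3 48 AB
// (REP STOSQ), which stores rax into rcx consecutive 8-byte words starting
// at [rdi].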

// scans rcx pointer sized words at [edi] for occurrence of rax,
// generic
void Assembler::repne_scan() { // repne_scan
  emit_int8((unsigned char)0xF2);
  // SCASQ
  LP64_ONLY(prefix(REX_W));
  emit_int8((unsigned char)0xAF);
}

#ifdef _LP64
// scans rcx 4 byte words at [edi] for occurrence of rax,
// generic
void Assembler::repne_scanl() { // repne_scan
  emit_int8((unsigned char)0xF2);
  // SCASL
  emit_int8((unsigned char)0xAF);
}
#endif

void Assembler::ret(int imm16) {
  if (imm16 == 0) {
    emit_int8((unsigned char)0xC3);
  } else {
    emit_int8((unsigned char)0xC2);
    emit_int16(imm16);
  }
}

void Assembler::sahf() {
#ifdef _LP64
  // Not supported in 64bit mode
  ShouldNotReachHere();
#endif
  emit_int8((unsigned char)0x9E);
}

void Assembler::sarl(Register dst, int imm8) {
  int encode = prefix_and_encode(dst->encoding());
  assert(isShiftCount(imm8), "illegal shift count");
  if (imm8 == 1) {
    emit_int8((unsigned char)0xD1);
    emit_int8((unsigned char)(0xF8 | encode));
  } else {
    emit_int8((unsigned char)0xC1);
    emit_int8((unsigned char)(0xF8 | encode));
    emit_int8(imm8);
  }
}

void Assembler::sarl(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xD3);
  emit_int8((unsigned char)(0xF8 | encode));
}

void Assembler::sbbl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand(0x81, rbx, dst, imm32);
}

void Assembler::sbbl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xD8, dst, imm32);
}


void Assembler::sbbl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x1B);
  emit_operand(dst, src);
}

void Assembler::sbbl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x1B, 0xC0, dst, src);
}

void Assembler::setb(Condition cc, Register dst) {
  assert(0 <= cc && cc < 16, "illegal cc");
  int encode = prefix_and_encode(dst->encoding(), true);
  emit_int8(0x0F);
  emit_int8((unsigned char)0x90 | cc);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::shll(Register dst, int imm8) {
  assert(isShiftCount(imm8), "illegal shift count");
  int encode = prefix_and_encode(dst->encoding());
  if (imm8 == 1 ) {
    emit_int8((unsigned char)0xD1);
    emit_int8((unsigned char)(0xE0 | encode));
  } else {
    emit_int8((unsigned char)0xC1);
    emit_int8((unsigned char)(0xE0 | encode));
    emit_int8(imm8);
  }
}

void Assembler::shll(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xD3);
  emit_int8((unsigned char)(0xE0 | encode));
}

void Assembler::shrl(Register dst, int imm8) {
  assert(isShiftCount(imm8), "illegal shift count");
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xC1);
  emit_int8((unsigned char)(0xE8 | encode));
  emit_int8(imm8);
}

void Assembler::shrl(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xD3);
  emit_int8((unsigned char)(0xE8 | encode));
}

// copies a single word from [esi] to [edi]
void Assembler::smovl() {
  emit_int8((unsigned char)0xA5);
}

void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x51, dst, src, VEX_SIMD_F2);
}

void Assembler::sqrtsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x51, dst, src, VEX_SIMD_F2);
}

void Assembler::sqrtss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x51, dst, src, VEX_SIMD_F3);
}

void Assembler::std() {
  emit_int8((unsigned char)0xFD);
}

void Assembler::sqrtss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x51, dst, src, VEX_SIMD_F3);
}

void Assembler::stmxcsr( Address dst) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  prefix(dst);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAE);
  emit_operand(as_Register(3), dst);
}

void Assembler::subl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand(0x81, rbp, dst, imm32);
}

void Assembler::subl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_int8(0x29);
  emit_operand(src, dst);
}

void Assembler::subl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xE8, dst, imm32);
}

// Force generation of a 4 byte immediate value even if it fits into 8bit
void Assembler::subl_imm32(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith_imm32(0x81, 0xE8, dst, imm32);
}

void Assembler::subl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x2B);
  emit_operand(dst, src);
}

void Assembler::subl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x2B, 0xC0, dst, src);
}

void Assembler::subsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x5C, dst, src, VEX_SIMD_F2);
}

void Assembler::subsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x5C, dst, src, VEX_SIMD_F2);
}

void Assembler::subss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x5C, dst, src, VEX_SIMD_F3);
}

void Assembler::subss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x5C, dst, src, VEX_SIMD_F3);
}

void Assembler::testb(Register dst, int imm8) {
  NOT_LP64(assert(dst->has_byte_register(), "must have byte register"));
  (void) prefix_and_encode(dst->encoding(), true);
  emit_arith_b(0xF6, 0xC0, dst, imm8);
}

void Assembler::testl(Register dst, int32_t imm32) {
  // not using emit_arith because test
  // doesn't support sign-extension of
  // 8bit operands
  int encode = dst->encoding();
  if (encode == 0) {
    emit_int8((unsigned char)0xA9);
  } else {
    encode = prefix_and_encode(encode);
    emit_int8((unsigned char)0xF7);
    emit_int8((unsigned char)(0xC0 | encode));
  }
  emit_int32(imm32);
}
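// Illustrative note: when the destination is rax/eax (register encoding 0),
// the shorter TEST EAX, imm32 form 0xA9 id is emitted above; for any other
// register the general 0xF7 /0 id form is used.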

void Assembler::testl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x85, 0xC0, dst, src);
}

void Assembler::testl(Register dst, Address  src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8((unsigned char)0x85);
  emit_operand(dst, src);
}

void Assembler::ucomisd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith_nonds(0x2E, dst, src, VEX_SIMD_66);
}

void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith_nonds(0x2E, dst, src, VEX_SIMD_66);
}

void Assembler::ucomiss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith_nonds(0x2E, dst, src, VEX_SIMD_NONE);
}

void Assembler::ucomiss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith_nonds(0x2E, dst, src, VEX_SIMD_NONE);
}


void Assembler::xaddl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xC1);
  emit_operand(src, dst);
}

void Assembler::xchgl(Register dst, Address src) { // xchg
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8((unsigned char)0x87);
  emit_operand(dst, src);
}

void Assembler::xchgl(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8((unsigned char)0x87);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::xgetbv() {
  emit_int8(0x0F);
  emit_int8(0x01);
  emit_int8((unsigned char)0xD0);
}

void Assembler::xorl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xF0, dst, imm32);
}

void Assembler::xorl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x33);
  emit_operand(dst, src);
}

void Assembler::xorl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x33, 0xC0, dst, src);
}


// AVX 3-operands scalar floating-point arithmetic instructions
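// Illustrative note: in the VEX-encoded forms below, nds is the extra
// non-destructive source operand, so the result is written to dst while nds
// and src are left unchanged; the legacy two-operand SSE forms above
// overwrite dst instead.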

void Assembler::vaddsd(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_F2, /* vector256 */ false);
}

void Assembler::vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_F2, /* vector256 */ false);
}

void Assembler::vaddss(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_F3, /* vector256 */ false);
}

void Assembler::vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_F3, /* vector256 */ false);
}

void Assembler::vdivsd(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_F2, /* vector256 */ false);
}

void Assembler::vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_F2, /* vector256 */ false);
}

void Assembler::vdivss(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_F3, /* vector256 */ false);
}

void Assembler::vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_F3, /* vector256 */ false);
}

void Assembler::vmulsd(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_F2, /* vector256 */ false);
}

void Assembler::vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_F2, /* vector256 */ false);
}

void Assembler::vmulss(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_F3, /* vector256 */ false);
}

void Assembler::vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_F3, /* vector256 */ false);
}

void Assembler::vsubsd(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_F2, /* vector256 */ false);
}

void Assembler::vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_F2, /* vector256 */ false);
}

void Assembler::vsubss(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_F3, /* vector256 */ false);
}

void Assembler::vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_F3, /* vector256 */ false);
}

//====================VECTOR ARITHMETIC=====================================

// Floating-point vector arithmetic

void Assembler::addpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x58, dst, src, VEX_SIMD_66);
}

void Assembler::addps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x58, dst, src, VEX_SIMD_NONE);
}

void Assembler::vaddpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vaddps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_NONE, vector256);
}

void Assembler::vaddpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vaddps(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_NONE, vector256);
}

void Assembler::subpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x5C, dst, src, VEX_SIMD_66);
}

void Assembler::subps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x5C, dst, src, VEX_SIMD_NONE);
}

void Assembler::vsubpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vsubps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_NONE, vector256);
}

void Assembler::vsubpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vsubps(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_NONE, vector256);
}

void Assembler::mulpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x59, dst, src, VEX_SIMD_66);
}

void Assembler::mulps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x59, dst, src, VEX_SIMD_NONE);
}

void Assembler::vmulpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vmulps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_NONE, vector256);
}

void Assembler::vmulpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vmulps(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_NONE, vector256);
}

void Assembler::divpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x5E, dst, src, VEX_SIMD_66);
}

void Assembler::divps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x5E, dst, src, VEX_SIMD_NONE);
}

void Assembler::vdivpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vdivps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_NONE, vector256);
}

void Assembler::vdivpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vdivps(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_NONE, vector256);
}

void Assembler::andpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x54, dst, src, VEX_SIMD_66);
}

void Assembler::andps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x54, dst, src, VEX_SIMD_NONE);
}

void Assembler::andps(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x54, dst, src, VEX_SIMD_NONE);
}

void Assembler::andpd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x54, dst, src, VEX_SIMD_66);
}

void Assembler::vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x54, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x54, dst, nds, src, VEX_SIMD_NONE, vector256);
}

void Assembler::vandpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x54, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vandps(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x54, dst, nds, src, VEX_SIMD_NONE, vector256);
}

void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x57, dst, src, VEX_SIMD_66);
}

void Assembler::xorps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x57, dst, src, VEX_SIMD_NONE);
}

void Assembler::xorpd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x57, dst, src, VEX_SIMD_66);
}

void Assembler::xorps(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x57, dst, src, VEX_SIMD_NONE);
}

void Assembler::vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x57, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x57, dst, nds, src, VEX_SIMD_NONE, vector256);
}

void Assembler::vxorpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x57, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vxorps(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x57, dst, nds, src, VEX_SIMD_NONE, vector256);
}


// Integer vector arithmetic
void Assembler::paddb(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xFC, dst, src, VEX_SIMD_66);
}

void Assembler::paddw(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xFD, dst, src, VEX_SIMD_66);
}

void Assembler::paddd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xFE, dst, src, VEX_SIMD_66);
}

void Assembler::paddq(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xD4, dst, src, VEX_SIMD_66);
}

void Assembler::vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xFC, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xFD, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xFE, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpaddq(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xD4, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpaddb(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xFC, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpaddw(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xFD, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpaddd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xFE, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpaddq(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xD4, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::psubb(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xF8, dst, src, VEX_SIMD_66);
}

void Assembler::psubw(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xF9, dst, src, VEX_SIMD_66);
}

void Assembler::psubd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xFA, dst, src, VEX_SIMD_66);
}

void Assembler::psubq(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xFB, dst, src, VEX_SIMD_66);
}

void Assembler::vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xF8, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xF9, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpsubd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xFA, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpsubq(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xFB, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpsubb(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xF8, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpsubw(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xF9, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpsubd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xFA, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpsubq(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xFB, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::pmullw(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xD5, dst, src, VEX_SIMD_66);
}

void Assembler::pmulld(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse4_1(), "");
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  emit_int8(0x40);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xD5, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, vector256, VEX_OPCODE_0F_38);
  emit_int8(0x40);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::vpmullw(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xD5, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpmulld(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  InstructionMark im(this);
  int dst_enc = dst->encoding();
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  vex_prefix(src, nds_enc, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, false, vector256);
  emit_int8(0x40);
  emit_operand(dst, src);
}

// Shift packed integers left by specified number of bits.
void Assembler::psllw(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  // XMM6 is for /6 encoding: 66 0F 71 /6 ib
  int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66);
  emit_int8(0x71);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
}

void Assembler::pslld(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  // XMM6 is for /6 encoding: 66 0F 72 /6 ib
  int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66);
  emit_int8(0x72);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
}

void Assembler::psllq(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  // XMM6 is for /6 encoding: 66 0F 73 /6 ib
  int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66);
  emit_int8(0x73);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
}

void Assembler::psllw(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xF1, dst, shift, VEX_SIMD_66);
}

void Assembler::pslld(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xF2, dst, shift, VEX_SIMD_66);
}

void Assembler::psllq(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xF3, dst, shift, VEX_SIMD_66);
}

void Assembler::vpsllw(XMMRegister dst, XMMRegister src, int shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  // XMM6 is for /6 encoding: 66 0F 71 /6 ib
  emit_vex_arith(0x71, xmm6, dst, src, VEX_SIMD_66, vector256);
  emit_int8(shift & 0xFF);
}

void Assembler::vpslld(XMMRegister dst, XMMRegister src, int shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  // XMM6 is for /6 encoding: 66 0F 72 /6 ib
  emit_vex_arith(0x72, xmm6, dst, src, VEX_SIMD_66, vector256);
  emit_int8(shift & 0xFF);
}

void Assembler::vpsllq(XMMRegister dst, XMMRegister src, int shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  // XMM6 is for /6 encoding: 66 0F 73 /6 ib
  emit_vex_arith(0x73, xmm6, dst, src, VEX_SIMD_66, vector256);
  emit_int8(shift & 0xFF);
}

void Assembler::vpsllw(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xF1, dst, src, shift, VEX_SIMD_66, vector256);
}

void Assembler::vpslld(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xF2, dst, src, shift, VEX_SIMD_66, vector256);
}

void Assembler::vpsllq(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xF3, dst, src, shift, VEX_SIMD_66, vector256);
}

// Shift packed integers logically right by specified number of bits.
void Assembler::psrlw(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  // XMM2 is for /2 encoding: 66 0F 71 /2 ib
  int encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66);
  emit_int8(0x71);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
}

void Assembler::psrld(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  // XMM2 is for /2 encoding: 66 0F 72 /2 ib
  int encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66);
  emit_int8(0x72);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
}

void Assembler::psrlq(XMMRegister dst, int shift) {
  // Do not confuse it with the psrldq SSE2 instruction, which
  // shifts the whole 128-bit value in an xmm register right by a number of bytes.
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  // XMM2 is for /2 encoding: 66 0F 73 /2 ib
  int encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66);
  emit_int8(0x73);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
}

void Assembler::psrlw(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xD1, dst, shift, VEX_SIMD_66);
}

void Assembler::psrld(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xD2, dst, shift, VEX_SIMD_66);
}

void Assembler::psrlq(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xD3, dst, shift, VEX_SIMD_66);
}

void Assembler::vpsrlw(XMMRegister dst, XMMRegister src, int shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  // XMM2 is for /2 encoding: 66 0F 71 /2 ib
  emit_vex_arith(0x71, xmm2, dst, src, VEX_SIMD_66, vector256);
  emit_int8(shift & 0xFF);
}

void Assembler::vpsrld(XMMRegister dst, XMMRegister src, int shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  // XMM2 is for /2 encoding: 66 0F 72 /2 ib
  emit_vex_arith(0x72, xmm2, dst, src, VEX_SIMD_66, vector256);
  emit_int8(shift & 0xFF);
}

void Assembler::vpsrlq(XMMRegister dst, XMMRegister src, int shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  // XMM2 is for /2 encoding: 66 0F 73 /2 ib
  emit_vex_arith(0x73, xmm2, dst, src, VEX_SIMD_66, vector256);
  emit_int8(shift & 0xFF);
}

void Assembler::vpsrlw(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xD1, dst, src, shift, VEX_SIMD_66, vector256);
}

void Assembler::vpsrld(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xD2, dst, src, shift, VEX_SIMD_66, vector256);
}

void Assembler::vpsrlq(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xD3, dst, src, shift, VEX_SIMD_66, vector256);
}

// Shift packed integers arithmetically right by specified number of bits.
void Assembler::psraw(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  // XMM4 is for /4 encoding: 66 0F 71 /4 ib
  int encode = simd_prefix_and_encode(xmm4, dst, dst, VEX_SIMD_66);
  emit_int8(0x71);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
}

void Assembler::psrad(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  // XMM4 is for /4 encoding: 66 0F 72 /4 ib
  int encode = simd_prefix_and_encode(xmm4, dst, dst, VEX_SIMD_66);
  emit_int8(0x72);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
}

void Assembler::psraw(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xE1, dst, shift, VEX_SIMD_66);
}

void Assembler::psrad(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xE2, dst, shift, VEX_SIMD_66);
}

void Assembler::vpsraw(XMMRegister dst, XMMRegister src, int shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  // XMM4 is for /4 encoding: 66 0F 71 /4 ib
  emit_vex_arith(0x71, xmm4, dst, src, VEX_SIMD_66, vector256);
  emit_int8(shift & 0xFF);
}

void Assembler::vpsrad(XMMRegister dst, XMMRegister src, int shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  // XMM4 is for /4 encoding: 66 0F 72 /4 ib
  emit_vex_arith(0x72, xmm4, dst, src, VEX_SIMD_66, vector256);
  emit_int8(shift & 0xFF);
}

void Assembler::vpsraw(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xE1, dst, src, shift, VEX_SIMD_66, vector256);
}

void Assembler::vpsrad(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xE2, dst, src, shift, VEX_SIMD_66, vector256);
}


// AND packed integers
void Assembler::pand(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xDB, dst, src, VEX_SIMD_66);
}

void Assembler::vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xDB, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpand(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xDB, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::por(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xEB, dst, src, VEX_SIMD_66);
}

void Assembler::vpor(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xEB, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpor(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xEB, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::pxor(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xEF, dst, src, VEX_SIMD_66);
}

void Assembler::vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xEF, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpxor(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xEF, dst, nds, src, VEX_SIMD_66, vector256);
}


void Assembler::vinsertf128h(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  bool vector256 = true;
  int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, vector256, VEX_OPCODE_0F_3A);
  emit_int8(0x18);
  emit_int8((unsigned char)(0xC0 | encode));
  // 0x00 - insert into lower 128 bits
  // 0x01 - insert into upper 128 bits
  emit_int8(0x01);
}

void Assembler::vinsertf128h(XMMRegister dst, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  bool vector256 = true;
  assert(dst != xnoreg, "sanity");
  int dst_enc = dst->encoding();
  // swap src<->dst for encoding
  vex_prefix(src, dst_enc, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, false, vector256);
  emit_int8(0x18);
  emit_operand(dst, src);
  // 0x01 - insert into upper 128 bits
  emit_int8(0x01);
}

void Assembler::vextractf128h(Address dst, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  bool vector256 = true;
  assert(src != xnoreg, "sanity");
  int src_enc = src->encoding();
  vex_prefix(dst, 0, src_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, false, vector256);
  emit_int8(0x19);
  emit_operand(src, dst);
  // 0x01 - extract from upper 128 bits
  emit_int8(0x01);
}

void Assembler::vinserti128h(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx2(), "");
  bool vector256 = true;
  int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, vector256, VEX_OPCODE_0F_3A);
  emit_int8(0x38);
  emit_int8((unsigned char)(0xC0 | encode));
  // 0x00 - insert into lower 128 bits
  // 0x01 - insert into upper 128 bits
  emit_int8(0x01);
}

void Assembler::vinserti128h(XMMRegister dst, Address src) {
  assert(VM_Version::supports_avx2(), "");
  InstructionMark im(this);
  bool vector256 = true;
  assert(dst != xnoreg, "sanity");
  int dst_enc = dst->encoding();
  // swap src<->dst for encoding
  vex_prefix(src, dst_enc, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, false, vector256);
  emit_int8(0x38);
  emit_operand(dst, src);
  // 0x01 - insert into upper 128 bits
  emit_int8(0x01);
}

void Assembler::vextracti128h(Address dst, XMMRegister src) {
  assert(VM_Version::supports_avx2(), "");
  InstructionMark im(this);
  bool vector256 = true;
  assert(src != xnoreg, "sanity");
  int src_enc = src->encoding();
  vex_prefix(dst, 0, src_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, false, vector256);
  emit_int8(0x39);
  emit_operand(src, dst);
  // 0x01 - extract from upper 128 bits
  emit_int8(0x01);
}

// duplicate the 4-byte integer data from src into 8 locations in dest
void Assembler::vpbroadcastd(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_avx2(), "");
  bool vector256 = true;
  int encode = vex_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, vector256, VEX_OPCODE_0F_38);
  emit_int8(0x58);
  emit_int8((unsigned char)(0xC0 | encode));
}

// Carry-Less Multiplication Quadword
void Assembler::vpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask) {
  assert(VM_Version::supports_avx() && VM_Version::supports_clmul(), "");
  bool vector256 = false;
  int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, vector256, VEX_OPCODE_0F_3A);
  emit_int8(0x44);
  emit_int8((unsigned char)(0xC0 | encode));
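  // The immediate selects the 64-bit halves that enter the carry-less multiply:
  // bits 0 and 4 pick the low or high quadword of the two source operands.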
  emit_int8((unsigned char)mask);
}

void Assembler::vzeroupper() {
  assert(VM_Version::supports_avx(), "");
  (void)vex_prefix_and_encode(xmm0, xmm0, xmm0, VEX_SIMD_NONE);
  emit_int8(0x77);
}


#ifndef _LP64
// 32bit only pieces of the assembler

void Assembler::cmp_literal32(Register src1, int32_t imm32, RelocationHolder const& rspec) {
  // NO PREFIX AS NEVER 64BIT
  InstructionMark im(this);
  emit_int8((unsigned char)0x81);
  emit_int8((unsigned char)(0xF8 | src1->encoding()));
  emit_data(imm32, rspec, 0);
}

void Assembler::cmp_literal32(Address src1, int32_t imm32, RelocationHolder const& rspec) {
  // NO PREFIX AS NEVER 64BIT (not even 32bit versions of 64bit regs)
  InstructionMark im(this);
  emit_int8((unsigned char)0x81);
  emit_operand(rdi, src1);
  emit_data(imm32, rspec, 0);
}

// The 64-bit cmpxchg (on a 32-bit platform) compares the value at adr with the contents of rdx:rax
// and, if they are equal, stores rcx:rbx into adr; otherwise, the value at adr is loaded
// into rdx:rax.  The ZF is set if the compared values were equal, and cleared otherwise.
void Assembler::cmpxchg8(Address adr) {
  InstructionMark im(this);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xC7);
  emit_operand(rcx, adr);
}

void Assembler::decl(Register dst) {
  // Don't use it directly. Use MacroAssembler::decrementl() instead.
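  // Single-byte opcodes 0x48..0x4F are DEC r32 here; in 64-bit mode they are REX
  // prefixes, which is why this encoding is 32-bit only.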
  emit_int8(0x48 | dst->encoding());
}

#endif // _LP64

// 64bit typically doesn't use the x87 but needs to for the trig funcs

void Assembler::fabs() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xE1);
}

void Assembler::fadd(int i) {
  emit_farith(0xD8, 0xC0, i);
}

void Assembler::fadd_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rax, src);
}

void Assembler::fadd_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rax, src);
}

void Assembler::fadda(int i) {
  emit_farith(0xDC, 0xC0, i);
}

void Assembler::faddp(int i) {
  emit_farith(0xDE, 0xC0, i);
}

void Assembler::fchs() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xE0);
}

void Assembler::fcom(int i) {
  emit_farith(0xD8, 0xD0, i);
}

void Assembler::fcomp(int i) {
  emit_farith(0xD8, 0xD8, i);
}

void Assembler::fcomp_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rbx, src);
}

void Assembler::fcomp_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rbx, src);
}

void Assembler::fcompp() {
  emit_int8((unsigned char)0xDE);
  emit_int8((unsigned char)0xD9);
}

void Assembler::fcos() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xFF);
}

void Assembler::fdecstp() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xF6);
}

void Assembler::fdiv(int i) {
  emit_farith(0xD8, 0xF0, i);
}

void Assembler::fdiv_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rsi, src);
}

void Assembler::fdiv_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rsi, src);
}

void Assembler::fdiva(int i) {
  emit_farith(0xDC, 0xF8, i);
}

// Note: The Intel manual (Pentium Processor User's Manual, Vol.3, 1994)
//       is erroneous for some of the floating-point instructions below.

void Assembler::fdivp(int i) {
  emit_farith(0xDE, 0xF8, i);                    // ST(0) <- ST(0) / ST(1) and pop (Intel manual wrong)
}

void Assembler::fdivr(int i) {
  emit_farith(0xD8, 0xF8, i);
}

void Assembler::fdivr_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rdi, src);
}

void Assembler::fdivr_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rdi, src);
}

void Assembler::fdivra(int i) {
  emit_farith(0xDC, 0xF0, i);
}

void Assembler::fdivrp(int i) {
  emit_farith(0xDE, 0xF0, i);                    // ST(0) <- ST(1) / ST(0) and pop (Intel manual wrong)
}

void Assembler::ffree(int i) {
  emit_farith(0xDD, 0xC0, i);
}

void Assembler::fild_d(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDF);
  emit_operand32(rbp, adr);
}

void Assembler::fild_s(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDB);
  emit_operand32(rax, adr);
}

void Assembler::fincstp() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xF7);
}

void Assembler::finit() {
  emit_int8((unsigned char)0x9B);
  emit_int8((unsigned char)0xDB);
  emit_int8((unsigned char)0xE3);
}

void Assembler::fist_s(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDB);
  emit_operand32(rdx, adr);
}

void Assembler::fistp_d(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDF);
  emit_operand32(rdi, adr);
}

void Assembler::fistp_s(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDB);
  emit_operand32(rbx, adr);
}

void Assembler::fld1() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xE8);
}

void Assembler::fld_d(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDD);
  emit_operand32(rax, adr);
}

void Assembler::fld_s(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD9);
  emit_operand32(rax, adr);
}


void Assembler::fld_s(int index) {
  emit_farith(0xD9, 0xC0, index);
}

void Assembler::fld_x(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDB);
  emit_operand32(rbp, adr);
}

void Assembler::fldcw(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD9);
  emit_operand32(rbp, src);
}

void Assembler::fldenv(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD9);
  emit_operand32(rsp, src);
}

void Assembler::fldlg2() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xEC);
}

void Assembler::fldln2() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xED);
}

void Assembler::fldz() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xEE);
}

void Assembler::flog() {
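  // ln(x) = log2(x) * ln(2): push ln(2), swap so x is back in ST(0), then
  // fyl2x computes ST(1) * log2(ST(0)) and pops.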
  fldln2();
  fxch();
  fyl2x();
}

void Assembler::flog10() {
  fldlg2();
  fxch();
  fyl2x();
}

void Assembler::fmul(int i) {
  emit_farith(0xD8, 0xC8, i);
}

void Assembler::fmul_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rcx, src);
}

void Assembler::fmul_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rcx, src);
}

void Assembler::fmula(int i) {
  emit_farith(0xDC, 0xC8, i);
}

void Assembler::fmulp(int i) {
  emit_farith(0xDE, 0xC8, i);
}

void Assembler::fnsave(Address dst) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDD);
  emit_operand32(rsi, dst);
}

void Assembler::fnstcw(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0x9B);
  emit_int8((unsigned char)0xD9);
  emit_operand32(rdi, src);
}

void Assembler::fnstsw_ax() {
  emit_int8((unsigned char)0xDF);
  emit_int8((unsigned char)0xE0);
}

void Assembler::fprem() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xF8);
}

void Assembler::fprem1() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xF5);
}

void Assembler::frstor(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDD);
  emit_operand32(rsp, src);
}

void Assembler::fsin() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xFE);
}

void Assembler::fsqrt() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xFA);
}

void Assembler::fst_d(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDD);
  emit_operand32(rdx, adr);
}

void Assembler::fst_s(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD9);
  emit_operand32(rdx, adr);
}

void Assembler::fstp_d(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDD);
  emit_operand32(rbx, adr);
}

void Assembler::fstp_d(int index) {
  emit_farith(0xDD, 0xD8, index);
}

void Assembler::fstp_s(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD9);
  emit_operand32(rbx, adr);
}

void Assembler::fstp_x(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDB);
  emit_operand32(rdi, adr);
}

void Assembler::fsub(int i) {
  emit_farith(0xD8, 0xE0, i);
}

void Assembler::fsub_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rsp, src);
}

void Assembler::fsub_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rsp, src);
}

void Assembler::fsuba(int i) {
  emit_farith(0xDC, 0xE8, i);
}

void Assembler::fsubp(int i) {
  emit_farith(0xDE, 0xE8, i);                    // ST(0) <- ST(0) - ST(1) and pop (Intel manual wrong)
}

void Assembler::fsubr(int i) {
  emit_farith(0xD8, 0xE8, i);
}

void Assembler::fsubr_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rbp, src);
}

void Assembler::fsubr_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rbp, src);
}

void Assembler::fsubra(int i) {
  emit_farith(0xDC, 0xE0, i);
}

void Assembler::fsubrp(int i) {
  emit_farith(0xDE, 0xE0, i);                    // ST(0) <- ST(1) - ST(0) and pop (Intel manual wrong)
}

void Assembler::ftan() {
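  // D9 F2 is FPTAN, which leaves tan(x) in ST(1) and pushes 1.0;
  // DD D8 (FSTP ST(0)) then pops the 1.0 so the result ends up in ST(0).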
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xF2);
  emit_int8((unsigned char)0xDD);
  emit_int8((unsigned char)0xD8);
}

void Assembler::ftst() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xE4);
}

void Assembler::fucomi(int i) {
  // make sure the instruction is supported (introduced for P6, together with cmov)
  guarantee(VM_Version::supports_cmov(), "illegal instruction");
  emit_farith(0xDB, 0xE8, i);
}

void Assembler::fucomip(int i) {
  // make sure the instruction is supported (introduced for P6, together with cmov)
  guarantee(VM_Version::supports_cmov(), "illegal instruction");
  emit_farith(0xDF, 0xE8, i);
}

void Assembler::fwait() {
  emit_int8((unsigned char)0x9B);
}

void Assembler::fxch(int i) {
  emit_farith(0xD9, 0xC8, i);
}

void Assembler::fyl2x() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xF1);
}

void Assembler::frndint() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xFC);
}

void Assembler::f2xm1() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xF0);
}

void Assembler::fldl2e() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xEA);
}

// SSE SIMD prefix byte values corresponding to VexSimdPrefix encoding.
static int simd_pre[4] = { 0, 0x66, 0xF3, 0xF2 };
// SSE opcode second byte values (first is 0x0F) corresponding to VexOpcode encoding.
static int simd_opc[4] = { 0,    0, 0x38, 0x3A };

// Generate SSE legacy REX prefix and SIMD opcode based on VEX encoding.
void Assembler::rex_prefix(Address adr, XMMRegister xreg, VexSimdPrefix pre, VexOpcode opc, bool rex_w) {
  if (pre > 0) {
    emit_int8(simd_pre[pre]);
  }
  if (rex_w) {
    prefixq(adr, xreg);
  } else {
    prefix(adr, xreg);
  }
  if (opc > 0) {
    emit_int8(0x0F);
    int opc2 = simd_opc[opc];
    if (opc2 > 0) {
      emit_int8(opc2);
    }
  }
}

int Assembler::rex_prefix_and_encode(int dst_enc, int src_enc, VexSimdPrefix pre, VexOpcode opc, bool rex_w) {
  if (pre > 0) {
    emit_int8(simd_pre[pre]);
  }
  int encode = (rex_w) ? prefixq_and_encode(dst_enc, src_enc) :
                          prefix_and_encode(dst_enc, src_enc);
  if (opc > 0) {
    emit_int8(0x0F);
    int opc2 = simd_opc[opc];
    if (opc2 > 0) {
      emit_int8(opc2);
    }
  }
  return encode;
}


void Assembler::vex_prefix(bool vex_r, bool vex_b, bool vex_x, bool vex_w, int nds_enc, VexSimdPrefix pre, VexOpcode opc, bool vector256) {
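  // The two-byte VEX prefix (C5) can only encode VEX.R, vvvv, L and pp with the
  // implied 0F opcode map; anything needing VEX.X, VEX.B, VEX.W or the
  // 0F 38 / 0F 3A maps must use the three-byte form (C4).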
  if (vex_b || vex_x || vex_w || (opc == VEX_OPCODE_0F_38) || (opc == VEX_OPCODE_0F_3A)) {
    prefix(VEX_3bytes);

    int byte1 = (vex_r ? VEX_R : 0) | (vex_x ? VEX_X : 0) | (vex_b ? VEX_B : 0);
    byte1 = (~byte1) & 0xE0;
    byte1 |= opc;
    emit_int8(byte1);

    int byte2 = ((~nds_enc) & 0xf) << 3;
    byte2 |= (vex_w ? VEX_W : 0) | (vector256 ? 4 : 0) | pre;
    emit_int8(byte2);
  } else {
    prefix(VEX_2bytes);

    int byte1 = vex_r ? VEX_R : 0;
    byte1 = (~byte1) & 0x80;
    byte1 |= ((~nds_enc) & 0xf) << 3;
    byte1 |= (vector256 ? 4 : 0) | pre;
    emit_int8(byte1);
  }
}

void Assembler::vex_prefix(Address adr, int nds_enc, int xreg_enc, VexSimdPrefix pre, VexOpcode opc, bool vex_w, bool vector256){
  bool vex_r = (xreg_enc >= 8);
  bool vex_b = adr.base_needs_rex();
  bool vex_x = adr.index_needs_rex();
  vex_prefix(vex_r, vex_b, vex_x, vex_w, nds_enc, pre, opc, vector256);
}

int Assembler::vex_prefix_and_encode(int dst_enc, int nds_enc, int src_enc, VexSimdPrefix pre, VexOpcode opc, bool vex_w, bool vector256) {
  bool vex_r = (dst_enc >= 8);
  bool vex_b = (src_enc >= 8);
  bool vex_x = false;
  vex_prefix(vex_r, vex_b, vex_x, vex_w, nds_enc, pre, opc, vector256);
  return (((dst_enc & 7) << 3) | (src_enc & 7));
}


void Assembler::simd_prefix(XMMRegister xreg, XMMRegister nds, Address adr, VexSimdPrefix pre, VexOpcode opc, bool rex_w, bool vector256) {
  if (UseAVX > 0) {
    int xreg_enc = xreg->encoding();
    int  nds_enc = nds->is_valid() ? nds->encoding() : 0;
    vex_prefix(adr, nds_enc, xreg_enc, pre, opc, rex_w, vector256);
  } else {
    assert((nds == xreg) || (nds == xnoreg), "wrong sse encoding");
    rex_prefix(adr, xreg, pre, opc, rex_w);
  }
}

int Assembler::simd_prefix_and_encode(XMMRegister dst, XMMRegister nds, XMMRegister src, VexSimdPrefix pre, VexOpcode opc, bool rex_w, bool vector256) {
  int dst_enc = dst->encoding();
  int src_enc = src->encoding();
  if (UseAVX > 0) {
    int nds_enc = nds->is_valid() ? nds->encoding() : 0;
    return vex_prefix_and_encode(dst_enc, nds_enc, src_enc, pre, opc, rex_w, vector256);
  } else {
    assert((nds == dst) || (nds == src) || (nds == xnoreg), "wrong sse encoding");
    return rex_prefix_and_encode(dst_enc, src_enc, pre, opc, rex_w);
  }
}

void Assembler::emit_simd_arith(int opcode, XMMRegister dst, Address src, VexSimdPrefix pre) {
  InstructionMark im(this);
  simd_prefix(dst, dst, src, pre);
  emit_int8(opcode);
  emit_operand(dst, src);
}

void Assembler::emit_simd_arith(int opcode, XMMRegister dst, XMMRegister src, VexSimdPrefix pre) {
  int encode = simd_prefix_and_encode(dst, dst, src, pre);
  emit_int8(opcode);
  emit_int8((unsigned char)(0xC0 | encode));
}

// Versions with no second source register (non-destructive source).
void Assembler::emit_simd_arith_nonds(int opcode, XMMRegister dst, Address src, VexSimdPrefix pre) {
  InstructionMark im(this);
  simd_prefix(dst, xnoreg, src, pre);
  emit_int8(opcode);
  emit_operand(dst, src);
}

void Assembler::emit_simd_arith_nonds(int opcode, XMMRegister dst, XMMRegister src, VexSimdPrefix pre) {
  int encode = simd_prefix_and_encode(dst, xnoreg, src, pre);
4355 4356
  emit_int8(opcode);
  emit_int8((unsigned char)(0xC0 | encode));
}

// 3-operands AVX instructions
void Assembler::emit_vex_arith(int opcode, XMMRegister dst, XMMRegister nds,
                               Address src, VexSimdPrefix pre, bool vector256) {
  InstructionMark im(this);
  vex_prefix(dst, nds, src, pre, vector256);
  emit_int8(opcode);
  emit_operand(dst, src);
}

void Assembler::emit_vex_arith(int opcode, XMMRegister dst, XMMRegister nds,
                               XMMRegister src, VexSimdPrefix pre, bool vector256) {
  int encode = vex_prefix_and_encode(dst, nds, src, pre, vector256);
  emit_int8(opcode);
  emit_int8((unsigned char)(0xC0 | encode));
}

#ifndef _LP64

void Assembler::incl(Register dst) {
  // Don't use it directly. Use MacroAssembler::incrementl() instead.
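  // Single-byte opcodes 0x40..0x47 are INC r32 here; in 64-bit mode they are REX
  // prefixes, which is why this encoding is 32-bit only.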
  emit_int8(0x40 | dst->encoding());
}

void Assembler::lea(Register dst, Address src) {
  leal(dst, src);
}

void Assembler::mov_literal32(Address dst, int32_t imm32,  RelocationHolder const& rspec) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xC7);
  emit_operand(rax, dst);
  emit_data((int)imm32, rspec, 0);
}

void Assembler::mov_literal32(Register dst, int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)(0xB8 | encode));
  emit_data((int)imm32, rspec, 0);
}

void Assembler::popa() { // 32bit
  emit_int8(0x61);
}

void Assembler::push_literal32(int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  emit_int8(0x68);
  emit_data(imm32, rspec, 0);
}

void Assembler::pusha() { // 32bit
  emit_int8(0x60);
}

void Assembler::set_byte_if_not_zero(Register dst) {
  emit_int8(0x0F);
  emit_int8((unsigned char)0x95);
  emit_int8((unsigned char)(0xE0 | dst->encoding()));
}

void Assembler::shldl(Register dst, Register src) {
  emit_int8(0x0F);
  emit_int8((unsigned char)0xA5);
  emit_int8((unsigned char)(0xC0 | src->encoding() << 3 | dst->encoding()));
}

void Assembler::shrdl(Register dst, Register src) {
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAD);
  emit_int8((unsigned char)(0xC0 | src->encoding() << 3 | dst->encoding()));
}

#else // LP64

void Assembler::set_byte_if_not_zero(Register dst) {
  int enc = prefix_and_encode(dst->encoding(), true);
  emit_int8(0x0F);
  emit_int8((unsigned char)0x95);
  emit_int8((unsigned char)(0xE0 | enc));
}

// 64bit only pieces of the assembler
// This should only be used by 64bit instructions that can use rip-relative addressing;
// it cannot be used by instructions that want an immediate value.

bool Assembler::reachable(AddressLiteral adr) {
  int64_t disp;
  // A reloc of 'none' will force a 64bit literal into the code stream. It is likely a
  // placeholder for something that will be patched later, and we need to be certain it
  // will always be reachable.
  if (adr.reloc() == relocInfo::none) {
    return false;
  }
  if (adr.reloc() == relocInfo::internal_word_type) {
    // This should be rip relative and easily reachable.
    return true;
  }
  if (adr.reloc() == relocInfo::virtual_call_type ||
      adr.reloc() == relocInfo::opt_virtual_call_type ||
      adr.reloc() == relocInfo::static_call_type ||
      adr.reloc() == relocInfo::static_stub_type ) {
    // This should be rip relative within the code cache and easily
    // reachable until we get huge code caches. (At which point
    // ic code is going to have issues).
    return true;
  }
  if (adr.reloc() != relocInfo::external_word_type &&
      adr.reloc() != relocInfo::poll_return_type &&  // these are really external_word but need special
      adr.reloc() != relocInfo::poll_type &&         // relocs to identify them
      adr.reloc() != relocInfo::runtime_call_type ) {
    return false;
  }

  // Stress the correction code
  if (ForceUnreachable) {
    // Must be runtimecall reloc, see if it is in the codecache
    // Flipping stuff in the codecache to be unreachable causes issues
    // with things like inline caches where the additional instructions
    // are not handled.
    if (CodeCache::find_blob(adr._target) == NULL) {
      return false;
    }
  }
  // For external_word_type/runtime_call_type, if the target is reachable both from
  // where we are now (possibly a temp buffer) and from anywhere we might end up
  // in the codeCache, then we are always reachable.
  // This would have to change, and become more pessimistic, if we ever
  // save/restore shared code.
  disp = (int64_t)adr._target - ((int64_t)CodeCache::low_bound() + sizeof(int));
  if (!is_simm32(disp)) return false;
  disp = (int64_t)adr._target - ((int64_t)CodeCache::high_bound() + sizeof(int));
  if (!is_simm32(disp)) return false;

  disp = (int64_t)adr._target - ((int64_t)pc() + sizeof(int));

  // Because rip-relative addressing is disp + address_of_next_instruction, and we
  // don't know the value of address_of_next_instruction, we apply a fudge factor
  // to make sure we will be ok no matter what size of instruction this displacement ends up in.
  // We don't have to fudge the checks above because they are already worst case.

  // 12 == override/rex byte, opcode byte, rm byte, sib byte, a 4-byte disp , 4-byte literal
  // + 4 because better safe than sorry.
  const int fudge = 12 + 4;
  if (disp < 0) {
    disp -= fudge;
  } else {
    disp += fudge;
  }
  return is_simm32(disp);
}

// Check if the polling page is not reachable from the code cache using rip-relative
// addressing.
bool Assembler::is_polling_page_far() {
  intptr_t addr = (intptr_t)os::get_polling_page();
  return ForceUnreachable ||
         !is_simm32(addr - (intptr_t)CodeCache::low_bound()) ||
         !is_simm32(addr - (intptr_t)CodeCache::high_bound());
}

void Assembler::emit_data64(jlong data,
                            relocInfo::relocType rtype,
                            int format) {
  if (rtype == relocInfo::none) {
    emit_int64(data);
  } else {
    emit_data64(data, Relocation::spec_simple(rtype), format);
  }
}

void Assembler::emit_data64(jlong data,
                            RelocationHolder const& rspec,
                            int format) {
  assert(imm_operand == 0, "default format must be immediate in this file");
  assert(imm_operand == format, "must be immediate");
  assert(inst_mark() != NULL, "must be inside InstructionMark");
  // Do not use AbstractAssembler::relocate, which is not intended for
  // embedded words.  Instead, relocate to the enclosing instruction.
  code_section()->relocate(inst_mark(), rspec, format);
#ifdef ASSERT
  check_relocation(rspec, format);
#endif
  emit_int64(data);
}

int Assembler::prefix_and_encode(int reg_enc, bool byteinst) {
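  // A REX prefix (even with no flag bits set) is required for byte instructions on
  // register encodings 4..7 so they address SPL/BPL/SIL/DIL rather than AH/CH/DH/BH.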
  if (reg_enc >= 8) {
    prefix(REX_B);
    reg_enc -= 8;
  } else if (byteinst && reg_enc >= 4) {
    prefix(REX);
  }
  return reg_enc;
}

int Assembler::prefixq_and_encode(int reg_enc) {
  if (reg_enc < 8) {
    prefix(REX_W);
  } else {
    prefix(REX_WB);
    reg_enc -= 8;
  }
  return reg_enc;
}

int Assembler::prefix_and_encode(int dst_enc, int src_enc, bool byteinst) {
  if (dst_enc < 8) {
    if (src_enc >= 8) {
      prefix(REX_B);
      src_enc -= 8;
    } else if (byteinst && src_enc >= 4) {
      prefix(REX);
    }
  } else {
    if (src_enc < 8) {
      prefix(REX_R);
    } else {
      prefix(REX_RB);
      src_enc -= 8;
    }
    dst_enc -= 8;
  }
  return dst_enc << 3 | src_enc;
}

int Assembler::prefixq_and_encode(int dst_enc, int src_enc) {
  if (dst_enc < 8) {
    if (src_enc < 8) {
      prefix(REX_W);
    } else {
      prefix(REX_WB);
      src_enc -= 8;
    }
  } else {
    if (src_enc < 8) {
      prefix(REX_WR);
    } else {
      prefix(REX_WRB);
      src_enc -= 8;
    }
    dst_enc -= 8;
  }
  return dst_enc << 3 | src_enc;
}

void Assembler::prefix(Register reg) {
  if (reg->encoding() >= 8) {
    prefix(REX_B);
  }
}

void Assembler::prefix(Address adr) {
  if (adr.base_needs_rex()) {
    if (adr.index_needs_rex()) {
      prefix(REX_XB);
    } else {
      prefix(REX_B);
    }
  } else {
    if (adr.index_needs_rex()) {
      prefix(REX_X);
    }
  }
}

void Assembler::prefixq(Address adr) {
  if (adr.base_needs_rex()) {
    if (adr.index_needs_rex()) {
      prefix(REX_WXB);
    } else {
      prefix(REX_WB);
    }
  } else {
    if (adr.index_needs_rex()) {
      prefix(REX_WX);
    } else {
      prefix(REX_W);
    }
  }
}


void Assembler::prefix(Address adr, Register reg, bool byteinst) {
  if (reg->encoding() < 8) {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_XB);
      } else {
        prefix(REX_B);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_X);
      } else if (byteinst && reg->encoding() >= 4 ) {
        prefix(REX);
      }
    }
  } else {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_RXB);
      } else {
        prefix(REX_RB);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_RX);
      } else {
        prefix(REX_R);
      }
    }
  }
}

void Assembler::prefixq(Address adr, Register src) {
  if (src->encoding() < 8) {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_WXB);
      } else {
        prefix(REX_WB);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_WX);
      } else {
        prefix(REX_W);
      }
    }
  } else {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_WRXB);
      } else {
        prefix(REX_WRB);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_WRX);
      } else {
        prefix(REX_WR);
      }
    }
  }
}

void Assembler::prefix(Address adr, XMMRegister reg) {
  if (reg->encoding() < 8) {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_XB);
      } else {
        prefix(REX_B);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_X);
      }
    }
  } else {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_RXB);
      } else {
        prefix(REX_RB);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_RX);
      } else {
        prefix(REX_R);
      }
    }
  }
}

void Assembler::prefixq(Address adr, XMMRegister src) {
  if (src->encoding() < 8) {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_WXB);
      } else {
        prefix(REX_WB);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_WX);
      } else {
        prefix(REX_W);
      }
    }
  } else {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_WRXB);
      } else {
        prefix(REX_WRB);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_WRX);
      } else {
        prefix(REX_WR);
      }
    }
  }
}

void Assembler::adcq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xD0, dst, imm32);
}

void Assembler::adcq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x13);
  emit_operand(dst, src);
}

void Assembler::adcq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x13, 0xC0, dst, src);
}

void Assembler::addq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_arith_operand(0x81, rax, dst, imm32);
}

void Assembler::addq(Address dst, Register src) {
  InstructionMark im(this);
  prefixq(dst, src);
  emit_int8(0x01);
  emit_operand(src, dst);
}

void Assembler::addq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xC0, dst, imm32);
}

void Assembler::addq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x03);
  emit_operand(dst, src);
}

void Assembler::addq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x03, 0xC0, dst, src);
}

void Assembler::andq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_int8((unsigned char)0x81);
  emit_operand(rsp, dst, 4);
  emit_int32(imm32);
}

void Assembler::andq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xE0, dst, imm32);
}

void Assembler::andq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x23);
  emit_operand(dst, src);
}

void Assembler::andq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x23, 0xC0, dst, src);
}

void Assembler::bsfq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBC);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::bsrq(Register dst, Register src) {
  assert(!VM_Version::supports_lzcnt(), "encoding is treated as LZCNT");
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBD);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::bswapq(Register reg) {
  int encode = prefixq_and_encode(reg->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)(0xC8 | encode));
}

void Assembler::cdqq() {
  prefix(REX_W);
  emit_int8((unsigned char)0x99);
}

void Assembler::clflush(Address adr) {
  prefix(adr);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAE);
  emit_operand(rdi, adr);
}

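// CMOVcc encodes as 0F 40+cc: the condition code is folded into the low
// nibble of the second opcode byte.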
void Assembler::cmovq(Condition cc, Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8(0x40 | cc);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::cmovq(Condition cc, Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x0F);
  emit_int8(0x40 | cc);
  emit_operand(dst, src);
}

void Assembler::cmpq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_int8((unsigned char)0x81);
  emit_operand(rdi, dst, 4);
  emit_int32(imm32);
}

void Assembler::cmpq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xF8, dst, imm32);
}

void Assembler::cmpq(Address dst, Register src) {
  InstructionMark im(this);
  prefixq(dst, src);
  emit_int8(0x3B); // note: 0x3B is CMP r64, r/m64, so src is encoded as the first operand here
  emit_operand(src, dst);
}

void Assembler::cmpq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x3B, 0xC0, dst, src);
}

void Assembler::cmpq(Register dst, Address  src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x3B);
  emit_operand(dst, src);
}

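// CMPXCHG r/m64, r64 (0F B1).  Only the instruction itself is emitted;
// callers that need atomicity must emit the LOCK prefix themselves.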
void Assembler::cmpxchgq(Register reg, Address adr) {
  InstructionMark im(this);
  prefixq(adr, reg);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xB1);
  emit_operand(reg, adr);
}

void Assembler::cvtsi2sdq(XMMRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  int encode = simd_prefix_and_encode_q(dst, dst, src, VEX_SIMD_F2);
  emit_int8(0x2A);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::cvtsi2sdq(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  simd_prefix_q(dst, dst, src, VEX_SIMD_F2);
  emit_int8(0x2A);
  emit_operand(dst, src);
}

void Assembler::cvtsi2ssq(XMMRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  int encode = simd_prefix_and_encode_q(dst, dst, src, VEX_SIMD_F3);
  emit_int8(0x2A);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::cvtsi2ssq(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  simd_prefix_q(dst, dst, src, VEX_SIMD_F3);
  emit_int8(0x2A);
  emit_operand(dst, src);
}

void Assembler::cvttsd2siq(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  int encode = simd_prefix_and_encode_q(dst, src, VEX_SIMD_F2);
  emit_int8(0x2C);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::cvttss2siq(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  int encode = simd_prefix_and_encode_q(dst, src, VEX_SIMD_F3);
  emit_int8(0x2C);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::decl(Register dst) {
  // Don't use it directly. Use MacroAssembler::decrementl() instead.
  // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xFF);
  emit_int8((unsigned char)(0xC8 | encode));
}

void Assembler::decq(Register dst) {
  // Don't use it directly. Use MacroAssembler::decrementq() instead.
  // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)0xFF);
  emit_int8((unsigned char)(0xC8 | encode));
}

void Assembler::decq(Address dst) {
  // Don't use it directly. Use MacroAssembler::decrementq() instead.
  InstructionMark im(this);
  prefixq(dst);
  emit_int8((unsigned char)0xFF);
  emit_operand(rcx, dst);
}

void Assembler::fxrstor(Address src) {
  prefixq(src);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAE);
  emit_operand(as_Register(1), src);
}

void Assembler::fxsave(Address dst) {
  prefixq(dst);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAE);
  emit_operand(as_Register(0), dst);
}

void Assembler::idivq(Register src) {
  int encode = prefixq_and_encode(src->encoding());
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xF8 | encode));
}

void Assembler::imulq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAF);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::imulq(Register dst, Register src, int value) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  if (is8bit(value)) {
    emit_int8(0x6B);
    emit_int8((unsigned char)(0xC0 | encode));
    emit_int8(value & 0xFF);
  } else {
    emit_int8(0x69);
    emit_int8((unsigned char)(0xC0 | encode));
    emit_int32(value);
  }
}

void Assembler::imulq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x0F);
  emit_int8((unsigned char) 0xAF);
  emit_operand(dst, src);
}

void Assembler::incl(Register dst) {
  // Don't use it directly. Use MacroAssembler::incrementl() instead.
  // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xFF);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::incq(Register dst) {
  // Don't use it directly. Use MacroAssembler::incrementq() instead.
  // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)0xFF);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::incq(Address dst) {
  // Don't use it directly. Use MacroAssembler::incrementq() instead.
  InstructionMark im(this);
  prefixq(dst);
  emit_int8((unsigned char)0xFF);
  emit_operand(rax, dst);
}

void Assembler::lea(Register dst, Address src) {
  leaq(dst, src);
}

void Assembler::leaq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8((unsigned char)0x8D);
  emit_operand(dst, src);
}

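// MOV r64, imm64 (REX.W + B8+rd), the "movabs" form: the only x86-64
// instruction form that carries a full 64-bit immediate.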
void Assembler::mov64(Register dst, int64_t imm64) {
  InstructionMark im(this);
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)(0xB8 | encode));
  emit_int64(imm64);
}

void Assembler::mov_literal64(Register dst, intptr_t imm64, RelocationHolder const& rspec) {
  InstructionMark im(this);
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)(0xB8 | encode));
  emit_data64(imm64, rspec);
}

void Assembler::mov_narrow_oop(Register dst, int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)(0xB8 | encode));
  emit_data((int)imm32, rspec, narrow_oop_operand);
}

void Assembler::mov_narrow_oop(Address dst, int32_t imm32,  RelocationHolder const& rspec) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0xC7);
  emit_operand(rax, dst, 4);
  emit_data((int)imm32, rspec, narrow_oop_operand);
}

void Assembler::cmp_narrow_oop(Register src1, int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  int encode = prefix_and_encode(src1->encoding());
  emit_int8((unsigned char)0x81);
  emit_int8((unsigned char)(0xF8 | encode));
  emit_data((int)imm32, rspec, narrow_oop_operand);
}

void Assembler::cmp_narrow_oop(Address src1, int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  prefix(src1);
  emit_int8((unsigned char)0x81);
  emit_operand(rax, src1, 4);
  emit_data((int)imm32, rspec, narrow_oop_operand);
}

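// LZCNT is encoded as F3 0F BD, i.e. BSR with a mandatory F3 prefix; on CPUs
// without LZCNT the prefix is ignored and the instruction executes as BSR,
// which is why both lzcntq() and bsrq() assert on supports_lzcnt().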
void Assembler::lzcntq(Register dst, Register src) {
  assert(VM_Version::supports_lzcnt(), "encoding is treated as BSR");
  emit_int8((unsigned char)0xF3);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBD);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::movdq(XMMRegister dst, Register src) {
  // table D-1 says MMX/SSE2
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  int encode = simd_prefix_and_encode_q(dst, src, VEX_SIMD_66);
  emit_int8(0x6E);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::movdq(Register dst, XMMRegister src) {
  // table D-1 says MMX/SSE2
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  // swap src/dst to get correct prefix
  int encode = simd_prefix_and_encode_q(src, dst, VEX_SIMD_66);
  emit_int8(0x7E);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::movq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8((unsigned char)0x8B);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::movq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8((unsigned char)0x8B);
  emit_operand(dst, src);
}

void Assembler::movq(Address dst, Register src) {
  InstructionMark im(this);
  prefixq(dst, src);
  emit_int8((unsigned char)0x89);
  emit_operand(src, dst);
}

void Assembler::movsbq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBE);
  emit_operand(dst, src);
}

void Assembler::movsbq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBE);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::movslq(Register dst, int32_t imm32) {
  // dbx shows movslq(rcx, 3) as movq     $0x0000000049000000,(%rbx)
  // and movslq(r8, 3); as movl     $0x0000000048000000,(%rbx)
  // as a result we shouldn't use this form until it has been tested at runtime...
  ShouldNotReachHere();
  InstructionMark im(this);
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)(0xC7 | encode));
  emit_int32(imm32);
}

void Assembler::movslq(Address dst, int32_t imm32) {
  assert(is_simm32(imm32), "lost bits");
  InstructionMark im(this);
  prefixq(dst);
  emit_int8((unsigned char)0xC7);
  emit_operand(rax, dst, 4);
  emit_int32(imm32);
}

void Assembler::movslq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x63);
  emit_operand(dst, src);
}

void Assembler::movslq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x63);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::movswq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBF);
  emit_operand(dst, src);
}

void Assembler::movswq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8((unsigned char)0x0F);
  emit_int8((unsigned char)0xBF);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::movzbq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8((unsigned char)0x0F);
  emit_int8((unsigned char)0xB6);
  emit_operand(dst, src);
}

void Assembler::movzbq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xB6);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::movzwq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8((unsigned char)0x0F);
  emit_int8((unsigned char)0xB7);
  emit_operand(dst, src);
}

void Assembler::movzwq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8((unsigned char)0x0F);
  emit_int8((unsigned char)0xB7);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::negq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xD8 | encode));
}

void Assembler::notq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xD0 | encode));
}

void Assembler::orq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_int8((unsigned char)0x81);
  emit_operand(rcx, dst, 4);
  emit_int32(imm32);
}

void Assembler::orq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xC8, dst, imm32);
}

void Assembler::orq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x0B);
  emit_operand(dst, src);
}

void Assembler::orq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x0B, 0xC0, dst, src);
}

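// Reloads the registers stored by pusha() below; slot 11 (the saved rsp)
// is intentionally skipped.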
void Assembler::popa() { // 64bit
  movq(r15, Address(rsp, 0));
  movq(r14, Address(rsp, wordSize));
  movq(r13, Address(rsp, 2 * wordSize));
  movq(r12, Address(rsp, 3 * wordSize));
  movq(r11, Address(rsp, 4 * wordSize));
  movq(r10, Address(rsp, 5 * wordSize));
  movq(r9,  Address(rsp, 6 * wordSize));
  movq(r8,  Address(rsp, 7 * wordSize));
  movq(rdi, Address(rsp, 8 * wordSize));
  movq(rsi, Address(rsp, 9 * wordSize));
  movq(rbp, Address(rsp, 10 * wordSize));
  // skip rsp
  movq(rbx, Address(rsp, 12 * wordSize));
  movq(rdx, Address(rsp, 13 * wordSize));
  movq(rcx, Address(rsp, 14 * wordSize));
  movq(rax, Address(rsp, 15 * wordSize));

  addq(rsp, 16 * wordSize);
}

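// POPCNT is F3 0F B8.  The mandatory F3 prefix must precede the REX prefix,
// so it is emitted before prefixq()/prefixq_and_encode().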
void Assembler::popcntq(Register dst, Address src) {
  assert(VM_Version::supports_popcnt(), "must support");
  InstructionMark im(this);
  emit_int8((unsigned char)0xF3);
  prefixq(src, dst);
  emit_int8((unsigned char)0x0F);
  emit_int8((unsigned char)0xB8);
  emit_operand(dst, src);
}

void Assembler::popcntq(Register dst, Register src) {
  assert(VM_Version::supports_popcnt(), "must support");
  emit_int8((unsigned char)0xF3);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8((unsigned char)0x0F);
  emit_int8((unsigned char)0xB8);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::popq(Address dst) {
  InstructionMark im(this);
  prefixq(dst);
  emit_int8((unsigned char)0x8F);
  emit_operand(rax, dst);
}

void Assembler::pusha() { // 64bit
  // we have to store original rsp.  ABI says that 128 bytes
  // below rsp are local scratch.
  movq(Address(rsp, -5 * wordSize), rsp);

  subq(rsp, 16 * wordSize);

  movq(Address(rsp, 15 * wordSize), rax);
  movq(Address(rsp, 14 * wordSize), rcx);
  movq(Address(rsp, 13 * wordSize), rdx);
  movq(Address(rsp, 12 * wordSize), rbx);
  // skip rsp
  movq(Address(rsp, 10 * wordSize), rbp);
  movq(Address(rsp, 9 * wordSize), rsi);
  movq(Address(rsp, 8 * wordSize), rdi);
  movq(Address(rsp, 7 * wordSize), r8);
  movq(Address(rsp, 6 * wordSize), r9);
  movq(Address(rsp, 5 * wordSize), r10);
  movq(Address(rsp, 4 * wordSize), r11);
  movq(Address(rsp, 3 * wordSize), r12);
  movq(Address(rsp, 2 * wordSize), r13);
  movq(Address(rsp, wordSize), r14);
  movq(Address(rsp, 0), r15);
}

void Assembler::pushq(Address src) {
  InstructionMark im(this);
  prefixq(src);
  emit_int8((unsigned char)0xFF);
  emit_operand(rsi, src);
}

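// For the shift/rotate group a count of 1 uses the short 0xD1 encoding;
// any other immediate count uses 0xC1 followed by the count byte.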
void Assembler::rclq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_int8((unsigned char)0xD1);
    emit_int8((unsigned char)(0xD0 | encode));
  } else {
    emit_int8((unsigned char)0xC1);
    emit_int8((unsigned char)(0xD0 | encode));
    emit_int8(imm8);
  }
}

void Assembler::sarq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_int8((unsigned char)0xD1);
    emit_int8((unsigned char)(0xF8 | encode));
  } else {
    emit_int8((unsigned char)0xC1);
    emit_int8((unsigned char)(0xF8 | encode));
    emit_int8(imm8);
  }
}

void Assembler::sarq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)0xD3);
  emit_int8((unsigned char)(0xF8 | encode));
}

void Assembler::sbbq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_arith_operand(0x81, rbx, dst, imm32);
}

void Assembler::sbbq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xD8, dst, imm32);
}

void Assembler::sbbq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x1B);
  emit_operand(dst, src);
}

void Assembler::sbbq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x1B, 0xC0, dst, src);
}

void Assembler::shlq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_int8((unsigned char)0xD1);
    emit_int8((unsigned char)(0xE0 | encode));
  } else {
    emit_int8((unsigned char)0xC1);
    emit_int8((unsigned char)(0xE0 | encode));
    emit_int8(imm8);
  }
}

void Assembler::shlq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)0xD3);
  emit_int8((unsigned char)(0xE0 | encode));
}

void Assembler::shrq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)0xC1);
  emit_int8((unsigned char)(0xE8 | encode));
  emit_int8(imm8);
}

void Assembler::shrq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)0xD3);
  emit_int8((unsigned char)(0xE8 | encode));
}

void Assembler::subq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_arith_operand(0x81, rbp, dst, imm32);
}

void Assembler::subq(Address dst, Register src) {
  InstructionMark im(this);
  prefixq(dst, src);
  emit_int8(0x29);
  emit_operand(src, dst);
}

void Assembler::subq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xE8, dst, imm32);
}

// Force generation of a 4 byte immediate value even if it fits into 8bit
void Assembler::subq_imm32(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith_imm32(0x81, 0xE8, dst, imm32);
}

void Assembler::subq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x2B);
  emit_operand(dst, src);
}

void Assembler::subq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x2B, 0xC0, dst, src);
}

void Assembler::testq(Register dst, int32_t imm32) {
  // not using emit_arith because test
  // doesn't support sign-extension of
  // 8bit operands
  int encode = dst->encoding();
  if (encode == 0) {
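    // dst == rax: use the short TEST RAX, imm32 form (REX.W prefix + opcode 0xA9).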
    prefix(REX_W);
    emit_int8((unsigned char)0xA9);
  } else {
    encode = prefixq_and_encode(encode);
    emit_int8((unsigned char)0xF7);
    emit_int8((unsigned char)(0xC0 | encode));
  }
  emit_int32(imm32);
}

void Assembler::testq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x85, 0xC0, dst, src);
}

void Assembler::xaddq(Address dst, Register src) {
  InstructionMark im(this);
  prefixq(dst, src);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xC1);
  emit_operand(src, dst);
}

void Assembler::xchgq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8((unsigned char)0x87);
  emit_operand(dst, src);
}

void Assembler::xchgq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8((unsigned char)0x87);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::xorq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x33, 0xC0, dst, src);
}

void Assembler::xorq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x33);
  emit_operand(dst, src);
}

#endif // !LP64