2 * Copyright (C) 2008 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 #ifndef MacroAssemblerX86Common_h
27 #define MacroAssemblerX86Common_h
31 #include "X86Assembler.h"
32 #include "AbstractMacroAssembler.h"
36 class MacroAssemblerX86Common : public AbstractMacroAssembler<X86Assembler> {
// The DoubleCondition enum below packs two private flag bits alongside the
// raw x86 condition code; both are masked off (~DoubleConditionBits) before
// the condition is emitted (see branchDouble):
//   DoubleConditionBitInvert  - swap the operands of the ucomisd comparison.
//   DoubleConditionBitSpecial - the condition needs extra parity-flag
//                               handling for the unordered (NaN) case.
37 static const int DoubleConditionBitInvert = 0x10;
38 static const int DoubleConditionBitSpecial = 0x20;
39 static const int DoubleConditionBits = DoubleConditionBitInvert | DoubleConditionBitSpecial;
42 typedef X86Assembler::FPRegisterID FPRegisterID;
// Largest displacement representable by the one-byte-offset ("compact")
// load used by load32WithCompactAddressOffsetPatch / repatchCompact below.
44 static const int MaximumCompactPtrAlignedAddressOffset = 127;
// Integer comparison conditions, mapped directly onto x86 condition codes.
46 enum RelationalCondition {
47 Equal = X86Assembler::ConditionE,
48 NotEqual = X86Assembler::ConditionNE,
49 Above = X86Assembler::ConditionA,
50 AboveOrEqual = X86Assembler::ConditionAE,
51 Below = X86Assembler::ConditionB,
52 BelowOrEqual = X86Assembler::ConditionBE,
53 GreaterThan = X86Assembler::ConditionG,
54 GreaterThanOrEqual = X86Assembler::ConditionGE,
55 LessThan = X86Assembler::ConditionL,
56 LessThanOrEqual = X86Assembler::ConditionLE
// Conditions describing the flags produced by an arithmetic operation.
59 enum ResultCondition {
60 Overflow = X86Assembler::ConditionO,
61 Signed = X86Assembler::ConditionS,
62 Zero = X86Assembler::ConditionE,
63 NonZero = X86Assembler::ConditionNE
// Floating-point comparison conditions; see the flag-bit comment above.
66 enum DoubleCondition {
67 // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
68 DoubleEqual = X86Assembler::ConditionE | DoubleConditionBitSpecial,
69 DoubleNotEqual = X86Assembler::ConditionNE,
70 DoubleGreaterThan = X86Assembler::ConditionA,
71 DoubleGreaterThanOrEqual = X86Assembler::ConditionAE,
72 DoubleLessThan = X86Assembler::ConditionA | DoubleConditionBitInvert,
73 DoubleLessThanOrEqual = X86Assembler::ConditionAE | DoubleConditionBitInvert,
74 // If either operand is NaN, these conditions always evaluate to true.
75 DoubleEqualOrUnordered = X86Assembler::ConditionE,
76 DoubleNotEqualOrUnordered = X86Assembler::ConditionNE | DoubleConditionBitSpecial,
77 DoubleGreaterThanOrUnordered = X86Assembler::ConditionB | DoubleConditionBitInvert,
78 DoubleGreaterThanOrEqualOrUnordered = X86Assembler::ConditionBE | DoubleConditionBitInvert,
79 DoubleLessThanOrUnordered = X86Assembler::ConditionB,
80 DoubleLessThanOrEqualOrUnordered = X86Assembler::ConditionBE,
// Compile-time check that the two flag bits cannot collide with any raw x86
// condition code used by the enums above.
83 !((X86Assembler::ConditionE | X86Assembler::ConditionNE | X86Assembler::ConditionA | X86Assembler::ConditionAE | X86Assembler::ConditionB | X86Assembler::ConditionBE) & DoubleConditionBits),
84 DoubleConditionBits_should_not_interfere_with_X86Assembler_Condition_codes);
86 static const RegisterID stackPointerRegister = X86Registers::esp;
88 // Integer arithmetic operations:
90 // Operations are typically two operand - operation(source, srcDst)
91 // For many operations the source may be an TrustedImm32, the srcDst operand
92 // may often be a memory location (explicitly described using an Address
// dest += src
95 void add32(RegisterID src, RegisterID dest)
97 m_assembler.addl_rr(src, dest);
// [address] += imm
100 void add32(TrustedImm32 imm, Address address)
102 m_assembler.addl_im(imm.m_value, address.offset, address.base);
// dest += imm
105 void add32(TrustedImm32 imm, RegisterID dest)
107 m_assembler.addl_ir(imm.m_value, dest);
// dest += [src]
110 void add32(Address src, RegisterID dest)
112 m_assembler.addl_mr(src.offset, src.base, dest);
// [dest] += src
115 void add32(RegisterID src, Address dest)
117 m_assembler.addl_rm(src, dest.offset, dest.base);
// dest &= src
120 void and32(RegisterID src, RegisterID dest)
122 m_assembler.andl_rr(src, dest);
// dest &= imm
125 void and32(TrustedImm32 imm, RegisterID dest)
127 m_assembler.andl_ir(imm.m_value, dest);
// [dest] &= src
130 void and32(RegisterID src, Address dest)
132 m_assembler.andl_rm(src, dest.offset, dest.base);
// dest &= [src]
135 void and32(Address src, RegisterID dest)
137 m_assembler.andl_mr(src.offset, src.base, dest);
// [address] &= imm
140 void and32(TrustedImm32 imm, Address address)
142 m_assembler.andl_im(imm.m_value, address.offset, address.base);
// Three-operand form: dest = op1 & op2.  When op1 == op2 the AND is a no-op,
// so a zero-extending move of op1 suffices.  NOTE(review): the remaining
// branches of this overload are not present in this view of the file.
145 void and32(RegisterID op1, RegisterID op2, RegisterID dest)
148 zeroExtend32ToPtr(op1, dest);
149 else if (op1 == dest)
// Three-operand form: dest = src & imm.  NOTE(review): body not present in
// this view of the file.
157 void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
// Left (logical) shift.  x86 can only shift by a count held in %cl or by an
// immediate, so the register forms below shuffle the shift amount into ecx
// when necessary.
163 void lshift32(RegisterID shift_amount, RegisterID dest)
165 ASSERT(shift_amount != dest);
167 if (shift_amount == X86Registers::ecx)
168 m_assembler.shll_CLr(dest);
170 // On x86 we can only shift by ecx; if asked to shift by another register we'll
171 // need to rejig the shift amount into ecx first, and restore the registers afterwards.
172 // If dest is ecx, then shift the swapped register!
173 swap(shift_amount, X86Registers::ecx);
174 m_assembler.shll_CLr(dest == X86Registers::ecx ? shift_amount : dest);
175 swap(shift_amount, X86Registers::ecx);
// Three-operand form: dest = src << shift_amount, implemented via the
// two-operand form above.
179 void lshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
181 ASSERT(shift_amount != dest);
185 lshift32(shift_amount, dest);
// dest <<= imm (immediate shift count, no ecx shuffling needed).
188 void lshift32(TrustedImm32 imm, RegisterID dest)
190 m_assembler.shll_i8r(imm.m_value, dest);
// Three-operand immediate form.  NOTE(review): body not present in this view.
193 void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
// dest *= src (signed 32-bit multiply).
200 void mul32(RegisterID src, RegisterID dest)
202 m_assembler.imull_rr(src, dest);
// dest *= [src]
205 void mul32(Address src, RegisterID dest)
207 m_assembler.imull_mr(src.offset, src.base, dest);
// dest = src * imm (three-operand imul form).
210 void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
212 m_assembler.imull_i32r(src, imm.m_value, dest);
// Two's-complement negate, register and memory forms.
215 void neg32(RegisterID srcDest)
217 m_assembler.negl_r(srcDest);
220 void neg32(Address srcDest)
222 m_assembler.negl_m(srcDest.offset, srcDest.base);
// Bitwise NOT, register and memory forms.
225 void not32(RegisterID srcDest)
227 m_assembler.notl_r(srcDest);
230 void not32(Address srcDest)
232 m_assembler.notl_m(srcDest.offset, srcDest.base);
// or32 mirrors the add32/and32 overload set above.
235 void or32(RegisterID src, RegisterID dest)
237 m_assembler.orl_rr(src, dest);
240 void or32(TrustedImm32 imm, RegisterID dest)
242 m_assembler.orl_ir(imm.m_value, dest);
245 void or32(RegisterID src, Address dest)
247 m_assembler.orl_rm(src, dest.offset, dest.base);
250 void or32(Address src, RegisterID dest)
252 m_assembler.orl_mr(src.offset, src.base, dest);
255 void or32(TrustedImm32 imm, Address address)
257 m_assembler.orl_im(imm.m_value, address.offset, address.base);
// Three-operand form: dest = op1 | op2.  When op1 == op2 the OR is a no-op,
// so a zero-extending move of op1 suffices.  NOTE(review): the remaining
// branches of this overload are not present in this view of the file.
260 void or32(RegisterID op1, RegisterID op2, RegisterID dest)
263 zeroExtend32ToPtr(op1, dest);
264 else if (op1 == dest)
// Three-operand form: dest = src | imm.  NOTE(review): body not present in
// this view of the file.
272 void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
// Arithmetic (sign-propagating, sarl) right shift; same ecx shuffling scheme
// as lshift32 above.
278 void rshift32(RegisterID shift_amount, RegisterID dest)
280 ASSERT(shift_amount != dest);
282 if (shift_amount == X86Registers::ecx)
283 m_assembler.sarl_CLr(dest);
285 // On x86 we can only shift by ecx; if asked to shift by another register we'll
286 // need to rejig the shift amount into ecx first, and restore the registers afterwards.
287 // If dest is ecx, then shift the swapped register!
288 swap(shift_amount, X86Registers::ecx);
289 m_assembler.sarl_CLr(dest == X86Registers::ecx ? shift_amount : dest);
290 swap(shift_amount, X86Registers::ecx);
// Three-operand form, implemented via the two-operand form above.
294 void rshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
296 ASSERT(shift_amount != dest);
300 rshift32(shift_amount, dest);
// dest >>= imm (arithmetic, immediate count).
303 void rshift32(TrustedImm32 imm, RegisterID dest)
305 m_assembler.sarl_i8r(imm.m_value, dest);
// Three-operand immediate form.  NOTE(review): body not present in this view.
308 void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
// Logical (zero-filling, shrl) right shift; same ecx shuffling scheme.
315 void urshift32(RegisterID shift_amount, RegisterID dest)
317 ASSERT(shift_amount != dest);
319 if (shift_amount == X86Registers::ecx)
320 m_assembler.shrl_CLr(dest);
322 // On x86 we can only shift by ecx; if asked to shift by another register we'll
323 // need to rejig the shift amount into ecx first, and restore the registers afterwards.
324 // If dest is ecx, then shift the swapped register!
325 swap(shift_amount, X86Registers::ecx);
326 m_assembler.shrl_CLr(dest == X86Registers::ecx ? shift_amount : dest);
327 swap(shift_amount, X86Registers::ecx);
// Three-operand form, implemented via the two-operand form above.
331 void urshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
333 ASSERT(shift_amount != dest);
337 urshift32(shift_amount, dest);
// dest >>>= imm (logical, immediate count).
340 void urshift32(TrustedImm32 imm, RegisterID dest)
342 m_assembler.shrl_i8r(imm.m_value, dest);
// Three-operand immediate form, implemented via the two-operand form above.
345 void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
349 urshift32(imm, dest);
// dest -= src
352 void sub32(RegisterID src, RegisterID dest)
354 m_assembler.subl_rr(src, dest);
// dest -= imm
357 void sub32(TrustedImm32 imm, RegisterID dest)
359 m_assembler.subl_ir(imm.m_value, dest);
// [address] -= imm
362 void sub32(TrustedImm32 imm, Address address)
364 m_assembler.subl_im(imm.m_value, address.offset, address.base);
// dest -= [src]
367 void sub32(Address src, RegisterID dest)
369 m_assembler.subl_mr(src.offset, src.base, dest);
// [dest] -= src
372 void sub32(RegisterID src, Address dest)
374 m_assembler.subl_rm(src, dest.offset, dest.base);
// xor32 mirrors the overload set above.
378 void xor32(RegisterID src, RegisterID dest)
380 m_assembler.xorl_rr(src, dest);
383 void xor32(TrustedImm32 imm, Address dest)
385 m_assembler.xorl_im(imm.m_value, dest.offset, dest.base);
388 void xor32(TrustedImm32 imm, RegisterID dest)
390 m_assembler.xorl_ir(imm.m_value, dest);
393 void xor32(RegisterID src, Address dest)
395 m_assembler.xorl_rm(src, dest.offset, dest.base);
398 void xor32(Address src, RegisterID dest)
400 m_assembler.xorl_mr(src.offset, src.base, dest);
// Three-operand form: dest = op1 ^ op2.  When op1 == op2 the result is zero.
// NOTE(review): the remaining branches of this overload are not present in
// this view of the file.
403 void xor32(RegisterID op1, RegisterID op2, RegisterID dest)
406 move(TrustedImm32(0), dest);
407 else if (op1 == dest)
// Three-operand form: dest = src ^ imm.  NOTE(review): body not present in
// this view of the file.
415 void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
// dst = sqrt(src), scalar double (sqrtsd).
421 void sqrtDouble(FPRegisterID src, FPRegisterID dst)
423 m_assembler.sqrtsd_rr(src, dst);
// dst = ~dst & src, packed-double AND-NOT (andnpd).
426 void andnotDouble(FPRegisterID src, FPRegisterID dst)
428 m_assembler.andnpd_rr(src, dst);
431 // Memory access operations:
433 // Loads are of the form load(address, destination) and stores of the form
434 // store(source, address). The source for a store may be a TrustedImm32. Address
435 // operand objects to loads and store will be implicitly constructed if a
436 // register is passed.
438 void load32(ImplicitAddress address, RegisterID dest)
440 m_assembler.movl_mr(address.offset, address.base, dest);
443 void load32(BaseIndex address, RegisterID dest)
445 m_assembler.movl_mr(address.offset, address.base, address.index, address.scale, dest);
// x86 permits unaligned accesses, so this is just a plain load32.
448 void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
450 load32(address, dest);
// Load with a 32-bit displacement that can be repatched later; the returned
// DataLabel32 identifies the displacement field.
453 DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
455 m_assembler.movl_mr_disp32(address.offset, address.base, dest);
456 return DataLabel32(this);
// As above, but with a one-byte displacement (see
// MaximumCompactPtrAlignedAddressOffset).
459 DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
461 m_assembler.movl_mr_disp8(address.offset, address.base, dest);
462 return DataLabelCompact(this);
// Rewrite the displacement of a previously emitted compact load.
465 static void repatchCompact(CodeLocationDataLabelCompact dataLabelCompact, int32_t value)
468 ASSERT(value < MaximumCompactPtrAlignedAddressOffset);
469 AssemblerType_T::repatchCompact(dataLabelCompact.dataLocation(), value);
472 DataLabelCompact loadCompactWithAddressOffsetPatch(Address address, RegisterID dest)
474 m_assembler.movl_mr_disp8(address.offset, address.base, dest);
475 return DataLabelCompact(this);
// Byte load, zero-extended to 32 bits (movzbl).
478 void load8(BaseIndex address, RegisterID dest)
480 m_assembler.movzbl_mr(address.offset, address.base, address.index, address.scale, dest);
// Half-word loads, zero-extended to 32 bits (movzwl).
483 void load16(BaseIndex address, RegisterID dest)
485 m_assembler.movzwl_mr(address.offset, address.base, address.index, address.scale, dest);
488 void load16(Address address, RegisterID dest)
490 m_assembler.movzwl_mr(address.offset, address.base, dest);
// Store with a repatchable 32-bit displacement.
493 DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
495 m_assembler.movl_rm_disp32(src, address.offset, address.base);
496 return DataLabel32(this);
499 void store32(RegisterID src, ImplicitAddress address)
501 m_assembler.movl_rm(src, address.offset, address.base);
504 void store32(RegisterID src, BaseIndex address)
506 m_assembler.movl_rm(src, address.offset, address.base, address.index, address.scale);
509 void store32(TrustedImm32 imm, ImplicitAddress address)
511 m_assembler.movl_i32m(imm.m_value, address.offset, address.base);
515 // Floating-point operation:
517 // Presently only supports SSE, not x87 floating point.
519 void moveDouble(FPRegisterID src, FPRegisterID dest)
521 ASSERT(isSSE2Present());
523 m_assembler.movsd_rr(src, dest);
526 void loadDouble(ImplicitAddress address, FPRegisterID dest)
528 ASSERT(isSSE2Present());
529 m_assembler.movsd_mr(address.offset, address.base, dest);
532 void storeDouble(FPRegisterID src, ImplicitAddress address)
534 ASSERT(isSSE2Present());
535 m_assembler.movsd_rm(src, address.offset, address.base);
// dest += src (scalar double).
538 void addDouble(FPRegisterID src, FPRegisterID dest)
540 ASSERT(isSSE2Present());
541 m_assembler.addsd_rr(src, dest);
// Three-operand form: dest = op1 + op2.  Addition commutes, so either
// operand may be accumulated into dest.  NOTE(review): the guard selecting
// between the two paths is not present in this view of the file.
544 void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
546 ASSERT(isSSE2Present());
548 addDouble(op2, dest);
550 moveDouble(op2, dest);
551 addDouble(op1, dest);
555 void addDouble(Address src, FPRegisterID dest)
557 ASSERT(isSSE2Present());
558 m_assembler.addsd_mr(src.offset, src.base, dest);
// dest /= src (scalar double).
561 void divDouble(FPRegisterID src, FPRegisterID dest)
563 ASSERT(isSSE2Present());
564 m_assembler.divsd_rr(src, dest);
// Three-operand form: dest = op1 / op2.  Division does not commute, so
// dest must not alias op2 unless it is also op1.
567 void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
569 // B := A / B is invalid.
570 ASSERT(op1 == dest || op2 != dest);
572 moveDouble(op1, dest);
573 divDouble(op2, dest);
576 void divDouble(Address src, FPRegisterID dest)
578 ASSERT(isSSE2Present());
579 m_assembler.divsd_mr(src.offset, src.base, dest);
// dest -= src (scalar double).
582 void subDouble(FPRegisterID src, FPRegisterID dest)
584 ASSERT(isSSE2Present());
585 m_assembler.subsd_rr(src, dest);
// Three-operand form: dest = op1 - op2.  Subtraction does not commute, so
// dest must not alias op2 unless it is also op1.
588 void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
590 // B := A - B is invalid.
591 ASSERT(op1 == dest || op2 != dest);
593 moveDouble(op1, dest);
594 subDouble(op2, dest);
597 void subDouble(Address src, FPRegisterID dest)
599 ASSERT(isSSE2Present());
600 m_assembler.subsd_mr(src.offset, src.base, dest);
// dest *= src (scalar double).
603 void mulDouble(FPRegisterID src, FPRegisterID dest)
605 ASSERT(isSSE2Present());
606 m_assembler.mulsd_rr(src, dest);
// Three-operand form: dest = op1 * op2.  Multiplication commutes, so either
// operand may be accumulated into dest.  NOTE(review): the guard selecting
// between the two paths is not present in this view of the file.
609 void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
611 ASSERT(isSSE2Present());
613 mulDouble(op2, dest);
615 moveDouble(op2, dest);
616 mulDouble(op1, dest);
620 void mulDouble(Address src, FPRegisterID dest)
622 ASSERT(isSSE2Present());
623 m_assembler.mulsd_mr(src.offset, src.base, dest);
// Signed int32 -> double conversion (cvtsi2sd), register and memory forms.
626 void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
628 ASSERT(isSSE2Present());
629 m_assembler.cvtsi2sd_rr(src, dest);
632 void convertInt32ToDouble(Address src, FPRegisterID dest)
634 ASSERT(isSSE2Present());
635 m_assembler.cvtsi2sd_mr(src.offset, src.base, dest);
// Compare two doubles with ucomisd and branch on 'cond'.
// DoubleConditionBitInvert swaps the comparison operands;
// DoubleConditionBitSpecial marks the two conditions (DoubleEqual,
// DoubleNotEqualOrUnordered) that need explicit parity-flag handling,
// since ucomisd raises PF when the comparison is unordered (NaN).
638 Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
640 ASSERT(isSSE2Present());
642 if (cond & DoubleConditionBitInvert)
643 m_assembler.ucomisd_rr(left, right);
645 m_assembler.ucomisd_rr(right, left);
// DoubleEqual must NOT be taken when unordered: skip the je on parity.
647 if (cond == DoubleEqual) {
648 Jump isUnordered(m_assembler.jp());
649 Jump result = Jump(m_assembler.je());
650 isUnordered.link(this);
// DoubleNotEqualOrUnordered must be taken when unordered: branch
// unconditionally on parity, fall through only when equal.
652 } else if (cond == DoubleNotEqualOrUnordered) {
653 Jump isUnordered(m_assembler.jp());
654 Jump isEqual(m_assembler.je());
655 isUnordered.link(this);
656 Jump result = jump();
// All remaining conditions map straight to an x86 condition code once the
// private flag bits are masked off.
661 ASSERT(!(cond & DoubleConditionBitSpecial));
662 return Jump(m_assembler.jCC(static_cast<X86Assembler::Condition>(cond & ~DoubleConditionBits)));
665 // Truncates 'src' to an integer, and places the result in 'dest'.
666 // If the result is not representable as a 32 bit value, branch.
667 // May also branch for some values that are representable in 32 bits
668 // (specifically, in this case, INT_MIN).
669 enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };
// cvttsd2si produces 0x80000000 (INT_MIN) on overflow/invalid input, so the
// result is compared against that sentinel to detect failure.
670 Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
672 ASSERT(isSSE2Present());
673 m_assembler.cvttsd2si_rr(src, dest);
674 return branch32(branchType ? NotEqual : Equal, dest, TrustedImm32(0x80000000));
677 // Convert 'src' to an integer, and places the result in 'dest'.
678 // If the result is not representable as a 32 bit value, branch.
679 // May also branch for some values that are representable in 32 bits
680 // (specifically, in this case, 0).
681 void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID fpTemp)
683 ASSERT(isSSE2Present());
684 m_assembler.cvttsd2si_rr(src, dest);
686 // If the result is zero, it might have been -0.0, and the double comparison won't catch this!
687 failureCases.append(branchTest32(Zero, dest));
689 // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
690 convertInt32ToDouble(dest, fpTemp);
691 m_assembler.ucomisd_rr(fpTemp, src);
692 failureCases.append(m_assembler.jp());
693 failureCases.append(m_assembler.jne());
// Branch if 'reg' != 0.0; 'scratch' is clobbered (zeroed via xorpd).
696 Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID scratch)
698 ASSERT(isSSE2Present());
699 m_assembler.xorpd_rr(scratch, scratch);
700 return branchDouble(DoubleNotEqual, reg, scratch);
// Branch if 'reg' == 0.0 or is NaN; 'scratch' is clobbered.
703 Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID scratch)
705 ASSERT(isSSE2Present());
706 m_assembler.xorpd_rr(scratch, scratch);
707 return branchDouble(DoubleEqualOrUnordered, reg, scratch);
710 // Stack manipulation operations:
712 // The ABI is assumed to provide a stack abstraction to memory,
713 // containing machine word sized units of data. Push and pop
714 // operations add and remove a single register sized unit of data
715 // to or from the stack. Peek and poke operations read or write
716 // values on the stack, without moving the current stack position.
718 void pop(RegisterID dest)
720 m_assembler.pop_r(dest);
723 void push(RegisterID src)
725 m_assembler.push_r(src);
728 void push(Address address)
730 m_assembler.push_m(address.offset, address.base);
733 void push(TrustedImm32 imm)
735 m_assembler.push_i32(imm.m_value);
739 // Register move operations:
741 // Move values in registers.
// Load a 32-bit immediate.  The xorl path presumably handles the
// imm == 0 special case (shorter encoding); the selecting guard is not
// present in this view of the file - TODO confirm.
743 void move(TrustedImm32 imm, RegisterID dest)
745 // Note: on 64-bit the TrustedImm32 value is zero extended into the register, it
746 // may be useful to have a separate version that sign extends the value?
748 m_assembler.xorl_rr(dest, dest);
750 m_assembler.movl_i32r(imm.m_value, dest);
// NOTE(review): two parallel sets of move/swap/extend follow - a 64-bit set
// (movq/xchgq/movsxd) and a 32-bit set (movl/xchgl).  They are presumably
// separated by CPU(X86_64)/CPU(X86) preprocessor guards that are not present
// in this view of the file - confirm against the full source.
754 void move(RegisterID src, RegisterID dest)
756 // Note: on 64-bit this is a full register move; perhaps it would be
757 // useful to have separate move32 & movePtr, with move32 zero extending?
759 m_assembler.movq_rr(src, dest);
762 void move(TrustedImmPtr imm, RegisterID dest)
764 m_assembler.movq_i64r(imm.asIntptr(), dest);
767 void swap(RegisterID reg1, RegisterID reg2)
770 m_assembler.xchgq_rr(reg1, reg2);
// Sign-extend a 32-bit value to 64 bits (movsxd).
773 void signExtend32ToPtr(RegisterID src, RegisterID dest)
775 m_assembler.movsxd_rr(src, dest);
// A 32-bit movl implicitly zero-extends to 64 bits.
778 void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
780 m_assembler.movl_rr(src, dest);
// 32-bit variants: plain movl/xchgl; sign/zero extension to "pointer" width
// is trivial on a 32-bit target.
783 void move(RegisterID src, RegisterID dest)
786 m_assembler.movl_rr(src, dest);
789 void move(TrustedImmPtr imm, RegisterID dest)
791 m_assembler.movl_i32r(imm.asIntptr(), dest);
794 void swap(RegisterID reg1, RegisterID reg2)
797 m_assembler.xchgl_rr(reg1, reg2);
800 void signExtend32ToPtr(RegisterID src, RegisterID dest)
805 void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
812 // Forwards / external control flow operations:
814 // This set of jump and conditional branch operations return a Jump
815 // object which may be linked at a later point, allow forwards jump,
816 // or jumps that will require external linkage (after the code has been
819 // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
820 // respectively, for unsigned comparisons the names b, a, be, and ae are
821 // used (representing the names 'below' and 'above').
823 // Operands to the comparison are provided in the expected order, e.g.
824 // jle32(reg1, TrustedImm32(5)) will branch if the value held in reg1, when
825 // treated as a signed 32bit value, is less than or equal to 5.
827 // jz and jnz test whether the first operand is equal to zero, and take
828 // an optional second operand of a mask under which to perform the test.
// Byte compare-and-branch against an immediate.
831 Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
833 m_assembler.cmpb_im(right.m_value, left.offset, left.base);
834 return Jump(m_assembler.jCC(x86Condition(cond)));
837 Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right)
839 m_assembler.cmpl_rr(right, left);
840 return Jump(m_assembler.jCC(x86Condition(cond)));
// Comparing (not-)equal against zero is done with the shorter test
// instruction instead of cmp.
843 Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right)
845 if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
846 m_assembler.testl_rr(left, left);
848 m_assembler.cmpl_ir(right.m_value, left);
849 return Jump(m_assembler.jCC(x86Condition(cond)));
852 Jump branch32(RelationalCondition cond, RegisterID left, Address right)
854 m_assembler.cmpl_mr(right.offset, right.base, left);
855 return Jump(m_assembler.jCC(x86Condition(cond)));
858 Jump branch32(RelationalCondition cond, Address left, RegisterID right)
860 m_assembler.cmpl_rm(right, left.offset, left.base);
861 return Jump(m_assembler.jCC(x86Condition(cond)));
864 Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
866 m_assembler.cmpl_im(right.m_value, left.offset, left.base);
867 return Jump(m_assembler.jCC(x86Condition(cond)));
870 Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
872 m_assembler.cmpl_im(right.m_value, left.offset, left.base, left.index, left.scale);
873 return Jump(m_assembler.jCC(x86Condition(cond)));
// x86 permits unaligned accesses, so this is just a plain branch32.
876 Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
878 return branch32(cond, left, right);
// 16-bit compare-and-branch variants; same test-vs-zero optimization.
881 Jump branch16(RelationalCondition cond, RegisterID left, TrustedImm32 right)
883 if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
884 m_assembler.testw_rr(left, left);
886 m_assembler.cmpw_ir(right.m_value, left);
887 return Jump(m_assembler.jCC(x86Condition(cond)));
890 Jump branch16(RelationalCondition cond, BaseIndex left, RegisterID right)
892 m_assembler.cmpw_rm(right, left.offset, left.base, left.index, left.scale);
893 return Jump(m_assembler.jCC(x86Condition(cond)));
896 Jump branch16(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
898 ASSERT(!(right.m_value & 0xFFFF0000));
900 m_assembler.cmpw_im(right.m_value, left.offset, left.base, left.index, left.scale);
901 return Jump(m_assembler.jCC(x86Condition(cond)));
// Branch on (reg & mask); a mask of -1 means "test the whole value".
904 Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
906 m_assembler.testl_rr(reg, mask);
907 return Jump(m_assembler.jCC(x86Condition(cond)));
910 Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
912 // if we are only interested in the low seven bits, this can be tested with a testb
913 if (mask.m_value == -1)
914 m_assembler.testl_rr(reg, reg);
915 else if ((mask.m_value & ~0x7f) == 0)
916 m_assembler.testb_i8r(mask.m_value, reg);
918 m_assembler.testl_i32r(mask.m_value, reg);
919 return Jump(m_assembler.jCC(x86Condition(cond)));
// Memory forms: a full-width test against -1 is emitted as cmp-with-0,
// which sets the same Z/S flags.
922 Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
924 if (mask.m_value == -1)
925 m_assembler.cmpl_im(0, address.offset, address.base);
927 m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
928 return Jump(m_assembler.jCC(x86Condition(cond)));
931 Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
933 if (mask.m_value == -1)
934 m_assembler.cmpl_im(0, address.offset, address.base, address.index, address.scale);
936 m_assembler.testl_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
937 return Jump(m_assembler.jCC(x86Condition(cond)));
940 Jump branchTest8(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
942 // Byte in TrustedImm32 is not well defined, so be a little permissive here, but don't accept nonsense values.
943 ASSERT(mask.m_value >= -128 && mask.m_value <= 255);
944 if (mask.m_value == -1)
945 m_assembler.testb_rr(reg, reg);
947 m_assembler.testb_i8r(mask.m_value, reg);
948 return Jump(m_assembler.jCC(x86Condition(cond)));
951 Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
953 // Byte in TrustedImm32 is not well defined, so be a little permissive here, but don't accept nonsense values.
954 ASSERT(mask.m_value >= -128 && mask.m_value <= 255);
955 if (mask.m_value == -1)
956 m_assembler.cmpb_im(0, address.offset, address.base);
958 m_assembler.testb_im(mask.m_value, address.offset, address.base);
959 return Jump(m_assembler.jCC(x86Condition(cond)));
962 Jump branchTest8(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
964 // Byte in TrustedImm32 is not well defined, so be a little permissive here, but don't accept nonsense values.
965 ASSERT(mask.m_value >= -128 && mask.m_value <= 255);
966 if (mask.m_value == -1)
967 m_assembler.cmpb_im(0, address.offset, address.base, address.index, address.scale);
969 m_assembler.testb_im(mask.m_value, address.offset, address.base, address.index, address.scale);
970 return Jump(m_assembler.jCC(x86Condition(cond)));
// Byte compare-and-branch against an immediate (scaled-index form).
973 Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
975 ASSERT(!(right.m_value & 0xFFFFFF00));
977 m_assembler.cmpb_im(right.m_value, left.offset, left.base, left.index, left.scale);
978 return Jump(m_assembler.jCC(x86Condition(cond)));
// NOTE(review): line 983 is the tail of a jump() overload whose signature is
// not present in this view of the file - it emits an unconditional,
// later-linkable jump.
983 return Jump(m_assembler.jmp());
// Unconditional indirect jump through a register.
986 void jump(RegisterID target)
988 m_assembler.jmp_r(target);
991 // Address is a memory location containing the address to jump to
992 void jump(Address address)
994 m_assembler.jmp_m(address.offset, address.base);
998 // Arithmetic control flow operations:
1000 // This set of conditional branch operations branch based
1001 // on the result of an arithmetic operation. The operation
1002 // is performed as normal, storing the result.
1004 // * jz operations branch if the result is zero.
1005 // * jo operations branch if the (signed) arithmetic
1006 // operation caused an overflow to occur.
// Each overload performs the add (op line not present in this view) and
// then branches on the flags it set.
1008 Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
1011 return Jump(m_assembler.jCC(x86Condition(cond)));
1014 Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
1017 return Jump(m_assembler.jCC(x86Condition(cond)));
1020 Jump branchAdd32(ResultCondition cond, TrustedImm32 src, Address dest)
1023 return Jump(m_assembler.jCC(x86Condition(cond)));
1026 Jump branchAdd32(ResultCondition cond, RegisterID src, Address dest)
1029 return Jump(m_assembler.jCC(x86Condition(cond)));
1032 Jump branchAdd32(ResultCondition cond, Address src, RegisterID dest)
1035 return Jump(m_assembler.jCC(x86Condition(cond)));
// Three-operand form; addition commutes, so either operand may be
// accumulated into dest.
1038 Jump branchAdd32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
1041 return branchAdd32(cond, src2, dest);
1043 return branchAdd32(cond, src1, dest);
1046 Jump branchAdd32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
1049 return branchAdd32(cond, imm, dest);
// For conditions other than Overflow, the result is explicitly re-tested
// with testl to establish the Z/S flags before branching.
1052 Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
1055 if (cond != Overflow)
1056 m_assembler.testl_rr(dest, dest);
1057 return Jump(m_assembler.jCC(x86Condition(cond)));
1060 Jump branchMul32(ResultCondition cond, Address src, RegisterID dest)
1063 if (cond != Overflow)
1064 m_assembler.testl_rr(dest, dest);
1065 return Jump(m_assembler.jCC(x86Condition(cond)));
1068 Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
1070 mul32(imm, src, dest);
1071 if (cond != Overflow)
1072 m_assembler.testl_rr(dest, dest);
1073 return Jump(m_assembler.jCC(x86Condition(cond)));
// Three-operand form; multiplication commutes.
1076 Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
1079 return branchMul32(cond, src2, dest);
1081 return branchMul32(cond, src1, dest);
1084 Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
1087 return Jump(m_assembler.jCC(x86Condition(cond)));
1090 Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
1093 return Jump(m_assembler.jCC(x86Condition(cond)));
1096 Jump branchSub32(ResultCondition cond, TrustedImm32 imm, Address dest)
1099 return Jump(m_assembler.jCC(x86Condition(cond)));
1102 Jump branchSub32(ResultCondition cond, RegisterID src, Address dest)
1105 return Jump(m_assembler.jCC(x86Condition(cond)));
1108 Jump branchSub32(ResultCondition cond, Address src, RegisterID dest)
1111 return Jump(m_assembler.jCC(x86Condition(cond)));
// Three-operand form; subtraction does not commute, so dest must not alias
// src2 unless it is also src1.
1114 Jump branchSub32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
1116 // B := A - B is invalid.
1117 ASSERT(src1 == dest || src2 != dest);
1120 return branchSub32(cond, src2, dest);
1123 Jump branchSub32(ResultCondition cond, RegisterID src1, TrustedImm32 src2, RegisterID dest)
1126 return branchSub32(cond, src2, dest);
1129 Jump branchNeg32(ResultCondition cond, RegisterID srcDest)
1132 return Jump(m_assembler.jCC(x86Condition(cond)));
1135 Jump branchOr32(ResultCondition cond, RegisterID src, RegisterID dest)
1138 return Jump(m_assembler.jCC(x86Condition(cond)));
1142 // Miscellaneous operations:
// NOTE(review): line 1151 is the tail of a call() overload whose signature
// is not present in this view - it emits a near call to be linked later.
1151 return Call(m_assembler.call(), Call::LinkableNear);
// Indirect call through a register / through memory.
1154 Call call(RegisterID target)
1156 return Call(m_assembler.call(target), Call::None);
1159 void call(Address address)
1161 m_assembler.call_m(address.offset, address.base);
// Materialize a comparison result as 0/1 in 'dest': setCC writes only the
// low byte, so movzbl zero-extends it to the full register.
1169 void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
1171 m_assembler.cmpl_rr(right, left);
1172 m_assembler.setCC_r(x86Condition(cond), dest);
1173 m_assembler.movzbl_rr(dest, dest);
// Immediate form; uses the shorter test instruction when comparing
// (not-)equal against zero.
1176 void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
1178 if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
1179 m_assembler.testl_rr(left, left);
1181 m_assembler.cmpl_ir(right.m_value, left);
1182 m_assembler.setCC_r(x86Condition(cond), dest);
1183 m_assembler.movzbl_rr(dest, dest);
1187 // The mask should be optional... perhaps the argument order should be
1188 // dest-src, operations always have a dest? ... possibly not true, considering
1189 // asm ops like test, or pseudo ops like pop().
// Materialize (byte at address & mask) result as 0/1 in 'dest'.
1191 void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
1193 if (mask.m_value == -1)
1194 m_assembler.cmpb_im(0, address.offset, address.base);
1196 m_assembler.testb_im(mask.m_value, address.offset, address.base);
1197 m_assembler.setCC_r(x86Condition(cond), dest);
1198 m_assembler.movzbl_rr(dest, dest);
// Materialize (word at address & mask) result as 0/1 in 'dest'.
1201 void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
1203 if (mask.m_value == -1)
1204 m_assembler.cmpl_im(0, address.offset, address.base);
1206 m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
1207 m_assembler.setCC_r(x86Condition(cond), dest);
1208 m_assembler.movzbl_rr(dest, dest);
1211 // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc.
// x86 condition codes come in complementary pairs differing only in the
// low bit, so XOR-ing with 1 flips the condition.
1212 static RelationalCondition invert(RelationalCondition cond)
1214 return static_cast<RelationalCondition>(cond ^ 1);
// The enum values above ARE x86 condition codes (minus flag bits), so these
// conversions are plain casts.
1223 X86Assembler::Condition x86Condition(RelationalCondition cond)
1225 return static_cast<X86Assembler::Condition>(cond);
1228 X86Assembler::Condition x86Condition(ResultCondition cond)
1230 return static_cast<X86Assembler::Condition>(cond);
1234 // Only MacroAssemblerX86 should be using the following method; SSE2 is always available on
1235 // x86_64, and clients & subclasses of MacroAssembler should be using 'supportsFloatingPoint()'.
1236 friend class MacroAssemblerX86;
1241 // All X86 Macs are guaranteed to support at least SSE2,
// Mac build: unconditionally true (body not present in this view).
1242 static bool isSSE2Present()
1247 #else // OS(MAC_OS_X)
// Non-Mac build: probe for SSE2 once via cpuid and cache the answer.
1249 enum SSE2CheckState {
1255 static bool isSSE2Present()
1257 if (s_sse2CheckState == NotCheckedSSE2) {
1258 // Default the flags value to zero; if the compiler is
1259 // not MSVC or GCC we will read this as SSE2 not present.
// MSVC inline-asm path (GCC asm path lines not present in this view).
1263 mov eax, 1 // cpuid function 1 gives us the standard feature set
1276 : "%eax", "%ecx", "%edx"
// CPUID leaf 1 reports SSE2 support in EDX bit 26.
1279 static const int SSE2FeatureBit = 1 << 26;
1280 s_sse2CheckState = (flags & SSE2FeatureBit) ? HasSSE2 : NoSSE2;
1283 ASSERT(s_sse2CheckState != NotCheckedSSE2);
1285 return s_sse2CheckState == HasSSE2;
// Cached probe result; shared across all assembler instances.
1288 static SSE2CheckState s_sse2CheckState;
1290 #endif // OS(MAC_OS_X)
1291 #elif !defined(NDEBUG) // CPU(X86)
1293 // On x86-64 we should never be checking for SSE2 in a non-debug build,
1294 // but non debug add this method to keep the asserts above happy.
1295 static bool isSSE2Present()
1305 #endif // ENABLE(ASSEMBLER)
1307 #endif // MacroAssemblerX86Common_h