/*
 * Copyright (C) 2008 Apple Inc.
 * Copyright (C) 2009, 2010 University of Szeged
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
28 #ifndef MacroAssemblerARM_h
29 #define MacroAssemblerARM_h
31 #if ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
33 #include "ARMAssembler.h"
34 #include "AbstractMacroAssembler.h"
38 class MacroAssemblerARM : public AbstractMacroAssembler<ARMAssembler> {
39 static const int DoubleConditionMask = 0x0f;
40 static const int DoubleConditionBitSpecial = 0x10;
41 COMPILE_ASSERT(!(DoubleConditionBitSpecial & DoubleConditionMask), DoubleConditionBitSpecial_should_not_interfere_with_ARMAssembler_Condition_codes);
43 typedef ARMRegisters::FPRegisterID FPRegisterID;
44 static const int MaximumCompactPtrAlignedAddressOffset = 0x7FFFFFFF;
46 enum RelationalCondition {
47 Equal = ARMAssembler::EQ,
48 NotEqual = ARMAssembler::NE,
49 Above = ARMAssembler::HI,
50 AboveOrEqual = ARMAssembler::CS,
51 Below = ARMAssembler::CC,
52 BelowOrEqual = ARMAssembler::LS,
53 GreaterThan = ARMAssembler::GT,
54 GreaterThanOrEqual = ARMAssembler::GE,
55 LessThan = ARMAssembler::LT,
56 LessThanOrEqual = ARMAssembler::LE
59 enum ResultCondition {
60 Overflow = ARMAssembler::VS,
61 Signed = ARMAssembler::MI,
62 Zero = ARMAssembler::EQ,
63 NonZero = ARMAssembler::NE
66 enum DoubleCondition {
67 // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
68 DoubleEqual = ARMAssembler::EQ,
69 DoubleNotEqual = ARMAssembler::NE | DoubleConditionBitSpecial,
70 DoubleGreaterThan = ARMAssembler::GT,
71 DoubleGreaterThanOrEqual = ARMAssembler::GE,
72 DoubleLessThan = ARMAssembler::CC,
73 DoubleLessThanOrEqual = ARMAssembler::LS,
74 // If either operand is NaN, these conditions always evaluate to true.
75 DoubleEqualOrUnordered = ARMAssembler::EQ | DoubleConditionBitSpecial,
76 DoubleNotEqualOrUnordered = ARMAssembler::NE,
77 DoubleGreaterThanOrUnordered = ARMAssembler::HI,
78 DoubleGreaterThanOrEqualOrUnordered = ARMAssembler::CS,
79 DoubleLessThanOrUnordered = ARMAssembler::LT,
80 DoubleLessThanOrEqualOrUnordered = ARMAssembler::LE,
83 static const RegisterID stackPointerRegister = ARMRegisters::sp;
84 static const RegisterID linkRegister = ARMRegisters::lr;
86 static const Scale ScalePtr = TimesFour;
88 void add32(RegisterID src, RegisterID dest)
90 m_assembler.adds_r(dest, dest, src);
93 void add32(TrustedImm32 imm, Address address)
95 load32(address, ARMRegisters::S1);
96 add32(imm, ARMRegisters::S1);
97 store32(ARMRegisters::S1, address);
100 void add32(TrustedImm32 imm, RegisterID dest)
102 m_assembler.adds_r(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
105 void add32(Address src, RegisterID dest)
107 load32(src, ARMRegisters::S1);
108 add32(ARMRegisters::S1, dest);
111 void and32(RegisterID src, RegisterID dest)
113 m_assembler.ands_r(dest, dest, src);
116 void and32(TrustedImm32 imm, RegisterID dest)
118 ARMWord w = m_assembler.getImm(imm.m_value, ARMRegisters::S0, true);
119 if (w & ARMAssembler::OP2_INV_IMM)
120 m_assembler.bics_r(dest, dest, w & ~ARMAssembler::OP2_INV_IMM);
122 m_assembler.ands_r(dest, dest, w);
125 void lshift32(RegisterID shift_amount, RegisterID dest)
127 ARMWord w = ARMAssembler::getOp2(0x1f);
128 ASSERT(w != ARMAssembler::INVALID_IMM);
129 m_assembler.and_r(ARMRegisters::S0, shift_amount, w);
131 m_assembler.movs_r(dest, m_assembler.lsl_r(dest, ARMRegisters::S0));
134 void lshift32(TrustedImm32 imm, RegisterID dest)
136 m_assembler.movs_r(dest, m_assembler.lsl(dest, imm.m_value & 0x1f));
139 void mul32(RegisterID src, RegisterID dest)
142 move(src, ARMRegisters::S0);
143 src = ARMRegisters::S0;
145 m_assembler.muls_r(dest, dest, src);
148 void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
150 move(imm, ARMRegisters::S0);
151 m_assembler.muls_r(dest, src, ARMRegisters::S0);
154 void neg32(RegisterID srcDest)
156 m_assembler.rsbs_r(srcDest, srcDest, ARMAssembler::getOp2(0));
159 void not32(RegisterID dest)
161 m_assembler.mvns_r(dest, dest);
164 void or32(RegisterID src, RegisterID dest)
166 m_assembler.orrs_r(dest, dest, src);
169 void or32(TrustedImm32 imm, RegisterID dest)
171 m_assembler.orrs_r(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
174 void rshift32(RegisterID shift_amount, RegisterID dest)
176 ARMWord w = ARMAssembler::getOp2(0x1f);
177 ASSERT(w != ARMAssembler::INVALID_IMM);
178 m_assembler.and_r(ARMRegisters::S0, shift_amount, w);
180 m_assembler.movs_r(dest, m_assembler.asr_r(dest, ARMRegisters::S0));
183 void rshift32(TrustedImm32 imm, RegisterID dest)
185 rshift32(dest, imm, dest);
188 void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
190 m_assembler.movs_r(dest, m_assembler.asr(src, imm.m_value & 0x1f));
193 void urshift32(RegisterID shift_amount, RegisterID dest)
195 ARMWord w = ARMAssembler::getOp2(0x1f);
196 ASSERT(w != ARMAssembler::INVALID_IMM);
197 m_assembler.and_r(ARMRegisters::S0, shift_amount, w);
199 m_assembler.movs_r(dest, m_assembler.lsr_r(dest, ARMRegisters::S0));
202 void urshift32(TrustedImm32 imm, RegisterID dest)
204 m_assembler.movs_r(dest, m_assembler.lsr(dest, imm.m_value & 0x1f));
207 void sub32(RegisterID src, RegisterID dest)
209 m_assembler.subs_r(dest, dest, src);
212 void sub32(TrustedImm32 imm, RegisterID dest)
214 m_assembler.subs_r(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
217 void sub32(TrustedImm32 imm, Address address)
219 load32(address, ARMRegisters::S1);
220 sub32(imm, ARMRegisters::S1);
221 store32(ARMRegisters::S1, address);
224 void sub32(Address src, RegisterID dest)
226 load32(src, ARMRegisters::S1);
227 sub32(ARMRegisters::S1, dest);
230 void xor32(RegisterID src, RegisterID dest)
232 m_assembler.eors_r(dest, dest, src);
235 void xor32(TrustedImm32 imm, RegisterID dest)
237 m_assembler.eors_r(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
240 void countLeadingZeros32(RegisterID src, RegisterID dest)
242 #if WTF_ARM_ARCH_AT_LEAST(5)
243 m_assembler.clz_r(dest, src);
247 ASSERT_NOT_REACHED();
251 void load8(ImplicitAddress address, RegisterID dest)
253 m_assembler.dataTransfer32(true, dest, address.base, address.offset, true);
256 void load8(BaseIndex address, RegisterID dest)
258 m_assembler.baseIndexTransfer32(true, dest, address.base, address.index, static_cast<int>(address.scale), address.offset, true);
261 void load32(ImplicitAddress address, RegisterID dest)
263 m_assembler.dataTransfer32(true, dest, address.base, address.offset);
266 void load32(BaseIndex address, RegisterID dest)
268 m_assembler.baseIndexTransfer32(true, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
271 #if CPU(ARMV5_OR_LOWER)
272 void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest);
274 void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
276 load32(address, dest);
280 DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
282 DataLabel32 dataLabel(this);
283 m_assembler.ldr_un_imm(ARMRegisters::S0, 0);
284 m_assembler.dtr_ur(true, dest, address.base, ARMRegisters::S0);
288 DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
290 DataLabelCompact dataLabel(this);
291 load32WithAddressOffsetPatch(address, dest);
295 void load16(BaseIndex address, RegisterID dest)
297 m_assembler.add_r(ARMRegisters::S1, address.base, m_assembler.lsl(address.index, address.scale));
298 load16(Address(ARMRegisters::S1, address.offset), dest);
301 void load16(ImplicitAddress address, RegisterID dest)
303 if (address.offset >= 0)
304 m_assembler.ldrh_u(dest, address.base, m_assembler.getOffsetForHalfwordDataTransfer(address.offset, ARMRegisters::S0));
306 m_assembler.ldrh_d(dest, address.base, m_assembler.getOffsetForHalfwordDataTransfer(-address.offset, ARMRegisters::S0));
309 DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
311 DataLabel32 dataLabel(this);
312 m_assembler.ldr_un_imm(ARMRegisters::S0, 0);
313 m_assembler.dtr_ur(false, src, address.base, ARMRegisters::S0);
317 void store32(RegisterID src, ImplicitAddress address)
319 m_assembler.dataTransfer32(false, src, address.base, address.offset);
322 void store32(RegisterID src, BaseIndex address)
324 m_assembler.baseIndexTransfer32(false, src, address.base, address.index, static_cast<int>(address.scale), address.offset);
327 void store32(TrustedImm32 imm, ImplicitAddress address)
330 m_assembler.ldr_un_imm(ARMRegisters::S1, imm.m_value);
332 move(imm, ARMRegisters::S1);
333 store32(ARMRegisters::S1, address);
336 void store32(RegisterID src, void* address)
338 m_assembler.ldr_un_imm(ARMRegisters::S0, reinterpret_cast<ARMWord>(address));
339 m_assembler.dtr_u(false, src, ARMRegisters::S0, 0);
342 void store32(TrustedImm32 imm, void* address)
344 m_assembler.ldr_un_imm(ARMRegisters::S0, reinterpret_cast<ARMWord>(address));
346 m_assembler.ldr_un_imm(ARMRegisters::S1, imm.m_value);
348 m_assembler.moveImm(imm.m_value, ARMRegisters::S1);
349 m_assembler.dtr_u(false, ARMRegisters::S1, ARMRegisters::S0, 0);
352 void pop(RegisterID dest)
354 m_assembler.pop_r(dest);
357 void push(RegisterID src)
359 m_assembler.push_r(src);
362 void push(Address address)
364 load32(address, ARMRegisters::S1);
365 push(ARMRegisters::S1);
368 void push(TrustedImm32 imm)
370 move(imm, ARMRegisters::S0);
371 push(ARMRegisters::S0);
374 void move(TrustedImm32 imm, RegisterID dest)
377 m_assembler.ldr_un_imm(dest, imm.m_value);
379 m_assembler.moveImm(imm.m_value, dest);
382 void move(RegisterID src, RegisterID dest)
384 m_assembler.mov_r(dest, src);
387 void move(TrustedImmPtr imm, RegisterID dest)
389 move(TrustedImm32(imm), dest);
392 void swap(RegisterID reg1, RegisterID reg2)
394 m_assembler.mov_r(ARMRegisters::S0, reg1);
395 m_assembler.mov_r(reg1, reg2);
396 m_assembler.mov_r(reg2, ARMRegisters::S0);
399 void signExtend32ToPtr(RegisterID src, RegisterID dest)
405 void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
411 Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
413 load8(left, ARMRegisters::S1);
414 return branch32(cond, ARMRegisters::S1, right);
417 Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
419 ASSERT(!(right.m_value & 0xFFFFFF00));
420 load8(left, ARMRegisters::S1);
421 return branch32(cond, ARMRegisters::S1, right);
424 Jump branch16(RelationalCondition cond, RegisterID left, TrustedImm32 right)
426 ASSERT(!(right.m_value & 0xFFFF0000));
427 right.m_value <<= 16;
428 m_assembler.mov_r(ARMRegisters::S1, left);
429 lshift32(TrustedImm32(16), ARMRegisters::S1);
430 return branch32(cond, ARMRegisters::S1, right);
433 Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right, int useConstantPool = 0)
435 m_assembler.cmp_r(left, right);
436 return Jump(m_assembler.jmp(ARMCondition(cond), useConstantPool));
439 Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right, int useConstantPool = 0)
441 if (right.m_isPointer) {
442 m_assembler.ldr_un_imm(ARMRegisters::S0, right.m_value);
443 m_assembler.cmp_r(left, ARMRegisters::S0);
445 ARMWord tmp = (right.m_value == 0x80000000) ? ARMAssembler::INVALID_IMM : m_assembler.getOp2(-right.m_value);
446 if (tmp != ARMAssembler::INVALID_IMM)
447 m_assembler.cmn_r(left, tmp);
449 m_assembler.cmp_r(left, m_assembler.getImm(right.m_value, ARMRegisters::S0));
451 return Jump(m_assembler.jmp(ARMCondition(cond), useConstantPool));
454 Jump branch32(RelationalCondition cond, RegisterID left, Address right)
456 load32(right, ARMRegisters::S1);
457 return branch32(cond, left, ARMRegisters::S1);
460 Jump branch32(RelationalCondition cond, Address left, RegisterID right)
462 load32(left, ARMRegisters::S1);
463 return branch32(cond, ARMRegisters::S1, right);
466 Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
468 load32(left, ARMRegisters::S1);
469 return branch32(cond, ARMRegisters::S1, right);
472 Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
474 load32(left, ARMRegisters::S1);
475 return branch32(cond, ARMRegisters::S1, right);
478 Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
480 load32WithUnalignedHalfWords(left, ARMRegisters::S1);
481 return branch32(cond, ARMRegisters::S1, right);
484 Jump branch16(RelationalCondition cond, BaseIndex left, RegisterID right)
489 ASSERT_NOT_REACHED();
493 Jump branch16(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
495 load16(left, ARMRegisters::S0);
496 move(right, ARMRegisters::S1);
497 m_assembler.cmp_r(ARMRegisters::S0, ARMRegisters::S1);
498 return m_assembler.jmp(ARMCondition(cond));
501 Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
503 load8(address, ARMRegisters::S1);
504 return branchTest32(cond, ARMRegisters::S1, mask);
507 Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
509 ASSERT((cond == Zero) || (cond == NonZero));
510 m_assembler.tst_r(reg, mask);
511 return Jump(m_assembler.jmp(ARMCondition(cond)));
514 Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
516 ASSERT((cond == Zero) || (cond == NonZero));
517 ARMWord w = m_assembler.getImm(mask.m_value, ARMRegisters::S0, true);
518 if (w & ARMAssembler::OP2_INV_IMM)
519 m_assembler.bics_r(ARMRegisters::S0, reg, w & ~ARMAssembler::OP2_INV_IMM);
521 m_assembler.tst_r(reg, w);
522 return Jump(m_assembler.jmp(ARMCondition(cond)));
525 Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
527 load32(address, ARMRegisters::S1);
528 return branchTest32(cond, ARMRegisters::S1, mask);
531 Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
533 load32(address, ARMRegisters::S1);
534 return branchTest32(cond, ARMRegisters::S1, mask);
539 return Jump(m_assembler.jmp());
542 void jump(RegisterID target)
544 m_assembler.bx(target);
547 void jump(Address address)
549 load32(address, ARMRegisters::pc);
552 Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
554 ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
556 return Jump(m_assembler.jmp(ARMCondition(cond)));
559 Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
561 ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
563 return Jump(m_assembler.jmp(ARMCondition(cond)));
566 void mull32(RegisterID src1, RegisterID src2, RegisterID dest)
569 move(src1, ARMRegisters::S0);
570 src1 = ARMRegisters::S0;
572 m_assembler.mull_r(ARMRegisters::S1, dest, src2, src1);
573 m_assembler.cmp_r(ARMRegisters::S1, m_assembler.asr(dest, 31));
576 Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
578 ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
579 if (cond == Overflow) {
580 mull32(src, dest, dest);
585 return Jump(m_assembler.jmp(ARMCondition(cond)));
588 Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
590 ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
591 if (cond == Overflow) {
592 move(imm, ARMRegisters::S0);
593 mull32(ARMRegisters::S0, src, dest);
597 mul32(imm, src, dest);
598 return Jump(m_assembler.jmp(ARMCondition(cond)));
601 Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
603 ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
605 return Jump(m_assembler.jmp(ARMCondition(cond)));
608 Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
610 ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
612 return Jump(m_assembler.jmp(ARMCondition(cond)));
615 Jump branchNeg32(ResultCondition cond, RegisterID srcDest)
617 ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
619 return Jump(m_assembler.jmp(ARMCondition(cond)));
622 Jump branchOr32(ResultCondition cond, RegisterID src, RegisterID dest)
624 ASSERT((cond == Signed) || (cond == Zero) || (cond == NonZero));
626 return Jump(m_assembler.jmp(ARMCondition(cond)));
636 #if WTF_ARM_ARCH_AT_LEAST(5)
637 ensureSpace(2 * sizeof(ARMWord), sizeof(ARMWord));
638 m_assembler.loadBranchTarget(ARMRegisters::S1, ARMAssembler::AL, true);
639 return Call(m_assembler.blx(ARMRegisters::S1), Call::LinkableNear);
642 return Call(m_assembler.jmp(ARMAssembler::AL, true), Call::LinkableNear);
646 Call call(RegisterID target)
648 return Call(m_assembler.blx(target), Call::None);
651 void call(Address address)
653 call32(address.base, address.offset);
658 m_assembler.bx(linkRegister);
661 void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
663 m_assembler.cmp_r(left, right);
664 m_assembler.mov_r(dest, ARMAssembler::getOp2(0));
665 m_assembler.mov_r(dest, ARMAssembler::getOp2(1), ARMCondition(cond));
668 void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
670 m_assembler.cmp_r(left, m_assembler.getImm(right.m_value, ARMRegisters::S0));
671 m_assembler.mov_r(dest, ARMAssembler::getOp2(0));
672 m_assembler.mov_r(dest, ARMAssembler::getOp2(1), ARMCondition(cond));
675 void test32(ResultCondition cond, RegisterID reg, TrustedImm32 mask, RegisterID dest)
677 if (mask.m_value == -1)
678 m_assembler.cmp_r(0, reg);
680 m_assembler.tst_r(reg, m_assembler.getImm(mask.m_value, ARMRegisters::S0));
681 m_assembler.mov_r(dest, ARMAssembler::getOp2(0));
682 m_assembler.mov_r(dest, ARMAssembler::getOp2(1), ARMCondition(cond));
685 void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
687 load32(address, ARMRegisters::S1);
688 test32(cond, ARMRegisters::S1, mask, dest);
691 void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
693 load8(address, ARMRegisters::S1);
694 test32(cond, ARMRegisters::S1, mask, dest);
697 void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
699 m_assembler.add_r(dest, src, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
702 void add32(TrustedImm32 imm, AbsoluteAddress address)
704 m_assembler.ldr_un_imm(ARMRegisters::S1, reinterpret_cast<ARMWord>(address.m_ptr));
705 m_assembler.dtr_u(true, ARMRegisters::S1, ARMRegisters::S1, 0);
706 add32(imm, ARMRegisters::S1);
707 m_assembler.ldr_un_imm(ARMRegisters::S0, reinterpret_cast<ARMWord>(address.m_ptr));
708 m_assembler.dtr_u(false, ARMRegisters::S1, ARMRegisters::S0, 0);
711 void sub32(TrustedImm32 imm, AbsoluteAddress address)
713 m_assembler.ldr_un_imm(ARMRegisters::S1, reinterpret_cast<ARMWord>(address.m_ptr));
714 m_assembler.dtr_u(true, ARMRegisters::S1, ARMRegisters::S1, 0);
715 sub32(imm, ARMRegisters::S1);
716 m_assembler.ldr_un_imm(ARMRegisters::S0, reinterpret_cast<ARMWord>(address.m_ptr));
717 m_assembler.dtr_u(false, ARMRegisters::S1, ARMRegisters::S0, 0);
720 void load32(const void* address, RegisterID dest)
722 m_assembler.ldr_un_imm(ARMRegisters::S0, reinterpret_cast<ARMWord>(address));
723 m_assembler.dtr_u(true, dest, ARMRegisters::S0, 0);
726 Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
728 load32(left.m_ptr, ARMRegisters::S1);
729 return branch32(cond, ARMRegisters::S1, right);
732 Jump branch32(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
734 load32(left.m_ptr, ARMRegisters::S1);
735 return branch32(cond, ARMRegisters::S1, right);
738 void relativeTableJump(RegisterID index, int scale)
740 ASSERT(scale >= 0 && scale <= 31);
741 m_assembler.add_r(ARMRegisters::pc, ARMRegisters::pc, m_assembler.lsl(index, scale));
743 // NOP the default prefetching
744 m_assembler.mov_r(ARMRegisters::r0, ARMRegisters::r0);
749 #if WTF_ARM_ARCH_AT_LEAST(5)
750 ensureSpace(2 * sizeof(ARMWord), sizeof(ARMWord));
751 m_assembler.loadBranchTarget(ARMRegisters::S1, ARMAssembler::AL, true);
752 return Call(m_assembler.blx(ARMRegisters::S1), Call::Linkable);
755 return Call(m_assembler.jmp(ARMAssembler::AL, true), Call::Linkable);
759 Call tailRecursiveCall()
761 return Call::fromTailJump(jump());
764 Call makeTailRecursiveCall(Jump oldJump)
766 return Call::fromTailJump(oldJump);
769 DataLabelPtr moveWithPatch(TrustedImmPtr initialValue, RegisterID dest)
771 DataLabelPtr dataLabel(this);
772 m_assembler.ldr_un_imm(dest, reinterpret_cast<ARMWord>(initialValue.m_value));
776 Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
778 dataLabel = moveWithPatch(initialRightValue, ARMRegisters::S1);
779 Jump jump = branch32(cond, left, ARMRegisters::S1, true);
783 Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
785 load32(left, ARMRegisters::S1);
786 dataLabel = moveWithPatch(initialRightValue, ARMRegisters::S0);
787 Jump jump = branch32(cond, ARMRegisters::S0, ARMRegisters::S1, true);
791 DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
793 DataLabelPtr dataLabel = moveWithPatch(initialValue, ARMRegisters::S1);
794 store32(ARMRegisters::S1, address);
798 DataLabelPtr storePtrWithPatch(ImplicitAddress address)
800 return storePtrWithPatch(TrustedImmPtr(0), address);
803 // Floating point operators
804 bool supportsFloatingPoint() const
806 return s_isVFPPresent;
809 bool supportsFloatingPointTruncate() const
814 bool supportsFloatingPointSqrt() const
816 return s_isVFPPresent;
818 bool supportsDoubleBitops() const { return false; }
820 void loadDouble(ImplicitAddress address, FPRegisterID dest)
822 m_assembler.doubleTransfer(true, dest, address.base, address.offset);
825 void loadDouble(const void* address, FPRegisterID dest)
827 m_assembler.ldr_un_imm(ARMRegisters::S0, (ARMWord)address);
828 m_assembler.fdtr_u(true, dest, ARMRegisters::S0, 0);
831 void storeDouble(FPRegisterID src, ImplicitAddress address)
833 m_assembler.doubleTransfer(false, src, address.base, address.offset);
836 void addDouble(FPRegisterID src, FPRegisterID dest)
838 m_assembler.vadd_f64_r(dest, dest, src);
841 void addDouble(Address src, FPRegisterID dest)
843 loadDouble(src, ARMRegisters::SD0);
844 addDouble(ARMRegisters::SD0, dest);
847 void divDouble(FPRegisterID src, FPRegisterID dest)
849 m_assembler.vdiv_f64_r(dest, dest, src);
852 void divDouble(Address src, FPRegisterID dest)
854 ASSERT_NOT_REACHED(); // Untested
855 loadDouble(src, ARMRegisters::SD0);
856 divDouble(ARMRegisters::SD0, dest);
859 void subDouble(FPRegisterID src, FPRegisterID dest)
861 m_assembler.vsub_f64_r(dest, dest, src);
864 void subDouble(Address src, FPRegisterID dest)
866 loadDouble(src, ARMRegisters::SD0);
867 subDouble(ARMRegisters::SD0, dest);
870 void mulDouble(FPRegisterID src, FPRegisterID dest)
872 m_assembler.vmul_f64_r(dest, dest, src);
875 void mulDouble(Address src, FPRegisterID dest)
877 loadDouble(src, ARMRegisters::SD0);
878 mulDouble(ARMRegisters::SD0, dest);
881 void sqrtDouble(FPRegisterID src, FPRegisterID dest)
883 m_assembler.vsqrt_f64_r(dest, src);
886 void andnotDouble(FPRegisterID, FPRegisterID)
888 ASSERT_NOT_REACHED();
891 void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
893 m_assembler.vmov_vfp_r(dest << 1, src);
894 m_assembler.vcvt_f64_s32_r(dest, dest << 1);
897 void convertInt32ToDouble(Address src, FPRegisterID dest)
899 ASSERT_NOT_REACHED(); // Untested
900 // flds does not worth the effort here
901 load32(src, ARMRegisters::S1);
902 convertInt32ToDouble(ARMRegisters::S1, dest);
905 void convertInt32ToDouble(AbsoluteAddress src, FPRegisterID dest)
907 ASSERT_NOT_REACHED(); // Untested
908 // flds does not worth the effort here
909 m_assembler.ldr_un_imm(ARMRegisters::S1, (ARMWord)src.m_ptr);
910 m_assembler.dtr_u(true, ARMRegisters::S1, ARMRegisters::S1, 0);
911 convertInt32ToDouble(ARMRegisters::S1, dest);
914 Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
916 m_assembler.vcmp_f64_r(left, right);
917 m_assembler.vmrs_apsr();
918 if (cond & DoubleConditionBitSpecial)
919 m_assembler.cmp_r(ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::VS);
920 return Jump(m_assembler.jmp(static_cast<ARMAssembler::Condition>(cond & ~DoubleConditionMask)));
923 // Truncates 'src' to an integer, and places the resulting 'dest'.
924 // If the result is not representable as a 32 bit value, branch.
925 // May also branch for some values that are representable in 32 bits
926 // (specifically, in this case, INT_MIN).
927 Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest)
931 ASSERT_NOT_REACHED();
935 // Convert 'src' to an integer, and places the resulting 'dest'.
936 // If the result is not representable as a 32 bit value, branch.
937 // May also branch for some values that are representable in 32 bits
938 // (specifically, in this case, 0).
939 void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID fpTemp)
941 m_assembler.vcvt_s32_f64_r(ARMRegisters::SD0 << 1, src);
942 m_assembler.vmov_arm_r(dest, ARMRegisters::SD0 << 1);
944 // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
945 m_assembler.vcvt_f64_s32_r(ARMRegisters::SD0, ARMRegisters::SD0 << 1);
946 failureCases.append(branchDouble(DoubleNotEqualOrUnordered, src, ARMRegisters::SD0));
948 // If the result is zero, it might have been -0.0, and 0.0 equals to -0.0
949 failureCases.append(branchTest32(Zero, dest));
952 Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID scratch)
954 m_assembler.mov_r(ARMRegisters::S0, ARMAssembler::getOp2(0));
955 convertInt32ToDouble(ARMRegisters::S0, scratch);
956 return branchDouble(DoubleNotEqual, reg, scratch);
959 Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID scratch)
961 m_assembler.mov_r(ARMRegisters::S0, ARMAssembler::getOp2(0));
962 convertInt32ToDouble(ARMRegisters::S0, scratch);
963 return branchDouble(DoubleEqualOrUnordered, reg, scratch);
972 ARMAssembler::Condition ARMCondition(RelationalCondition cond)
974 return static_cast<ARMAssembler::Condition>(cond);
977 ARMAssembler::Condition ARMCondition(ResultCondition cond)
979 return static_cast<ARMAssembler::Condition>(cond);
982 void ensureSpace(int insnSpace, int constSpace)
984 m_assembler.ensureSpace(insnSpace, constSpace);
987 int sizeOfConstantPool()
989 return m_assembler.sizeOfConstantPool();
994 #if WTF_ARM_ARCH_VERSION < 5
995 ensureSpace(2 * sizeof(ARMWord), sizeof(ARMWord));
997 m_assembler.mov_r(linkRegister, ARMRegisters::pc);
1001 void call32(RegisterID base, int32_t offset)
1003 #if WTF_ARM_ARCH_AT_LEAST(5)
1004 int targetReg = ARMRegisters::S1;
1006 int targetReg = ARMRegisters::pc;
1008 int tmpReg = ARMRegisters::S1;
1010 if (base == ARMRegisters::sp)
1014 if (offset <= 0xfff) {
1016 m_assembler.dtr_u(true, targetReg, base, offset);
1017 } else if (offset <= 0xfffff) {
1018 m_assembler.add_r(tmpReg, base, ARMAssembler::OP2_IMM | (offset >> 12) | (10 << 8));
1020 m_assembler.dtr_u(true, targetReg, tmpReg, offset & 0xfff);
1022 m_assembler.moveImm(offset, tmpReg);
1024 m_assembler.dtr_ur(true, targetReg, base, tmpReg);
1028 if (offset <= 0xfff) {
1030 m_assembler.dtr_d(true, targetReg, base, offset);
1031 } else if (offset <= 0xfffff) {
1032 m_assembler.sub_r(tmpReg, base, ARMAssembler::OP2_IMM | (offset >> 12) | (10 << 8));
1034 m_assembler.dtr_d(true, targetReg, tmpReg, offset & 0xfff);
1036 m_assembler.moveImm(offset, tmpReg);
1038 m_assembler.dtr_dr(true, targetReg, base, tmpReg);
1041 #if WTF_ARM_ARCH_AT_LEAST(5)
1042 m_assembler.blx(targetReg);
1047 friend class LinkBuffer;
1048 friend class RepatchBuffer;
1050 static void linkCall(void* code, Call call, FunctionPtr function)
1052 ARMAssembler::linkCall(code, call.m_label, function.value());
1055 static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
1057 ARMAssembler::relinkCall(call.dataLocation(), destination.executableAddress());
1060 static void repatchCall(CodeLocationCall call, FunctionPtr destination)
1062 ARMAssembler::relinkCall(call.dataLocation(), destination.executableAddress());
1065 static const bool s_isVFPPresent;
1070 #endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
1072 #endif // MacroAssemblerARM_h