2 * Copyright (C) 2009, 2010 Apple Inc. All rights reserved.
3 * Copyright (C) 2010 University of Szeged
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
15 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
18 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
19 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
20 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
21 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
22 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
24 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #ifndef MacroAssemblerARMv7_h
28 #define MacroAssemblerARMv7_h
32 #include "ARMv7Assembler.h"
33 #include "AbstractMacroAssembler.h"
37 class MacroAssemblerARMv7 : public AbstractMacroAssembler<ARMv7Assembler> {
38 // FIXME: switch dataTempRegister & addressTempRegister, or possibly use r7?
39 // - dTR is likely used more than aTR, and we'll get better instruction
40 // encoding if it's in the low 8 registers.
41 static const RegisterID dataTempRegister = ARMRegisters::ip;
42 static const RegisterID addressTempRegister = ARMRegisters::r3;
44 static const ARMRegisters::FPDoubleRegisterID fpTempRegister = ARMRegisters::d7;
45 inline ARMRegisters::FPSingleRegisterID fpTempRegisterAsSingle() { return ARMRegisters::asSingle(fpTempRegister); }
48 typedef ARMv7Assembler::LinkRecord LinkRecord;
49 typedef ARMv7Assembler::JumpType JumpType;
50 typedef ARMv7Assembler::JumpLinkType JumpLinkType;
51 // Magic number is the biggest useful offset we can get on ARMv7 with
52 // a LDR_imm_T2 encoding
53 static const int MaximumCompactPtrAlignedAddressOffset = 124;
56 : m_inUninterruptedSequence(false)
60 void beginUninterruptedSequence() { m_inUninterruptedSequence = true; }
61 void endUninterruptedSequence() { m_inUninterruptedSequence = false; }
62 Vector<LinkRecord>& jumpsToLink() { return m_assembler.jumpsToLink(); }
63 void* unlinkedCode() { return m_assembler.unlinkedCode(); }
64 bool canCompact(JumpType jumpType) { return m_assembler.canCompact(jumpType); }
65 JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to) { return m_assembler.computeJumpType(jumpType, from, to); }
66 JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) { return m_assembler.computeJumpType(record, from, to); }
67 void recordLinkOffsets(int32_t regionStart, int32_t regionEnd, int32_t offset) {return m_assembler.recordLinkOffsets(regionStart, regionEnd, offset); }
68 int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return m_assembler.jumpSizeDelta(jumpType, jumpLinkType); }
69 void link(LinkRecord& record, uint8_t* from, uint8_t* to) { return m_assembler.link(record, from, to); }
85 explicit ArmAddress(RegisterID base, int32_t offset = 0)
92 explicit ArmAddress(RegisterID base, RegisterID index, Scale scale = TimesOne)
102 typedef ARMRegisters::FPDoubleRegisterID FPRegisterID;
104 static const Scale ScalePtr = TimesFour;
106 enum RelationalCondition {
107 Equal = ARMv7Assembler::ConditionEQ,
108 NotEqual = ARMv7Assembler::ConditionNE,
109 Above = ARMv7Assembler::ConditionHI,
110 AboveOrEqual = ARMv7Assembler::ConditionHS,
111 Below = ARMv7Assembler::ConditionLO,
112 BelowOrEqual = ARMv7Assembler::ConditionLS,
113 GreaterThan = ARMv7Assembler::ConditionGT,
114 GreaterThanOrEqual = ARMv7Assembler::ConditionGE,
115 LessThan = ARMv7Assembler::ConditionLT,
116 LessThanOrEqual = ARMv7Assembler::ConditionLE
119 enum ResultCondition {
120 Overflow = ARMv7Assembler::ConditionVS,
121 Signed = ARMv7Assembler::ConditionMI,
122 Zero = ARMv7Assembler::ConditionEQ,
123 NonZero = ARMv7Assembler::ConditionNE
126 enum DoubleCondition {
127 // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
128 DoubleEqual = ARMv7Assembler::ConditionEQ,
129 DoubleNotEqual = ARMv7Assembler::ConditionVC, // Not the right flag! check for this & handle differently.
130 DoubleGreaterThan = ARMv7Assembler::ConditionGT,
131 DoubleGreaterThanOrEqual = ARMv7Assembler::ConditionGE,
132 DoubleLessThan = ARMv7Assembler::ConditionLO,
133 DoubleLessThanOrEqual = ARMv7Assembler::ConditionLS,
134 // If either operand is NaN, these conditions always evaluate to true.
135 DoubleEqualOrUnordered = ARMv7Assembler::ConditionVS, // Not the right flag! check for this & handle differently.
136 DoubleNotEqualOrUnordered = ARMv7Assembler::ConditionNE,
137 DoubleGreaterThanOrUnordered = ARMv7Assembler::ConditionHI,
138 DoubleGreaterThanOrEqualOrUnordered = ARMv7Assembler::ConditionHS,
139 DoubleLessThanOrUnordered = ARMv7Assembler::ConditionLT,
140 DoubleLessThanOrEqualOrUnordered = ARMv7Assembler::ConditionLE,
143 static const RegisterID stackPointerRegister = ARMRegisters::sp;
144 static const RegisterID linkRegister = ARMRegisters::lr;
146 // Integer arithmetic operations:
148 // Operations are typically two operand - operation(source, srcDst)
149 // For many operations the source may be an TrustedImm32, the srcDst operand
150 // may often be a memory location (explictly described using an Address
153 void add32(RegisterID src, RegisterID dest)
155 m_assembler.add(dest, dest, src);
158 void add32(TrustedImm32 imm, RegisterID dest)
160 add32(imm, dest, dest);
163 void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
165 ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
166 if (armImm.isValid())
167 m_assembler.add(dest, src, armImm);
169 move(imm, dataTempRegister);
170 m_assembler.add(dest, src, dataTempRegister);
174 void add32(TrustedImm32 imm, Address address)
176 load32(address, dataTempRegister);
178 ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
179 if (armImm.isValid())
180 m_assembler.add(dataTempRegister, dataTempRegister, armImm);
182 // Hrrrm, since dataTempRegister holds the data loaded,
183 // use addressTempRegister to hold the immediate.
184 move(imm, addressTempRegister);
185 m_assembler.add(dataTempRegister, dataTempRegister, addressTempRegister);
188 store32(dataTempRegister, address);
191 void add32(Address src, RegisterID dest)
193 load32(src, dataTempRegister);
194 add32(dataTempRegister, dest);
197 void add32(TrustedImm32 imm, AbsoluteAddress address)
199 load32(address.m_ptr, dataTempRegister);
201 ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
202 if (armImm.isValid())
203 m_assembler.add(dataTempRegister, dataTempRegister, armImm);
205 // Hrrrm, since dataTempRegister holds the data loaded,
206 // use addressTempRegister to hold the immediate.
207 move(imm, addressTempRegister);
208 m_assembler.add(dataTempRegister, dataTempRegister, addressTempRegister);
211 store32(dataTempRegister, address.m_ptr);
214 void and32(RegisterID src, RegisterID dest)
216 m_assembler.ARM_and(dest, dest, src);
219 void and32(TrustedImm32 imm, RegisterID dest)
221 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
222 if (armImm.isValid())
223 m_assembler.ARM_and(dest, dest, armImm);
225 move(imm, dataTempRegister);
226 m_assembler.ARM_and(dest, dest, dataTempRegister);
230 void countLeadingZeros32(RegisterID src, RegisterID dest)
232 m_assembler.clz(dest, src);
235 void lshift32(RegisterID shift_amount, RegisterID dest)
237 // Clamp the shift to the range 0..31
238 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
239 ASSERT(armImm.isValid());
240 m_assembler.ARM_and(dataTempRegister, shift_amount, armImm);
242 m_assembler.lsl(dest, dest, dataTempRegister);
245 void lshift32(TrustedImm32 imm, RegisterID dest)
247 m_assembler.lsl(dest, dest, imm.m_value & 0x1f);
250 void mul32(RegisterID src, RegisterID dest)
252 m_assembler.smull(dest, dataTempRegister, dest, src);
255 void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
257 move(imm, dataTempRegister);
258 m_assembler.smull(dest, dataTempRegister, src, dataTempRegister);
261 void neg32(RegisterID srcDest)
263 m_assembler.neg(srcDest, srcDest);
266 void not32(RegisterID srcDest)
268 m_assembler.mvn(srcDest, srcDest);
271 void or32(RegisterID src, RegisterID dest)
273 m_assembler.orr(dest, dest, src);
276 void or32(TrustedImm32 imm, RegisterID dest)
278 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
279 if (armImm.isValid())
280 m_assembler.orr(dest, dest, armImm);
282 move(imm, dataTempRegister);
283 m_assembler.orr(dest, dest, dataTempRegister);
287 void rshift32(RegisterID shift_amount, RegisterID dest)
289 // Clamp the shift to the range 0..31
290 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
291 ASSERT(armImm.isValid());
292 m_assembler.ARM_and(dataTempRegister, shift_amount, armImm);
294 m_assembler.asr(dest, dest, dataTempRegister);
297 void rshift32(TrustedImm32 imm, RegisterID dest)
299 rshift32(dest, imm, dest);
302 void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
304 m_assembler.asr(dest, src, imm.m_value & 0x1f);
307 void urshift32(RegisterID shift_amount, RegisterID dest)
309 // Clamp the shift to the range 0..31
310 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
311 ASSERT(armImm.isValid());
312 m_assembler.ARM_and(dataTempRegister, shift_amount, armImm);
314 m_assembler.lsr(dest, dest, dataTempRegister);
317 void urshift32(TrustedImm32 imm, RegisterID dest)
319 m_assembler.lsr(dest, dest, imm.m_value & 0x1f);
322 void sub32(RegisterID src, RegisterID dest)
324 m_assembler.sub(dest, dest, src);
327 void sub32(TrustedImm32 imm, RegisterID dest)
329 ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
330 if (armImm.isValid())
331 m_assembler.sub(dest, dest, armImm);
333 move(imm, dataTempRegister);
334 m_assembler.sub(dest, dest, dataTempRegister);
338 void sub32(TrustedImm32 imm, Address address)
340 load32(address, dataTempRegister);
342 ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
343 if (armImm.isValid())
344 m_assembler.sub(dataTempRegister, dataTempRegister, armImm);
346 // Hrrrm, since dataTempRegister holds the data loaded,
347 // use addressTempRegister to hold the immediate.
348 move(imm, addressTempRegister);
349 m_assembler.sub(dataTempRegister, dataTempRegister, addressTempRegister);
352 store32(dataTempRegister, address);
355 void sub32(Address src, RegisterID dest)
357 load32(src, dataTempRegister);
358 sub32(dataTempRegister, dest);
361 void sub32(TrustedImm32 imm, AbsoluteAddress address)
363 load32(address.m_ptr, dataTempRegister);
365 ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
366 if (armImm.isValid())
367 m_assembler.sub(dataTempRegister, dataTempRegister, armImm);
369 // Hrrrm, since dataTempRegister holds the data loaded,
370 // use addressTempRegister to hold the immediate.
371 move(imm, addressTempRegister);
372 m_assembler.sub(dataTempRegister, dataTempRegister, addressTempRegister);
375 store32(dataTempRegister, address.m_ptr);
378 void xor32(RegisterID src, RegisterID dest)
380 m_assembler.eor(dest, dest, src);
383 void xor32(TrustedImm32 imm, RegisterID dest)
385 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
386 if (armImm.isValid())
387 m_assembler.eor(dest, dest, armImm);
389 move(imm, dataTempRegister);
390 m_assembler.eor(dest, dest, dataTempRegister);
395 // Memory access operations:
397 // Loads are of the form load(address, destination) and stores of the form
398 // store(source, address). The source for a store may be an TrustedImm32. Address
399 // operand objects to loads and store will be implicitly constructed if a
400 // register is passed.
403 void load32(ArmAddress address, RegisterID dest)
405 if (address.type == ArmAddress::HasIndex)
406 m_assembler.ldr(dest, address.base, address.u.index, address.u.scale);
407 else if (address.u.offset >= 0) {
408 ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
409 ASSERT(armImm.isValid());
410 m_assembler.ldr(dest, address.base, armImm);
412 ASSERT(address.u.offset >= -255);
413 m_assembler.ldr(dest, address.base, address.u.offset, true, false);
417 void load16(ArmAddress address, RegisterID dest)
419 if (address.type == ArmAddress::HasIndex)
420 m_assembler.ldrh(dest, address.base, address.u.index, address.u.scale);
421 else if (address.u.offset >= 0) {
422 ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
423 ASSERT(armImm.isValid());
424 m_assembler.ldrh(dest, address.base, armImm);
426 ASSERT(address.u.offset >= -255);
427 m_assembler.ldrh(dest, address.base, address.u.offset, true, false);
431 void load8(ArmAddress address, RegisterID dest)
433 if (address.type == ArmAddress::HasIndex)
434 m_assembler.ldrb(dest, address.base, address.u.index, address.u.scale);
435 else if (address.u.offset >= 0) {
436 ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
437 ASSERT(armImm.isValid());
438 m_assembler.ldrb(dest, address.base, armImm);
440 ASSERT(address.u.offset >= -255);
441 m_assembler.ldrb(dest, address.base, address.u.offset, true, false);
445 void store32(RegisterID src, ArmAddress address)
447 if (address.type == ArmAddress::HasIndex)
448 m_assembler.str(src, address.base, address.u.index, address.u.scale);
449 else if (address.u.offset >= 0) {
450 ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
451 ASSERT(armImm.isValid());
452 m_assembler.str(src, address.base, armImm);
454 ASSERT(address.u.offset >= -255);
455 m_assembler.str(src, address.base, address.u.offset, true, false);
460 void load32(ImplicitAddress address, RegisterID dest)
462 load32(setupArmAddress(address), dest);
465 void load32(BaseIndex address, RegisterID dest)
467 load32(setupArmAddress(address), dest);
470 void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
472 load32(setupArmAddress(address), dest);
475 void load32(const void* address, RegisterID dest)
477 move(TrustedImmPtr(address), addressTempRegister);
478 m_assembler.ldr(dest, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
481 void load8(ImplicitAddress address, RegisterID dest)
483 load8(setupArmAddress(address), dest);
486 void load8(BaseIndex address, RegisterID dest)
488 load8(setupArmAddress(address), dest);
491 DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
493 DataLabel32 label = moveWithPatch(TrustedImm32(address.offset), dataTempRegister);
494 load32(ArmAddress(address.base, dataTempRegister), dest);
498 DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
500 DataLabelCompact label(this);
501 ASSERT(address.offset >= 0);
502 ASSERT(address.offset <= MaximumCompactPtrAlignedAddressOffset);
503 ASSERT(ARMThumbImmediate::makeUInt12(address.offset).isUInt7());
504 m_assembler.ldrCompact(dest, address.base, ARMThumbImmediate::makeUInt12(address.offset));
508 void load16(BaseIndex address, RegisterID dest)
510 m_assembler.ldrh(dest, makeBaseIndexBase(address), address.index, address.scale);
513 void load16(ImplicitAddress address, RegisterID dest)
515 ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.offset);
516 if (armImm.isValid())
517 m_assembler.ldrh(dest, address.base, armImm);
519 move(TrustedImm32(address.offset), dataTempRegister);
520 m_assembler.ldrh(dest, address.base, dataTempRegister);
524 DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
526 DataLabel32 label = moveWithPatch(TrustedImm32(address.offset), dataTempRegister);
527 store32(src, ArmAddress(address.base, dataTempRegister));
531 void store32(RegisterID src, ImplicitAddress address)
533 store32(src, setupArmAddress(address));
536 void store32(RegisterID src, BaseIndex address)
538 store32(src, setupArmAddress(address));
541 void store32(TrustedImm32 imm, ImplicitAddress address)
543 move(imm, dataTempRegister);
544 store32(dataTempRegister, setupArmAddress(address));
547 void store32(RegisterID src, const void* address)
549 move(TrustedImmPtr(address), addressTempRegister);
550 m_assembler.str(src, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
553 void store32(TrustedImm32 imm, const void* address)
555 move(imm, dataTempRegister);
556 store32(dataTempRegister, address);
560 // Floating-point operations:
562 bool supportsFloatingPoint() const { return true; }
563 // On x86(_64) the MacroAssembler provides an interface to truncate a double to an integer.
564 // If a value is not representable as an integer, and possibly for some values that are,
565 // (on x86 INT_MIN, since this is indistinguishable from results for out-of-range/NaN input)
566 // a branch will be taken. It is not clear whether this interface will be well suited to
567 // other platforms. On ARMv7 the hardware truncation operation produces multiple possible
568 // failure values (saturates to INT_MIN & INT_MAX, NaN reulsts in a value of 0). This is a
569 // temporary solution while we work out what this interface should be. Either we need to
570 // decide to make this interface work on all platforms, rework the interface to make it more
571 // generic, or decide that the MacroAssembler cannot practically be used to abstracted these
572 // operations, and make clients go directly to the m_assembler to plant truncation instructions.
574 bool supportsFloatingPointTruncate() const { return false; }
576 bool supportsFloatingPointSqrt() const
580 bool supportsDoubleBitops() const { return false; }
582 void loadDouble(ImplicitAddress address, FPRegisterID dest)
584 RegisterID base = address.base;
585 int32_t offset = address.offset;
587 // Arm vfp addresses can be offset by a 9-bit ones-comp immediate, left shifted by 2.
588 if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
589 add32(TrustedImm32(offset), base, addressTempRegister);
590 base = addressTempRegister;
594 m_assembler.vldr(dest, base, offset);
597 void loadDouble(const void* address, FPRegisterID dest)
599 move(TrustedImmPtr(address), addressTempRegister);
600 m_assembler.vldr(dest, addressTempRegister, 0);
603 void storeDouble(FPRegisterID src, ImplicitAddress address)
605 RegisterID base = address.base;
606 int32_t offset = address.offset;
608 // Arm vfp addresses can be offset by a 9-bit ones-comp immediate, left shifted by 2.
609 if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
610 add32(TrustedImm32(offset), base, addressTempRegister);
611 base = addressTempRegister;
615 m_assembler.vstr(src, base, offset);
618 void addDouble(FPRegisterID src, FPRegisterID dest)
620 m_assembler.vadd_F64(dest, dest, src);
623 void addDouble(Address src, FPRegisterID dest)
625 loadDouble(src, fpTempRegister);
626 addDouble(fpTempRegister, dest);
629 void divDouble(FPRegisterID src, FPRegisterID dest)
631 m_assembler.vdiv_F64(dest, dest, src);
634 void subDouble(FPRegisterID src, FPRegisterID dest)
636 m_assembler.vsub_F64(dest, dest, src);
639 void subDouble(Address src, FPRegisterID dest)
641 loadDouble(src, fpTempRegister);
642 subDouble(fpTempRegister, dest);
645 void mulDouble(FPRegisterID src, FPRegisterID dest)
647 m_assembler.vmul_F64(dest, dest, src);
650 void mulDouble(Address src, FPRegisterID dest)
652 loadDouble(src, fpTempRegister);
653 mulDouble(fpTempRegister, dest);
656 void sqrtDouble(FPRegisterID, FPRegisterID)
658 ASSERT_NOT_REACHED();
661 void andnotDouble(FPRegisterID, FPRegisterID)
663 ASSERT_NOT_REACHED();
666 void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
668 m_assembler.vmov(fpTempRegisterAsSingle(), src);
669 m_assembler.vcvt_F64_S32(dest, fpTempRegisterAsSingle());
672 void convertInt32ToDouble(Address address, FPRegisterID dest)
674 // Fixme: load directly into the fpr!
675 load32(address, dataTempRegister);
676 m_assembler.vmov(fpTempRegisterAsSingle(), dataTempRegister);
677 m_assembler.vcvt_F64_S32(dest, fpTempRegisterAsSingle());
680 void convertInt32ToDouble(AbsoluteAddress address, FPRegisterID dest)
682 // Fixme: load directly into the fpr!
683 load32(address.m_ptr, dataTempRegister);
684 m_assembler.vmov(fpTempRegisterAsSingle(), dataTempRegister);
685 m_assembler.vcvt_F64_S32(dest, fpTempRegisterAsSingle());
688 Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
690 m_assembler.vcmp_F64(left, right);
693 if (cond == DoubleNotEqual) {
694 // ConditionNE jumps if NotEqual *or* unordered - force the unordered cases not to jump.
695 Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
696 Jump result = makeBranch(ARMv7Assembler::ConditionNE);
697 unordered.link(this);
700 if (cond == DoubleEqualOrUnordered) {
701 Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
702 Jump notEqual = makeBranch(ARMv7Assembler::ConditionNE);
703 unordered.link(this);
704 // We get here if either unordered or equal.
705 Jump result = jump();
709 return makeBranch(cond);
712 Jump branchTruncateDoubleToInt32(FPRegisterID, RegisterID)
714 ASSERT_NOT_REACHED();
718 // Convert 'src' to an integer, and places the resulting 'dest'.
719 // If the result is not representable as a 32 bit value, branch.
720 // May also branch for some values that are representable in 32 bits
721 // (specifically, in this case, 0).
722 void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID)
724 m_assembler.vcvtr_S32_F64(fpTempRegisterAsSingle(), src);
725 m_assembler.vmov(dest, fpTempRegisterAsSingle());
727 // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
728 m_assembler.vcvt_F64_S32(fpTempRegister, fpTempRegisterAsSingle());
729 failureCases.append(branchDouble(DoubleNotEqualOrUnordered, src, fpTempRegister));
731 // If the result is zero, it might have been -0.0, and the double comparison won't catch this!
732 failureCases.append(branchTest32(Zero, dest));
735 Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID)
737 m_assembler.vcmpz_F64(reg);
739 Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
740 Jump result = makeBranch(ARMv7Assembler::ConditionNE);
741 unordered.link(this);
745 Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID)
747 m_assembler.vcmpz_F64(reg);
749 Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
750 Jump notEqual = makeBranch(ARMv7Assembler::ConditionNE);
751 unordered.link(this);
752 // We get here if either unordered or equal.
753 Jump result = jump();
758 // Stack manipulation operations:
760 // The ABI is assumed to provide a stack abstraction to memory,
761 // containing machine word sized units of data. Push and pop
762 // operations add and remove a single register sized unit of data
763 // to or from the stack. Peek and poke operations read or write
764 // values on the stack, without moving the current stack position.
766 void pop(RegisterID dest)
768 // store postindexed with writeback
769 m_assembler.ldr(dest, ARMRegisters::sp, sizeof(void*), false, true);
772 void push(RegisterID src)
774 // store preindexed with writeback
775 m_assembler.str(src, ARMRegisters::sp, -sizeof(void*), true, true);
778 void push(Address address)
780 load32(address, dataTempRegister);
781 push(dataTempRegister);
784 void push(TrustedImm32 imm)
786 move(imm, dataTempRegister);
787 push(dataTempRegister);
790 // Register move operations:
792 // Move values in registers.
794 void move(TrustedImm32 imm, RegisterID dest)
796 uint32_t value = imm.m_value;
799 moveFixedWidthEncoding(imm, dest);
801 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(value);
803 if (armImm.isValid())
804 m_assembler.mov(dest, armImm);
805 else if ((armImm = ARMThumbImmediate::makeEncodedImm(~value)).isValid())
806 m_assembler.mvn(dest, armImm);
808 m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(value));
809 if (value & 0xffff0000)
810 m_assembler.movt(dest, ARMThumbImmediate::makeUInt16(value >> 16));
815 void move(RegisterID src, RegisterID dest)
817 m_assembler.mov(dest, src);
820 void move(TrustedImmPtr imm, RegisterID dest)
822 move(TrustedImm32(imm), dest);
825 void swap(RegisterID reg1, RegisterID reg2)
827 move(reg1, dataTempRegister);
829 move(dataTempRegister, reg2);
832 void signExtend32ToPtr(RegisterID src, RegisterID dest)
838 void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
849 // Forwards / external control flow operations:
851 // This set of jump and conditional branch operations return a Jump
852 // object which may linked at a later point, allow forwards jump,
853 // or jumps that will require external linkage (after the code has been
856 // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
857 // respecitvely, for unsigned comparisons the names b, a, be, and ae are
858 // used (representing the names 'below' and 'above').
860 // Operands to the comparision are provided in the expected order, e.g.
861 // jle32(reg1, TrustedImm32(5)) will branch if the value held in reg1, when
862 // treated as a signed 32bit value, is less than or equal to 5.
864 // jz and jnz test whether the first operand is equal to zero, and take
865 // an optional second operand of a mask under which to perform the test.
868 // Should we be using TEQ for equal/not-equal?
869 void compare32(RegisterID left, TrustedImm32 right)
871 int32_t imm = right.m_value;
873 m_assembler.tst(left, left);
875 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm);
876 if (armImm.isValid())
877 m_assembler.cmp(left, armImm);
878 else if ((armImm = ARMThumbImmediate::makeEncodedImm(-imm)).isValid())
879 m_assembler.cmn(left, armImm);
881 move(TrustedImm32(imm), dataTempRegister);
882 m_assembler.cmp(left, dataTempRegister);
887 void test32(RegisterID reg, TrustedImm32 mask)
889 int32_t imm = mask.m_value;
892 m_assembler.tst(reg, reg);
894 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm);
895 if (armImm.isValid())
896 m_assembler.tst(reg, armImm);
898 move(mask, dataTempRegister);
899 m_assembler.tst(reg, dataTempRegister);
905 Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right)
907 m_assembler.cmp(left, right);
908 return Jump(makeBranch(cond));
911 Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right)
913 compare32(left, right);
914 return Jump(makeBranch(cond));
917 Jump branch32(RelationalCondition cond, RegisterID left, Address right)
919 load32(right, dataTempRegister);
920 return branch32(cond, left, dataTempRegister);
923 Jump branch32(RelationalCondition cond, Address left, RegisterID right)
925 load32(left, dataTempRegister);
926 return branch32(cond, dataTempRegister, right);
929 Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
931 // use addressTempRegister incase the branch32 we call uses dataTempRegister. :-/
932 load32(left, addressTempRegister);
933 return branch32(cond, addressTempRegister, right);
936 Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
938 // use addressTempRegister incase the branch32 we call uses dataTempRegister. :-/
939 load32(left, addressTempRegister);
940 return branch32(cond, addressTempRegister, right);
943 Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
945 // use addressTempRegister incase the branch32 we call uses dataTempRegister. :-/
946 load32WithUnalignedHalfWords(left, addressTempRegister);
947 return branch32(cond, addressTempRegister, right);
950 Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
952 load32(left.m_ptr, dataTempRegister);
953 return branch32(cond, dataTempRegister, right);
956 Jump branch32(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
958 // use addressTempRegister incase the branch32 we call uses dataTempRegister. :-/
959 load32(left.m_ptr, addressTempRegister);
960 return branch32(cond, addressTempRegister, right);
963 Jump branch16(RelationalCondition cond, BaseIndex left, RegisterID right)
965 load16(left, dataTempRegister);
966 m_assembler.lsl(addressTempRegister, right, 16);
967 m_assembler.lsl(dataTempRegister, dataTempRegister, 16);
968 return branch32(cond, dataTempRegister, addressTempRegister);
971 Jump branch16(RelationalCondition cond, RegisterID left, TrustedImm32 right)
973 ASSERT(!(0xffff0000 & right.m_value));
974 // Extract the lower 16 bits into a temp for comparison
975 m_assembler.ubfx(dataTempRegister, left, 0, 16);
976 return branch32(cond, dataTempRegister, right);
979 Jump branch16(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
981 // use addressTempRegister incase the branch32 we call uses dataTempRegister. :-/
982 load16(left, addressTempRegister);
983 m_assembler.lsl(addressTempRegister, addressTempRegister, 16);
984 return branch32(cond, addressTempRegister, TrustedImm32(right.m_value << 16));
987 Jump branch8(RelationalCondition cond, RegisterID left, TrustedImm32 right)
989 compare32(left, right);
990 return Jump(makeBranch(cond));
993 Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
995 ASSERT(!(0xffffff00 & right.m_value));
996 // use addressTempRegister incase the branch8 we call uses dataTempRegister. :-/
997 load8(left, addressTempRegister);
998 return branch8(cond, addressTempRegister, right);
1001 Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
1003 ASSERT(!(0xffffff00 & right.m_value));
1004 // use addressTempRegister incase the branch32 we call uses dataTempRegister. :-/
1005 load8(left, addressTempRegister);
1006 return branch32(cond, addressTempRegister, right);
1009 Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
1011 m_assembler.tst(reg, mask);
1012 return Jump(makeBranch(cond));
1015 Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
1018 return Jump(makeBranch(cond));
1021 Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
1023 // use addressTempRegister incase the branchTest32 we call uses dataTempRegister. :-/
1024 load32(address, addressTempRegister);
1025 return branchTest32(cond, addressTempRegister, mask);
1028 Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
1030 // use addressTempRegister incase the branchTest32 we call uses dataTempRegister. :-/
1031 load32(address, addressTempRegister);
1032 return branchTest32(cond, addressTempRegister, mask);
1035 Jump branchTest8(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
1038 return Jump(makeBranch(cond));
1041 Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
1043 // use addressTempRegister incase the branchTest8 we call uses dataTempRegister. :-/
1044 load8(address, addressTempRegister);
1045 return branchTest8(cond, addressTempRegister, mask);
1048 void jump(RegisterID target)
1050 m_assembler.bx(target);
1053 // Address is a memory location containing the address to jump to
1054 void jump(Address address)
1056 load32(address, dataTempRegister);
1057 m_assembler.bx(dataTempRegister);
1061 // Arithmetic control flow operations:
1063 // This set of conditional branch operations branch based
1064 // on the result of an arithmetic operation. The operation
1065 // is performed as normal, storing the result.
1067 // * jz operations branch if the result is zero.
1068 // * jo operations branch if the (signed) arithmetic
1069 // operation caused an overflow to occur.
    // dest += src, setting flags (add_S); branch on the resulting condition.
    Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        m_assembler.add_S(dest, dest, src);
        return Jump(makeBranch(cond));
    }
1077 Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
1079 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
1080 if (armImm.isValid())
1081 m_assembler.add_S(dest, dest, armImm);
1083 move(imm, dataTempRegister);
1084 m_assembler.add_S(dest, dest, dataTempRegister);
1086 return Jump(makeBranch(cond));
    // dest = src1 * src2 (low 32 bits); branch on `cond` over the result.
    Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
    {
        // Signed 32x32->64 multiply: low half to dest, high half to dataTempRegister.
        m_assembler.smull(dest, dataTempRegister, src1, src2);

        if (cond == Overflow) {
            // Signed overflow occurred iff the high 32 bits differ from the
            // sign-extension of the low 32 bits.
            m_assembler.asr(addressTempRegister, dest, 31);
            return branch32(NotEqual, addressTempRegister, dataTempRegister);
        }

        return branchTest32(cond, dest);
    }
    // dest *= src; delegates to the three-operand form.
    Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchMul32(cond, src, dest, dest);
    }
    // dest = src * imm; the immediate is materialized in dataTempRegister first.
    Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(imm, dataTempRegister);
        return branchMul32(cond, dataTempRegister, src, dest);
    }
    // dest |= src, setting flags (orr_S); branch on the resulting condition.
    Jump branchOr32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        m_assembler.orr_S(dest, dest, src);
        return Jump(makeBranch(cond));
    }
    // dest -= src, setting flags (sub_S); branch on the resulting condition.
    Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        m_assembler.sub_S(dest, dest, src);
        return Jump(makeBranch(cond));
    }
1124 Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
1126 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
1127 if (armImm.isValid())
1128 m_assembler.sub_S(dest, dest, armImm);
1130 move(imm, dataTempRegister);
1131 m_assembler.sub_S(dest, dest, dataTempRegister);
1133 return Jump(makeBranch(cond));
    // Jump into a table of code stubs: branches to pc' + (index << scale),
    // where pc' is the address just past this emitted sequence.
    void relativeTableJump(RegisterID index, int scale)
    {
        ASSERT(scale >= 0 && scale <= 31);

        // dataTempRegister will point after the jump if index register contains zero
        move(ARMRegisters::pc, dataTempRegister);
        // NOTE(review): the constant 9 presumably compensates for the Thumb pc
        // read-ahead plus the size of the remaining instructions — confirm
        // against the emitted instruction lengths before changing this sequence.
        m_assembler.add(dataTempRegister, dataTempRegister, ARMThumbImmediate::makeEncodedImm(9));

        ShiftTypeAndAmount shift(SRType_LSL, scale);
        m_assembler.add(dataTempRegister, dataTempRegister, index, shift);
        jump(dataTempRegister);
    }
1149 // Miscellaneous operations:
1153 m_assembler.bkpt(0);
    // Emit a linkable near call: a fixed-width placeholder constant (patched at
    // link time with the callee address) followed by blx.
    ALWAYS_INLINE Call nearCall()
    {
        moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
        return Call(m_assembler.blx(dataTempRegister), Call::LinkableNear);
    }
    // Emit a linkable call: a fixed-width placeholder constant (patched at
    // link time with the callee address) followed by blx.
    ALWAYS_INLINE Call call()
    {
        moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
        return Call(m_assembler.blx(dataTempRegister), Call::Linkable);
    }
    // Indirect call through a register; not linkable (target is already known).
    ALWAYS_INLINE Call call(RegisterID target)
    {
        return Call(m_assembler.blx(target), Call::None);
    }
    // Indirect call through a function pointer loaded from memory.
    ALWAYS_INLINE Call call(Address address)
    {
        load32(address, dataTempRegister);
        return Call(m_assembler.blx(dataTempRegister), Call::None);
    }
    // Return: branch to the link register.
    ALWAYS_INLINE void ret()
    {
        m_assembler.bx(linkRegister);
    }
    // Materialize the boolean result of (left cond right) into dest: 1 if the
    // condition holds, 0 otherwise.
    void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.cmp(left, right);
        // IT block with an else arm: the first mov executes when cond holds,
        // the second when it does not (presumably — per ARMv7Assembler::it's
        // bool argument marking the second instruction as the else arm).
        m_assembler.it(armV7Condition(cond), false);
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
    }
    // Materialize the boolean result of (memory-word cond right) into dest.
    void compare32(RelationalCondition cond, Address left, RegisterID right, RegisterID dest)
    {
        load32(left, dataTempRegister);
        compare32(cond, dataTempRegister, right, dest);
    }
    // Materialize the boolean result of (left cond imm) into dest.
    void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        // Flag-setting compare helper (declared elsewhere in this class).
        compare32(left, right);
        // Conditional 1, else 0 — same IT pattern as the register/register form.
        m_assembler.it(armV7Condition(cond), false);
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
    }
    // The mask should be optional... perhaps the argument order should be
    // dest-src, operations always have a dest? ... possibly not true, considering
    // asm ops like test, or pseudo ops like pop().
    // dest = ((memory-word AND mask) flags satisfy cond) ? 1 : 0.
    void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
    {
        load32(address, dataTempRegister);
        // Flag-setting test helper (declared elsewhere in this class).
        test32(dataTempRegister, mask);
        // Conditional 1, else 0.
        m_assembler.it(armV7Condition(cond), false);
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
    }
    // dest = ((zero-extended memory-byte AND mask) flags satisfy cond) ? 1 : 0.
    void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
    {
        // load8 zero-extends, so the 32-bit test below sees only the byte.
        load8(address, dataTempRegister);
        test32(dataTempRegister, mask);
        m_assembler.it(armV7Condition(cond), false);
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
    }
    // Load a 32-bit constant via a fixed-width encoding and return a label so
    // the constant can be patched after code generation.
    ALWAYS_INLINE DataLabel32 moveWithPatch(TrustedImm32 imm, RegisterID dst)
    {
        moveFixedWidthEncoding(imm, dst);
        return DataLabel32(this);
    }
    // Pointer-flavored variant: load a patchable pointer constant into dst.
    ALWAYS_INLINE DataLabelPtr moveWithPatch(TrustedImmPtr imm, RegisterID dst)
    {
        moveFixedWidthEncoding(TrustedImm32(imm), dst);
        return DataLabelPtr(this);
    }
    // Compare `left` against a patchable pointer constant (initially
    // initialRightValue) and branch; dataLabel locates the constant for patching.
    ALWAYS_INLINE Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        dataLabel = moveWithPatch(initialRightValue, dataTempRegister);
        return branch32(cond, left, dataTempRegister);
    }
    // Compare a loaded memory word against a patchable pointer constant and branch.
    // The operand is loaded into addressTempRegister because moveWithPatch uses
    // dataTempRegister.
    ALWAYS_INLINE Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        load32(left, addressTempRegister);
        dataLabel = moveWithPatch(initialRightValue, dataTempRegister);
        return branch32(cond, addressTempRegister, dataTempRegister);
    }
1253 ALWAYS_INLINE DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
1255 DataLabelPtr label = moveWithPatch(initialValue, dataTempRegister);
1256 store32(dataTempRegister, address);
    // Convenience overload: store a patchable pointer with initial value 0.
    ALWAYS_INLINE DataLabelPtr storePtrWithPatch(ImplicitAddress address) { return storePtrWithPatch(TrustedImmPtr(0), address); }
    // Emit a linkable tail call: same patchable-constant sequence as call(),
    // but using bx instead of blx so no return address is pushed.
    ALWAYS_INLINE Call tailRecursiveCall()
    {
        // Like a normal call, but don't link.
        moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
        return Call(m_assembler.bx(dataTempRegister), Call::Linkable);
    }
1269 ALWAYS_INLINE Call makeTailRecursiveCall(Jump oldJump)
1272 return tailRecursiveCall();
    // Forward to the assembler's mapping from buffer offset to executable offset.
    int executableOffsetFor(int location)
    {
        return m_assembler.executableOffsetFor(location);
    }
    // True while the assembler is inside a sequence that must keep a fixed size
    // (jumps emitted then use the fixed-size variants; see jump()/makeBranch()).
    bool inUninterruptedSequence()
    {
        return m_inUninterruptedSequence;
    }
    // Emit an unlinked unconditional jump: a fixed-width placeholder constant
    // (patched later with the target) followed by bx.
    ALWAYS_INLINE Jump jump()
    {
        moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
        return Jump(m_assembler.bx(dataTempRegister), inUninterruptedSequence() ? ARMv7Assembler::JumpNoConditionFixedSize : ARMv7Assembler::JumpNoCondition);
    }
    // Emit an unlinked conditional jump. The ITTT block makes all three
    // following instructions conditional: the movT3/movt pair emitted by
    // moveFixedWidthEncoding plus the bx.
    ALWAYS_INLINE Jump makeBranch(ARMv7Assembler::Condition cond)
    {
        m_assembler.it(cond, true, true);
        moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
        return Jump(m_assembler.bx(dataTempRegister), inUninterruptedSequence() ? ARMv7Assembler::JumpConditionFixedSize : ARMv7Assembler::JumpCondition, cond);
    }
    // Overload mapping the public relational-condition enum onto the raw ARMv7 condition.
    ALWAYS_INLINE Jump makeBranch(RelationalCondition cond) { return makeBranch(armV7Condition(cond)); }
    // Overload mapping the public result-condition enum onto the raw ARMv7 condition.
    ALWAYS_INLINE Jump makeBranch(ResultCondition cond) { return makeBranch(armV7Condition(cond)); }
    // Overload mapping the public double-condition enum onto the raw ARMv7 condition.
    ALWAYS_INLINE Jump makeBranch(DoubleCondition cond) { return makeBranch(armV7Condition(cond)); }
1303 ArmAddress setupArmAddress(BaseIndex address)
1305 if (address.offset) {
1306 ARMThumbImmediate imm = ARMThumbImmediate::makeUInt12OrEncodedImm(address.offset);
1308 m_assembler.add(addressTempRegister, address.base, imm);
1310 move(TrustedImm32(address.offset), addressTempRegister);
1311 m_assembler.add(addressTempRegister, addressTempRegister, address.base);
1314 return ArmAddress(addressTempRegister, address.index, address.scale);
1316 return ArmAddress(address.base, address.index, address.scale);
    // Reduce an Address to a form the load/store encodings accept. Offsets in
    // [-0xff, 0xfff] can be encoded directly (presumably matching the T32
    // 8-bit-negative / 12-bit-positive offset forms — confirm in the ARM ARM);
    // larger offsets are materialized in addressTempRegister.
    ArmAddress setupArmAddress(Address address)
    {
        if ((address.offset >= -0xff) && (address.offset <= 0xfff))
            return ArmAddress(address.base, address.offset);

        move(TrustedImm32(address.offset), addressTempRegister);
        return ArmAddress(address.base, addressTempRegister);
    }
    // ImplicitAddress variant; same encoding rule as the Address overload.
    ArmAddress setupArmAddress(ImplicitAddress address)
    {
        if ((address.offset >= -0xff) && (address.offset <= 0xfff))
            return ArmAddress(address.base, address.offset);

        move(TrustedImm32(address.offset), addressTempRegister);
        return ArmAddress(address.base, addressTempRegister);
    }
1337 RegisterID makeBaseIndexBase(BaseIndex address)
1339 if (!address.offset)
1340 return address.base;
1342 ARMThumbImmediate imm = ARMThumbImmediate::makeUInt12OrEncodedImm(address.offset);
1344 m_assembler.add(addressTempRegister, address.base, imm);
1346 move(TrustedImm32(address.offset), addressTempRegister);
1347 m_assembler.add(addressTempRegister, addressTempRegister, address.base);
1350 return addressTempRegister;
    // Load a 32-bit constant with a fixed-length movT3/movt pair, regardless of
    // the value. The fixed width lets the patching machinery find and rewrite
    // the embedded constant after code generation.
    void moveFixedWidthEncoding(TrustedImm32 imm, RegisterID dst)
    {
        uint32_t value = imm.m_value;
        m_assembler.movT3(dst, ARMThumbImmediate::makeUInt16(value & 0xffff));
        m_assembler.movt(dst, ARMThumbImmediate::makeUInt16(value >> 16));
    }
    // RelationalCondition values are presumably declared with the ARMv7
    // condition-code encodings, so a cast suffices — confirm at the enum definition.
    ARMv7Assembler::Condition armV7Condition(RelationalCondition cond)
    {
        return static_cast<ARMv7Assembler::Condition>(cond);
    }
    // ResultCondition values are presumably declared with the ARMv7
    // condition-code encodings, so a cast suffices — confirm at the enum definition.
    ARMv7Assembler::Condition armV7Condition(ResultCondition cond)
    {
        return static_cast<ARMv7Assembler::Condition>(cond);
    }
    // DoubleCondition values are presumably declared with the ARMv7
    // condition-code encodings, so a cast suffices — confirm at the enum definition.
    ARMv7Assembler::Condition armV7Condition(DoubleCondition cond)
    {
        return static_cast<ARMv7Assembler::Condition>(cond);
    }
1376 friend class LinkBuffer;
1377 friend class RepatchBuffer;
    // Patch a linkable call emitted by call()/nearCall() to target `function`;
    // invoked by LinkBuffer after code generation.
    static void linkCall(void* code, Call call, FunctionPtr function)
    {
        ARMv7Assembler::linkCall(code, call.m_label, function.value());
    }
    // Retarget an already-linked call to a new code label; used by RepatchBuffer.
    static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
    {
        ARMv7Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
    }
    // Retarget an already-linked call to a new function pointer; used by RepatchBuffer.
    static void repatchCall(CodeLocationCall call, FunctionPtr destination)
    {
        ARMv7Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
    }
1394 bool m_inUninterruptedSequence;
1399 #endif // ENABLE(ASSEMBLER)
1401 #endif // MacroAssemblerARMv7_h