/*
 * Copyright (C) 2009 University of Szeged
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UNIVERSITY OF SZEGED OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "config.h"

#if ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)

#include "ARMAssembler.h"

namespace JSC {

// Patching helpers
void ARMAssembler::patchConstantPoolLoad(void* loadAddr, void* constPoolAddr)
{
    ARMWord* ldr = reinterpret_cast<ARMWord*>(loadAddr);
    ARMWord diff = reinterpret_cast<ARMWord*>(constPoolAddr) - ldr;
    ARMWord index = (*ldr & 0xfff) >> 1;

    ASSERT(diff >= 1);
    if (diff >= 2 || index > 0) {
        // Rewrite the ldr's 12-bit offset field to point at the constant's
        // slot in the pool.
        diff = (diff + index - 2) * sizeof(ARMWord);
        ASSERT(diff <= 0xfff);
        *ldr = (*ldr & ~0xfff) | diff;
    } else
        // The constant is the word immediately after the load: since the PC
        // reads as ldr + 8, clear DT_UP and use a downward offset of 4.
        *ldr = (*ldr & ~(0xfff | ARMAssembler::DT_UP)) | sizeof(ARMWord);
}
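
// Handle immediates
//
// A note on operand-2 immediates (background for getOp2 below): an ARM
// data-processing instruction encodes an immediate as an 8-bit value
// rotated right by an even amount (twice the 4-bit rotate field). getOp2
// searches for such an encoding and returns OP2_IMM | imm8 | (rotate << 8),
// or INVALID_IMM if none exists. For example, 0x3fc00 is 0xff rotated
// right by 22 bits, so it encodes as OP2_IMM | 0xff | (11 << 8).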
ARMWord ARMAssembler::getOp2(ARMWord imm)
{
    int rol;

    if (imm <= 0xff)
        return OP2_IMM | imm;

    // Normalize imm so its significant byte moves into the top 8 bits,
    // tracking the implied rotation in rol.
    if ((imm & 0xff000000) == 0) {
        imm <<= 8;
        rol = 8;
    } else {
        imm = (imm << 24) | (imm >> 8);
        rol = 0;
    }

    if ((imm & 0xff000000) == 0) {
        imm <<= 8;
        rol += 4;
    }

    if ((imm & 0xf0000000) == 0) {
        imm <<= 4;
        rol += 2;
    }

    if ((imm & 0xc0000000) == 0) {
        imm <<= 2;
        rol += 1;
    }

    if ((imm & 0x00ffffff) == 0)
        return OP2_IMM | (imm >> 24) | (rol << 8);

    return INVALID_IMM;
}
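
// genInt emits a 32-bit constant that getOp2 cannot encode, provided the
// value splits into two rotated 8-bit chunks: MOV + ORR when 'positive'
// is true, MVN + BIC otherwise. It returns 1 on success and 0 if the
// value needs a constant-pool load instead. For example, 0xff0000ff
// splits into the chunks 0xff and 0xff000000, each a valid operand-2
// immediate.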
int ARMAssembler::genInt(int reg, ARMWord imm, bool positive)
{
    // Step 1: search for a non-immediate part
    ARMWord mask;
    ARMWord imm1;
    ARMWord imm2;
    int rol;

    mask = 0xff000000;
    rol = 8;
    while (1) {
        if ((imm & mask) == 0) {
            imm = (imm << rol) | (imm >> (32 - rol));
            rol = 4 + (rol >> 1);
            break;
        }
        rol += 2;
        mask >>= 2;
        if (mask & 0x3) {
            // rol 8
            imm = (imm << 8) | (imm >> 24);
            mask = 0xff00;
            rol = 24;
            while (1) {
                if ((imm & mask) == 0) {
                    imm = (imm << rol) | (imm >> (32 - rol));
                    rol = (rol >> 1) - 8;
                    break;
                }
                rol += 2;
                mask <<= 2;
                if (mask & 0x80000000) {
                    // Impossible: no suitable zero run found
                    return 0;
                }
            }
            break;
        }
    }

    ASSERT((imm & 0xff) == 0);
    // Step 2: encode the two 8-bit chunks as rotated immediates
    if ((imm & 0xff000000) == 0) {
        imm1 = OP2_IMM | ((imm >> 16) & 0xff) | (((rol + 4) & 0xf) << 8);
        imm2 = OP2_IMM | ((imm >> 8) & 0xff) | (((rol + 8) & 0xf) << 8);
    } else if (imm & 0xc0000000) {
        imm1 = OP2_IMM | ((imm >> 24) & 0xff) | ((rol & 0xf) << 8);
        imm <<= 8;
        rol += 4;

        if ((imm & 0xff000000) == 0) {
            imm <<= 8;
            rol += 4;
        }

        if ((imm & 0xf0000000) == 0) {
            imm <<= 4;
            rol += 2;
        }

        if ((imm & 0xc0000000) == 0) {
            imm <<= 2;
            rol += 1;
        }

        if ((imm & 0x00ffffff) == 0)
            imm2 = OP2_IMM | (imm >> 24) | ((rol & 0xf) << 8);
        else
            return 0;
    } else {
        if ((imm & 0xf0000000) == 0) {
            imm <<= 4;
            rol += 2;
        }

        if ((imm & 0xc0000000) == 0) {
            imm <<= 2;
            rol += 1;
        }

        imm1 = OP2_IMM | ((imm >> 24) & 0xff) | ((rol & 0xf) << 8);
        imm <<= 8;
        rol += 4;

        if ((imm & 0xf0000000) == 0) {
            imm <<= 4;
            rol += 2;
        }

        if ((imm & 0xc0000000) == 0) {
            imm <<= 2;
            rol += 1;
        }

        if ((imm & 0x00ffffff) == 0)
            imm2 = OP2_IMM | (imm >> 24) | ((rol & 0xf) << 8);
        else
            return 0;
    }

    // Step 3: emit the instruction pair
    if (positive) {
        mov_r(reg, imm1);
        orr_r(reg, reg, imm2);
    } else {
        mvn_r(reg, imm1);
        bic_r(reg, reg, imm2);
    }

    return 1;
}
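
// getImm returns an operand-2 encoding of 'imm' when one exists (possibly
// as an inverted immediate when 'invert' is allowed); otherwise it
// materializes the value in tmpReg and returns the register number.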
ARMWord ARMAssembler::getImm(ARMWord imm, int tmpReg, bool invert)
{
    ARMWord tmp;

    // Do it with one instruction
    tmp = getOp2(imm);
    if (tmp != INVALID_IMM)
        return tmp;

    tmp = getOp2(~imm);
    if (tmp != INVALID_IMM) {
        if (invert)
            return tmp | OP2_INV_IMM;
        mvn_r(tmpReg, tmp);
        return tmpReg;
    }

    return encodeComplexImm(imm, tmpReg);
}
void ARMAssembler::moveImm(ARMWord imm, int dest)
{
    ARMWord tmp;

    // Do it with one instruction
    tmp = getOp2(imm);
    if (tmp != INVALID_IMM) {
        mov_r(dest, tmp);
        return;
    }

    tmp = getOp2(~imm);
    if (tmp != INVALID_IMM) {
        mvn_r(dest, tmp);
        return;
    }

    encodeComplexImm(imm, dest);
}
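
// On ARMv7 and later a complex immediate is built with MOVW/MOVT, which
// load the low and high 16 bits directly; for example, 0x12345678 becomes
// movw dest, #0x5678 followed by movt dest, #0x1234. Older cores fall
// back to the two-instruction split in genInt, or ultimately to a
// constant-pool load.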
ARMWord ARMAssembler::encodeComplexImm(ARMWord imm, int dest)
{
#if WTF_ARM_ARCH_AT_LEAST(7)
    ARMWord tmp = getImm16Op2(imm);
    if (tmp != INVALID_IMM) {
        movw_r(dest, tmp);
        return dest;
    }
    movw_r(dest, getImm16Op2(imm & 0xffff));
    movt_r(dest, getImm16Op2(imm >> 16));
    return dest;
#else
    // Do it with two instructions
    if (genInt(dest, imm, true))
        return dest;
    if (genInt(dest, ~imm, false))
        return dest;

    ldr_imm(dest, imm);
    return dest;
#endif
}
// Memory load/store helpers
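
// Word and byte transfers (LDR/STR via the dtr_* helpers below) take a
// 12-bit immediate offset, encoded unsigned with a separate up/down bit,
// hence the 0xfff bounds and the dtr_u/dtr_d pairs. Larger offsets borrow
// the S0 scratch register to form the address in steps.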
void ARMAssembler::dataTransfer32(bool isLoad, RegisterID srcDst, RegisterID base, int32_t offset, bool bytes)
{
    ARMWord transferFlag = bytes ? DT_BYTE : 0;
    if (offset >= 0) {
        if (offset <= 0xfff)
            dtr_u(isLoad, srcDst, base, offset | transferFlag);
        else if (offset <= 0xfffff) {
            // Add the top 8 bits of the 20-bit offset to base (rotate field
            // 10 places the 8-bit immediate at bits 19-12), then transfer
            // with the remaining 12 bits.
            add_r(ARMRegisters::S0, base, OP2_IMM | (offset >> 12) | (10 << 8));
            dtr_u(isLoad, srcDst, ARMRegisters::S0, (offset & 0xfff) | transferFlag);
        } else {
            moveImm(offset, ARMRegisters::S0);
            dtr_ur(isLoad, srcDst, base, ARMRegisters::S0 | transferFlag);
        }
    } else {
        if (offset >= -0xfff)
            dtr_d(isLoad, srcDst, base, -offset | transferFlag);
        else if (offset >= -0xfffff) {
            sub_r(ARMRegisters::S0, base, OP2_IMM | (-offset >> 12) | (10 << 8));
            dtr_d(isLoad, srcDst, ARMRegisters::S0, (-offset & 0xfff) | transferFlag);
        } else {
            moveImm(offset, ARMRegisters::S0);
            dtr_ur(isLoad, srcDst, base, ARMRegisters::S0 | transferFlag);
        }
    }
}
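
// Base + scaled-index addressing: the barrel shifter lets the index be
// shifted left by the scale at no extra cost, so base + (index << scale)
// + offset needs at most one ADD through S0 when the offset fits in the
// 12-bit field.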
void ARMAssembler::baseIndexTransfer32(bool isLoad, RegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset, bool bytes)
{
    ARMWord op2;
    ARMWord transferFlag = bytes ? DT_BYTE : 0;

    ASSERT(scale >= 0 && scale <= 3);
    op2 = lsl(index, scale);

    if (offset >= 0 && offset <= 0xfff) {
        add_r(ARMRegisters::S0, base, op2);
        dtr_u(isLoad, srcDst, ARMRegisters::S0, offset | transferFlag);
        return;
    }
    if (offset <= 0 && offset >= -0xfff) {
        add_r(ARMRegisters::S0, base, op2);
        dtr_d(isLoad, srcDst, ARMRegisters::S0, (-offset & 0xfff) | transferFlag);
        return;
    }

    ldr_un_imm(ARMRegisters::S0, offset);
    add_r(ARMRegisters::S0, ARMRegisters::S0, op2);
    dtr_ur(isLoad, srcDst, base, ARMRegisters::S0 | transferFlag);
}
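
// VFP double loads and stores encode their offset as an 8-bit word count,
// giving a reach of +/-1020 bytes; that is where the 0x3ff bound and the
// 'offset >> 2' scaling below come from.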
void ARMAssembler::doubleTransfer(bool isLoad, FPRegisterID srcDst, RegisterID base, int32_t offset)
{
    // VFP cannot directly access memory that is not four-byte-aligned
    if (!(offset & 0x3)) {
        if (offset <= 0x3ff && offset >= 0) {
            fdtr_u(isLoad, srcDst, base, offset >> 2);
            return;
        }
        if (offset <= 0x3ffff && offset >= 0) {
            // Add the top 8 bits of the 18-bit offset to base (rotate field
            // 11 places the 8-bit immediate at bits 17-10).
            add_r(ARMRegisters::S0, base, OP2_IMM | (offset >> 10) | (11 << 8));
            fdtr_u(isLoad, srcDst, ARMRegisters::S0, (offset >> 2) & 0xff);
            return;
        }
        offset = -offset;

        if (offset <= 0x3ff && offset >= 0) {
            fdtr_d(isLoad, srcDst, base, offset >> 2);
            return;
        }
        if (offset <= 0x3ffff && offset >= 0) {
            sub_r(ARMRegisters::S0, base, OP2_IMM | (offset >> 10) | (11 << 8));
            fdtr_d(isLoad, srcDst, ARMRegisters::S0, (offset >> 2) & 0xff);
            return;
        }
        // Restore the original offset before falling through.
        offset = -offset;
    }

    ldr_un_imm(ARMRegisters::S0, offset);
    add_r(ARMRegisters::S0, ARMRegisters::S0, base);
    fdtr_u(isLoad, srcDst, ARMRegisters::S0, 0);
}
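
// executableCopy moves the finished code into executable memory and then
// walks the recorded jumps: a target within direct-branch range (+/-32MB,
// the reach of B's 24-bit word offset) is rewritten as a plain B
// instruction; anything further away keeps its constant-pool load, with
// the absolute target address patched into the pool slot.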
PassRefPtr<ExecutableMemoryHandle> ARMAssembler::executableCopy(JSGlobalData& globalData)
{
    // 64-bit alignment is required for the next constant pool and the JIT code as well
    m_buffer.flushWithoutBarrier(true);
    if (!m_buffer.isAligned(8))
        bkpt(0);

    RefPtr<ExecutableMemoryHandle> result = m_buffer.executableCopy(globalData);
    char* data = reinterpret_cast<char*>(result->start());

    for (Jumps::Iterator iter = m_jumps.begin(); iter != m_jumps.end(); ++iter) {
        // The last bit is set if the constant must be placed in the constant pool.
        int pos = (iter->m_offset) & (~0x1);
        ARMWord* ldrAddr = reinterpret_cast_ptr<ARMWord*>(data + pos);
        ARMWord* addr = getLdrImmAddress(ldrAddr);
        if (*addr != InvalidBranchTarget) {
            if (!(iter->m_offset & 1)) {
                int diff = reinterpret_cast_ptr<ARMWord*>(data + *addr) - (ldrAddr + DefaultPrefetching);

                if ((diff <= BOFFSET_MAX && diff >= BOFFSET_MIN)) {
                    *ldrAddr = B | getConditionalField(*ldrAddr) | (diff & BRANCH_MASK);
                    continue;
                }
            }
            *addr = reinterpret_cast<ARMWord>(data + *addr);
        }
    }

    return result.release();
}

} // namespace JSC
#endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)