2 * Copyright (C) 2008 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #include "CodeBlock.h"
32 #include "JITInlineMethods.h"
33 #include "JITStubCall.h"
36 #include "JSFunction.h"
37 #include "Interpreter.h"
38 #include "ResultType.h"
39 #include "SamplingTool.h"
49 void JIT::emit_op_jless(Instruction* currentInstruction)
51 unsigned op1 = currentInstruction[1].u.operand;
52 unsigned op2 = currentInstruction[2].u.operand;
53 unsigned target = currentInstruction[3].u.operand;
55 emit_compareAndJump(op_jless, op1, op2, target, LessThan);
58 void JIT::emit_op_jlesseq(Instruction* currentInstruction)
60 unsigned op1 = currentInstruction[1].u.operand;
61 unsigned op2 = currentInstruction[2].u.operand;
62 unsigned target = currentInstruction[3].u.operand;
64 emit_compareAndJump(op_jlesseq, op1, op2, target, LessThanOrEqual);
67 void JIT::emit_op_jgreater(Instruction* currentInstruction)
69 unsigned op1 = currentInstruction[1].u.operand;
70 unsigned op2 = currentInstruction[2].u.operand;
71 unsigned target = currentInstruction[3].u.operand;
73 emit_compareAndJump(op_jgreater, op1, op2, target, GreaterThan);
76 void JIT::emit_op_jgreatereq(Instruction* currentInstruction)
78 unsigned op1 = currentInstruction[1].u.operand;
79 unsigned op2 = currentInstruction[2].u.operand;
80 unsigned target = currentInstruction[3].u.operand;
82 emit_compareAndJump(op_jgreatereq, op1, op2, target, GreaterThanOrEqual);
85 void JIT::emit_op_jnless(Instruction* currentInstruction)
87 unsigned op1 = currentInstruction[1].u.operand;
88 unsigned op2 = currentInstruction[2].u.operand;
89 unsigned target = currentInstruction[3].u.operand;
91 emit_compareAndJump(op_jnless, op1, op2, target, GreaterThanOrEqual);
94 void JIT::emit_op_jnlesseq(Instruction* currentInstruction)
96 unsigned op1 = currentInstruction[1].u.operand;
97 unsigned op2 = currentInstruction[2].u.operand;
98 unsigned target = currentInstruction[3].u.operand;
100 emit_compareAndJump(op_jnlesseq, op1, op2, target, GreaterThan);
103 void JIT::emit_op_jngreater(Instruction* currentInstruction)
105 unsigned op1 = currentInstruction[1].u.operand;
106 unsigned op2 = currentInstruction[2].u.operand;
107 unsigned target = currentInstruction[3].u.operand;
109 emit_compareAndJump(op_jngreater, op1, op2, target, LessThanOrEqual);
112 void JIT::emit_op_jngreatereq(Instruction* currentInstruction)
114 unsigned op1 = currentInstruction[1].u.operand;
115 unsigned op2 = currentInstruction[2].u.operand;
116 unsigned target = currentInstruction[3].u.operand;
118 emit_compareAndJump(op_jngreatereq, op1, op2, target, LessThan);
121 void JIT::emitSlow_op_jless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
123 unsigned op1 = currentInstruction[1].u.operand;
124 unsigned op2 = currentInstruction[2].u.operand;
125 unsigned target = currentInstruction[3].u.operand;
127 emit_compareAndJumpSlow(op1, op2, target, DoubleLessThan, cti_op_jless, false, iter);
130 void JIT::emitSlow_op_jlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
132 unsigned op1 = currentInstruction[1].u.operand;
133 unsigned op2 = currentInstruction[2].u.operand;
134 unsigned target = currentInstruction[3].u.operand;
136 emit_compareAndJumpSlow(op1, op2, target, DoubleLessThanOrEqual, cti_op_jlesseq, false, iter);
139 void JIT::emitSlow_op_jgreater(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
141 unsigned op1 = currentInstruction[1].u.operand;
142 unsigned op2 = currentInstruction[2].u.operand;
143 unsigned target = currentInstruction[3].u.operand;
145 emit_compareAndJumpSlow(op1, op2, target, DoubleGreaterThan, cti_op_jgreater, false, iter);
148 void JIT::emitSlow_op_jgreatereq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
150 unsigned op1 = currentInstruction[1].u.operand;
151 unsigned op2 = currentInstruction[2].u.operand;
152 unsigned target = currentInstruction[3].u.operand;
154 emit_compareAndJumpSlow(op1, op2, target, DoubleGreaterThanOrEqual, cti_op_jgreatereq, false, iter);
157 void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
159 unsigned op1 = currentInstruction[1].u.operand;
160 unsigned op2 = currentInstruction[2].u.operand;
161 unsigned target = currentInstruction[3].u.operand;
163 emit_compareAndJumpSlow(op1, op2, target, DoubleGreaterThanOrEqualOrUnordered, cti_op_jless, true, iter);
166 void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
168 unsigned op1 = currentInstruction[1].u.operand;
169 unsigned op2 = currentInstruction[2].u.operand;
170 unsigned target = currentInstruction[3].u.operand;
172 emit_compareAndJumpSlow(op1, op2, target, DoubleGreaterThanOrUnordered, cti_op_jlesseq, true, iter);
175 void JIT::emitSlow_op_jngreater(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
177 unsigned op1 = currentInstruction[1].u.operand;
178 unsigned op2 = currentInstruction[2].u.operand;
179 unsigned target = currentInstruction[3].u.operand;
181 emit_compareAndJumpSlow(op1, op2, target, DoubleLessThanOrEqualOrUnordered, cti_op_jgreater, true, iter);
184 void JIT::emitSlow_op_jngreatereq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
186 unsigned op1 = currentInstruction[1].u.operand;
187 unsigned op2 = currentInstruction[2].u.operand;
188 unsigned target = currentInstruction[3].u.operand;
190 emit_compareAndJumpSlow(op1, op2, target, DoubleLessThanOrUnordered, cti_op_jgreatereq, true, iter);
195 void JIT::emit_op_lshift(Instruction* currentInstruction)
197 unsigned result = currentInstruction[1].u.operand;
198 unsigned op1 = currentInstruction[2].u.operand;
199 unsigned op2 = currentInstruction[3].u.operand;
201 emitGetVirtualRegisters(op1, regT0, op2, regT2);
202 // FIXME: would we be better using 'emitJumpSlowCaseIfNotImmediateIntegers'? - we *probably* ought to be consistent.
203 emitJumpSlowCaseIfNotImmediateInteger(regT0);
204 emitJumpSlowCaseIfNotImmediateInteger(regT2);
205 emitFastArithImmToInt(regT0);
206 emitFastArithImmToInt(regT2);
207 lshift32(regT2, regT0);
208 emitFastArithReTagImmediate(regT0, regT0);
209 emitPutVirtualRegister(result);
212 void JIT::emitSlow_op_lshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
214 unsigned result = currentInstruction[1].u.operand;
215 unsigned op1 = currentInstruction[2].u.operand;
216 unsigned op2 = currentInstruction[3].u.operand;
222 JITStubCall stubCall(this, cti_op_lshift);
223 stubCall.addArgument(regT0);
224 stubCall.addArgument(regT2);
225 stubCall.call(result);
228 void JIT::emit_op_rshift(Instruction* currentInstruction)
230 unsigned result = currentInstruction[1].u.operand;
231 unsigned op1 = currentInstruction[2].u.operand;
232 unsigned op2 = currentInstruction[3].u.operand;
234 if (isOperandConstantImmediateInt(op2)) {
235 // isOperandConstantImmediateInt(op2) => 1 SlowCase
236 emitGetVirtualRegister(op1, regT0);
237 emitJumpSlowCaseIfNotImmediateInteger(regT0);
238 // Mask with 0x1f as per ecma-262 11.7.2 step 7.
239 rshift32(Imm32(getConstantOperandImmediateInt(op2) & 0x1f), regT0);
241 emitGetVirtualRegisters(op1, regT0, op2, regT2);
242 if (supportsFloatingPointTruncate()) {
243 Jump lhsIsInt = emitJumpIfImmediateInteger(regT0);
244 // supportsFloatingPoint() && USE(JSVALUE64) => 3 SlowCases
245 addSlowCase(emitJumpIfNotImmediateNumber(regT0));
246 addPtr(tagTypeNumberRegister, regT0);
247 movePtrToDouble(regT0, fpRegT0);
248 addSlowCase(branchTruncateDoubleToInt32(fpRegT0, regT0));
250 emitJumpSlowCaseIfNotImmediateInteger(regT2);
252 // !supportsFloatingPoint() => 2 SlowCases
253 emitJumpSlowCaseIfNotImmediateInteger(regT0);
254 emitJumpSlowCaseIfNotImmediateInteger(regT2);
256 emitFastArithImmToInt(regT2);
257 rshift32(regT2, regT0);
259 emitFastArithIntToImmNoCheck(regT0, regT0);
260 emitPutVirtualRegister(result);
263 void JIT::emitSlow_op_rshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
265 unsigned result = currentInstruction[1].u.operand;
266 unsigned op1 = currentInstruction[2].u.operand;
267 unsigned op2 = currentInstruction[3].u.operand;
269 JITStubCall stubCall(this, cti_op_rshift);
271 if (isOperandConstantImmediateInt(op2)) {
273 stubCall.addArgument(regT0);
274 stubCall.addArgument(op2, regT2);
276 if (supportsFloatingPointTruncate()) {
280 // We're reloading op1 to regT0 as we can no longer guarantee that
281 // we have not munged the operand. It may have already been shifted
282 // correctly, but it still will not have been tagged.
283 stubCall.addArgument(op1, regT0);
284 stubCall.addArgument(regT2);
288 stubCall.addArgument(regT0);
289 stubCall.addArgument(regT2);
293 stubCall.call(result);
296 void JIT::emit_op_urshift(Instruction* currentInstruction)
298 unsigned dst = currentInstruction[1].u.operand;
299 unsigned op1 = currentInstruction[2].u.operand;
300 unsigned op2 = currentInstruction[3].u.operand;
302 // Slow case of urshift makes assumptions about what registers hold the
303 // shift arguments, so any changes must be updated there as well.
304 if (isOperandConstantImmediateInt(op2)) {
305 emitGetVirtualRegister(op1, regT0);
306 emitJumpSlowCaseIfNotImmediateInteger(regT0);
307 emitFastArithImmToInt(regT0);
308 int shift = getConstantOperand(op2).asInt32();
310 urshift32(Imm32(shift & 0x1f), regT0);
311 // unsigned shift < 0 or shift = k*2^32 may result in (essentially)
312 // a toUint conversion, which can result in a value we can represent
313 // as an immediate int.
314 if (shift < 0 || !(shift & 31))
315 addSlowCase(branch32(LessThan, regT0, TrustedImm32(0)));
316 emitFastArithReTagImmediate(regT0, regT0);
317 emitPutVirtualRegister(dst, regT0);
320 emitGetVirtualRegisters(op1, regT0, op2, regT1);
321 if (!isOperandConstantImmediateInt(op1))
322 emitJumpSlowCaseIfNotImmediateInteger(regT0);
323 emitJumpSlowCaseIfNotImmediateInteger(regT1);
324 emitFastArithImmToInt(regT0);
325 emitFastArithImmToInt(regT1);
326 urshift32(regT1, regT0);
327 addSlowCase(branch32(LessThan, regT0, TrustedImm32(0)));
328 emitFastArithReTagImmediate(regT0, regT0);
329 emitPutVirtualRegister(dst, regT0);
332 void JIT::emitSlow_op_urshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
334 unsigned dst = currentInstruction[1].u.operand;
335 unsigned op1 = currentInstruction[2].u.operand;
336 unsigned op2 = currentInstruction[3].u.operand;
337 if (isOperandConstantImmediateInt(op2)) {
338 int shift = getConstantOperand(op2).asInt32();
340 linkSlowCase(iter); // int32 check
341 if (supportsFloatingPointTruncate()) {
343 failures.append(emitJumpIfNotImmediateNumber(regT0)); // op1 is not a double
344 addPtr(tagTypeNumberRegister, regT0);
345 movePtrToDouble(regT0, fpRegT0);
346 failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
348 urshift32(Imm32(shift & 0x1f), regT0);
349 if (shift < 0 || !(shift & 31))
350 failures.append(branch32(LessThan, regT0, TrustedImm32(0)));
351 emitFastArithReTagImmediate(regT0, regT0);
352 emitPutVirtualRegister(dst, regT0);
353 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
356 if (shift < 0 || !(shift & 31))
357 linkSlowCase(iter); // failed to box in hot path
361 if (!isOperandConstantImmediateInt(op1)) {
362 linkSlowCase(iter); // int32 check -- op1 is not an int
363 if (supportsFloatingPointTruncate()) {
365 failures.append(emitJumpIfNotImmediateNumber(regT0)); // op1 is not a double
366 addPtr(tagTypeNumberRegister, regT0);
367 movePtrToDouble(regT0, fpRegT0);
368 failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
369 failures.append(emitJumpIfNotImmediateInteger(regT1)); // op2 is not an int
370 emitFastArithImmToInt(regT1);
371 urshift32(regT1, regT0);
372 failures.append(branch32(LessThan, regT0, TrustedImm32(0)));
373 emitFastArithReTagImmediate(regT0, regT0);
374 emitPutVirtualRegister(dst, regT0);
375 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
380 linkSlowCase(iter); // int32 check - op2 is not an int
381 linkSlowCase(iter); // Can't represent unsigned result as an immediate
384 JITStubCall stubCall(this, cti_op_urshift);
385 stubCall.addArgument(op1, regT0);
386 stubCall.addArgument(op2, regT1);
390 void JIT::emit_compareAndJump(OpcodeID, unsigned op1, unsigned op2, unsigned target, RelationalCondition condition)
392 // We generate inline code for the following cases in the fast path:
393 // - int immediate to constant int immediate
394 // - constant int immediate to int immediate
395 // - int immediate to int immediate
397 if (isOperandConstantImmediateChar(op1)) {
398 emitGetVirtualRegister(op2, regT0);
399 addSlowCase(emitJumpIfNotJSCell(regT0));
401 emitLoadCharacterString(regT0, regT0, failures);
402 addSlowCase(failures);
403 addJump(branch32(commute(condition), regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
406 if (isOperandConstantImmediateChar(op2)) {
407 emitGetVirtualRegister(op1, regT0);
408 addSlowCase(emitJumpIfNotJSCell(regT0));
410 emitLoadCharacterString(regT0, regT0, failures);
411 addSlowCase(failures);
412 addJump(branch32(condition, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
415 if (isOperandConstantImmediateInt(op2)) {
416 emitGetVirtualRegister(op1, regT0);
417 emitJumpSlowCaseIfNotImmediateInteger(regT0);
418 int32_t op2imm = getConstantOperandImmediateInt(op2);
419 addJump(branch32(condition, regT0, Imm32(op2imm)), target);
420 } else if (isOperandConstantImmediateInt(op1)) {
421 emitGetVirtualRegister(op2, regT1);
422 emitJumpSlowCaseIfNotImmediateInteger(regT1);
423 int32_t op1imm = getConstantOperandImmediateInt(op1);
424 addJump(branch32(commute(condition), regT1, Imm32(op1imm)), target);
426 emitGetVirtualRegisters(op1, regT0, op2, regT1);
427 emitJumpSlowCaseIfNotImmediateInteger(regT0);
428 emitJumpSlowCaseIfNotImmediateInteger(regT1);
430 addJump(branch32(condition, regT0, regT1), target);
434 void JIT::emit_compareAndJumpSlow(unsigned op1, unsigned op2, unsigned target, DoubleCondition condition, int (JIT_STUB *stub)(STUB_ARGS_DECLARATION), bool invert, Vector<SlowCaseEntry>::iterator& iter)
436 COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jlesseq), OPCODE_LENGTH_op_jlesseq_equals_op_jless);
437 COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jnless), OPCODE_LENGTH_op_jnless_equals_op_jless);
438 COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jnlesseq), OPCODE_LENGTH_op_jnlesseq_equals_op_jless);
439 COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jgreater), OPCODE_LENGTH_op_jgreater_equals_op_jless);
440 COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jgreatereq), OPCODE_LENGTH_op_jgreatereq_equals_op_jless);
441 COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jngreater), OPCODE_LENGTH_op_jngreater_equals_op_jless);
442 COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jngreatereq), OPCODE_LENGTH_op_jngreatereq_equals_op_jless);
444 // We generate inline code for the following cases in the slow path:
445 // - floating-point number to constant int immediate
446 // - constant int immediate to floating-point number
447 // - floating-point number to floating-point number.
448 if (isOperandConstantImmediateChar(op1) || isOperandConstantImmediateChar(op2)) {
453 JITStubCall stubCall(this, stub);
454 stubCall.addArgument(op1, regT0);
455 stubCall.addArgument(op2, regT1);
457 emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);
461 if (isOperandConstantImmediateInt(op2)) {
464 if (supportsFloatingPoint()) {
465 Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
466 addPtr(tagTypeNumberRegister, regT0);
467 movePtrToDouble(regT0, fpRegT0);
469 int32_t op2imm = getConstantOperand(op2).asInt32();
471 move(Imm32(op2imm), regT1);
472 convertInt32ToDouble(regT1, fpRegT1);
474 emitJumpSlowToHot(branchDouble(condition, fpRegT0, fpRegT1), target);
476 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jless));
481 JITStubCall stubCall(this, stub);
482 stubCall.addArgument(regT0);
483 stubCall.addArgument(op2, regT2);
485 emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);
487 } else if (isOperandConstantImmediateInt(op1)) {
490 if (supportsFloatingPoint()) {
491 Jump fail1 = emitJumpIfNotImmediateNumber(regT1);
492 addPtr(tagTypeNumberRegister, regT1);
493 movePtrToDouble(regT1, fpRegT1);
495 int32_t op1imm = getConstantOperand(op1).asInt32();
497 move(Imm32(op1imm), regT0);
498 convertInt32ToDouble(regT0, fpRegT0);
500 emitJumpSlowToHot(branchDouble(condition, fpRegT0, fpRegT1), target);
502 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jless));
507 JITStubCall stubCall(this, stub);
508 stubCall.addArgument(op1, regT2);
509 stubCall.addArgument(regT1);
511 emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);
515 if (supportsFloatingPoint()) {
516 Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
517 Jump fail2 = emitJumpIfNotImmediateNumber(regT1);
518 Jump fail3 = emitJumpIfImmediateInteger(regT1);
519 addPtr(tagTypeNumberRegister, regT0);
520 addPtr(tagTypeNumberRegister, regT1);
521 movePtrToDouble(regT0, fpRegT0);
522 movePtrToDouble(regT1, fpRegT1);
524 emitJumpSlowToHot(branchDouble(condition, fpRegT0, fpRegT1), target);
526 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jless));
534 JITStubCall stubCall(this, stub);
535 stubCall.addArgument(regT0);
536 stubCall.addArgument(regT1);
538 emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);
542 void JIT::emit_op_bitand(Instruction* currentInstruction)
544 unsigned result = currentInstruction[1].u.operand;
545 unsigned op1 = currentInstruction[2].u.operand;
546 unsigned op2 = currentInstruction[3].u.operand;
548 if (isOperandConstantImmediateInt(op1)) {
549 emitGetVirtualRegister(op2, regT0);
550 emitJumpSlowCaseIfNotImmediateInteger(regT0);
551 int32_t imm = getConstantOperandImmediateInt(op1);
552 andPtr(Imm32(imm), regT0);
554 emitFastArithIntToImmNoCheck(regT0, regT0);
555 } else if (isOperandConstantImmediateInt(op2)) {
556 emitGetVirtualRegister(op1, regT0);
557 emitJumpSlowCaseIfNotImmediateInteger(regT0);
558 int32_t imm = getConstantOperandImmediateInt(op2);
559 andPtr(Imm32(imm), regT0);
561 emitFastArithIntToImmNoCheck(regT0, regT0);
563 emitGetVirtualRegisters(op1, regT0, op2, regT1);
564 andPtr(regT1, regT0);
565 emitJumpSlowCaseIfNotImmediateInteger(regT0);
567 emitPutVirtualRegister(result);
570 void JIT::emitSlow_op_bitand(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
572 unsigned result = currentInstruction[1].u.operand;
573 unsigned op1 = currentInstruction[2].u.operand;
574 unsigned op2 = currentInstruction[3].u.operand;
577 if (isOperandConstantImmediateInt(op1)) {
578 JITStubCall stubCall(this, cti_op_bitand);
579 stubCall.addArgument(op1, regT2);
580 stubCall.addArgument(regT0);
581 stubCall.call(result);
582 } else if (isOperandConstantImmediateInt(op2)) {
583 JITStubCall stubCall(this, cti_op_bitand);
584 stubCall.addArgument(regT0);
585 stubCall.addArgument(op2, regT2);
586 stubCall.call(result);
588 JITStubCall stubCall(this, cti_op_bitand);
589 stubCall.addArgument(op1, regT2);
590 stubCall.addArgument(regT1);
591 stubCall.call(result);
595 void JIT::emit_op_post_inc(Instruction* currentInstruction)
597 unsigned result = currentInstruction[1].u.operand;
598 unsigned srcDst = currentInstruction[2].u.operand;
600 emitGetVirtualRegister(srcDst, regT0);
602 emitJumpSlowCaseIfNotImmediateInteger(regT0);
603 addSlowCase(branchAdd32(Overflow, TrustedImm32(1), regT1));
604 emitFastArithIntToImmNoCheck(regT1, regT1);
605 emitPutVirtualRegister(srcDst, regT1);
606 emitPutVirtualRegister(result);
609 void JIT::emitSlow_op_post_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
611 unsigned result = currentInstruction[1].u.operand;
612 unsigned srcDst = currentInstruction[2].u.operand;
616 JITStubCall stubCall(this, cti_op_post_inc);
617 stubCall.addArgument(regT0);
618 stubCall.addArgument(Imm32(srcDst));
619 stubCall.call(result);
622 void JIT::emit_op_post_dec(Instruction* currentInstruction)
624 unsigned result = currentInstruction[1].u.operand;
625 unsigned srcDst = currentInstruction[2].u.operand;
627 emitGetVirtualRegister(srcDst, regT0);
629 emitJumpSlowCaseIfNotImmediateInteger(regT0);
630 addSlowCase(branchSub32(Zero, TrustedImm32(1), regT1));
631 emitFastArithIntToImmNoCheck(regT1, regT1);
632 emitPutVirtualRegister(srcDst, regT1);
633 emitPutVirtualRegister(result);
636 void JIT::emitSlow_op_post_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
638 unsigned result = currentInstruction[1].u.operand;
639 unsigned srcDst = currentInstruction[2].u.operand;
643 JITStubCall stubCall(this, cti_op_post_dec);
644 stubCall.addArgument(regT0);
645 stubCall.addArgument(Imm32(srcDst));
646 stubCall.call(result);
649 void JIT::emit_op_pre_inc(Instruction* currentInstruction)
651 unsigned srcDst = currentInstruction[1].u.operand;
653 emitGetVirtualRegister(srcDst, regT0);
654 emitJumpSlowCaseIfNotImmediateInteger(regT0);
655 addSlowCase(branchAdd32(Overflow, TrustedImm32(1), regT0));
656 emitFastArithIntToImmNoCheck(regT0, regT0);
657 emitPutVirtualRegister(srcDst);
660 void JIT::emitSlow_op_pre_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
662 unsigned srcDst = currentInstruction[1].u.operand;
664 Jump notImm = getSlowCase(iter);
666 emitGetVirtualRegister(srcDst, regT0);
668 JITStubCall stubCall(this, cti_op_pre_inc);
669 stubCall.addArgument(regT0);
670 stubCall.call(srcDst);
673 void JIT::emit_op_pre_dec(Instruction* currentInstruction)
675 unsigned srcDst = currentInstruction[1].u.operand;
677 emitGetVirtualRegister(srcDst, regT0);
678 emitJumpSlowCaseIfNotImmediateInteger(regT0);
679 addSlowCase(branchSub32(Zero, TrustedImm32(1), regT0));
680 emitFastArithIntToImmNoCheck(regT0, regT0);
681 emitPutVirtualRegister(srcDst);
684 void JIT::emitSlow_op_pre_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
686 unsigned srcDst = currentInstruction[1].u.operand;
688 Jump notImm = getSlowCase(iter);
690 emitGetVirtualRegister(srcDst, regT0);
692 JITStubCall stubCall(this, cti_op_pre_dec);
693 stubCall.addArgument(regT0);
694 stubCall.call(srcDst);
697 /* ------------------------------ BEGIN: OP_MOD ------------------------------ */
699 #if CPU(X86) || CPU(X86_64) || CPU(MIPS)
701 void JIT::emit_op_mod(Instruction* currentInstruction)
703 unsigned result = currentInstruction[1].u.operand;
704 unsigned op1 = currentInstruction[2].u.operand;
705 unsigned op2 = currentInstruction[3].u.operand;
707 #if CPU(X86) || CPU(X86_64)
708 // Make sure registers are correct for x86 IDIV instructions.
709 ASSERT(regT0 == X86Registers::eax);
710 ASSERT(regT1 == X86Registers::edx);
711 ASSERT(regT2 == X86Registers::ecx);
714 emitGetVirtualRegisters(op1, regT0, op2, regT2);
715 emitJumpSlowCaseIfNotImmediateInteger(regT0);
716 emitJumpSlowCaseIfNotImmediateInteger(regT2);
718 addSlowCase(branchPtr(Equal, regT2, TrustedImmPtr(JSValue::encode(jsNumber(0)))));
720 m_assembler.idivl_r(regT2);
721 emitFastArithReTagImmediate(regT1, regT0);
722 emitPutVirtualRegister(result);
725 void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
727 unsigned result = currentInstruction[1].u.operand;
732 JITStubCall stubCall(this, cti_op_mod);
733 stubCall.addArgument(regT0);
734 stubCall.addArgument(regT2);
735 stubCall.call(result);
738 #else // CPU(X86) || CPU(X86_64) || CPU(MIPS)
740 void JIT::emit_op_mod(Instruction* currentInstruction)
742 unsigned result = currentInstruction[1].u.operand;
743 unsigned op1 = currentInstruction[2].u.operand;
744 unsigned op2 = currentInstruction[3].u.operand;
746 JITStubCall stubCall(this, cti_op_mod);
747 stubCall.addArgument(op1, regT2);
748 stubCall.addArgument(op2, regT2);
749 stubCall.call(result);
752 void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
754 #if ENABLE(JIT_USE_SOFT_MODULO)
755 unsigned result = currentInstruction[1].u.operand;
756 unsigned op1 = currentInstruction[2].u.operand;
757 unsigned op2 = currentInstruction[3].u.operand;
761 JITStubCall stubCall(this, cti_op_mod);
762 stubCall.addArgument(op1, regT2);
763 stubCall.addArgument(op2, regT2);
764 stubCall.call(result);
766 ASSERT_NOT_REACHED();
770 #endif // CPU(X86) || CPU(X86_64)
772 /* ------------------------------ END: OP_MOD ------------------------------ */
774 /* ------------------------------ BEGIN: USE(JSVALUE64) (OP_ADD, OP_SUB, OP_MUL) ------------------------------ */
776 void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned, unsigned op1, unsigned op2, OperandTypes)
778 emitGetVirtualRegisters(op1, regT0, op2, regT1);
779 emitJumpSlowCaseIfNotImmediateInteger(regT0);
780 emitJumpSlowCaseIfNotImmediateInteger(regT1);
781 if (opcodeID == op_add)
782 addSlowCase(branchAdd32(Overflow, regT1, regT0));
783 else if (opcodeID == op_sub)
784 addSlowCase(branchSub32(Overflow, regT1, regT0));
786 ASSERT(opcodeID == op_mul);
787 addSlowCase(branchMul32(Overflow, regT1, regT0));
788 addSlowCase(branchTest32(Zero, regT0));
790 emitFastArithIntToImmNoCheck(regT0, regT0);
793 void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>::iterator& iter, unsigned result, unsigned op1, unsigned op2, OperandTypes types, bool op1HasImmediateIntFastCase, bool op2HasImmediateIntFastCase)
795 // We assume that subtracting TagTypeNumber is equivalent to adding DoubleEncodeOffset.
796 COMPILE_ASSERT(((TagTypeNumber + DoubleEncodeOffset) == 0), TagTypeNumber_PLUS_DoubleEncodeOffset_EQUALS_0);
800 if (op1HasImmediateIntFastCase) {
801 notImm2 = getSlowCase(iter);
802 } else if (op2HasImmediateIntFastCase) {
803 notImm1 = getSlowCase(iter);
805 notImm1 = getSlowCase(iter);
806 notImm2 = getSlowCase(iter);
809 linkSlowCase(iter); // Integer overflow case - we could handle this in JIT code, but this is likely rare.
810 if (opcodeID == op_mul && !op1HasImmediateIntFastCase && !op2HasImmediateIntFastCase) // op_mul has an extra slow case to handle 0 * negative number.
812 emitGetVirtualRegister(op1, regT0);
814 Label stubFunctionCall(this);
815 JITStubCall stubCall(this, opcodeID == op_add ? cti_op_add : opcodeID == op_sub ? cti_op_sub : cti_op_mul);
816 if (op1HasImmediateIntFastCase || op2HasImmediateIntFastCase) {
817 emitGetVirtualRegister(op1, regT0);
818 emitGetVirtualRegister(op2, regT1);
820 stubCall.addArgument(regT0);
821 stubCall.addArgument(regT1);
822 stubCall.call(result);
825 if (op1HasImmediateIntFastCase) {
827 if (!types.second().definitelyIsNumber())
828 emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
829 emitGetVirtualRegister(op1, regT1);
830 convertInt32ToDouble(regT1, fpRegT1);
831 addPtr(tagTypeNumberRegister, regT0);
832 movePtrToDouble(regT0, fpRegT2);
833 } else if (op2HasImmediateIntFastCase) {
835 if (!types.first().definitelyIsNumber())
836 emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
837 emitGetVirtualRegister(op2, regT1);
838 convertInt32ToDouble(regT1, fpRegT1);
839 addPtr(tagTypeNumberRegister, regT0);
840 movePtrToDouble(regT0, fpRegT2);
842 // if we get here, eax is not an int32, edx not yet checked.
844 if (!types.first().definitelyIsNumber())
845 emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
846 if (!types.second().definitelyIsNumber())
847 emitJumpIfNotImmediateNumber(regT1).linkTo(stubFunctionCall, this);
848 addPtr(tagTypeNumberRegister, regT0);
849 movePtrToDouble(regT0, fpRegT1);
850 Jump op2isDouble = emitJumpIfNotImmediateInteger(regT1);
851 convertInt32ToDouble(regT1, fpRegT2);
852 Jump op2wasInteger = jump();
854 // if we get here, eax IS an int32, edx is not.
856 if (!types.second().definitelyIsNumber())
857 emitJumpIfNotImmediateNumber(regT1).linkTo(stubFunctionCall, this);
858 convertInt32ToDouble(regT0, fpRegT1);
859 op2isDouble.link(this);
860 addPtr(tagTypeNumberRegister, regT1);
861 movePtrToDouble(regT1, fpRegT2);
862 op2wasInteger.link(this);
865 if (opcodeID == op_add)
866 addDouble(fpRegT2, fpRegT1);
867 else if (opcodeID == op_sub)
868 subDouble(fpRegT2, fpRegT1);
869 else if (opcodeID == op_mul)
870 mulDouble(fpRegT2, fpRegT1);
872 ASSERT(opcodeID == op_div);
873 divDouble(fpRegT2, fpRegT1);
875 moveDoubleToPtr(fpRegT1, regT0);
876 subPtr(tagTypeNumberRegister, regT0);
877 emitPutVirtualRegister(result, regT0);
882 void JIT::emit_op_add(Instruction* currentInstruction)
884 unsigned result = currentInstruction[1].u.operand;
885 unsigned op1 = currentInstruction[2].u.operand;
886 unsigned op2 = currentInstruction[3].u.operand;
887 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
889 if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
890 JITStubCall stubCall(this, cti_op_add);
891 stubCall.addArgument(op1, regT2);
892 stubCall.addArgument(op2, regT2);
893 stubCall.call(result);
897 if (isOperandConstantImmediateInt(op1)) {
898 emitGetVirtualRegister(op2, regT0);
899 emitJumpSlowCaseIfNotImmediateInteger(regT0);
900 addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op1)), regT0));
901 emitFastArithIntToImmNoCheck(regT0, regT0);
902 } else if (isOperandConstantImmediateInt(op2)) {
903 emitGetVirtualRegister(op1, regT0);
904 emitJumpSlowCaseIfNotImmediateInteger(regT0);
905 addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op2)), regT0));
906 emitFastArithIntToImmNoCheck(regT0, regT0);
908 compileBinaryArithOp(op_add, result, op1, op2, types);
910 emitPutVirtualRegister(result);
913 void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
915 unsigned result = currentInstruction[1].u.operand;
916 unsigned op1 = currentInstruction[2].u.operand;
917 unsigned op2 = currentInstruction[3].u.operand;
918 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
920 if (!types.first().mightBeNumber() || !types.second().mightBeNumber())
923 bool op1HasImmediateIntFastCase = isOperandConstantImmediateInt(op1);
924 bool op2HasImmediateIntFastCase = !op1HasImmediateIntFastCase && isOperandConstantImmediateInt(op2);
925 compileBinaryArithOpSlowCase(op_add, iter, result, op1, op2, types, op1HasImmediateIntFastCase, op2HasImmediateIntFastCase);
// Fast-path code generation for op_mul: an int32 multiply with overflow check
// when one operand is a positive constant int, otherwise the generic binary
// arithmetic emitter.
void JIT::emit_op_mul(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    // For now, only plant a fast int case if the constant operand is greater than zero
    // (a non-positive constant can produce a -0 double result, which an int32 cannot
    // represent — see the extra slow case noted in the op_mul slow path).
    int32_t value;
    if (isOperandConstantImmediateInt(op1) && ((value = getConstantOperandImmediateInt(op1)) > 0)) {
        // op1 is a positive constant int: multiply it into op2's value in regT0.
        emitGetVirtualRegister(op2, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        // Overflow of the 32-bit multiply bails to the slow case.
        addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT0));
        emitFastArithReTagImmediate(regT0, regT0);
    } else if (isOperandConstantImmediateInt(op2) && ((value = getConstantOperandImmediateInt(op2)) > 0)) {
        // Symmetric case: op2 is the positive constant int.
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT0));
        emitFastArithReTagImmediate(regT0, regT0);
    } else
        // No usable constant operand: emit the generic int/double arithmetic code.
        compileBinaryArithOp(op_mul, result, op1, op2, types);

    emitPutVirtualRegister(result);
}
953 void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
955 unsigned result = currentInstruction[1].u.operand;
956 unsigned op1 = currentInstruction[2].u.operand;
957 unsigned op2 = currentInstruction[3].u.operand;
958 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
960 bool op1HasImmediateIntFastCase = isOperandConstantImmediateInt(op1) && getConstantOperandImmediateInt(op1) > 0;
961 bool op2HasImmediateIntFastCase = !op1HasImmediateIntFastCase && isOperandConstantImmediateInt(op2) && getConstantOperandImmediateInt(op2) > 0;
962 compileBinaryArithOpSlowCase(op_mul, iter, result, op1, op2, types, op1HasImmediateIntFastCase, op2HasImmediateIntFastCase);
// Fast-path code generation for op_div: load both operands as doubles, divide,
// and box the double result. Division is always performed in floating point;
// only non-number operands fall through to the slow path.
void JIT::emit_op_div(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    // Load op1 into fpRegT0 as a double.
    if (isOperandConstantImmediateDouble(op1)) {
        // Boxed constant double: strip the number tag (re-applied via subPtr at
        // the end) and move the raw bits to the FPU.
        emitGetVirtualRegister(op1, regT0);
        addPtr(tagTypeNumberRegister, regT0);
        movePtrToDouble(regT0, fpRegT0);
    } else if (isOperandConstantImmediateInt(op1)) {
        emitLoadInt32ToDouble(op1, fpRegT0);
    } else {
        emitGetVirtualRegister(op1, regT0);
        if (!types.first().definitelyIsNumber())
            emitJumpSlowCaseIfNotImmediateNumber(regT0);
        // Immediate int -> convert to double; otherwise treat as a boxed double.
        Jump notInt = emitJumpIfNotImmediateInteger(regT0);
        convertInt32ToDouble(regT0, fpRegT0);
        Jump skipDoubleLoad = jump();
        notInt.link(this);
        addPtr(tagTypeNumberRegister, regT0);
        movePtrToDouble(regT0, fpRegT0);
        skipDoubleLoad.link(this);
    }

    // Load op2 into fpRegT1 as a double — same three cases as op1.
    if (isOperandConstantImmediateDouble(op2)) {
        emitGetVirtualRegister(op2, regT1);
        addPtr(tagTypeNumberRegister, regT1);
        movePtrToDouble(regT1, fpRegT1);
    } else if (isOperandConstantImmediateInt(op2)) {
        emitLoadInt32ToDouble(op2, fpRegT1);
    } else {
        emitGetVirtualRegister(op2, regT1);
        if (!types.second().definitelyIsNumber())
            emitJumpSlowCaseIfNotImmediateNumber(regT1);
        Jump notInt = emitJumpIfNotImmediateInteger(regT1);
        convertInt32ToDouble(regT1, fpRegT1);
        Jump skipDoubleLoad = jump();
        notInt.link(this);
        addPtr(tagTypeNumberRegister, regT1);
        movePtrToDouble(regT1, fpRegT1);
        skipDoubleLoad.link(this);
    }
    // Divide; result is left in fpRegT0 (boxed and stored below).
    divDouble(fpRegT1, fpRegT0);

    // Double result: move the raw double bits back to a GPR and re-apply the
    // number tag (inverse of the addPtr un-tagging above).
    moveDoubleToPtr(fpRegT0, regT0);
    subPtr(tagTypeNumberRegister, regT0);

    emitPutVirtualRegister(dst, regT0);
}
// Slow path for op_div: link the not-a-number slow cases planted by the fast
// path (in the same order they were emitted) and fall back to the C++ stub.
void JIT::emitSlow_op_div(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
    // If both operands are statically known numbers, the fast path planted no
    // slow cases at all — reaching here would mean the iterator is out of sync.
    if (types.first().definitelyIsNumber() && types.second().definitelyIsNumber()) {
#ifndef NDEBUG
        breakpoint();
#endif
        return;
    }
    // The fast path emitted a not-a-number check only for a non-constant
    // operand whose type was not definitely a number; link those checks here,
    // op1 first, matching emission order.
    if (!isOperandConstantImmediateDouble(op1) && !isOperandConstantImmediateInt(op1)) {
        if (!types.first().definitelyIsNumber())
            linkSlowCase(iter);
    }
    if (!isOperandConstantImmediateDouble(op2) && !isOperandConstantImmediateInt(op2)) {
        if (!types.second().definitelyIsNumber())
            linkSlowCase(iter);
    }
    // Fall back to the cti_op_div stub for non-number operands.
    // NOTE(review): a comment here previously described an extra slow case for
    // (op1 * -N) producing -0 — that text describes op_mul, not op_div; the div
    // fast path above registers no such extra case. Confirm against op_mul.
    JITStubCall stubCall(this, cti_op_div);
    stubCall.addArgument(op1, regT2);
    stubCall.addArgument(op2, regT2);
    stubCall.call(result);
}
1045 void JIT::emit_op_sub(Instruction* currentInstruction)
1047 unsigned result = currentInstruction[1].u.operand;
1048 unsigned op1 = currentInstruction[2].u.operand;
1049 unsigned op2 = currentInstruction[3].u.operand;
1050 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
1052 compileBinaryArithOp(op_sub, result, op1, op2, types);
1053 emitPutVirtualRegister(result);
1056 void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
1058 unsigned result = currentInstruction[1].u.operand;
1059 unsigned op1 = currentInstruction[2].u.operand;
1060 unsigned op2 = currentInstruction[3].u.operand;
1061 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
1063 compileBinaryArithOpSlowCase(op_sub, iter, result, op1, op2, types, false, false);
1066 /* ------------------------------ END: OP_ADD, OP_SUB, OP_MUL ------------------------------ */
1068 #endif // USE(JSVALUE64)
1072 #endif // ENABLE(JIT)