Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(449)

Side by Side Diff: src/x64/code-stubs-x64.cc

Issue 6879081: Added type recording for unary minus and unary bitwise negation. Note that the (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: Incorporated Florian's suggested changes Created 9 years, 8 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
1 // Copyright 2011 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 304 matching lines...) Expand 10 before | Expand all | Expand 10 after
315 Register first, 315 Register first,
316 Register second, 316 Register second,
317 Register scratch1, 317 Register scratch1,
318 Register scratch2, 318 Register scratch2,
319 Register scratch3, 319 Register scratch3,
320 Label* on_success, 320 Label* on_success,
321 Label* on_not_smis); 321 Label* on_not_smis);
322 }; 322 };
323 323
324 324
// Get the integer part of a heap number.
// Overwrites the contents of rdi, rbx and rcx. Result cannot be rdi or rbx.
// |source| holds a tagged HeapNumber pointer. On exit |result| holds the
// truncated integer value; for exponents 63..83 only the bits below 2^32
// are computed (callers such as BIT_NOT only use the low 32 bits), and for
// exponents outside 0..83 the result is zero.
void IntegerConvert(MacroAssembler* masm,
                    Register result,
                    Register source) {
  // Result may be rcx. If result and source are the same register, source will
  // be overwritten.
  ASSERT(!result.is(rdi) && !result.is(rbx));
  // TODO(lrn): When type info reaches here, if value is a 32-bit integer, use
  // cvttsd2si (32-bit version) directly.
  Register double_exponent = rbx;
  Register double_value = rdi;
  NearLabel done, exponent_63_plus;
  // Get double and extract exponent.
  __ movq(double_value, FieldOperand(source, HeapNumber::kValueOffset));
  // Clear result preemptively, in case we need to return zero.
  __ xorl(result, result);
  __ movq(xmm0, double_value);  // Save copy in xmm0 in case we need it there.
  // Double to remove sign bit, shift exponent down to least significant bits,
  // and subtract bias to get the unshifted, unbiased exponent.
  __ lea(double_exponent, Operand(double_value, double_value, times_1, 0));
  __ shr(double_exponent, Immediate(64 - HeapNumber::kExponentBits));
  __ subl(double_exponent, Immediate(HeapNumber::kExponentBias));
  // Check whether the exponent is too big for a 63 bit unsigned integer.
  __ cmpl(double_exponent, Immediate(63));
  __ j(above_equal, &exponent_63_plus);
  // Handle exponent range 0..62: the value fits in a signed 64-bit integer,
  // so hardware truncation gives the exact result.
  __ cvttsd2siq(result, xmm0);
  __ jmp(&done);

  __ bind(&exponent_63_plus);
  // Exponent negative or 63+.
  __ cmpl(double_exponent, Immediate(83));
  // If exponent negative or above 83, number contains no significant bits in
  // the range 0..2^31, so result is zero. (result was already cleared above,
  // whichever register it is.)
  __ j(above, &done);

  // Exponent in range 63..83.
  // Mantissa * 2^exponent contains bits in the range 2^0..2^31, namely
  // the least significant exponent-52 bits.

  // Negate low bits of mantissa if value is negative.
  __ addq(double_value, double_value);  // Move sign bit to carry.
  __ sbbl(result, result);  // And convert carry to -1 in result register.
  // If the value was negative, compute (mantissa - 1) ^ -1, i.e. the two's
  // complement of the low bits; otherwise (mantissa - 0) ^ 0, leaving them
  // unchanged.
  __ addl(double_value, result);
  // Do xor in opposite directions depending on where we want the result
  // (depending on whether result is rcx or not): the shift below needs its
  // count in cl, so rcx must be free to hold the shift amount.

  if (result.is(rcx)) {
    __ xorl(double_value, result);
    // Left shift mantissa by (exponent - mantissabits - 1) to save the
    // bits that have positional values below 2^32 (the extra -1 comes from the
    // doubling done above to move the sign bit into the carry flag).
    __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
    __ shll_cl(double_value);
    __ movl(result, double_value);
  } else {
    // As the then-branch, but move double-value to result before shifting.
    __ xorl(result, double_value);
    __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
    __ shll_cl(result);
  }

  __ bind(&done);
}
391
392
393 Handle<Code> GetTypeRecordingUnaryOpStub(int key,
394 TRUnaryOpIC::TypeInfo type_info) {
395 TypeRecordingUnaryOpStub stub(key, type_info);
396 return stub.GetCode();
397 }
398
399
400 void TypeRecordingUnaryOpStub::Generate(MacroAssembler* masm) {
401 switch (operand_type_) {
402 case TRUnaryOpIC::UNINITIALIZED:
403 GenerateTypeTransition(masm);
404 break;
405 case TRUnaryOpIC::SMI:
406 GenerateSmiStub(masm);
407 break;
408 case TRUnaryOpIC::HEAP_NUMBER:
409 GenerateHeapNumberStub(masm);
410 break;
411 case TRUnaryOpIC::GENERIC:
412 GenerateGenericStub(masm);
413 break;
414 }
415 }
416
417
// Emit a tail call into the runtime that patches the calling site to a
// stub specialized for the operand type just observed, then performs the
// operation and returns its result to the stub's caller.
void TypeRecordingUnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
  __ pop(rcx);  // Save return address.
  __ push(rax);
  // The single unary operand is now on top of the stack.
  // Push this stub's key. Although the operation and the type info are
  // encoded into the key, the encoding is opaque, so push them too.
  __ Push(Smi::FromInt(MinorKey()));
  __ Push(Smi::FromInt(op_));
  __ Push(Smi::FromInt(operand_type_));

  __ push(rcx);  // Push return address.

  // Patch the caller to an appropriate specialized stub and return the
  // operation result to the caller of the stub.
  __ TailCallExternalReference(
      ExternalReference(IC_Utility(IC::kTypeRecordingUnaryOp_Patch),
                        masm->isolate()),
      4,  // Four arguments: operand, key, op, operand type.
      1);
}
438
439
440 // TODO(svenpanne): Use virtual functions instead of switch.
441 void TypeRecordingUnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
442 switch (op_) {
443 case Token::SUB:
444 GenerateSmiStubSub(masm);
445 break;
446 case Token::BIT_NOT:
447 GenerateSmiStubBitNot(masm);
448 break;
449 default:
450 UNREACHABLE();
451 }
452 }
453
454
// SMI-specialized stub for unary minus: fast smi path, and on any failure
// (operand not a smi, or negation cannot produce a smi) fall into a type
// transition so the IC re-specializes.
void TypeRecordingUnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
  NearLabel non_smi;
  Label slow;
  GenerateSmiCodeSub(masm, &non_smi, &slow);
  // Both failure labels share the same fallback: record new type feedback.
  __ bind(&non_smi);
  __ bind(&slow);
  GenerateTypeTransition(masm);
}
463
464
// SMI-specialized stub for bitwise NOT: fast smi path; non-smi operands
// fall into a type transition so the IC re-specializes.
void TypeRecordingUnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
  NearLabel non_smi;
  GenerateSmiCodeBitNot(masm, &non_smi);
  __ bind(&non_smi);
  GenerateTypeTransition(masm);
}
471
472
// Emit the smi fast path for unary minus on the operand in rax.
// Jumps to |non_smi| if rax is not a smi, to |slow| if SmiNeg does not
// produce a smi result; otherwise returns with the negated smi in rax.
void TypeRecordingUnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
                                                  NearLabel* non_smi,
                                                  Label* slow) {
  NearLabel done;
  __ JumpIfNotSmi(rax, non_smi);
  __ SmiNeg(rax, rax, &done);  // Jumps to done on a valid smi result.
  __ jmp(slow);  // Fell through: negation did not yield a smi.
  __ bind(&done);
  __ ret(0);
}
483
484
// Emit the smi fast path for bitwise NOT on the operand in rax.
// Jumps to |non_smi| if rax is not a smi; otherwise returns with the
// complemented smi in rax. (Smi NOT can never overflow, so no slow label.)
void TypeRecordingUnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
                                                     NearLabel* non_smi) {
  __ JumpIfNotSmi(rax, non_smi);
  __ SmiNot(rax, rax);
  __ ret(0);
}
491
492
493 // TODO(svenpanne): Use virtual functions instead of switch.
494 void TypeRecordingUnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
495 switch (op_) {
496 case Token::SUB:
497 GenerateHeapNumberStubSub(masm);
498 break;
499 case Token::BIT_NOT:
500 GenerateHeapNumberStubBitNot(masm);
501 break;
502 default:
503 UNREACHABLE();
504 }
505 }
506
507
// Heap-number-specialized stub for unary minus: try the smi path first,
// then the heap-number path; anything else triggers a type transition.
void TypeRecordingUnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) {
  NearLabel non_smi;
  Label slow;
  GenerateSmiCodeSub(masm, &non_smi, &slow);
  __ bind(&non_smi);
  GenerateHeapNumberCodeSub(masm, &slow);
  __ bind(&slow);
  GenerateTypeTransition(masm);
}
517
518
// Heap-number-specialized stub for bitwise NOT: try the smi path first,
// then the heap-number path; anything else triggers a type transition.
void TypeRecordingUnaryOpStub::GenerateHeapNumberStubBitNot(
    MacroAssembler* masm) {
  NearLabel non_smi;
  Label slow;
  GenerateSmiCodeBitNot(masm, &non_smi);
  __ bind(&non_smi);
  GenerateHeapNumberCodeBitNot(masm, &slow);
  __ bind(&slow);
  GenerateTypeTransition(masm);
}
529
530
// Emit the heap-number path for unary minus on the operand in rax.
// Jumps to |slow| if rax is not a heap number; otherwise negates the value
// by flipping the IEEE 754 sign bit and returns the result in rax, either
// mutating the operand in place (UNARY_OVERWRITE) or in a fresh number.
void TypeRecordingUnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
                                                         Label* slow) {
  // Check if the operand is a heap number.
  __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
                 Heap::kHeapNumberMapRootIndex);
  __ j(not_equal, slow);

  // Operand is a float, negate its value by flipping sign bit.
  __ movq(rdx, FieldOperand(rax, HeapNumber::kValueOffset));
  __ Set(kScratchRegister, 0x01);
  __ shl(kScratchRegister, Immediate(63));  // kScratchRegister = sign-bit mask.
  __ xor_(rdx, kScratchRegister);  // Flip sign.
  // rdx is value to store.
  if (mode_ == UNARY_OVERWRITE) {
    // The operand may be clobbered: write the negated bits back into it.
    __ movq(FieldOperand(rax, HeapNumber::kValueOffset), rdx);
  } else {
    Label slow_allocate_heapnumber, heapnumber_allocated;
    __ AllocateHeapNumber(rcx, rbx, &slow_allocate_heapnumber);
    __ jmp(&heapnumber_allocated);

    __ bind(&slow_allocate_heapnumber);
    // Inline allocation failed; call the runtime. The raw double in rdx must
    // survive the GC-safe call, so park it on the stack inside a frame.
    __ EnterInternalFrame();
    __ push(rdx);
    __ CallRuntime(Runtime::kNumberAlloc, 0);
    __ movq(rcx, rax);
    __ pop(rdx);
    __ LeaveInternalFrame();

    __ bind(&heapnumber_allocated);
    // rcx: allocated 'empty' number
    __ movq(FieldOperand(rcx, HeapNumber::kValueOffset), rdx);
    __ movq(rax, rcx);
  }
  __ ret(0);
}
566
567
// Emit the heap-number path for bitwise NOT on the operand in rax.
// Jumps to |slow| if rax is not a heap number; otherwise truncates the
// value to an integer, complements it, and returns the result as a smi.
void TypeRecordingUnaryOpStub::GenerateHeapNumberCodeBitNot(
    MacroAssembler* masm,
    Label* slow) {
  // Check if the operand is a heap number.
  __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
                 Heap::kHeapNumberMapRootIndex);
  __ j(not_equal, slow);

  // Convert the heap number in rax to an untagged integer, also in rax
  // (clobbers rdi, rbx and rcx).
  IntegerConvert(masm, rax, rax);

  // Do the bitwise operation and smi tag the result.
  __ notl(rax);
  __ Integer32ToSmi(rax, rax);
  __ ret(0);
}
584
585
586 // TODO(svenpanne): Use virtual functions instead of switch.
587 void TypeRecordingUnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
588 switch (op_) {
589 case Token::SUB:
590 GenerateGenericStubSub(masm);
591 break;
592 case Token::BIT_NOT:
593 GenerateGenericStubBitNot(masm);
594 break;
595 default:
596 UNREACHABLE();
597 }
598 }
599
600
// Generic stub for unary minus: smi path, heap-number path, and finally the
// JavaScript builtin fallback (no further type transitions from GENERIC).
void TypeRecordingUnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
  NearLabel non_smi;
  Label slow;
  GenerateSmiCodeSub(masm, &non_smi, &slow);
  __ bind(&non_smi);
  GenerateHeapNumberCodeSub(masm, &slow);
  __ bind(&slow);
  GenerateGenericCodeFallback(masm);
}
610
611
// Generic stub for bitwise NOT: smi path, heap-number path, and finally the
// JavaScript builtin fallback (no further type transitions from GENERIC).
void TypeRecordingUnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
  NearLabel non_smi;
  Label slow;
  GenerateSmiCodeBitNot(masm, &non_smi);
  __ bind(&non_smi);
  GenerateHeapNumberCodeBitNot(masm, &slow);
  __ bind(&slow);
  GenerateGenericCodeFallback(masm);
}
621
622
// Last resort: tail-call the generic JavaScript builtin for the operation,
// which handles operands of any type (including conversions via valueOf).
void TypeRecordingUnaryOpStub::GenerateGenericCodeFallback(
    MacroAssembler* masm) {
  // Handle the slow case by jumping to the JavaScript builtin.
  __ pop(rcx);  // pop return address
  __ push(rax);  // The operand becomes the builtin's argument.
  __ push(rcx);  // push return address
  switch (op_) {
    case Token::SUB:
      __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
      break;
    case Token::BIT_NOT:
      __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
      break;
    default:
      UNREACHABLE();
  }
}
640
641
642 const char* TypeRecordingUnaryOpStub::GetName() {
643 if (name_ != NULL) return name_;
644 const int kMaxNameLength = 100;
645 name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
646 kMaxNameLength);
647 if (name_ == NULL) return "OOM";
648 const char* op_name = Token::Name(op_);
649 const char* overwrite_name;
650 switch (mode_) {
651 case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
652 case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
653 }
654
655 OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
656 "TypeRecordingUnaryOpStub_%s_%s_%s",
657 op_name,
658 overwrite_name,
659 TRUnaryOpIC::GetName(operand_type_));
660 return name_;
661 }
662
663
325 Handle<Code> GetTypeRecordingBinaryOpStub(int key, 664 Handle<Code> GetTypeRecordingBinaryOpStub(int key,
326 TRBinaryOpIC::TypeInfo type_info, 665 TRBinaryOpIC::TypeInfo type_info,
327 TRBinaryOpIC::TypeInfo result_type_info) { 666 TRBinaryOpIC::TypeInfo result_type_info) {
328 TypeRecordingBinaryOpStub stub(key, type_info, result_type_info); 667 TypeRecordingBinaryOpStub stub(key, type_info, result_type_info);
329 return stub.GetCode(); 668 return stub.GetCode();
330 } 669 }
331 670
332 671
333 void TypeRecordingBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { 672 void TypeRecordingBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
334 __ pop(rcx); // Save return address. 673 __ pop(rcx); // Save return address.
(...skipping 857 matching lines...) Expand 10 before | Expand all | Expand 10 after
1192 __ bind(&done); 1531 __ bind(&done);
1193 } else { 1532 } else {
1194 ASSERT(type_ == TranscendentalCache::LOG); 1533 ASSERT(type_ == TranscendentalCache::LOG);
1195 __ fldln2(); 1534 __ fldln2();
1196 __ fxch(); 1535 __ fxch();
1197 __ fyl2x(); 1536 __ fyl2x();
1198 } 1537 }
1199 } 1538 }
1200 1539
1201 1540
// Get the integer part of a heap number.
// Overwrites the contents of rdi, rbx and rcx. Result cannot be rdi or rbx.
// |source| holds a tagged HeapNumber pointer. On exit |result| holds the
// truncated integer value; for exponents 63..83 only the bits below 2^32
// are computed (bitwise-op callers only use the low 32 bits), and for
// exponents outside 0..83 the result is zero.
void IntegerConvert(MacroAssembler* masm,
                    Register result,
                    Register source) {
  // Result may be rcx. If result and source are the same register, source will
  // be overwritten.
  ASSERT(!result.is(rdi) && !result.is(rbx));
  // TODO(lrn): When type info reaches here, if value is a 32-bit integer, use
  // cvttsd2si (32-bit version) directly.
  Register double_exponent = rbx;
  Register double_value = rdi;
  NearLabel done, exponent_63_plus;
  // Get double and extract exponent.
  __ movq(double_value, FieldOperand(source, HeapNumber::kValueOffset));
  // Clear result preemptively, in case we need to return zero.
  __ xorl(result, result);
  __ movq(xmm0, double_value);  // Save copy in xmm0 in case we need it there.
  // Double to remove sign bit, shift exponent down to least significant bits,
  // and subtract bias to get the unshifted, unbiased exponent.
  __ lea(double_exponent, Operand(double_value, double_value, times_1, 0));
  __ shr(double_exponent, Immediate(64 - HeapNumber::kExponentBits));
  __ subl(double_exponent, Immediate(HeapNumber::kExponentBias));
  // Check whether the exponent is too big for a 63 bit unsigned integer.
  __ cmpl(double_exponent, Immediate(63));
  __ j(above_equal, &exponent_63_plus);
  // Handle exponent range 0..62: the value fits in a signed 64-bit integer,
  // so hardware truncation gives the exact result.
  __ cvttsd2siq(result, xmm0);
  __ jmp(&done);

  __ bind(&exponent_63_plus);
  // Exponent negative or 63+.
  __ cmpl(double_exponent, Immediate(83));
  // If exponent negative or above 83, number contains no significant bits in
  // the range 0..2^31, so result is zero. (result was already cleared above,
  // whichever register it is.)
  __ j(above, &done);

  // Exponent in range 63..83.
  // Mantissa * 2^exponent contains bits in the range 2^0..2^31, namely
  // the least significant exponent-52 bits.

  // Negate low bits of mantissa if value is negative.
  __ addq(double_value, double_value);  // Move sign bit to carry.
  __ sbbl(result, result);  // And convert carry to -1 in result register.
  // If the value was negative, compute (mantissa - 1) ^ -1, i.e. the two's
  // complement of the low bits; otherwise (mantissa - 0) ^ 0, leaving them
  // unchanged.
  __ addl(double_value, result);
  // Do xor in opposite directions depending on where we want the result
  // (depending on whether result is rcx or not): the shift below needs its
  // count in cl, so rcx must be free to hold the shift amount.

  if (result.is(rcx)) {
    __ xorl(double_value, result);
    // Left shift mantissa by (exponent - mantissabits - 1) to save the
    // bits that have positional values below 2^32 (the extra -1 comes from the
    // doubling done above to move the sign bit into the carry flag).
    __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
    __ shll_cl(double_value);
    __ movl(result, double_value);
  } else {
    // As the then-branch, but move double-value to result before shifting.
    __ xorl(result, double_value);
    __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
    __ shll_cl(result);
  }

  __ bind(&done);
}
1268
1269
1270 // Input: rdx, rax are the left and right objects of a bit op. 1541 // Input: rdx, rax are the left and right objects of a bit op.
1271 // Output: rax, rcx are left and right integers for a bit op. 1542 // Output: rax, rcx are left and right integers for a bit op.
1272 void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm) { 1543 void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm) {
1273 // Check float operands. 1544 // Check float operands.
1274 Label done; 1545 Label done;
1275 Label rax_is_smi; 1546 Label rax_is_smi;
1276 Label rax_is_object; 1547 Label rax_is_object;
1277 Label rdx_is_object; 1548 Label rdx_is_object;
1278 1549
1279 __ JumpIfNotSmi(rdx, &rdx_is_object); 1550 __ JumpIfNotSmi(rdx, &rdx_is_object);
(...skipping 3264 matching lines...) Expand 10 before | Expand all | Expand 10 after
4544 // Do a tail call to the rewritten stub. 4815 // Do a tail call to the rewritten stub.
4545 __ jmp(rdi); 4816 __ jmp(rdi);
4546 } 4817 }
4547 4818
4548 4819
4549 #undef __ 4820 #undef __
4550 4821
4551 } } // namespace v8::internal 4822 } } // namespace v8::internal
4552 4823
4553 #endif // V8_TARGET_ARCH_X64 4824 #endif // V8_TARGET_ARCH_X64
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698