Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(285)

Side by Side Diff: src/x64/code-stubs-x64.cc

Issue 6879081: Added type recording for unary minus and unary bitwise negation. Note that the (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: '' Created 9 years, 8 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
1 // Copyright 2011 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 304 matching lines...) Expand 10 before | Expand all | Expand 10 after
315 Register first, 315 Register first,
316 Register second, 316 Register second,
317 Register scratch1, 317 Register scratch1,
318 Register scratch2, 318 Register scratch2,
319 Register scratch3, 319 Register scratch3,
320 Label* on_success, 320 Label* on_success,
321 Label* on_not_smis); 321 Label* on_not_smis);
322 }; 322 };
323 323
324 324
// Get the integer part of a heap number.
// Overwrites the contents of rdi, rbx and rcx. Result cannot be rdi or rbx.
void IntegerConvert(MacroAssembler* masm,
                    Register result,
                    Register source) {
  // Result may be rcx. If result and source are the same register, source will
  // be overwritten.
  ASSERT(!result.is(rdi) && !result.is(rbx));
  // TODO(lrn): When type info reaches here, if value is a 32-bit integer, use
  // cvttsd2si (32-bit version) directly.
  Register double_exponent = rbx;
  Register double_value = rdi;
  NearLabel done, exponent_63_plus;
  // Get the raw double bits and extract the exponent field.
  __ movq(double_value, FieldOperand(source, HeapNumber::kValueOffset));
  // Clear result preemptively, in case we need to return zero.
  __ xorl(result, result);
  __ movq(xmm0, double_value);  // Save copy in xmm0 in case we need it there.
  // Double the value to remove the sign bit, then shift the exponent down to
  // the least significant bits and subtract the bias to get the unshifted,
  // unbiased exponent.
  __ lea(double_exponent, Operand(double_value, double_value, times_1, 0));
  __ shr(double_exponent, Immediate(64 - HeapNumber::kExponentBits));
  __ subl(double_exponent, Immediate(HeapNumber::kExponentBias));
  // Check whether the exponent is too big for a 63 bit unsigned integer.
  __ cmpl(double_exponent, Immediate(63));
  __ j(above_equal, &exponent_63_plus);
  // Handle exponent range 0..62: a plain truncating conversion suffices.
  __ cvttsd2siq(result, xmm0);
  __ jmp(&done);

  __ bind(&exponent_63_plus);
  // Exponent negative (unsigned compare makes it wrap above 83) or 63+.
  __ cmpl(double_exponent, Immediate(83));
  // If exponent negative or above 83, number contains no significant bits in
  // the range 0..2^31, so result is zero, and result already holds zero.
  __ j(above, &done);

  // Exponent in range 63..83.
  // Mantissa * 2^exponent contains bits in the range 2^0..2^31, namely
  // the least significant exponent-52 bits.

  // Negate low bits of mantissa if value is negative.
  __ addq(double_value, double_value);  // Move sign bit to carry.
  __ sbbl(result, result);  // And convert carry to -1 in result register.
  // If result is -1 (value was negative) this computes (double_value - 1),
  // to be xor'ed with -1 below; otherwise (double_value - 0), xor'ed with 0.
  // Together these two's-complement negate the low mantissa bits.
  __ addl(double_value, result);
  // Do xor in opposite directions depending on where we want the result
  // (depending on whether result is rcx or not), since the shift count
  // must end up in rcx for shll_cl.

  if (result.is(rcx)) {
    __ xorl(double_value, result);
    // Left shift mantissa by (exponent - mantissabits - 1) to save the
    // bits that have positional values below 2^32 (the extra -1 comes from the
    // doubling done above to move the sign bit into the carry flag).
    __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
    __ shll_cl(double_value);
    __ movl(result, double_value);
  } else {
    // As the then-branch, but move double-value to result before shifting.
    __ xorl(result, double_value);
    __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
    __ shll_cl(result);
  }

  __ bind(&done);
}
391
392
393 Handle<Code> GetTypeRecordingUnaryOpStub(int key,
394 TRUnaryOpIC::TypeInfo type_info) {
395 TypeRecordingUnaryOpStub stub(key, type_info);
396 return stub.GetCode();
397 }
398
399
400 void TypeRecordingUnaryOpStub::Generate(MacroAssembler* masm) {
401 switch (operand_type_) {
402 case TRUnaryOpIC::UNINITIALIZED:
403 GenerateTypeTransition(masm);
404 break;
405 case TRUnaryOpIC::SMI:
406 GenerateSmiStub(masm);
407 break;
408 case TRUnaryOpIC::HEAP_NUMBER:
409 GenerateHeapNumberStub(masm);
410 break;
411 case TRUnaryOpIC::GENERIC:
412 GenerateGenericStub(masm);
413 break;
414 }
415 }
416
417
// Calls the runtime to patch the calling site to a specialized stub and to
// compute the operation result. The operand is expected in rax.
void TypeRecordingUnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
  __ pop(rcx);  // Save return address.
  __ push(rax);
  // The single operand of the unary operation is now on top of the stack.
  // Push this stub's key. Although the operation and the type info are
  // encoded into the key, the encoding is opaque, so push them too.
  __ Push(Smi::FromInt(MinorKey()));
  __ Push(Smi::FromInt(op_));
  __ Push(Smi::FromInt(operand_type_));

  __ push(rcx);  // Push return address.

  // Patch the caller to an appropriate specialized stub and return the
  // operation result to the caller of the stub.
  // Four arguments (operand, key, op, operand type), one result.
  __ TailCallExternalReference(
      ExternalReference(IC_Utility(IC::kTypeRecordingUnaryOp_Patch),
                        masm->isolate()),
      4,
      1);
}
438
439
440 // TODO(svenpanne): Use virtual functions instead of switch.
441 void TypeRecordingUnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
442 switch (op_) {
443 case Token::SUB:
444 GenerateSmiStubSub(masm);
445 break;
446 case Token::BIT_NOT:
447 GenerateSmiStubBitNot(masm);
448 break;
449 default:
450 UNREACHABLE();
451 }
452 }
453
454
// Smi-specialized unary minus: fast path negates a smi in rax; both the
// non-smi case and the overflow (slow) case fall through to a type
// transition, since the recorded type info is no longer accurate.
void TypeRecordingUnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
  NearLabel non_smi;
  Label slow;
  GenerateSmiCodeSub(masm, &non_smi, &slow);
  // Both failure labels bind to the same spot: re-record the operand type.
  __ bind(&non_smi);
  __ bind(&slow);
  GenerateTypeTransition(masm);
}
463
464
// Smi-specialized bitwise NOT: fast path operates on a smi in rax; a non-smi
// operand falls through to a type transition.
void TypeRecordingUnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
  NearLabel non_smi;
  GenerateSmiCodeBitNot(masm, &non_smi);
  __ bind(&non_smi);
  GenerateTypeTransition(masm);
}
471
472
// Emits the smi fast path for unary minus on rax. Jumps to |non_smi| if the
// operand is not a smi, and to |slow| if SmiNeg cannot produce a valid smi
// result (presumably negating zero or the minimum smi — confirm against
// MacroAssembler::SmiNeg). Returns to the caller on success.
void TypeRecordingUnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
                                                  NearLabel* non_smi,
                                                  Label* slow) {
  NearLabel done;
  __ JumpIfNotSmi(rax, non_smi);
  __ SmiNeg(rax, rax, &done);  // Jumps to done on a valid smi result.
  __ jmp(slow);                // Fall-through means the negation failed.
  __ bind(&done);
  __ ret(0);
}
483
484
// Emits the smi fast path for bitwise NOT on rax. Jumps to |non_smi| if the
// operand is not a smi; otherwise replaces rax with its smi complement and
// returns. Bitwise NOT of a smi always yields a smi, so there is no slow case.
void TypeRecordingUnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
                                                     NearLabel* non_smi) {
  __ JumpIfNotSmi(rax, non_smi);
  __ SmiNot(rax, rax);
  __ ret(0);
}
491
492
493 // TODO(svenpanne): Use virtual functions instead of switch.
494 void TypeRecordingUnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
495 switch (op_) {
496 case Token::SUB:
497 GenerateHeapNumberStubSub(masm);
498 break;
499 case Token::BIT_NOT:
500 GenerateHeapNumberStubBitNot(masm);
501 break;
502 default:
503 UNREACHABLE();
504 }
505 }
506
507
// Heap-number-specialized unary minus: tries the smi fast path first, then
// the heap number path (which binds |non_smi|); anything else falls through
// to a type transition.
void TypeRecordingUnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) {
  NearLabel non_smi;
  Label slow;
  GenerateSmiCodeSub(masm, &non_smi, &slow);
  // GenerateHeapNumberCodeSub binds |non_smi| itself.
  GenerateHeapNumberCodeSub(masm, &non_smi, &slow);
  __ bind(&slow);
  GenerateTypeTransition(masm);
}
516
517
// Heap-number-specialized bitwise NOT: tries the smi fast path first, then
// the heap number path; anything else falls through to a type transition.
void TypeRecordingUnaryOpStub::GenerateHeapNumberStubBitNot(
    MacroAssembler* masm) {
  NearLabel non_smi;
  Label slow;
  GenerateSmiCodeBitNot(masm, &non_smi);
  __ bind(&non_smi);
  GenerateHeapNumberCodeBitNot(masm, &slow);
  __ bind(&slow);
  GenerateTypeTransition(masm);
}
528
529
// Emits the heap number path for unary minus on rax: binds |non_smi|, bails
// to |slow| if rax is not a heap number, and otherwise negates the value by
// flipping the IEEE 754 sign bit. Depending on the overwrite mode, the
// result is stored in place or in a freshly allocated heap number.
void TypeRecordingUnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
                                                         NearLabel* non_smi,
                                                         Label* slow) {
  __ bind(non_smi);
  // Check if the operand is a heap number.
  __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
                 Heap::kHeapNumberMapRootIndex);
  __ j(not_equal, slow);

  // Operand is a float, negate its value by flipping sign bit.
  __ movq(rdx, FieldOperand(rax, HeapNumber::kValueOffset));
  __ Set(kScratchRegister, 0x01);
  __ shl(kScratchRegister, Immediate(63));  // Build the sign-bit mask 2^63.
  __ xor_(rdx, kScratchRegister);  // Flip sign.
  // rdx is value to store.
  if (mode_ == UNARY_OVERWRITE) {
    // The operand may be clobbered: store the negated value back into it.
    __ movq(FieldOperand(rax, HeapNumber::kValueOffset), rdx);
  } else {
    // Allocation failure jumps to |slow| with the operand still intact.
    __ AllocateHeapNumber(rcx, rbx, slow);
    // rcx: allocated 'empty' number
    __ movq(FieldOperand(rcx, HeapNumber::kValueOffset), rdx);
    __ movq(rax, rcx);
  }
  __ ret(0);
}
555
556
// Emits the heap number path for bitwise NOT on rax: bails to |slow| if rax
// is not a heap number, otherwise truncates it to a 32-bit integer,
// complements it, and returns the result as a smi in rax.
void TypeRecordingUnaryOpStub::GenerateHeapNumberCodeBitNot(
    MacroAssembler* masm,
    Label* slow) {
  // Check if the operand is a heap number.
  __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
                 Heap::kHeapNumberMapRootIndex);
  __ j(not_equal, slow);

  // Convert the heap number in rax to an untagged integer, also in rax.
  // Note: IntegerConvert clobbers rdi, rbx and rcx (see its header comment).
  IntegerConvert(masm, rax, rax);

  // Do the bitwise operation and smi tag the result.
  __ notl(rax);
  __ Integer32ToSmi(rax, rax);
  __ ret(0);
}
573
574
575 // TODO(svenpanne): Use virtual functions instead of switch.
576 void TypeRecordingUnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
577 switch (op_) {
578 case Token::SUB:
579 GenerateGenericStubSub(masm);
580 break;
581 case Token::BIT_NOT:
582 GenerateGenericStubBitNot(masm);
583 break;
584 default:
585 UNREACHABLE();
586 }
587 }
588
589
// Generic unary minus: smi fast path, then heap number path (which binds
// |non_smi|), and finally the JavaScript builtin fallback — no further type
// transitions once the stub is generic.
void TypeRecordingUnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
  NearLabel non_smi;
  Label slow;
  GenerateSmiCodeSub(masm, &non_smi, &slow);
  GenerateHeapNumberCodeSub(masm, &non_smi, &slow);
  __ bind(&slow);
  GenerateGenericCodeFallback(masm);
}
598
599
// Generic bitwise NOT: smi fast path, then heap number path, and finally the
// JavaScript builtin fallback — no further type transitions once generic.
void TypeRecordingUnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
  NearLabel non_smi;
  Label slow;
  GenerateSmiCodeBitNot(masm, &non_smi);
  __ bind(&non_smi);
  GenerateHeapNumberCodeBitNot(masm, &slow);
  __ bind(&slow);
  GenerateGenericCodeFallback(masm);
}
609
610
// Last-resort path for operands no fast path handles (e.g. strings,
// objects): pushes the operand and tail-calls the matching JS builtin.
void TypeRecordingUnaryOpStub::GenerateGenericCodeFallback(
    MacroAssembler* masm) {
  // Handle the slow case by jumping to the JavaScript builtin.
  __ pop(rcx);  // pop return address
  __ push(rax);  // The operand becomes the builtin's argument.
  __ push(rcx);  // push return address
  switch (op_) {
    case Token::SUB:
      __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
      break;
    case Token::BIT_NOT:
      __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
      break;
    default:
      UNREACHABLE();
  }
}
628
629
630 const char* TypeRecordingUnaryOpStub::GetName() {
631 if (name_ != NULL) return name_;
632 const int kMaxNameLength = 100;
633 name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
634 kMaxNameLength);
635 if (name_ == NULL) return "OOM";
636 const char* op_name = Token::Name(op_);
637 const char* overwrite_name;
638 switch (mode_) {
639 case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
640 case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
641 }
642
643 OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
644 "TypeRecordingUnaryOpStub_%s_%s_%s",
645 op_name,
646 overwrite_name,
647 TRUnaryOpIC::GetName(operand_type_));
648 return name_;
649 }
650
651
325 Handle<Code> GetTypeRecordingBinaryOpStub(int key, 652 Handle<Code> GetTypeRecordingBinaryOpStub(int key,
326 TRBinaryOpIC::TypeInfo type_info, 653 TRBinaryOpIC::TypeInfo type_info,
327 TRBinaryOpIC::TypeInfo result_type_info) { 654 TRBinaryOpIC::TypeInfo result_type_info) {
328 TypeRecordingBinaryOpStub stub(key, type_info, result_type_info); 655 TypeRecordingBinaryOpStub stub(key, type_info, result_type_info);
329 return stub.GetCode(); 656 return stub.GetCode();
330 } 657 }
331 658
332 659
333 void TypeRecordingBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { 660 void TypeRecordingBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
334 __ pop(rcx); // Save return address. 661 __ pop(rcx); // Save return address.
(...skipping 857 matching lines...) Expand 10 before | Expand all | Expand 10 after
1192 __ bind(&done); 1519 __ bind(&done);
1193 } else { 1520 } else {
1194 ASSERT(type_ == TranscendentalCache::LOG); 1521 ASSERT(type_ == TranscendentalCache::LOG);
1195 __ fldln2(); 1522 __ fldln2();
1196 __ fxch(); 1523 __ fxch();
1197 __ fyl2x(); 1524 __ fyl2x();
1198 } 1525 }
1199 } 1526 }
1200 1527
1201 1528
// Get the integer part of a heap number.
// Overwrites the contents of rdi, rbx and rcx. Result cannot be rdi or rbx.
void IntegerConvert(MacroAssembler* masm,
                    Register result,
                    Register source) {
  // Result may be rcx. If result and source are the same register, source will
  // be overwritten.
  ASSERT(!result.is(rdi) && !result.is(rbx));
  // TODO(lrn): When type info reaches here, if value is a 32-bit integer, use
  // cvttsd2si (32-bit version) directly.
  Register double_exponent = rbx;
  Register double_value = rdi;
  NearLabel done, exponent_63_plus;
  // Get the raw double bits and extract the exponent field.
  __ movq(double_value, FieldOperand(source, HeapNumber::kValueOffset));
  // Clear result preemptively, in case we need to return zero.
  __ xorl(result, result);
  __ movq(xmm0, double_value);  // Save copy in xmm0 in case we need it there.
  // Double the value to remove the sign bit, then shift the exponent down to
  // the least significant bits and subtract the bias to get the unshifted,
  // unbiased exponent.
  __ lea(double_exponent, Operand(double_value, double_value, times_1, 0));
  __ shr(double_exponent, Immediate(64 - HeapNumber::kExponentBits));
  __ subl(double_exponent, Immediate(HeapNumber::kExponentBias));
  // Check whether the exponent is too big for a 63 bit unsigned integer.
  __ cmpl(double_exponent, Immediate(63));
  __ j(above_equal, &exponent_63_plus);
  // Handle exponent range 0..62: a plain truncating conversion suffices.
  __ cvttsd2siq(result, xmm0);
  __ jmp(&done);

  __ bind(&exponent_63_plus);
  // Exponent negative (unsigned compare makes it wrap above 83) or 63+.
  __ cmpl(double_exponent, Immediate(83));
  // If exponent negative or above 83, number contains no significant bits in
  // the range 0..2^31, so result is zero, and result already holds zero.
  __ j(above, &done);

  // Exponent in range 63..83.
  // Mantissa * 2^exponent contains bits in the range 2^0..2^31, namely
  // the least significant exponent-52 bits.

  // Negate low bits of mantissa if value is negative.
  __ addq(double_value, double_value);  // Move sign bit to carry.
  __ sbbl(result, result);  // And convert carry to -1 in result register.
  // If result is -1 (value was negative) this computes (double_value - 1),
  // to be xor'ed with -1 below; otherwise (double_value - 0), xor'ed with 0.
  // Together these two's-complement negate the low mantissa bits.
  __ addl(double_value, result);
  // Do xor in opposite directions depending on where we want the result
  // (depending on whether result is rcx or not), since the shift count
  // must end up in rcx for shll_cl.

  if (result.is(rcx)) {
    __ xorl(double_value, result);
    // Left shift mantissa by (exponent - mantissabits - 1) to save the
    // bits that have positional values below 2^32 (the extra -1 comes from the
    // doubling done above to move the sign bit into the carry flag).
    __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
    __ shll_cl(double_value);
    __ movl(result, double_value);
  } else {
    // As the then-branch, but move double-value to result before shifting.
    __ xorl(result, double_value);
    __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
    __ shll_cl(result);
  }

  __ bind(&done);
}
1268
1269
1270 // Input: rdx, rax are the left and right objects of a bit op. 1529 // Input: rdx, rax are the left and right objects of a bit op.
1271 // Output: rax, rcx are left and right integers for a bit op. 1530 // Output: rax, rcx are left and right integers for a bit op.
1272 void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm) { 1531 void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm) {
1273 // Check float operands. 1532 // Check float operands.
1274 Label done; 1533 Label done;
1275 Label rax_is_smi; 1534 Label rax_is_smi;
1276 Label rax_is_object; 1535 Label rax_is_object;
1277 Label rdx_is_object; 1536 Label rdx_is_object;
1278 1537
1279 __ JumpIfNotSmi(rdx, &rdx_is_object); 1538 __ JumpIfNotSmi(rdx, &rdx_is_object);
(...skipping 3264 matching lines...) Expand 10 before | Expand all | Expand 10 after
4544 // Do a tail call to the rewritten stub. 4803 // Do a tail call to the rewritten stub.
4545 __ jmp(rdi); 4804 __ jmp(rdi);
4546 } 4805 }
4547 4806
4548 4807
4549 #undef __ 4808 #undef __
4550 4809
4551 } } // namespace v8::internal 4810 } } // namespace v8::internal
4552 4811
4553 #endif // V8_TARGET_ARCH_X64 4812 #endif // V8_TARGET_ARCH_X64
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698