Chromium Code Reviews

Unified Diff: src/arm/codegen-arm.cc

Issue 2843010: ARM: Load the heap number map into a register and keep it... (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: '' Created 10 years, 6 months ago
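
The whole patch applies one pattern: load the heap-number map from the root list once into a dedicated register (r6) and keep it there, so each stub can compare maps and pass the map to AllocateHeapNumber without re-fetching it. A minimal before/after sketch, assembled from the hunks below:

    // Before: every check re-derives the object's type, and
    // AllocateHeapNumber loads the map itself.
    __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
    __ b(ne, &slow);
    __ AllocateHeapNumber(r5, r6, r7, &slow);

    // After: one LoadRoot up front; checks become a plain ldr + cmp, and
    // AllocateHeapNumber takes the cached map as an extra argument.
    Register heap_number_map = r6;
    __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
    ...
    __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
    __ cmp(r4, heap_number_map);
    __ b(ne, &slow);
    __ AllocateHeapNumber(r5, r3, r7, heap_number_map, &slow);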
 // Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 4688 matching lines...)
 
 
 void CodeGenerator::GenerateRandomHeapNumber(
     ZoneList<Expression*>* args) {
   VirtualFrame::SpilledScope spilled_scope(frame_);
   ASSERT(args->length() == 0);
 
   Label slow_allocate_heapnumber;
   Label heapnumber_allocated;
 
-  __ AllocateHeapNumber(r4, r1, r2, &slow_allocate_heapnumber);
+  __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+  __ AllocateHeapNumber(r4, r1, r2, r6, &slow_allocate_heapnumber);
   __ jmp(&heapnumber_allocated);
 
   __ bind(&slow_allocate_heapnumber);
   // To allocate a heap number, and ensure that it is not a smi, we
   // call the runtime function NumberUnaryMinus on 0, returning the double
   // -0.0. A new, distinct heap number is returned each time.
   __ mov(r0, Operand(Smi::FromInt(0)));
   __ push(r0);
   __ CallRuntime(Runtime::kNumberUnaryMinus, 1);
   __ mov(r4, Operand(r0));
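
Context for the slow path above: under V8's pointer tagging, small integers (Smis) live directly in the tagged word and are never heap-allocated, and -0.0 has no Smi encoding, so NumberUnaryMinus applied to the Smi 0 must always return a freshly allocated heap number. The tag test the stubs use everywhere, `tst(reg, Operand(kSmiTagMask))`, boils down to the following on the 32-bit encoding with kSmiTag == 0:

    #include <cstdint>

    // A tagged word is a Smi iff its low tag bit is clear; heap-object
    // pointers have the bit set (kHeapObjectTag == 1).
    bool IsSmi(intptr_t tagged) {
      return (tagged & 1) == 0;  // 1 == kSmiTagMask
    }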
(...skipping 2636 matching lines...)
 void GenericBinaryOpStub::HandleBinaryOpSlowCases(
     MacroAssembler* masm,
     Label* not_smi,
     Register lhs,
     Register rhs,
     const Builtins::JavaScript& builtin) {
   Label slow, slow_reverse, do_the_call;
   bool use_fp_registers = CpuFeatures::IsSupported(VFP3) && Token::MOD != op_;
 
   ASSERT((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0)));
+  Register heap_number_map = r6;
 
   if (ShouldGenerateSmiCode()) {
+    __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+
     // Smi-smi case (overflow).
     // Since both are Smis there is no heap number to overwrite, so allocate.
-    // The new heap number is in r5. r6 and r7 are scratch.
-    __ AllocateHeapNumber(r5, r6, r7, lhs.is(r0) ? &slow_reverse : &slow);
+    // The new heap number is in r5. r3 and r7 are scratch.
+    __ AllocateHeapNumber(
+        r5, r3, r7, heap_number_map, lhs.is(r0) ? &slow_reverse : &slow);
 
     // If we have floating point hardware, inline ADD, SUB, MUL, and DIV,
     // using registers d7 and d6 for the double values.
     if (use_fp_registers) {
       CpuFeatures::Scope scope(VFP3);
       __ mov(r7, Operand(rhs, ASR, kSmiTagSize));
       __ vmov(s15, r7);
       __ vcvt_f64_s32(d7, s15);
       __ mov(r7, Operand(lhs, ASR, kSmiTagSize));
       __ vmov(s13, r7);
       __ vcvt_f64_s32(d6, s13);
     } else {
-      // Write Smi from rhs to r3 and r2 in double format. r6 is scratch.
+      // Write Smi from rhs to r3 and r2 in double format. r9 is scratch.
       __ mov(r7, Operand(rhs));
-      ConvertToDoubleStub stub1(r3, r2, r7, r6);
+      ConvertToDoubleStub stub1(r3, r2, r7, r9);
       __ push(lr);
       __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
-      // Write Smi from lhs to r1 and r0 in double format. r6 is scratch.
+      // Write Smi from lhs to r1 and r0 in double format. r9 is scratch.
       __ mov(r7, Operand(lhs));
-      ConvertToDoubleStub stub2(r1, r0, r7, r6);
+      ConvertToDoubleStub stub2(r1, r0, r7, r9);
       __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
       __ pop(lr);
     }
     __ jmp(&do_the_call);  // Tail call. No return.
   }
 
   // We branch here if at least one of r0 and r1 is not a Smi.
   __ bind(not_smi);
+  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
 
   // After this point we have the left hand side in r1 and the right hand side
   // in r0.
   if (lhs.is(r0)) {
     __ Swap(r0, r1, ip);
   }
 
   if (ShouldGenerateFPCode()) {
     Label r0_is_smi, r1_is_smi, finished_loading_r0, finished_loading_r1;
 
     if (runtime_operands_type_ == BinaryOpIC::DEFAULT) {
       switch (op_) {
         case Token::ADD:
         case Token::SUB:
         case Token::MUL:
         case Token::DIV:
           GenerateTypeTransition(masm);
           break;
 
         default:
           break;
       }
+      // Restore heap number map register.
+      __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
     }
 
     if (mode_ == NO_OVERWRITE) {
       // In the case where there is no chance of an overwritable float we may as
       // well do the allocation immediately while r0 and r1 are untouched.
-      __ AllocateHeapNumber(r5, r6, r7, &slow);
+      __ AllocateHeapNumber(r5, r3, r7, heap_number_map, &slow);
     }
 
     // Move r0 to a double in r2-r3.
     __ tst(r0, Operand(kSmiTagMask));
     __ b(eq, &r0_is_smi);  // It's a Smi so don't check it's a heap number.
-    __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
+    __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
+    __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+    __ cmp(r4, heap_number_map);
     __ b(ne, &slow);
     if (mode_ == OVERWRITE_RIGHT) {
       __ mov(r5, Operand(r0));  // Overwrite this heap number.
     }
     if (use_fp_registers) {
       CpuFeatures::Scope scope(VFP3);
       // Load the double from tagged HeapNumber r0 to d7.
       __ sub(r7, r0, Operand(kHeapObjectTag));
       __ vldr(d7, r7, HeapNumber::kValueOffset);
     } else {
       // Calling convention says that second double is in r2 and r3.
       __ Ldrd(r2, r3, FieldMemOperand(r0, HeapNumber::kValueOffset));
     }
     __ jmp(&finished_loading_r0);
     __ bind(&r0_is_smi);
     if (mode_ == OVERWRITE_RIGHT) {
       // We can't overwrite a Smi so get address of new heap number into r5.
-      __ AllocateHeapNumber(r5, r6, r7, &slow);
+      __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
     }
 
     if (use_fp_registers) {
       CpuFeatures::Scope scope(VFP3);
       // Convert smi in r0 to double in d7.
       __ mov(r7, Operand(r0, ASR, kSmiTagSize));
       __ vmov(s15, r7);
       __ vcvt_f64_s32(d7, s15);
     } else {
       // Write Smi from r0 to r3 and r2 in double format.
       __ mov(r7, Operand(r0));
-      ConvertToDoubleStub stub3(r3, r2, r7, r6);
+      ConvertToDoubleStub stub3(r3, r2, r7, r4);
       __ push(lr);
       __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET);
       __ pop(lr);
     }
 
     // HEAP_NUMBERS stub is slower than GENERIC on a pair of smis.
     // r0 is known to be a smi. If r1 is also a smi then switch to GENERIC.
     Label r1_is_not_smi;
     if (runtime_operands_type_ == BinaryOpIC::HEAP_NUMBERS) {
       __ tst(r1, Operand(kSmiTagMask));
       __ b(ne, &r1_is_not_smi);
       GenerateTypeTransition(masm);
+      // Restore heap number map register.
+      __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
       __ jmp(&r1_is_smi);
     }
 
     __ bind(&finished_loading_r0);
 
     // Move r1 to a double in r0-r1.
     __ tst(r1, Operand(kSmiTagMask));
     __ b(eq, &r1_is_smi);  // It's a Smi so don't check it's a heap number.
     __ bind(&r1_is_not_smi);
-    __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE);
+    __ ldr(r4, FieldMemOperand(r1, HeapNumber::kMapOffset));
+    __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+    __ cmp(r4, heap_number_map);
     __ b(ne, &slow);
     if (mode_ == OVERWRITE_LEFT) {
       __ mov(r5, Operand(r1));  // Overwrite this heap number.
     }
     if (use_fp_registers) {
       CpuFeatures::Scope scope(VFP3);
       // Load the double from tagged HeapNumber r1 to d6.
       __ sub(r7, r1, Operand(kHeapObjectTag));
       __ vldr(d6, r7, HeapNumber::kValueOffset);
     } else {
       // Calling convention says that first double is in r0 and r1.
       __ Ldrd(r0, r1, FieldMemOperand(r1, HeapNumber::kValueOffset));
     }
     __ jmp(&finished_loading_r1);
     __ bind(&r1_is_smi);
     if (mode_ == OVERWRITE_LEFT) {
       // We can't overwrite a Smi so get address of new heap number into r5.
-      __ AllocateHeapNumber(r5, r6, r7, &slow);
+      __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
     }
 
     if (use_fp_registers) {
       CpuFeatures::Scope scope(VFP3);
       // Convert smi in r1 to double in d6.
       __ mov(r7, Operand(r1, ASR, kSmiTagSize));
       __ vmov(s13, r7);
       __ vcvt_f64_s32(d6, s13);
     } else {
       // Write Smi from r1 to r1 and r0 in double format.
       __ mov(r7, Operand(r1));
-      ConvertToDoubleStub stub4(r1, r0, r7, r6);
+      ConvertToDoubleStub stub4(r1, r0, r7, r9);
       __ push(lr);
       __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET);
       __ pop(lr);
     }
 
     __ bind(&finished_loading_r1);
 
     __ bind(&do_the_call);
     // If we are inlining the operation using VFP3 instructions for
     // add, subtract, multiply, or divide, the arguments are in d6 and d7.
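
A note on the loads in this hunk (context, not part of the patch): `vldr` needs an untagged address, hence the explicit `sub(rX, rY, Operand(kHeapObjectTag))` before it, while `Ldrd` goes through FieldMemOperand, which folds the same tag correction into its offset. A sketch, assuming V8's kHeapObjectTag == 1:

    #include <cstdint>

    // FieldMemOperand(obj, offset) addresses obj + offset - kHeapObjectTag:
    // heap pointers carry a low tag bit, paid for once in the effective
    // address arithmetic.
    uintptr_t FieldAddress(uintptr_t tagged_obj, int offset) {
      const int kHeapObjectTag = 1;
      return tagged_obj + offset - kHeapObjectTag;
    }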
(...skipping 40 matching lines...)
 #else
       // Double returned in registers 0 and 1.
       __ Strd(r0, r1, FieldMemOperand(r5, HeapNumber::kValueOffset));
 #endif
       __ mov(r0, Operand(r5));
       // And we are done.
       __ pop(pc);
     }
   }
 
-
   if (lhs.is(r0)) {
     __ b(&slow);
     __ bind(&slow_reverse);
     __ Swap(r0, r1, ip);
   }
 
+  heap_number_map = no_reg;  // Don't use this any more from here on.
+
   // We jump to here if something goes wrong (one param is not a number of any
   // sort or new-space allocation fails).
   __ bind(&slow);
 
   // Push arguments to the stack
   __ Push(r1, r0);
 
   if (Token::ADD == op_) {
     // Test for string arguments before calling runtime.
     // r1 : first argument
(...skipping 145 matching lines...)
 // result is a Smi. If so, great, otherwise we try to find a heap number to
 // write the answer into (either by allocating or by overwriting).
 // On entry the operands are in lhs and rhs. On exit the answer is in r0.
 void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm,
                                                 Register lhs,
                                                 Register rhs) {
   Label slow, result_not_a_smi;
   Label rhs_is_smi, lhs_is_smi;
   Label done_checking_rhs, done_checking_lhs;
 
+  Register heap_number_map = r6;
+  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+
   __ tst(lhs, Operand(kSmiTagMask));
   __ b(eq, &lhs_is_smi);  // It's a Smi so don't check it's a heap number.
-  __ CompareObjectType(lhs, r4, r4, HEAP_NUMBER_TYPE);
+  __ ldr(r4, FieldMemOperand(lhs, HeapNumber::kMapOffset));
+  __ cmp(r4, heap_number_map);
   __ b(ne, &slow);
   GetInt32(masm, lhs, r3, r5, r4, &slow);
   __ jmp(&done_checking_lhs);
   __ bind(&lhs_is_smi);
   __ mov(r3, Operand(lhs, ASR, 1));
   __ bind(&done_checking_lhs);
 
   __ tst(rhs, Operand(kSmiTagMask));
   __ b(eq, &rhs_is_smi);  // It's a Smi so don't check it's a heap number.
-  __ CompareObjectType(rhs, r4, r4, HEAP_NUMBER_TYPE);
+  __ ldr(r4, FieldMemOperand(rhs, HeapNumber::kMapOffset));
+  __ cmp(r4, heap_number_map);
   __ b(ne, &slow);
   GetInt32(masm, rhs, r2, r5, r4, &slow);
   __ jmp(&done_checking_rhs);
   __ bind(&rhs_is_smi);
   __ mov(r2, Operand(rhs, ASR, 1));
   __ bind(&done_checking_rhs);
 
   ASSERT(((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0))));
 
   // r0 and r1: Original operands (Smi or heap numbers).
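
For context: GetInt32 (its body falls in the elided lines) produces the untagged 32-bit integer the bitwise ops need, bailing out to &slow for inputs it does not handle inline. The semantics being targeted are ECMA-262 ToInt32, i.e. truncation modulo 2^32; a host-side sketch, assuming two's-complement narrowing:

    #include <cmath>
    #include <cstdint>

    // ECMA-262 ToInt32: truncate toward zero, reduce modulo 2^32, and
    // reinterpret the low 32 bits as signed. NaN and infinities map to 0.
    int32_t ToInt32(double d) {
      if (!std::isfinite(d)) return 0;
      double m = std::fmod(std::trunc(d), 4294967296.0);  // (-2^32, 2^32)
      if (m < 0) m += 4294967296.0;                       // [0, 2^32)
      return static_cast<int32_t>(static_cast<uint32_t>(m));
    }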
(...skipping 39 matching lines...)
       __ mov(r5, Operand(rhs));
       break;
     }
     case OVERWRITE_LEFT: {
       __ tst(lhs, Operand(kSmiTagMask));
       __ b(eq, &have_to_allocate);
       __ mov(r5, Operand(lhs));
       break;
     }
     case NO_OVERWRITE: {
-      // Get a new heap number in r5. r6 and r7 are scratch.
-      __ AllocateHeapNumber(r5, r6, r7, &slow);
+      // Get a new heap number in r5. r4 and r7 are scratch.
+      __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
     }
     default: break;
   }
   __ bind(&got_a_heap_number);
   // r2: Answer as signed int32.
   // r5: Heap number to write answer into.
 
   // Nothing can go wrong now, so move the heap number to r0, which is the
   // result.
   __ mov(r0, Operand(r5));
 
   // Tail call that writes the int32 in r2 to the heap number in r0, using
   // r3 as scratch. r0 is preserved and returned.
   WriteInt32ToHeapNumberStub stub(r2, r0, r3);
   __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
 
   if (mode_ != NO_OVERWRITE) {
     __ bind(&have_to_allocate);
-    // Get a new heap number in r5. r6 and r7 are scratch.
-    __ AllocateHeapNumber(r5, r6, r7, &slow);
+    // Get a new heap number in r5. r4 and r7 are scratch.
+    __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
     __ jmp(&got_a_heap_number);
   }
 
   // If all else failed then we go to the runtime system.
   __ bind(&slow);
   __ Push(lhs, rhs);  // Restore stack.
   switch (op_) {
     case Token::BIT_OR:
       __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS);
       break;
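
A note on the result path above (context, not part of the patch): WriteInt32ToHeapNumberStub converts the signed 32-bit answer in r2 to an IEEE-754 double and stores it in the value field of the heap number in r0. The conversion is always exact, since a double's 53-bit mantissa covers every int32; in host terms the stored value is simply:

    #include <cstdint>

    // The value ultimately written into the HeapNumber (a sketch of the
    // result, not of the stub's assembly): int32 -> double is lossless.
    double BoxAsHeapNumberValue(int32_t answer) {
      return static_cast<double>(answer);
    }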
(...skipping 550 matching lines...)
   __ push(r0);
   __ TailCallRuntime(Runtime::kStackGuard, 1, 1);
 
   __ StubReturn(1);
 }
 
 
 void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
   Label slow, done;
 
+  Register heap_number_map = r6;
+  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+
   if (op_ == Token::SUB) {
     // Check whether the value is a smi.
     Label try_float;
     __ tst(r0, Operand(kSmiTagMask));
     __ b(ne, &try_float);
 
     // Go slow case if the value of the expression is zero
     // to make sure that we switch between 0 and -0.
     __ cmp(r0, Operand(0));
     __ b(eq, &slow);
 
     // The value of the expression is a smi that is not zero. Try
     // optimistic subtraction '0 - value'.
     __ rsb(r1, r0, Operand(0), SetCC);
     __ b(vs, &slow);
 
     __ mov(r0, Operand(r1));  // Set r0 to result.
     __ b(&done);
 
     __ bind(&try_float);
-    __ CompareObjectType(r0, r1, r1, HEAP_NUMBER_TYPE);
+    __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
+    __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+    __ cmp(r1, heap_number_map);
     __ b(ne, &slow);
     // r0 is a heap number. Get a new heap number in r1.
     if (overwrite_) {
       __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
       __ eor(r2, r2, Operand(HeapNumber::kSignMask));  // Flip sign.
       __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
     } else {
-      __ AllocateHeapNumber(r1, r2, r3, &slow);
+      __ AllocateHeapNumber(r1, r2, r3, r6, &slow);
       __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
       __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
       __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset));
       __ eor(r2, r2, Operand(HeapNumber::kSignMask));  // Flip sign.
       __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset));
       __ mov(r0, Operand(r1));
     }
   } else if (op_ == Token::BIT_NOT) {
     // Check if the operand is a heap number.
-    __ CompareObjectType(r0, r1, r1, HEAP_NUMBER_TYPE);
+    __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
+    __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+    __ cmp(r1, heap_number_map);
     __ b(ne, &slow);
 
     // Convert the heap number in r0 to an untagged integer in r1.
     GetInt32(masm, r0, r1, r2, r3, &slow);
 
     // Do the bitwise operation (move negated) and check if the result
     // fits in a smi.
     Label try_float;
     __ mvn(r1, Operand(r1));
     __ add(r2, r1, Operand(0x40000000), SetCC);
     __ b(mi, &try_float);
     __ mov(r0, Operand(r1, LSL, kSmiTagSize));
     __ b(&done);
 
     __ bind(&try_float);
     if (!overwrite_) {
       // Allocate a fresh heap number, but don't overwrite r0 until
       // we're sure we can do it without going through the slow case
       // that needs the value in r0.
-      __ AllocateHeapNumber(r2, r3, r4, &slow);
+      __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
       __ mov(r0, Operand(r2));
     }
 
     // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
     // have to set up a frame.
     WriteInt32ToHeapNumberStub stub(r1, r0, r2);
     __ push(lr);
     __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
     __ pop(lr);
   } else {
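
Two bit-level idioms in this hunk deserve a note (context only, not part of the patch). First, `add(r2, r1, Operand(0x40000000), SetCC)` followed by `b(mi, &try_float)` is a one-instruction test of whether the untagged int32 fits a 31-bit Smi payload. Second, the Token::SUB path negates a heap number by XOR-ing HeapNumber::kSignMask into the word holding sign and exponent, which is a complete IEEE-754 negation (correct for +/-0, infinities, and NaNs). Host-side sketches of both, under two's-complement assumptions:

    #include <cstdint>
    #include <cstring>

    // A 32-bit Smi payload is a 31-bit signed integer, i.e. v must lie in
    // [-2^30, 2^30). Adding 2^30 maps exactly that window onto the
    // non-negative int32s, so the ARM add with SetCC reports "doesn't fit"
    // through the N (mi) flag.
    bool FitsInSmi(int64_t v) {
      return v >= -(int64_t{1} << 30) && v < (int64_t{1} << 30);
    }

    // Negating a double = toggling its sign bit; the stub eors
    // HeapNumber::kSignMask into the high (sign + exponent) word in place.
    double NegateDouble(double x) {
      uint64_t bits;
      std::memcpy(&bits, &x, sizeof bits);
      bits ^= uint64_t{1} << 63;  // the sign bit
      std::memcpy(&x, &bits, sizeof bits);
      return x;
    }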
(...skipping 2209 matching lines...)
   __ bind(&string_add_runtime);
   __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
 }
 
 
 #undef __
 
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_ARM