Chromium Code Reviews

Side by Side Diff: src/arm/code-stubs-arm.cc

Issue 6342019: ARM: Initial type recording binary operation stub... (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: '' Created 9 years, 11 months ago
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
(...skipping 325 matching lines...)
  // Compute lower part of fraction (last 12 bits).
  __ mov(mantissa, Operand(source_, LSL, HeapNumber::kMantissaBitsInTopWord));
  // And the top (top 20 bits).
  __ orr(exponent,
         exponent,
         Operand(source_, LSR, 32 - HeapNumber::kMantissaBitsInTopWord));
  __ Ret();
}


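Note: the shift-and-or pair above is plain IEEE 754 bit packing; the low 12 bits of the integer land at the top of the mantissa's low word, and the remaining 20 bits are OR'ed into the mantissa field of the high word. A minimal C++ sketch of the same packing (illustrative only, not V8 code; it assumes kMantissaBitsInTopWord == 20 and a precomputed sign/exponent word):

#include <cstdint>

struct DoubleWords {
  uint32_t lo;  // least significant 32 bits of the mantissa
  uint32_t hi;  // sign (1 bit) | exponent (11 bits) | top 20 mantissa bits
};

DoubleWords PackMantissa(uint32_t source, uint32_t sign_and_exponent) {
  const int kMantissaBitsInTopWord = 20;  // assumed, mirroring HeapNumber
  DoubleWords d;
  d.lo = source << kMantissaBitsInTopWord;  // low 12 bits of source, at top of lo
  d.hi = sign_and_exponent | (source >> (32 - kMantissaBitsInTopWord));  // top 20 bits
  return d;
}
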
class FloatingPointHelper : public AllStatic {
 public:

  enum Destination {
    kVFPRegisters,
    kCoreRegisters
  };


  // Loads smis from r0 and r1 (right and left in binary operations) into
  // floating point registers. Depending on the destination the values end up
  // either in d7 and d6 or in r2/r3 and r0/r1 respectively. If the
  // destination is floating point registers VFP3 must be supported. If core
  // registers are requested when VFP3 is supported, d6 and d7 will be
  // scratched.
  static void LoadSmis(MacroAssembler* masm,
                       Destination destination,
                       Register scratch1,
                       Register scratch2);

  // Loads objects from r0 and r1 (right and left in binary operations) into
  // floating point registers. Depending on the destination the values end up
  // either in d7 and d6 or in r2/r3 and r0/r1 respectively. If the
  // destination is floating point registers VFP3 must be supported. If core
  // registers are requested when VFP3 is supported, d6 and d7 will still be
  // scratched. If either r0 or r1 is not a number (neither smi nor heap
  // number object) the not_number label is jumped to.
  static void LoadOperands(MacroAssembler* masm,
                           FloatingPointHelper::Destination destination,
                           Register heap_number_map,
                           Register scratch1,
                           Register scratch2,
                           Label* not_number);

 private:
  static void LoadNumber(MacroAssembler* masm,
                         FloatingPointHelper::Destination destination,
                         Register object,
                         DwVfpRegister dst,
                         Register dst1,
                         Register dst2,
                         Register heap_number_map,
                         Register scratch1,
                         Register scratch2,
                         Label* not_number);
};


void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
                                   FloatingPointHelper::Destination destination,
                                   Register scratch1,
                                   Register scratch2) {
  if (CpuFeatures::IsSupported(VFP3)) {
    CpuFeatures::Scope scope(VFP3);
    __ mov(scratch1, Operand(r0, ASR, kSmiTagSize));
    __ vmov(s15, scratch1);
    __ vcvt_f64_s32(d7, s15);
    __ mov(scratch1, Operand(r1, ASR, kSmiTagSize));
    __ vmov(s13, scratch1);
    __ vcvt_f64_s32(d6, s13);
    if (destination == kCoreRegisters) {
      __ vmov(r2, r3, d7);
      __ vmov(r0, r1, d6);
    }
  } else {
    ASSERT(destination == kCoreRegisters);
    // Write Smi from r0 to r3 and r2 in double format.
    __ mov(scratch1, Operand(r0));
    ConvertToDoubleStub stub1(r3, r2, scratch1, scratch2);
    __ push(lr);
    __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
    // Write Smi from r1 to r1 and r0 in double format.
    __ mov(scratch1, Operand(r1));
    ConvertToDoubleStub stub2(r1, r0, scratch1, scratch2);
    __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
    __ pop(lr);
  }
}


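Note on the ASR by kSmiTagSize in LoadSmis: on 32-bit ARM a smi keeps the integer in the upper 31 bits with a zero tag bit, so untagging is one arithmetic shift right. A minimal sketch, assuming kSmiTagSize == 1 and kSmiTag == 0 (illustrative, not V8 code):

#include <cstdint>

inline int32_t SmiTag(int32_t value) { return value << 1; }  // tag bit 0 in the LSB
inline int32_t SmiUntag(int32_t smi) { return smi >> 1; }    // the ASR above

// LoadSmis then converts each untagged value to double, conceptually:
//   double right = static_cast<double>(SmiUntag(r0_bits));  // ends up in d7
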
void FloatingPointHelper::LoadOperands(
    MacroAssembler* masm,
    FloatingPointHelper::Destination destination,
    Register heap_number_map,
    Register scratch1,
    Register scratch2,
    Label* slow) {

  // Load right operand (r0) into d7 or r2/r3.
  LoadNumber(masm, destination,
             r0, d7, r2, r3, heap_number_map, scratch1, scratch2, slow);

  // Load left operand (r1) into d6 or r0/r1.
  LoadNumber(masm, destination,
             r1, d6, r0, r1, heap_number_map, scratch1, scratch2, slow);
}


void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
                                     Destination destination,
                                     Register object,
                                     DwVfpRegister dst,
                                     Register dst1,
                                     Register dst2,
                                     Register heap_number_map,
                                     Register scratch1,
                                     Register scratch2,
                                     Label* not_number) {
  Label is_smi, done;

  __ BranchOnSmi(object, &is_smi);
  __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);

  // Handle loading a double from a heap number.
  if (CpuFeatures::IsSupported(VFP3)) {
    CpuFeatures::Scope scope(VFP3);
    // Load the double from tagged HeapNumber to double register.
    __ sub(scratch1, object, Operand(kHeapObjectTag));
    __ vldr(dst, scratch1, HeapNumber::kValueOffset);
  } else {
    ASSERT(destination == kCoreRegisters);
    // Load the double from heap number to dst1 and dst2 in double format.
    __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset));
  }
  __ jmp(&done);

  // Handle loading a double from a smi.
  __ bind(&is_smi);
  if (CpuFeatures::IsSupported(VFP3)) {
    CpuFeatures::Scope scope(VFP3);
    // Convert smi to double.
    __ SmiUntag(scratch1, object);
    __ vmov(dst.high(), scratch1);
    __ vcvt_f64_s32(dst, dst.high());
    if (destination == kCoreRegisters) {
      __ vmov(dst1, dst2, dst);
    }
  } else {
    ASSERT(destination == kCoreRegisters);
    // Write Smi to dst1 and dst2 in double format.
    __ mov(scratch1, Operand(object));
    ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2);
    __ push(lr);
    __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
    __ pop(lr);
  }

  __ bind(&done);
}


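Note: LoadNumber subtracts kHeapObjectTag before the vldr because heap pointers are tagged and the VFP load wants an untagged, aligned base. A rough C++ equivalent of the heap-number branch (illustrative; kValueOffset here is a placeholder for HeapNumber::kValueOffset):

#include <cstdint>
#include <cstring>

double LoadHeapNumberValue(uintptr_t tagged_ptr) {
  const uintptr_t kHeapObjectTag = 1;  // assumed low-bit pointer tag
  const size_t kValueOffset = 8;       // placeholder for HeapNumber::kValueOffset
  const char* base = reinterpret_cast<const char*>(tagged_ptr - kHeapObjectTag);
  double value;
  std::memcpy(&value, base + kValueOffset, sizeof(value));  // load the stored double
  return value;
}
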
// See comment for class.
void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
  Label max_negative_int;
  // the_int_ has the answer which is a signed int32 but not a Smi.
  // We test for the special value that has a different exponent. This test
  // has the neat side effect of setting the flags according to the sign.
  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
  __ cmp(the_int_, Operand(0x80000000u));
  __ b(eq, &max_negative_int);
  // Set up the correct exponent in scratch_. All non-Smi int32s have the same.
(...skipping 1011 matching lines...)
    } else if (Token::ADD == op_) {
      __ vadd(d5, d6, d7);
    } else if (Token::SUB == op_) {
      __ vsub(d5, d6, d7);
    } else {
      UNREACHABLE();
    }
    __ sub(r0, r5, Operand(kHeapObjectTag));
    __ vstr(d5, r0, HeapNumber::kValueOffset);
    __ add(r0, r0, Operand(kHeapObjectTag));
-   __ mov(pc, lr);
+   __ Ret();
  } else {
    // If we did not inline the operation, then the arguments are in:
    // r0: Left value (least significant part of mantissa).
    // r1: Left value (sign, exponent, top of mantissa).
    // r2: Right value (least significant part of mantissa).
    // r3: Right value (sign, exponent, top of mantissa).
    // r5: Address of heap number for result.

    __ push(lr);  // For later.
    __ PrepareCallCFunction(4, r4);  // Two doubles count as 4 arguments.
(...skipping 811 matching lines...)

Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
  GenericBinaryOpStub stub(key, type_info);
  return stub.GetCode();
}


Handle<Code> GetTypeRecordingBinaryOpStub(int key,
                                          TRBinaryOpIC::TypeInfo type_info,
                                          TRBinaryOpIC::TypeInfo result_type_info) {
-  UNIMPLEMENTED();
-  return Handle<Code>::null();
+  TypeRecordingBinaryOpStub stub(key, type_info, result_type_info);
+  return stub.GetCode();
}


void TypeRecordingBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
  Label get_result;

  __ Push(r1, r0);

  __ mov(r2, Operand(Smi::FromInt(MinorKey())));
  __ mov(r1, Operand(Smi::FromInt(op_)));
  __ mov(r0, Operand(Smi::FromInt(operands_type_)));
  __ Push(r2, r1, r0);

  __ TailCallExternalReference(
      ExternalReference(IC_Utility(IC::kTypeRecordingBinaryOp_Patch)),
      5,
      1);
}


void TypeRecordingBinaryOpStub::GenerateTypeTransitionWithSavedArgs(
    MacroAssembler* masm) {
  UNIMPLEMENTED();
}


void TypeRecordingBinaryOpStub::Generate(MacroAssembler* masm) {
  switch (operands_type_) {
    case TRBinaryOpIC::UNINITIALIZED:
      GenerateTypeTransition(masm);
      break;
    case TRBinaryOpIC::SMI:
      GenerateSmiStub(masm);
      break;
    case TRBinaryOpIC::INT32:
      GenerateInt32Stub(masm);
      break;
    case TRBinaryOpIC::HEAP_NUMBER:
      GenerateHeapNumberStub(masm);
      break;
    case TRBinaryOpIC::STRING:
      GenerateStringStub(masm);
      break;
    case TRBinaryOpIC::GENERIC:
      GenerateGeneric(masm);
      break;
    default:
      UNREACHABLE();
  }
}


const char* TypeRecordingBinaryOpStub::GetName() {
  if (name_ != NULL) return name_;
  const int kMaxNameLength = 100;
  name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
  if (name_ == NULL) return "OOM";
  const char* op_name = Token::Name(op_);
  const char* overwrite_name;
  switch (mode_) {
    case NO_OVERWRITE: overwrite_name = "Alloc"; break;
    case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
    case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
    default: overwrite_name = "UnknownOverwrite"; break;
  }

  OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
               "TypeRecordingBinaryOpStub_%s_%s_%s",
               op_name,
               overwrite_name,
               TRBinaryOpIC::GetName(operands_type_));
  return name_;
}
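
Note: the composed names read op, then overwrite mode, then recorded operand type, so an ADD stub with NO_OVERWRITE and smi operands comes out as something like TypeRecordingBinaryOpStub_ADD_Alloc_SMI; the exact op and type strings come from Token::Name and TRBinaryOpIC::GetName.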


// Generate the smi code. If the operation on smis is successful a return
// instruction is generated. If the result is not a smi and heap number
// allocation is not requested the code falls through. If number allocation
// is requested but a heap number cannot be allocated the code jumps to the
// label gc_required.
void TypeRecordingBinaryOpStub::GenerateSmiCode(
    MacroAssembler* masm,
    Label* gc_required,
    SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
  Label not_smis;

  ASSERT(op_ == Token::ADD);

  Register left = r1;
  Register right = r0;
  Register scratch1 = r7;
  Register scratch2 = r9;

  // Perform combined smi check on both operands.
  __ orr(scratch1, left, Operand(right));
  STATIC_ASSERT(kSmiTag == 0);
  __ tst(scratch1, Operand(kSmiTagMask));
  __ b(ne, &not_smis);

  __ add(right, right, Operand(left), SetCC);  // Add optimistically.

  // Return smi result if no overflow (r0 is the result).
  ASSERT(right.is(r0));
  __ Ret(vc);

  // Result is not a smi. Revert the optimistic add.
  __ sub(right, right, Operand(left));

  // If heap number results are possible generate the result in an allocated
  // heap number.
  if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) {
    FloatingPointHelper::Destination destination =
        CpuFeatures::IsSupported(VFP3) && Token::MOD != op_ ?
        FloatingPointHelper::kVFPRegisters :
        FloatingPointHelper::kCoreRegisters;

    Register heap_number_map = r6;
    __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);

    // Allocate new heap number for result.
    Register heap_number = r5;
    __ AllocateHeapNumber(
        heap_number, scratch1, scratch2, heap_number_map, gc_required);

    // Load the smis.
    FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);

    // Calculate the result.
    if (destination == FloatingPointHelper::kVFPRegisters) {
      // Using VFP registers:
      // d6: Left value
      // d7: Right value
      CpuFeatures::Scope scope(VFP3);
      __ vadd(d5, d6, d7);

      __ sub(r0, heap_number, Operand(kHeapObjectTag));
      __ vstr(d5, r0, HeapNumber::kValueOffset);
      __ add(r0, r0, Operand(kHeapObjectTag));
      __ Ret();
    } else {
      // Using core registers:
      // r0: Left value (least significant part of mantissa).
      // r1: Left value (sign, exponent, top of mantissa).
      // r2: Right value (least significant part of mantissa).
      // r3: Right value (sign, exponent, top of mantissa).

      __ push(lr);  // For later.
      __ PrepareCallCFunction(4, scratch1);  // Two doubles are 4 arguments.
      // Call C routine that may not cause GC or other trouble. r5 is
      // callee-saved.
      __ CallCFunction(ExternalReference::double_fp_operation(op_), 4);
      // Store answer in the overwritable heap number.
#if !defined(USE_ARM_EABI)
      // Double returned in fp coprocessor register 0 and 1, encoded as
      // register cr8. Offsets must be divisible by 4 for coprocessor so we
      // need to subtract the tag from r5.
      __ sub(scratch1, heap_number, Operand(kHeapObjectTag));
      __ stc(p1, cr8, MemOperand(scratch1, HeapNumber::kValueOffset));
#else
      // Double returned in registers 0 and 1.
      __ Strd(r0, r1, FieldMemOperand(heap_number, HeapNumber::kValueOffset));
#endif
      __ mov(r0, Operand(heap_number));
      // And we are done.
      __ pop(pc);
    }
  }
  __ bind(&not_smis);
}


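Note: the "add optimistically" trick works because both smi tags are zero, so tagged addition is exact whenever the 32-bit add does not overflow; SetCC plus Ret(vc) returns on the no-overflow path and falls through otherwise. The same idea in C++, using the GCC/Clang overflow builtin (illustrative, not V8 code):

#include <cstdint>

// Returns true and writes the tagged sum on success; false means overflow,
// i.e. the slow path that reverts the add and builds a heap number.
bool OptimisticSmiAdd(int32_t left_tagged, int32_t right_tagged,
                      int32_t* result_tagged) {
  int32_t sum;
  if (__builtin_add_overflow(left_tagged, right_tagged, &sum)) {
    return false;  // mirrors falling through past __ Ret(vc)
  }
  *result_tagged = sum;  // (2a) + (2b) == 2(a + b): still a valid smi
  return true;
}
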
void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
  Label not_smis, call_runtime;

  ASSERT(op_ == Token::ADD);

  if (result_type_ == TRBinaryOpIC::UNINITIALIZED ||
      result_type_ == TRBinaryOpIC::SMI) {
    // Only allow smi results.
    GenerateSmiCode(masm, NULL, NO_HEAPNUMBER_RESULTS);
  } else {
    // Allow heap number result and don't make a transition if a heap number
    // cannot be allocated.
    GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
  }

  // Code falls through if the result is not returned as either a smi or
  // heap number.
  GenerateTypeTransition(masm);

  __ bind(&call_runtime);
  GenerateCallRuntime(masm);
}


void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
  ASSERT(operands_type_ == TRBinaryOpIC::STRING);
  ASSERT(op_ == Token::ADD);
  // Try to add arguments as strings; otherwise transition to the generic
  // TRBinaryOpIC type.
  GenerateAddStrings(masm);
  GenerateTypeTransition(masm);
}


void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
  ASSERT(op_ == Token::ADD);

  ASSERT(operands_type_ == TRBinaryOpIC::INT32);

  GenerateTypeTransition(masm);
}


void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
  ASSERT(op_ == Token::ADD);

  Register scratch1 = r7;
  Register scratch2 = r9;

  Label not_number, call_runtime;
  ASSERT(operands_type_ == TRBinaryOpIC::HEAP_NUMBER);

  Register heap_number_map = r6;
  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);

  // Load left and right operands into d6 and d7 or r0/r1 and r2/r3 depending
  // on whether VFP3 is available.
  FloatingPointHelper::Destination destination =
      CpuFeatures::IsSupported(VFP3) ?
      FloatingPointHelper::kVFPRegisters :
      FloatingPointHelper::kCoreRegisters;
  FloatingPointHelper::LoadOperands(masm,
                                    destination,
                                    heap_number_map,
                                    scratch1,
                                    scratch2,
                                    &not_number);
  if (destination == FloatingPointHelper::kVFPRegisters) {
    // Use floating point instructions for the binary operation.
    CpuFeatures::Scope scope(VFP3);
    __ vadd(d5, d6, d7);

    // Get a heap number object for the result - might be left or right if
    // one of these is overwritable.
    GenerateHeapResultAllocation(
        masm, r4, heap_number_map, scratch1, scratch2, &call_runtime);

    // Fill the result into the allocated heap number and return.
    __ sub(r0, r4, Operand(kHeapObjectTag));
    __ vstr(d5, r0, HeapNumber::kValueOffset);
    __ add(r0, r0, Operand(kHeapObjectTag));
    __ Ret();

  } else {
    // Call a C function for the binary operation.
    // r0/r1: Left operand
    // r2/r3: Right operand

    // Get a heap number object for the result - might be left or right if
    // one of these is overwritable. Uses a callee-saved register to keep the
    // value across the C call.
    GenerateHeapResultAllocation(
        masm, r4, heap_number_map, scratch1, scratch2, &call_runtime);

    __ push(lr);  // For returning later (no GC after this point).
    __ PrepareCallCFunction(4, scratch1);  // Two doubles count as 4 arguments.
    // Call C routine that may not cause GC or other trouble. r4 is
    // callee-saved.
    __ CallCFunction(ExternalReference::double_fp_operation(op_), 4);

    // Fill the result into the allocated heap number.
#if !defined(USE_ARM_EABI)
    // Double returned in fp coprocessor register 0 and 1, encoded as
    // register cr8. Offsets must be divisible by 4 for coprocessor so we
    // need to subtract the tag from r4.
    __ sub(scratch1, r4, Operand(kHeapObjectTag));
    __ stc(p1, cr8, MemOperand(scratch1, HeapNumber::kValueOffset));
#else
    // Double returned in registers 0 and 1.
    __ Strd(r0, r1, FieldMemOperand(r4, HeapNumber::kValueOffset));
#endif
    __ mov(r0, Operand(r4));
    __ pop(pc);  // Return to the pushed lr.
  }

  __ bind(&not_number);
  GenerateTypeTransition(masm);

  __ bind(&call_runtime);
  GenerateCallRuntime(masm);
}


void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
  ASSERT(op_ == Token::ADD);

  Label call_runtime;

  GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);

  // If all else fails, use the runtime system to get the correct
  // result.
  __ bind(&call_runtime);

  // Try to add strings before calling runtime.
  GenerateAddStrings(masm);

  GenericBinaryOpStub stub(op_, mode_, r1, r0);
  __ TailCallStub(&stub);
}


void TypeRecordingBinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
  Register left = r1;
  Register right = r0;
  Label call_runtime;

  // Check if first argument is a string.
  __ BranchOnSmi(left, &call_runtime);
  __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE);
  __ b(ge, &call_runtime);

  // First argument is a string, test second.
  __ BranchOnSmi(right, &call_runtime);
  __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE);
  __ b(ge, &call_runtime);

  // First and second argument are strings.
  StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
  GenerateRegisterArgsPush(masm);
  __ TailCallStub(&string_add_stub);

  // At least one argument is not a string.
  __ bind(&call_runtime);
}


void TypeRecordingBinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
  switch (op_) {
    case Token::ADD:
      GenerateRegisterArgsPush(masm);
      __ InvokeBuiltin(Builtins::ADD, JUMP_JS);
      break;
    default:
      UNREACHABLE();
  }
}


void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation(
    MacroAssembler* masm,
    Register result,
    Register heap_number_map,
    Register scratch1,
    Register scratch2,
    Label* gc_required) {

  // Code below will scratch result if allocation fails. To keep both
  // arguments intact for the runtime call, result cannot be one of these.
  ASSERT(!result.is(r0) && !result.is(r1));

  if (mode_ == OVERWRITE_LEFT || mode_ == OVERWRITE_RIGHT) {
    Label skip_allocation, allocated;
    Register overwritable_operand = mode_ == OVERWRITE_LEFT ? r1 : r0;
    // If the overwritable operand is already an object, we skip the
    // allocation of a heap number.
    __ BranchOnNotSmi(overwritable_operand, &skip_allocation);
    // Allocate a heap number for the result.
    __ AllocateHeapNumber(
        result, scratch1, scratch2, heap_number_map, gc_required);
    __ b(&allocated);
    __ bind(&skip_allocation);
    // Use object holding the overwritable operand for result.
    __ mov(result, Operand(overwritable_operand));
    __ bind(&allocated);
  } else {
    ASSERT(mode_ == NO_OVERWRITE);
    __ AllocateHeapNumber(
        result, scratch1, scratch2, heap_number_map, gc_required);
  }
}


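Note: GenerateHeapResultAllocation's policy in one sentence: in an overwrite mode, reuse the overwritable operand's heap number unless that operand is a smi; otherwise allocate a fresh one (possibly hitting gc_required). A compact C++ sketch under those assumptions (IsSmi and AllocateHeapNumber are hypothetical stand-ins for the real V8 helpers):

#include <cstdint>

enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };

uintptr_t ResultHeapNumber(OverwriteMode mode,
                           uintptr_t left, uintptr_t right,
                           bool (*IsSmi)(uintptr_t),
                           uintptr_t (*AllocateHeapNumber)()) {
  if (mode == OVERWRITE_LEFT || mode == OVERWRITE_RIGHT) {
    uintptr_t candidate = (mode == OVERWRITE_LEFT) ? left : right;
    if (!IsSmi(candidate)) return candidate;  // skip allocation, reuse object
  }
  return AllocateHeapNumber();  // may take the gc_required path in the stub
}
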
void TypeRecordingBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
  __ Push(r1, r0);
}


void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
  // Argument is a number and is on stack and in r0.
  Label runtime_call;
  Label input_not_smi;
  Label loaded;

  if (CpuFeatures::IsSupported(VFP3)) {
(...skipping 2865 matching lines...)
  __ pop(r1);
  __ Jump(r2);
}


#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_ARM