Chromium Code Reviews

Side by Side Diff: src/ia32/code-stubs-ia32.cc

Issue 6062002: Merge 6006:6095 from bleeding_edge to experimental/gc branch. (Closed) Base URL: http://v8.googlecode.com/svn/branches/experimental/gc/
Patch Set: Created 10 years ago
1 // Copyright 2010 the V8 project authors. All rights reserved. 1 // Copyright 2010 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 1348 matching lines...)
1359 Label* slow, 1359 Label* slow,
1360 SmiCodeGenerateHeapNumberResults allow_heapnumber_results) { 1360 SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
1361 // 1. Move arguments into edx, eax except for DIV and MOD, which need the 1361 // 1. Move arguments into edx, eax except for DIV and MOD, which need the
1362 // dividend in eax and edx free for the division. Use eax, ebx for those. 1362 // dividend in eax and edx free for the division. Use eax, ebx for those.
1363 Comment load_comment(masm, "-- Load arguments"); 1363 Comment load_comment(masm, "-- Load arguments");
1364 Register left = edx; 1364 Register left = edx;
1365 Register right = eax; 1365 Register right = eax;
1366 if (op_ == Token::DIV || op_ == Token::MOD) { 1366 if (op_ == Token::DIV || op_ == Token::MOD) {
1367 left = eax; 1367 left = eax;
1368 right = ebx; 1368 right = ebx;
1369 __ mov(ebx, eax); 1369 __ mov(ebx, eax);
1370 __ mov(eax, edx); 1370 __ mov(eax, edx);
1371 } 1371 }
1372 1372
1373 1373
1374 // 2. Prepare the smi check of both operands by ORing them together. 1374 // 2. Prepare the smi check of both operands by ORing them together.
1375 Comment smi_check_comment(masm, "-- Smi check arguments"); 1375 Comment smi_check_comment(masm, "-- Smi check arguments");
1376 Label not_smis; 1376 Label not_smis;
1377 Register combined = ecx; 1377 Register combined = ecx;
1378 ASSERT(!left.is(combined) && !right.is(combined)); 1378 ASSERT(!left.is(combined) && !right.is(combined));
1379 switch (op_) { 1379 switch (op_) {
1380 case Token::BIT_OR: 1380 case Token::BIT_OR:
(...skipping 1084 matching lines...)
2465 2465
2466 void TypeRecordingBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) { 2466 void TypeRecordingBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
2467 __ pop(ecx); 2467 __ pop(ecx);
2468 __ push(edx); 2468 __ push(edx);
2469 __ push(eax); 2469 __ push(eax);
2470 __ push(ecx); 2470 __ push(ecx);
2471 } 2471 }
2472 2472
2473 2473
2474 void TranscendentalCacheStub::Generate(MacroAssembler* masm) { 2474 void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
2475 // Input on stack: 2475 // TAGGED case:
2476 // esp[4]: argument (should be number). 2476 // Input:
2477 // esp[0]: return address. 2477 // esp[4]: tagged number input argument (should be number).
2478 // Test that eax is a number. 2478 // esp[0]: return address.
2479 // Output:
2480 // eax: tagged double result.
2481 // UNTAGGED case:
2482 // Input:
2483 // esp[0]: return address.
2484 // xmm1: untagged double input argument
2485 // Output:
2486 // xmm1: untagged double result.
2487
2479 Label runtime_call; 2488 Label runtime_call;
2480 Label runtime_call_clear_stack; 2489 Label runtime_call_clear_stack;
2481 NearLabel input_not_smi; 2490 Label skip_cache;
2482 NearLabel loaded; 2491 const bool tagged = (argument_type_ == TAGGED);
2483 __ mov(eax, Operand(esp, kPointerSize)); 2492 if (tagged) {
2484 __ test(eax, Immediate(kSmiTagMask)); 2493 // Test that eax is a number.
2485 __ j(not_zero, &input_not_smi); 2494 NearLabel input_not_smi;
2486 // Input is a smi. Untag and load it onto the FPU stack. 2495 NearLabel loaded;
2487 // Then load the low and high words of the double into ebx, edx. 2496 __ mov(eax, Operand(esp, kPointerSize));
2488 STATIC_ASSERT(kSmiTagSize == 1); 2497 __ test(eax, Immediate(kSmiTagMask));
2489 __ sar(eax, 1); 2498 __ j(not_zero, &input_not_smi);
2490 __ sub(Operand(esp), Immediate(2 * kPointerSize)); 2499 // Input is a smi. Untag and load it onto the FPU stack.
2491 __ mov(Operand(esp, 0), eax); 2500 // Then load the low and high words of the double into ebx, edx.
2492 __ fild_s(Operand(esp, 0)); 2501 STATIC_ASSERT(kSmiTagSize == 1);
2493 __ fst_d(Operand(esp, 0)); 2502 __ sar(eax, 1);
2494 __ pop(edx); 2503 __ sub(Operand(esp), Immediate(2 * kPointerSize));
2495 __ pop(ebx); 2504 __ mov(Operand(esp, 0), eax);
2496 __ jmp(&loaded); 2505 __ fild_s(Operand(esp, 0));
2497 __ bind(&input_not_smi); 2506 __ fst_d(Operand(esp, 0));
2498 // Check if input is a HeapNumber. 2507 __ pop(edx);
2499 __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset)); 2508 __ pop(ebx);
2500 __ cmp(Operand(ebx), Immediate(Factory::heap_number_map())); 2509 __ jmp(&loaded);
2501 __ j(not_equal, &runtime_call); 2510 __ bind(&input_not_smi);
2502 // Input is a HeapNumber. Push it on the FPU stack and load its 2511 // Check if input is a HeapNumber.
2503 // low and high words into ebx, edx. 2512 __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
2504 __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset)); 2513 __ cmp(Operand(ebx), Immediate(Factory::heap_number_map()));
2505 __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset)); 2514 __ j(not_equal, &runtime_call);
2506 __ mov(ebx, FieldOperand(eax, HeapNumber::kMantissaOffset)); 2515 // Input is a HeapNumber. Push it on the FPU stack and load its
2516 // low and high words into ebx, edx.
2517 __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
2518 __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset));
2519 __ mov(ebx, FieldOperand(eax, HeapNumber::kMantissaOffset));
2507 2520
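The sar by one above relies on the ia32 smi encoding: a smi stores the integer shifted left by kSmiTagSize (1) with a zero tag bit, which is also what the kSmiTagMask test checks. A minimal C++ sketch of that encoding follows; the helper names are illustrative and not part of this patch.

    #include <cstdint>

    // 32-bit smi encoding assumed by the stub: value << 1 with a zero tag bit.
    inline int32_t SmiTag(int32_t value) {
      return static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
    }
    inline int32_t SmiUntag(int32_t smi) { return smi >> 1; }      // arithmetic shift, like sar
    inline bool IsSmi(int32_t word) { return (word & 1) == 0; }    // kSmiTagMask == 1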
2508 __ bind(&loaded); 2521 __ bind(&loaded);
2509 // ST[0] == double value 2522 } else { // UNTAGGED.
2523 if (CpuFeatures::IsSupported(SSE4_1)) {
2524 CpuFeatures::Scope sse4_scope(SSE4_1);
2525 __ pextrd(Operand(edx), xmm1, 0x1); // copy xmm1[63..32] to edx.
2526 } else {
2527 __ pshufd(xmm0, xmm1, 0x1);
2528 __ movd(Operand(edx), xmm0);
2529 }
2530 __ movd(Operand(ebx), xmm1);
2531 }
2532
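The SSE4.1/SSE2 split above only extracts the low and high 32-bit words of the double held in xmm1. For orientation, here is a hedged C++ intrinsics sketch of the same extraction; the function name and the intrinsics-based formulation are illustrative, not code from this patch.

    #include <emmintrin.h>   // SSE2: movd / pshufd equivalents.
    #if defined(__SSE4_1__)
    #include <smmintrin.h>   // SSE4.1: pextrd equivalent.
    #endif
    #include <cstdint>

    // Split a double held in an XMM register into its low and high 32-bit words,
    // mirroring the stub's two paths (pextrd when SSE4.1 is available, otherwise
    // pshufd followed by movd).
    static inline void ExtractDoubleWords(__m128d value, uint32_t* lo, uint32_t* hi) {
      __m128i bits = _mm_castpd_si128(value);
      *lo = static_cast<uint32_t>(_mm_cvtsi128_si32(bits));          // movd
    #if defined(__SSE4_1__)
      *hi = static_cast<uint32_t>(_mm_extract_epi32(bits, 1));       // pextrd, lane 1
    #else
      __m128i shuffled = _mm_shuffle_epi32(bits, 0x1);               // pshufd, select word 1
      *hi = static_cast<uint32_t>(_mm_cvtsi128_si32(shuffled));      // movd
    #endif
    }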
2533 // ST[0] or xmm1 == double value
2510 // ebx = low 32 bits of double value 2534 // ebx = low 32 bits of double value
2511 // edx = high 32 bits of double value 2535 // edx = high 32 bits of double value
2512 // Compute hash (the shifts are arithmetic): 2536 // Compute hash (the shifts are arithmetic):
2513 // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1); 2537 // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
2514 __ mov(ecx, ebx); 2538 __ mov(ecx, ebx);
2515 __ xor_(ecx, Operand(edx)); 2539 __ xor_(ecx, Operand(edx));
2516 __ mov(eax, ecx); 2540 __ mov(eax, ecx);
2517 __ sar(eax, 16); 2541 __ sar(eax, 16);
2518 __ xor_(ecx, Operand(eax)); 2542 __ xor_(ecx, Operand(eax));
2519 __ mov(eax, ecx); 2543 __ mov(eax, ecx);
2520 __ sar(eax, 8); 2544 __ sar(eax, 8);
2521 __ xor_(ecx, Operand(eax)); 2545 __ xor_(ecx, Operand(eax));
2522 ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize)); 2546 ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize));
2523 __ and_(Operand(ecx), Immediate(TranscendentalCache::kCacheSize - 1)); 2547 __ and_(Operand(ecx), Immediate(TranscendentalCache::kCacheSize - 1));
2524 2548
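The hash spelled out in the comment above can be checked in isolation. A standalone C++ sketch follows, assuming 32-bit words and a power-of-two cache size; the function name is illustrative.

    #include <cstdint>

    // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h &= cacheSize - 1,
    // with arithmetic right shifts as in the stub's sar instructions.
    uint32_t TranscendentalHash(uint32_t low, uint32_t high, uint32_t cache_size) {
      int32_t h = static_cast<int32_t>(low ^ high);
      h ^= h >> 16;  // arithmetic shift on common compilers
      h ^= h >> 8;
      return static_cast<uint32_t>(h) & (cache_size - 1);  // cache_size must be a power of two
    }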
2525 // ST[0] == double value. 2549 // ST[0] or xmm1 == double value.
2526 // ebx = low 32 bits of double value. 2550 // ebx = low 32 bits of double value.
2527 // edx = high 32 bits of double value. 2551 // edx = high 32 bits of double value.
2528 // ecx = TranscendentalCache::hash(double value). 2552 // ecx = TranscendentalCache::hash(double value).
2529 __ mov(eax, 2553 __ mov(eax,
2530 Immediate(ExternalReference::transcendental_cache_array_address())); 2554 Immediate(ExternalReference::transcendental_cache_array_address()));
2531 // Eax points to cache array. 2555 // Eax points to cache array.
2532 __ mov(eax, Operand(eax, type_ * sizeof(TranscendentalCache::caches_[0]))); 2556 __ mov(eax, Operand(eax, type_ * sizeof(TranscendentalCache::caches_[0])));
2533 // Eax points to the cache for the type type_. 2557 // Eax points to the cache for the type type_.
2534 // If NULL, the cache hasn't been initialized yet, so go through runtime. 2558 // If NULL, the cache hasn't been initialized yet, so go through runtime.
2535 __ test(eax, Operand(eax)); 2559 __ test(eax, Operand(eax));
(...skipping 16 matching lines...)
2552 __ lea(ecx, Operand(ecx, ecx, times_2, 0)); 2576 __ lea(ecx, Operand(ecx, ecx, times_2, 0));
2553 __ lea(ecx, Operand(eax, ecx, times_4, 0)); 2577 __ lea(ecx, Operand(eax, ecx, times_4, 0));
2554 // Check if cache matches: Double value is stored in uint32_t[2] array. 2578 // Check if cache matches: Double value is stored in uint32_t[2] array.
2555 NearLabel cache_miss; 2579 NearLabel cache_miss;
2556 __ cmp(ebx, Operand(ecx, 0)); 2580 __ cmp(ebx, Operand(ecx, 0));
2557 __ j(not_equal, &cache_miss); 2581 __ j(not_equal, &cache_miss);
2558 __ cmp(edx, Operand(ecx, kIntSize)); 2582 __ cmp(edx, Operand(ecx, kIntSize));
2559 __ j(not_equal, &cache_miss); 2583 __ j(not_equal, &cache_miss);
2560 // Cache hit! 2584 // Cache hit!
2561 __ mov(eax, Operand(ecx, 2 * kIntSize)); 2585 __ mov(eax, Operand(ecx, 2 * kIntSize));
2562 __ fstp(0); 2586 if (tagged) {
2563 __ ret(kPointerSize); 2587 __ fstp(0);
2588 __ ret(kPointerSize);
2589 } else { // UNTAGGED.
2590 __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
2591 __ Ret();
2592 }
2564 2593
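The two lea instructions above scale the hash by 12 bytes, so each cache element occupies three 32-bit slots: the key's low and high words plus a pointer to the cached result. A hedged C++ sketch of that lookup, with hypothetical type and field names (the 12-byte layout only holds on ia32, where pointers are 4 bytes):

    #include <cstdint>

    // Hypothetical element layout matching the 12-byte stride computed above.
    struct CacheElement {
      uint32_t key_low;    // low 32 bits of the input double
      uint32_t key_high;   // high 32 bits of the input double
      void* result;        // tagged HeapNumber holding the cached answer
    };

    // Returns the cached result on a hit, or nullptr on a cache miss.
    void* Lookup(CacheElement* cache, uint32_t hash, uint32_t low, uint32_t high) {
      CacheElement* element = &cache[hash];
      if (element->key_low == low && element->key_high == high) {
        return element->result;   // cache hit
      }
      return nullptr;             // cache miss: compute and update the entry
    }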
2565 __ bind(&cache_miss); 2594 __ bind(&cache_miss);
2566 // Update cache with new value. 2595 // Update cache with new value.
2567 // We are short on registers, so use no_reg as scratch. 2596 // We are short on registers, so use no_reg as scratch.
2568 // This gives slightly larger code. 2597 // This gives slightly larger code.
2569 __ AllocateHeapNumber(eax, edi, no_reg, &runtime_call_clear_stack); 2598 if (tagged) {
2599 __ AllocateHeapNumber(eax, edi, no_reg, &runtime_call_clear_stack);
2600 } else { // UNTAGGED.
2601 __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
2602 __ sub(Operand(esp), Immediate(kDoubleSize));
2603 __ movdbl(Operand(esp, 0), xmm1);
2604 __ fld_d(Operand(esp, 0));
2605 __ add(Operand(esp), Immediate(kDoubleSize));
2606 }
2570 GenerateOperation(masm); 2607 GenerateOperation(masm);
2571 __ mov(Operand(ecx, 0), ebx); 2608 __ mov(Operand(ecx, 0), ebx);
2572 __ mov(Operand(ecx, kIntSize), edx); 2609 __ mov(Operand(ecx, kIntSize), edx);
2573 __ mov(Operand(ecx, 2 * kIntSize), eax); 2610 __ mov(Operand(ecx, 2 * kIntSize), eax);
2574 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); 2611 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
2575 __ ret(kPointerSize); 2612 if (tagged) {
2613 __ ret(kPointerSize);
2614 } else { // UNTAGGED.
2615 __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
2616 __ Ret();
2576 2617
2577 __ bind(&runtime_call_clear_stack); 2618 // Skip cache and return answer directly, only in untagged case.
2578 __ fstp(0); 2619 __ bind(&skip_cache);
2579 __ bind(&runtime_call); 2620 __ sub(Operand(esp), Immediate(kDoubleSize));
2580 __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1); 2621 __ movdbl(Operand(esp, 0), xmm1);
2622 __ fld_d(Operand(esp, 0));
2623 GenerateOperation(masm);
2624 __ fstp_d(Operand(esp, 0));
2625 __ movdbl(xmm1, Operand(esp, 0));
2626 __ add(Operand(esp), Immediate(kDoubleSize));
2627 // We return the value in xmm1 without adding it to the cache, but
2628 // we cause a scavenging GC so that future allocations will succeed.
2629 __ EnterInternalFrame();
2630 // Allocate an unused object bigger than a HeapNumber.
2631 __ push(Immediate(Smi::FromInt(2 * kDoubleSize)));
2632 __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
2633 __ LeaveInternalFrame();
2634 __ Ret();
2635 }
2636
2637 // Call runtime, doing whatever allocation and cleanup is necessary.
2638 if (tagged) {
2639 __ bind(&runtime_call_clear_stack);
2640 __ fstp(0);
2641 __ bind(&runtime_call);
2642 __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1);
2643 } else { // UNTAGGED.
2644 __ bind(&runtime_call_clear_stack);
2645 __ bind(&runtime_call);
2646 __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
2647 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm1);
2648 __ EnterInternalFrame();
2649 __ push(eax);
2650 __ CallRuntime(RuntimeFunction(), 1);
2651 __ LeaveInternalFrame();
2652 __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
2653 __ Ret();
2654 }
2581 } 2655 }
2582 2656
2583 2657
2584 Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() { 2658 Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
2585 switch (type_) { 2659 switch (type_) {
2586 // Add more cases when necessary.
2587 case TranscendentalCache::SIN: return Runtime::kMath_sin; 2660 case TranscendentalCache::SIN: return Runtime::kMath_sin;
2588 case TranscendentalCache::COS: return Runtime::kMath_cos; 2661 case TranscendentalCache::COS: return Runtime::kMath_cos;
2589 case TranscendentalCache::LOG: return Runtime::kMath_log; 2662 case TranscendentalCache::LOG: return Runtime::kMath_log;
2590 default: 2663 default:
2591 UNIMPLEMENTED(); 2664 UNIMPLEMENTED();
2592 return Runtime::kAbort; 2665 return Runtime::kAbort;
2593 } 2666 }
2594 } 2667 }
2595 2668
2596 2669
2597 void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) { 2670 void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
2598 // Only free register is edi. 2671 // Only free register is edi.
2599 // Input value is on FP stack, and also in ebx/edx. Address of result 2672 // Input value is on FP stack, and also in ebx/edx.
2600 // (a newly allocated HeapNumber) is in eax. 2673 // Input value is possibly in xmm1.
2601 NearLabel done; 2674 // Address of result (a newly allocated HeapNumber) may be in eax.
2602 if (type_ == TranscendentalCache::SIN || type_ == TranscendentalCache::COS) { 2675 if (type_ == TranscendentalCache::SIN || type_ == TranscendentalCache::COS) {
2603 // Both fsin and fcos require arguments in the range +/-2^63 and 2676 // Both fsin and fcos require arguments in the range +/-2^63 and
2604 // return NaN for infinities and NaN. They can share all code except 2677 // return NaN for infinities and NaN. They can share all code except
2605 // the actual fsin/fcos operation. 2678 // the actual fsin/fcos operation.
2606 NearLabel in_range; 2679 NearLabel in_range, done;
2607 // If argument is outside the range -2^63..2^63, fsin/cos doesn't 2680 // If argument is outside the range -2^63..2^63, fsin/cos doesn't
2608 // work. We must reduce it to the appropriate range. 2681 // work. We must reduce it to the appropriate range.
2609 __ mov(edi, edx); 2682 __ mov(edi, edx);
2610 __ and_(Operand(edi), Immediate(0x7ff00000)); // Exponent only. 2683 __ and_(Operand(edi), Immediate(0x7ff00000)); // Exponent only.
2611 int supported_exponent_limit = 2684 int supported_exponent_limit =
2612 (63 + HeapNumber::kExponentBias) << HeapNumber::kExponentShift; 2685 (63 + HeapNumber::kExponentBias) << HeapNumber::kExponentShift;
2613 __ cmp(Operand(edi), Immediate(supported_exponent_limit)); 2686 __ cmp(Operand(edi), Immediate(supported_exponent_limit));
2614 __ j(below, &in_range, taken); 2687 __ j(below, &in_range, taken);
2615 // Check for infinity and NaN. Both return NaN for sin. 2688 // Check for infinity and NaN. Both return NaN for sin.
2616 __ cmp(Operand(edi), Immediate(0x7ff00000)); 2689 __ cmp(Operand(edi), Immediate(0x7ff00000));
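The limit used above, (63 + HeapNumber::kExponentBias) << HeapNumber::kExponentShift, evaluates to 0x43E00000 under the standard IEEE 754 double layout (exponent bias 1023, exponent field in bits 20..30 of the high word), so any value with exponent >= 63, i.e. |x| >= 2^63, falls through to the range-reduction path, and an all-ones exponent field flags infinity or NaN. A small sketch of the same classification; the function name and hard-coded constants are assumptions based on that layout.

    #include <cstdint>

    // Classify a double from its high 32 bits, as the range check above does.
    bool FsinFcosArgumentInRange(uint32_t high_word) {
      const uint32_t kExponentMask = 0x7ff00000;                  // exponent bits of the high word
      const uint32_t kSupportedExponentLimit = (63 + 1023) << 20; // == 0x43E00000
      uint32_t exponent_bits = high_word & kExponentMask;
      // Exponent below 63 means |value| < 2^63, the range fsin/fcos accept;
      // exponent_bits == kExponentMask would mean infinity or NaN.
      return exponent_bits < kSupportedExponentLimit;
    }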
(...skipping 2281 matching lines...)
4898 __ pop(edi); 4971 __ pop(edi);
4899 __ add(Operand(esp), Immediate(2 * kPointerSize)); // remove markers 4972 __ add(Operand(esp), Immediate(2 * kPointerSize)); // remove markers
4900 4973
4901 // Restore frame pointer and return. 4974 // Restore frame pointer and return.
4902 __ pop(ebp); 4975 __ pop(ebp);
4903 __ ret(0); 4976 __ ret(0);
4904 } 4977 }
4905 4978
4906 4979
4907 void InstanceofStub::Generate(MacroAssembler* masm) { 4980 void InstanceofStub::Generate(MacroAssembler* masm) {
4908 // Get the object - go slow case if it's a smi. 4981 // Fixed register usage throughout the stub.
4909 Label slow; 4982 Register object = eax; // Object (lhs).
4910 __ mov(eax, Operand(esp, 2 * kPointerSize)); // 2 ~ return address, function 4983 Register map = ebx; // Map of the object.
4911 __ test(eax, Immediate(kSmiTagMask)); 4984 Register function = edx; // Function (rhs).
4912 __ j(zero, &slow, not_taken); 4985 Register prototype = edi; // Prototype of the function.
4986 Register scratch = ecx;
4987
4988 // Get the object and function - they are always both needed.
4989 Label slow, not_js_object;
4990 if (!args_in_registers()) {
4991 __ mov(object, Operand(esp, 2 * kPointerSize));
4992 __ mov(function, Operand(esp, 1 * kPointerSize));
4993 }
4913 4994
4914 // Check that the left hand side is a JS object. 4995 // Check that the left hand side is a JS object.
4915 __ IsObjectJSObjectType(eax, eax, edx, &slow); 4996 __ test(object, Immediate(kSmiTagMask));
4916 4997 __ j(zero, &not_js_object, not_taken);
4917 // Get the prototype of the function. 4998 __ IsObjectJSObjectType(object, map, scratch, &not_js_object);
4918 __ mov(edx, Operand(esp, 1 * kPointerSize)); // 1 ~ return address
4919 // edx is function, eax is map.
4920 4999
4921 // Look up the function and the map in the instanceof cache. 5000 // Look up the function and the map in the instanceof cache.
4922 NearLabel miss; 5001 NearLabel miss;
4923 ExternalReference roots_address = ExternalReference::roots_address(); 5002 ExternalReference roots_address = ExternalReference::roots_address();
4924 __ mov(ecx, Immediate(Heap::kInstanceofCacheFunctionRootIndex)); 5003 __ mov(scratch, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
4925 __ cmp(edx, Operand::StaticArray(ecx, times_pointer_size, roots_address)); 5004 __ cmp(function,
5005 Operand::StaticArray(scratch, times_pointer_size, roots_address));
4926 __ j(not_equal, &miss); 5006 __ j(not_equal, &miss);
4927 __ mov(ecx, Immediate(Heap::kInstanceofCacheMapRootIndex)); 5007 __ mov(scratch, Immediate(Heap::kInstanceofCacheMapRootIndex));
4928 __ cmp(eax, Operand::StaticArray(ecx, times_pointer_size, roots_address)); 5008 __ cmp(map, Operand::StaticArray(scratch, times_pointer_size, roots_address));
4929 __ j(not_equal, &miss); 5009 __ j(not_equal, &miss);
4930 __ mov(ecx, Immediate(Heap::kInstanceofCacheAnswerRootIndex)); 5010 __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
4931 __ mov(eax, Operand::StaticArray(ecx, times_pointer_size, roots_address)); 5011 __ mov(eax, Operand::StaticArray(scratch, times_pointer_size, roots_address));
4932 __ ret(2 * kPointerSize); 5012 __ IncrementCounter(&Counters::instance_of_cache, 1);
5013 __ ret((args_in_registers() ? 0 : 2) * kPointerSize);
4933 5014
4934 __ bind(&miss); 5015 __ bind(&miss);
4935 __ TryGetFunctionPrototype(edx, ebx, ecx, &slow); 5016 // Get the prototype of the function.
5017 __ TryGetFunctionPrototype(function, prototype, scratch, &slow);
4936 5018
4937 // Check that the function prototype is a JS object. 5019 // Check that the function prototype is a JS object.
4938 __ test(ebx, Immediate(kSmiTagMask)); 5020 __ test(prototype, Immediate(kSmiTagMask));
4939 __ j(zero, &slow, not_taken); 5021 __ j(zero, &slow, not_taken);
4940 __ IsObjectJSObjectType(ebx, ecx, ecx, &slow); 5022 __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
4941 5023
4942 // Register mapping: 5024 // Update the global instanceof cache with the current map and function. The
4943 // eax is object map. 5025 // cached answer will be set when it is known.
4944 // edx is function. 5026 __ mov(scratch, Immediate(Heap::kInstanceofCacheMapRootIndex));
4945 // ebx is function prototype. 5027 __ mov(Operand::StaticArray(scratch, times_pointer_size, roots_address), map);
4946 __ mov(ecx, Immediate(Heap::kInstanceofCacheMapRootIndex)); 5028 __ mov(scratch, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
4947 __ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), eax); 5029 __ mov(Operand::StaticArray(scratch, times_pointer_size, roots_address),
4948 __ mov(ecx, Immediate(Heap::kInstanceofCacheFunctionRootIndex)); 5030 function);
4949 __ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), edx);
4950 5031
4951 __ mov(ecx, FieldOperand(eax, Map::kPrototypeOffset)); 5032 // Loop through the prototype chain of the object looking for the function
4952 5033 // prototype.
4953 // Loop through the prototype chain looking for the function prototype. 5034 __ mov(scratch, FieldOperand(map, Map::kPrototypeOffset));
4954 NearLabel loop, is_instance, is_not_instance; 5035 NearLabel loop, is_instance, is_not_instance;
4955 __ bind(&loop); 5036 __ bind(&loop);
4956 __ cmp(ecx, Operand(ebx)); 5037 __ cmp(scratch, Operand(prototype));
4957 __ j(equal, &is_instance); 5038 __ j(equal, &is_instance);
4958 __ cmp(Operand(ecx), Immediate(Factory::null_value())); 5039 __ cmp(Operand(scratch), Immediate(Factory::null_value()));
4959 __ j(equal, &is_not_instance); 5040 __ j(equal, &is_not_instance);
4960 __ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset)); 5041 __ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
4961 __ mov(ecx, FieldOperand(ecx, Map::kPrototypeOffset)); 5042 __ mov(scratch, FieldOperand(scratch, Map::kPrototypeOffset));
4962 __ jmp(&loop); 5043 __ jmp(&loop);
4963 5044
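The loop above is the usual instanceof walk: follow the object's prototype chain through each map and compare every link against the function's prototype, stopping at null. A minimal C++ sketch of that walk, with hypothetical types standing in for the field loads the stub performs:

    // Hypothetical object model; only the shape of the walk matters here.
    struct Map;
    struct HeapObject { Map* map; };
    struct Map { HeapObject* prototype; };

    // Mirrors the loop: start at the object's map's prototype and follow
    // map->prototype links until the function's prototype or null is reached.
    bool IsInstanceOf(Map* object_map, HeapObject* function_prototype,
                      HeapObject* null_value) {
      for (HeapObject* current = object_map->prototype; ;
           current = current->map->prototype) {
        if (current == function_prototype) return true;   // is_instance
        if (current == null_value) return false;          // is_not_instance
      }
    }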
4964 __ bind(&is_instance); 5045 __ bind(&is_instance);
5046 __ IncrementCounter(&Counters::instance_of_stub_true, 1);
4965 __ Set(eax, Immediate(0)); 5047 __ Set(eax, Immediate(0));
4966 __ mov(ecx, Immediate(Heap::kInstanceofCacheAnswerRootIndex)); 5048 __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
4967 __ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), eax); 5049 __ mov(Operand::StaticArray(scratch, times_pointer_size, roots_address), eax);
4968 __ ret(2 * kPointerSize); 5050 __ ret((args_in_registers() ? 0 : 2) * kPointerSize);
4969 5051
4970 __ bind(&is_not_instance); 5052 __ bind(&is_not_instance);
5053 __ IncrementCounter(&Counters::instance_of_stub_false, 1);
4971 __ Set(eax, Immediate(Smi::FromInt(1))); 5054 __ Set(eax, Immediate(Smi::FromInt(1)));
4972 __ mov(ecx, Immediate(Heap::kInstanceofCacheAnswerRootIndex)); 5055 __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
4973 __ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), eax); 5056 __ mov(Operand::StaticArray(scratch, times_pointer_size, roots_address), eax);
4974 __ ret(2 * kPointerSize); 5057 __ ret((args_in_registers() ? 0 : 2) * kPointerSize);
5058
5059 Label object_not_null, object_not_null_or_smi;
5060 __ bind(&not_js_object);
5061 // Before the null, smi and string value checks, check that the rhs is a function,
5062 // as an exception needs to be thrown for a non-function rhs.
5063 __ test(function, Immediate(kSmiTagMask));
5064 __ j(zero, &slow, not_taken);
5065 __ CmpObjectType(function, JS_FUNCTION_TYPE, scratch);
5066 __ j(not_equal, &slow, not_taken);
5067
5068 // Null is not an instance of anything.
5069 __ cmp(object, Factory::null_value());
5070 __ j(not_equal, &object_not_null);
5071 __ IncrementCounter(&Counters::instance_of_stub_false_null, 1);
5072 __ Set(eax, Immediate(Smi::FromInt(1)));
5073 __ ret((args_in_registers() ? 0 : 2) * kPointerSize);
5074
5075 __ bind(&object_not_null);
5076 // Smi values are not instances of anything.
5077 __ test(object, Immediate(kSmiTagMask));
5078 __ j(not_zero, &object_not_null_or_smi, not_taken);
5079 __ Set(eax, Immediate(Smi::FromInt(1)));
5080 __ ret((args_in_registers() ? 0 : 2) * kPointerSize);
5081
5082 __ bind(&object_not_null_or_smi);
5084 // String values are not instances of anything.
5084 Condition is_string = masm->IsObjectStringType(object, scratch, scratch);
5085 __ j(NegateCondition(is_string), &slow);
5086 __ IncrementCounter(&Counters::instance_of_stub_false_string, 1);
5087 __ Set(eax, Immediate(Smi::FromInt(1)));
5088 __ ret((args_in_registers() ? 0 : 2) * kPointerSize);
4975 5089
4976 // Slow-case: Go through the JavaScript implementation. 5090 // Slow-case: Go through the JavaScript implementation.
4977 __ bind(&slow); 5091 __ bind(&slow);
5092 if (args_in_registers()) {
5093 // Push arguments below return address.
5094 __ pop(scratch);
5095 __ push(object);
5096 __ push(function);
5097 __ push(scratch);
5098 }
5099 __ IncrementCounter(&Counters::instance_of_slow, 1);
4978 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION); 5100 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
4979 } 5101 }
4980 5102
4981 5103
4982 int CompareStub::MinorKey() { 5104 int CompareStub::MinorKey() {
4983 // Encode the three parameters in a unique 16 bit value. To avoid duplicate 5105 // Encode the three parameters in a unique 16 bit value. To avoid duplicate
4984 // stubs the never NaN NaN condition is only taken into account if the 5106 // stubs the never NaN NaN condition is only taken into account if the
4985 // condition is equals. 5107 // condition is equals.
4986 ASSERT(static_cast<unsigned>(cc_) < (1 << 12)); 5108 ASSERT(static_cast<unsigned>(cc_) < (1 << 12));
4987 ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg)); 5109 ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
(...skipping 1299 matching lines...)
6287 // Do a tail call to the rewritten stub. 6409 // Do a tail call to the rewritten stub.
6288 __ jmp(Operand(edi)); 6410 __ jmp(Operand(edi));
6289 } 6411 }
6290 6412
6291 6413
6292 #undef __ 6414 #undef __
6293 6415
6294 } } // namespace v8::internal 6416 } } // namespace v8::internal
6295 6417
6296 #endif // V8_TARGET_ARCH_IA32 6418 #endif // V8_TARGET_ARCH_IA32