Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(44)

Side by Side Diff: src/ia32/code-stubs-ia32.cc

Issue 6580038: [Isolates] Merge from bleeding_edge, revisions 5934-6100. (Closed) Base URL: http://v8.googlecode.com/svn/branches/experimental/isolates/
Patch Set: '' Created 9 years, 10 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « src/ia32/code-stubs-ia32.h ('k') | src/ia32/codegen-ia32.cc » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2010 the V8 project authors. All rights reserved. 1 // Copyright 2010 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 1351 matching lines...) Expand 10 before | Expand all | Expand 10 after
1362 Label* slow, 1362 Label* slow,
1363 SmiCodeGenerateHeapNumberResults allow_heapnumber_results) { 1363 SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
1364 // 1. Move arguments into edx, eax except for DIV and MOD, which need the 1364 // 1. Move arguments into edx, eax except for DIV and MOD, which need the
1365 // dividend in eax and edx free for the division. Use eax, ebx for those. 1365 // dividend in eax and edx free for the division. Use eax, ebx for those.
1366 Comment load_comment(masm, "-- Load arguments"); 1366 Comment load_comment(masm, "-- Load arguments");
1367 Register left = edx; 1367 Register left = edx;
1368 Register right = eax; 1368 Register right = eax;
1369 if (op_ == Token::DIV || op_ == Token::MOD) { 1369 if (op_ == Token::DIV || op_ == Token::MOD) {
1370 left = eax; 1370 left = eax;
1371 right = ebx; 1371 right = ebx;
1372 __ mov(ebx, eax); 1372 __ mov(ebx, eax);
1373 __ mov(eax, edx); 1373 __ mov(eax, edx);
1374 } 1374 }
1375 1375
1376 1376
1377 // 2. Prepare the smi check of both operands by oring them together. 1377 // 2. Prepare the smi check of both operands by oring them together.
1378 Comment smi_check_comment(masm, "-- Smi check arguments"); 1378 Comment smi_check_comment(masm, "-- Smi check arguments");
1379 Label not_smis; 1379 Label not_smis;
1380 Register combined = ecx; 1380 Register combined = ecx;
1381 ASSERT(!left.is(combined) && !right.is(combined)); 1381 ASSERT(!left.is(combined) && !right.is(combined));
1382 switch (op_) { 1382 switch (op_) {
1383 case Token::BIT_OR: 1383 case Token::BIT_OR:
(...skipping 1084 matching lines...) Expand 10 before | Expand all | Expand 10 after
2468 2468
2469 void TypeRecordingBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) { 2469 void TypeRecordingBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
2470 __ pop(ecx); 2470 __ pop(ecx);
2471 __ push(edx); 2471 __ push(edx);
2472 __ push(eax); 2472 __ push(eax);
2473 __ push(ecx); 2473 __ push(ecx);
2474 } 2474 }
2475 2475
2476 2476
2477 void TranscendentalCacheStub::Generate(MacroAssembler* masm) { 2477 void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
2478 // Input on stack: 2478 // TAGGED case:
2479 // esp[4]: argument (should be number). 2479 // Input:
2480 // esp[0]: return address. 2480 // esp[4]: tagged number input argument (should be number).
2481 // Test that eax is a number. 2481 // esp[0]: return address.
2482 // Output:
2483 // eax: tagged double result.
2484 // UNTAGGED case:
2485 // Input:
2486 // esp[0]: return address.
2487 // xmm1: untagged double input argument
2488 // Output:
2489 // xmm1: untagged double result.
2490
2482 Label runtime_call; 2491 Label runtime_call;
2483 Label runtime_call_clear_stack; 2492 Label runtime_call_clear_stack;
2484 NearLabel input_not_smi; 2493 Label skip_cache;
2485 NearLabel loaded; 2494 const bool tagged = (argument_type_ == TAGGED);
2486 __ mov(eax, Operand(esp, kPointerSize)); 2495 if (tagged) {
2487 __ test(eax, Immediate(kSmiTagMask)); 2496 // Test that eax is a number.
2488 __ j(not_zero, &input_not_smi); 2497 NearLabel input_not_smi;
2489 // Input is a smi. Untag and load it onto the FPU stack. 2498 NearLabel loaded;
2490 // Then load the low and high words of the double into ebx, edx. 2499 __ mov(eax, Operand(esp, kPointerSize));
2491 STATIC_ASSERT(kSmiTagSize == 1); 2500 __ test(eax, Immediate(kSmiTagMask));
2492 __ sar(eax, 1); 2501 __ j(not_zero, &input_not_smi);
2493 __ sub(Operand(esp), Immediate(2 * kPointerSize)); 2502 // Input is a smi. Untag and load it onto the FPU stack.
2494 __ mov(Operand(esp, 0), eax); 2503 // Then load the low and high words of the double into ebx, edx.
2495 __ fild_s(Operand(esp, 0)); 2504 STATIC_ASSERT(kSmiTagSize == 1);
2496 __ fst_d(Operand(esp, 0)); 2505 __ sar(eax, 1);
2497 __ pop(edx); 2506 __ sub(Operand(esp), Immediate(2 * kPointerSize));
2498 __ pop(ebx); 2507 __ mov(Operand(esp, 0), eax);
2499 __ jmp(&loaded); 2508 __ fild_s(Operand(esp, 0));
2500 __ bind(&input_not_smi); 2509 __ fst_d(Operand(esp, 0));
2501 // Check if input is a HeapNumber. 2510 __ pop(edx);
2502 __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset)); 2511 __ pop(ebx);
2503 __ cmp(Operand(ebx), Immediate(FACTORY->heap_number_map())); 2512 __ jmp(&loaded);
2504 __ j(not_equal, &runtime_call); 2513 __ bind(&input_not_smi);
2505 // Input is a HeapNumber. Push it on the FPU stack and load its 2514 // Check if input is a HeapNumber.
2506 // low and high words into ebx, edx. 2515 __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
2507 __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset)); 2516 __ cmp(Operand(ebx), Immediate(FACTORY->heap_number_map()));
2508 __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset)); 2517 __ j(not_equal, &runtime_call);
2509 __ mov(ebx, FieldOperand(eax, HeapNumber::kMantissaOffset)); 2518 // Input is a HeapNumber. Push it on the FPU stack and load its
2519 // low and high words into ebx, edx.
2520 __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
2521 __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset));
2522 __ mov(ebx, FieldOperand(eax, HeapNumber::kMantissaOffset));
2510 2523
2511 __ bind(&loaded); 2524 __ bind(&loaded);
2512 // ST[0] == double value 2525 } else { // UNTAGGED.
2526 if (Isolate::Current()->cpu_features()->IsSupported(SSE4_1)) {
2527 CpuFeatures::Scope sse4_scope(SSE4_1);
2528 __ pextrd(Operand(edx), xmm1, 0x1); // copy xmm1[63..32] to edx.
2529 } else {
2530 __ pshufd(xmm0, xmm1, 0x1);
2531 __ movd(Operand(edx), xmm0);
2532 }
2533 __ movd(Operand(ebx), xmm1);
2534 }
2535
2536 // ST[0] or xmm1 == double value
2513 // ebx = low 32 bits of double value 2537 // ebx = low 32 bits of double value
2514 // edx = high 32 bits of double value 2538 // edx = high 32 bits of double value
2515 // Compute hash (the shifts are arithmetic): 2539 // Compute hash (the shifts are arithmetic):
2516 // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1); 2540 // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
2517 __ mov(ecx, ebx); 2541 __ mov(ecx, ebx);
2518 __ xor_(ecx, Operand(edx)); 2542 __ xor_(ecx, Operand(edx));
2519 __ mov(eax, ecx); 2543 __ mov(eax, ecx);
2520 __ sar(eax, 16); 2544 __ sar(eax, 16);
2521 __ xor_(ecx, Operand(eax)); 2545 __ xor_(ecx, Operand(eax));
2522 __ mov(eax, ecx); 2546 __ mov(eax, ecx);
2523 __ sar(eax, 8); 2547 __ sar(eax, 8);
2524 __ xor_(ecx, Operand(eax)); 2548 __ xor_(ecx, Operand(eax));
2525 ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize)); 2549 ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
2526 __ and_(Operand(ecx), 2550 __ and_(Operand(ecx),
2527 Immediate(TranscendentalCache::SubCache::kCacheSize - 1)); 2551 Immediate(TranscendentalCache::SubCache::kCacheSize - 1));
2528 2552
2529 // ST[0] == double value. 2553 // ST[0] or xmm1 == double value.
2530 // ebx = low 32 bits of double value. 2554 // ebx = low 32 bits of double value.
2531 // edx = high 32 bits of double value. 2555 // edx = high 32 bits of double value.
2532 // ecx = TranscendentalCache::hash(double value). 2556 // ecx = TranscendentalCache::hash(double value).
2533 __ mov(eax, 2557 __ mov(eax,
2534 Immediate(ExternalReference::transcendental_cache_array_address())); 2558 Immediate(ExternalReference::transcendental_cache_array_address()));
2535 // Eax points to cache array. 2559 // Eax points to cache array.
2536 __ mov(eax, Operand(eax, type_ * sizeof( 2560 __ mov(eax, Operand(eax, type_ * sizeof(
2537 Isolate::Current()->transcendental_cache()->caches_[0]))); 2561 Isolate::Current()->transcendental_cache()->caches_[0])));
2538 // Eax points to the cache for the type type_. 2562 // Eax points to the cache for the type type_.
2539 // If NULL, the cache hasn't been initialized yet, so go through runtime. 2563 // If NULL, the cache hasn't been initialized yet, so go through runtime.
(...skipping 17 matching lines...) Expand all
2557 __ lea(ecx, Operand(ecx, ecx, times_2, 0)); 2581 __ lea(ecx, Operand(ecx, ecx, times_2, 0));
2558 __ lea(ecx, Operand(eax, ecx, times_4, 0)); 2582 __ lea(ecx, Operand(eax, ecx, times_4, 0));
2559 // Check if cache matches: Double value is stored in uint32_t[2] array. 2583 // Check if cache matches: Double value is stored in uint32_t[2] array.
2560 NearLabel cache_miss; 2584 NearLabel cache_miss;
2561 __ cmp(ebx, Operand(ecx, 0)); 2585 __ cmp(ebx, Operand(ecx, 0));
2562 __ j(not_equal, &cache_miss); 2586 __ j(not_equal, &cache_miss);
2563 __ cmp(edx, Operand(ecx, kIntSize)); 2587 __ cmp(edx, Operand(ecx, kIntSize));
2564 __ j(not_equal, &cache_miss); 2588 __ j(not_equal, &cache_miss);
2565 // Cache hit! 2589 // Cache hit!
2566 __ mov(eax, Operand(ecx, 2 * kIntSize)); 2590 __ mov(eax, Operand(ecx, 2 * kIntSize));
2567 __ fstp(0); 2591 if (tagged) {
2568 __ ret(kPointerSize); 2592 __ fstp(0);
2593 __ ret(kPointerSize);
2594 } else { // UNTAGGED.
2595 __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
2596 __ Ret();
2597 }
2569 2598
2570 __ bind(&cache_miss); 2599 __ bind(&cache_miss);
2571 // Update cache with new value. 2600 // Update cache with new value.
2572 // We are short on registers, so use no_reg as scratch. 2601 // We are short on registers, so use no_reg as scratch.
2573 // This gives slightly larger code. 2602 // This gives slightly larger code.
2574 __ AllocateHeapNumber(eax, edi, no_reg, &runtime_call_clear_stack); 2603 if (tagged) {
2604 __ AllocateHeapNumber(eax, edi, no_reg, &runtime_call_clear_stack);
2605 } else { // UNTAGGED.
2606 __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
2607 __ sub(Operand(esp), Immediate(kDoubleSize));
2608 __ movdbl(Operand(esp, 0), xmm1);
2609 __ fld_d(Operand(esp, 0));
2610 __ add(Operand(esp), Immediate(kDoubleSize));
2611 }
2575 GenerateOperation(masm); 2612 GenerateOperation(masm);
2576 __ mov(Operand(ecx, 0), ebx); 2613 __ mov(Operand(ecx, 0), ebx);
2577 __ mov(Operand(ecx, kIntSize), edx); 2614 __ mov(Operand(ecx, kIntSize), edx);
2578 __ mov(Operand(ecx, 2 * kIntSize), eax); 2615 __ mov(Operand(ecx, 2 * kIntSize), eax);
2579 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); 2616 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
2580 __ ret(kPointerSize); 2617 if (tagged) {
2618 __ ret(kPointerSize);
2619 } else { // UNTAGGED.
2620 __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
2621 __ Ret();
2581 2622
2582 __ bind(&runtime_call_clear_stack); 2623 // Skip cache and return answer directly, only in untagged case.
2583 __ fstp(0); 2624 __ bind(&skip_cache);
2584 __ bind(&runtime_call); 2625 __ sub(Operand(esp), Immediate(kDoubleSize));
2585 __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1); 2626 __ movdbl(Operand(esp, 0), xmm1);
2627 __ fld_d(Operand(esp, 0));
2628 GenerateOperation(masm);
2629 __ fstp_d(Operand(esp, 0));
2630 __ movdbl(xmm1, Operand(esp, 0));
2631 __ add(Operand(esp), Immediate(kDoubleSize));
2632 // We return the value in xmm1 without adding it to the cache, but
2633 // we cause a scavenging GC so that future allocations will succeed.
2634 __ EnterInternalFrame();
2635 // Allocate an unused object bigger than a HeapNumber.
2636 __ push(Immediate(Smi::FromInt(2 * kDoubleSize)));
2637 __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
2638 __ LeaveInternalFrame();
2639 __ Ret();
2640 }
2641
2642 // Call runtime, doing whatever allocation and cleanup is necessary.
2643 if (tagged) {
2644 __ bind(&runtime_call_clear_stack);
2645 __ fstp(0);
2646 __ bind(&runtime_call);
2647 __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1);
2648 } else { // UNTAGGED.
2649 __ bind(&runtime_call_clear_stack);
2650 __ bind(&runtime_call);
2651 __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
2652 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm1);
2653 __ EnterInternalFrame();
2654 __ push(eax);
2655 __ CallRuntime(RuntimeFunction(), 1);
2656 __ LeaveInternalFrame();
2657 __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
2658 __ Ret();
2659 }
2586 } 2660 }
2587 2661
2588 2662
2589 Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() { 2663 Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
2590 switch (type_) { 2664 switch (type_) {
2591 // Add more cases when necessary.
2592 case TranscendentalCache::SIN: return Runtime::kMath_sin; 2665 case TranscendentalCache::SIN: return Runtime::kMath_sin;
2593 case TranscendentalCache::COS: return Runtime::kMath_cos; 2666 case TranscendentalCache::COS: return Runtime::kMath_cos;
2594 case TranscendentalCache::LOG: return Runtime::kMath_log; 2667 case TranscendentalCache::LOG: return Runtime::kMath_log;
2595 default: 2668 default:
2596 UNIMPLEMENTED(); 2669 UNIMPLEMENTED();
2597 return Runtime::kAbort; 2670 return Runtime::kAbort;
2598 } 2671 }
2599 } 2672 }
2600 2673
2601 2674
2602 void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) { 2675 void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
2603 // Only free register is edi. 2676 // Only free register is edi.
2604 // Input value is on FP stack, and also in ebx/edx. Address of result 2677 // Input value is on FP stack, and also in ebx/edx.
2605 // (a newly allocated HeapNumber) is in eax. 2678 // Input value is possibly in xmm1.
2606 NearLabel done; 2679 // Address of result (a newly allocated HeapNumber) may be in eax.
2607 if (type_ == TranscendentalCache::SIN || type_ == TranscendentalCache::COS) { 2680 if (type_ == TranscendentalCache::SIN || type_ == TranscendentalCache::COS) {
2608 // Both fsin and fcos require arguments in the range +/-2^63 and 2681 // Both fsin and fcos require arguments in the range +/-2^63 and
2609 // return NaN for infinities and NaN. They can share all code except 2682 // return NaN for infinities and NaN. They can share all code except
2610 // the actual fsin/fcos operation. 2683 // the actual fsin/fcos operation.
2611 NearLabel in_range; 2684 NearLabel in_range, done;
2612 // If argument is outside the range -2^63..2^63, fsin/cos doesn't 2685 // If argument is outside the range -2^63..2^63, fsin/cos doesn't
2613 // work. We must reduce it to the appropriate range. 2686 // work. We must reduce it to the appropriate range.
2614 __ mov(edi, edx); 2687 __ mov(edi, edx);
2615 __ and_(Operand(edi), Immediate(0x7ff00000)); // Exponent only. 2688 __ and_(Operand(edi), Immediate(0x7ff00000)); // Exponent only.
2616 int supported_exponent_limit = 2689 int supported_exponent_limit =
2617 (63 + HeapNumber::kExponentBias) << HeapNumber::kExponentShift; 2690 (63 + HeapNumber::kExponentBias) << HeapNumber::kExponentShift;
2618 __ cmp(Operand(edi), Immediate(supported_exponent_limit)); 2691 __ cmp(Operand(edi), Immediate(supported_exponent_limit));
2619 __ j(below, &in_range, taken); 2692 __ j(below, &in_range, taken);
2620 // Check for infinity and NaN. Both return NaN for sin. 2693 // Check for infinity and NaN. Both return NaN for sin.
2621 __ cmp(Operand(edi), Immediate(0x7ff00000)); 2694 __ cmp(Operand(edi), Immediate(0x7ff00000));
(...skipping 2285 matching lines...) Expand 10 before | Expand all | Expand 10 after
4907 __ pop(edi); 4980 __ pop(edi);
4908 __ add(Operand(esp), Immediate(2 * kPointerSize)); // remove markers 4981 __ add(Operand(esp), Immediate(2 * kPointerSize)); // remove markers
4909 4982
4910 // Restore frame pointer and return. 4983 // Restore frame pointer and return.
4911 __ pop(ebp); 4984 __ pop(ebp);
4912 __ ret(0); 4985 __ ret(0);
4913 } 4986 }
4914 4987
4915 4988
4916 void InstanceofStub::Generate(MacroAssembler* masm) { 4989 void InstanceofStub::Generate(MacroAssembler* masm) {
4917 // Get the object - go slow case if it's a smi. 4990 // Fixed register usage throughout the stub.
4918 Label slow; 4991 Register object = eax; // Object (lhs).
4919 __ mov(eax, Operand(esp, 2 * kPointerSize)); // 2 ~ return address, function 4992 Register map = ebx; // Map of the object.
4920 __ test(eax, Immediate(kSmiTagMask)); 4993 Register function = edx; // Function (rhs).
4921 __ j(zero, &slow, not_taken); 4994 Register prototype = edi; // Prototype of the function.
4995 Register scratch = ecx;
4996
4997 // Get the object and function - they are always both needed.
4998 Label slow, not_js_object;
4999 if (!args_in_registers()) {
5000 __ mov(object, Operand(esp, 2 * kPointerSize));
5001 __ mov(function, Operand(esp, 1 * kPointerSize));
5002 }
4922 5003
4923 // Check that the left hand is a JS object. 5004 // Check that the left hand is a JS object.
4924 __ IsObjectJSObjectType(eax, eax, edx, &slow); 5005 __ test(object, Immediate(kSmiTagMask));
4925 5006 __ j(zero, &not_js_object, not_taken);
4926 // Get the prototype of the function. 5007 __ IsObjectJSObjectType(object, map, scratch, &not_js_object);
4927 __ mov(edx, Operand(esp, 1 * kPointerSize)); // 1 ~ return address
4928 // edx is function, eax is map.
4929 5008
4930 // Look up the function and the map in the instanceof cache. 5009 // Look up the function and the map in the instanceof cache.
4931 NearLabel miss; 5010 NearLabel miss;
4932 ExternalReference roots_address = ExternalReference::roots_address(); 5011 ExternalReference roots_address = ExternalReference::roots_address();
4933 __ mov(ecx, Immediate(Heap::kInstanceofCacheFunctionRootIndex)); 5012 __ mov(scratch, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
4934 __ cmp(edx, Operand::StaticArray(ecx, times_pointer_size, roots_address)); 5013 __ cmp(function,
5014 Operand::StaticArray(scratch, times_pointer_size, roots_address));
4935 __ j(not_equal, &miss); 5015 __ j(not_equal, &miss);
4936 __ mov(ecx, Immediate(Heap::kInstanceofCacheMapRootIndex)); 5016 __ mov(scratch, Immediate(Heap::kInstanceofCacheMapRootIndex));
4937 __ cmp(eax, Operand::StaticArray(ecx, times_pointer_size, roots_address)); 5017 __ cmp(map, Operand::StaticArray(scratch, times_pointer_size, roots_address));
4938 __ j(not_equal, &miss); 5018 __ j(not_equal, &miss);
4939 __ mov(ecx, Immediate(Heap::kInstanceofCacheAnswerRootIndex)); 5019 __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
4940 __ mov(eax, Operand::StaticArray(ecx, times_pointer_size, roots_address)); 5020 __ mov(eax, Operand::StaticArray(scratch, times_pointer_size, roots_address));
4941 __ ret(2 * kPointerSize); 5021 __ IncrementCounter(COUNTERS->instance_of_cache(), 1);
5022 __ ret((args_in_registers() ? 0 : 2) * kPointerSize);
4942 5023
4943 __ bind(&miss); 5024 __ bind(&miss);
4944 __ TryGetFunctionPrototype(edx, ebx, ecx, &slow); 5025 // Get the prototype of the function.
5026 __ TryGetFunctionPrototype(function, prototype, scratch, &slow);
4945 5027
4946 // Check that the function prototype is a JS object. 5028 // Check that the function prototype is a JS object.
4947 __ test(ebx, Immediate(kSmiTagMask)); 5029 __ test(prototype, Immediate(kSmiTagMask));
4948 __ j(zero, &slow, not_taken); 5030 __ j(zero, &slow, not_taken);
4949 __ IsObjectJSObjectType(ebx, ecx, ecx, &slow); 5031 __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
4950 5032
4951 // Register mapping: 5033 // Update the global instanceof cache with the current map and function. The
4952 // eax is object map. 5034 // cached answer will be set when it is known.
4953 // edx is function. 5035 __ mov(scratch, Immediate(Heap::kInstanceofCacheMapRootIndex));
4954 // ebx is function prototype. 5036 __ mov(Operand::StaticArray(scratch, times_pointer_size, roots_address), map);
4955 __ mov(ecx, Immediate(Heap::kInstanceofCacheMapRootIndex)); 5037 __ mov(scratch, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
4956 __ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), eax); 5038 __ mov(Operand::StaticArray(scratch, times_pointer_size, roots_address),
4957 __ mov(ecx, Immediate(Heap::kInstanceofCacheFunctionRootIndex)); 5039 function);
4958 __ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), edx);
4959 5040
4960 __ mov(ecx, FieldOperand(eax, Map::kPrototypeOffset)); 5041 // Loop through the prototype chain of the object looking for the function
4961 5042 // prototype.
4962 // Loop through the prototype chain looking for the function prototype. 5043 __ mov(scratch, FieldOperand(map, Map::kPrototypeOffset));
4963 NearLabel loop, is_instance, is_not_instance; 5044 NearLabel loop, is_instance, is_not_instance;
4964 __ bind(&loop); 5045 __ bind(&loop);
4965 __ cmp(ecx, Operand(ebx)); 5046 __ cmp(scratch, Operand(prototype));
4966 __ j(equal, &is_instance); 5047 __ j(equal, &is_instance);
4967 __ cmp(Operand(ecx), Immediate(FACTORY->null_value())); 5048 __ cmp(Operand(scratch), Immediate(FACTORY->null_value()));
4968 __ j(equal, &is_not_instance); 5049 __ j(equal, &is_not_instance);
4969 __ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset)); 5050 __ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
4970 __ mov(ecx, FieldOperand(ecx, Map::kPrototypeOffset)); 5051 __ mov(scratch, FieldOperand(scratch, Map::kPrototypeOffset));
4971 __ jmp(&loop); 5052 __ jmp(&loop);
4972 5053
4973 __ bind(&is_instance); 5054 __ bind(&is_instance);
5055 __ IncrementCounter(COUNTERS->instance_of_stub_true(), 1);
4974 __ Set(eax, Immediate(0)); 5056 __ Set(eax, Immediate(0));
4975 __ mov(ecx, Immediate(Heap::kInstanceofCacheAnswerRootIndex)); 5057 __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
4976 __ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), eax); 5058 __ mov(Operand::StaticArray(scratch, times_pointer_size, roots_address), eax);
4977 __ ret(2 * kPointerSize); 5059 __ ret((args_in_registers() ? 0 : 2) * kPointerSize);
4978 5060
4979 __ bind(&is_not_instance); 5061 __ bind(&is_not_instance);
5062 __ IncrementCounter(COUNTERS->instance_of_stub_false(), 1);
4980 __ Set(eax, Immediate(Smi::FromInt(1))); 5063 __ Set(eax, Immediate(Smi::FromInt(1)));
4981 __ mov(ecx, Immediate(Heap::kInstanceofCacheAnswerRootIndex)); 5064 __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
4982 __ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), eax); 5065 __ mov(Operand::StaticArray(scratch, times_pointer_size, roots_address), eax);
4983 __ ret(2 * kPointerSize); 5066 __ ret((args_in_registers() ? 0 : 2) * kPointerSize);
5067
5068 Label object_not_null, object_not_null_or_smi;
5069 __ bind(&not_js_object);
5070 // Before null, smi and string value checks, check that the rhs is a function
5071 // as for a non-function rhs an exception needs to be thrown.
5072 __ test(function, Immediate(kSmiTagMask));
5073 __ j(zero, &slow, not_taken);
5074 __ CmpObjectType(function, JS_FUNCTION_TYPE, scratch);
5075 __ j(not_equal, &slow, not_taken);
5076
5077 // Null is not an instance of anything.
5078 __ cmp(object, FACTORY->null_value());
5079 __ j(not_equal, &object_not_null);
5080 __ IncrementCounter(COUNTERS->instance_of_stub_false_null(), 1);
5081 __ Set(eax, Immediate(Smi::FromInt(1)));
5082 __ ret((args_in_registers() ? 0 : 2) * kPointerSize);
5083
5084 __ bind(&object_not_null);
5085 // Smi values are not instances of anything.
5086 __ test(object, Immediate(kSmiTagMask));
5087 __ j(not_zero, &object_not_null_or_smi, not_taken);
5088 __ Set(eax, Immediate(Smi::FromInt(1)));
5089 __ ret((args_in_registers() ? 0 : 2) * kPointerSize);
5090
5091 __ bind(&object_not_null_or_smi);
5093 // String values are not instances of anything.
5093 Condition is_string = masm->IsObjectStringType(object, scratch, scratch);
5094 __ j(NegateCondition(is_string), &slow);
5095 __ IncrementCounter(COUNTERS->instance_of_stub_false_string(), 1);
5096 __ Set(eax, Immediate(Smi::FromInt(1)));
5097 __ ret((args_in_registers() ? 0 : 2) * kPointerSize);
4984 5098
4985 // Slow-case: Go through the JavaScript implementation. 5099 // Slow-case: Go through the JavaScript implementation.
4986 __ bind(&slow); 5100 __ bind(&slow);
5101 if (args_in_registers()) {
5102 // Push arguments below return address.
5103 __ pop(scratch);
5104 __ push(object);
5105 __ push(function);
5106 __ push(scratch);
5107 }
5108 __ IncrementCounter(COUNTERS->instance_of_slow(), 1);
4987 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION); 5109 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
4988 } 5110 }
4989 5111
4990 5112
4991 int CompareStub::MinorKey() { 5113 int CompareStub::MinorKey() {
4992 // Encode the three parameters in a unique 16 bit value. To avoid duplicate 5114 // Encode the three parameters in a unique 16 bit value. To avoid duplicate
4993 // stubs the never NaN NaN condition is only taken into account if the 5115 // stubs the never NaN NaN condition is only taken into account if the
4994 // condition is equals. 5116 // condition is equals.
4995 ASSERT(static_cast<unsigned>(cc_) < (1 << 12)); 5117 ASSERT(static_cast<unsigned>(cc_) < (1 << 12));
4996 ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg)); 5118 ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
(...skipping 1301 matching lines...) Expand 10 before | Expand all | Expand 10 after
6298 // Do a tail call to the rewritten stub. 6420 // Do a tail call to the rewritten stub.
6299 __ jmp(Operand(edi)); 6421 __ jmp(Operand(edi));
6300 } 6422 }
6301 6423
6302 6424
6303 #undef __ 6425 #undef __
6304 6426
6305 } } // namespace v8::internal 6427 } } // namespace v8::internal
6306 6428
6307 #endif // V8_TARGET_ARCH_IA32 6429 #endif // V8_TARGET_ARCH_IA32
OLDNEW
« no previous file with comments | « src/ia32/code-stubs-ia32.h ('k') | src/ia32/codegen-ia32.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698