Chromium Code Reviews

Unified diff: src/mips/stub-cache-mips.cc (lines removed by this patch are prefixed with '-', added lines with '+')

Issue 8139027: Version 3.6.5 (Closed) Base URL: http://v8.googlecode.com/svn/trunk/
Patch Set: '' (created 9 years, 2 months ago)
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
(...skipping 536 matching lines...)
  }
}


// Undoes the effects of ReserveSpaceForFastApiCall.
static void FreeSpaceForFastApiCall(MacroAssembler* masm) {
  __ Drop(kFastApiCallArguments);
}


-static MaybeObject* GenerateFastApiDirectCall(MacroAssembler* masm,
-                                              const CallOptimization& optimization,
-                                              int argc) {
+static MaybeObject* GenerateFastApiDirectCall(
+    MacroAssembler* masm,
+    const CallOptimization& optimization,
+    int argc) {
  // ----------- S t a t e -------------
  //  -- sp[0]              : holder (set by CheckPrototypes)
  //  -- sp[4]              : callee js function
  //  -- sp[8]              : call data
  //  -- sp[12]             : last js argument
  //  -- ...
  //  -- sp[(argc + 3) * 4] : first js argument
  //  -- sp[(argc + 4) * 4] : receiver
  // -----------------------------------
  // Get the function and setup the context.
(...skipping 18 matching lines...)
  // a2 points to call data as expected by Arguments
  // (refer to layout above).
  __ Addu(a2, sp, Operand(2 * kPointerSize));

  Object* callback = optimization.api_call_info()->callback();
  Address api_function_address = v8::ToCData<Address>(callback);
  ApiFunction fun(api_function_address);

  const int kApiStackSpace = 4;

+  FrameScope frame_scope(masm, StackFrame::MANUAL);
  __ EnterExitFrame(false, kApiStackSpace);

  // NOTE: the O32 abi requires a0 to hold a special pointer when returning a
  // struct from the function (which is currently the case). This means we pass
  // the first argument in a1 instead of a0. TryCallApiFunctionAndReturn
  // will handle setting up a0.

  // a1 = v8::Arguments&
  // Arguments is built at sp + 1 (sp is a reserved spot for ra).
  __ Addu(a1, sp, kPointerSize);
(...skipping 11 matching lines...)

  // Emitting a stub call may try to allocate (if the code is not
  // already generated). Do not allow the assembler to perform a
  // garbage collection but instead return the allocation failure
  // object.
  const int kStackUnwindSpace = argc + kFastApiCallArguments + 1;
  ExternalReference ref =
      ExternalReference(&fun,
                        ExternalReference::DIRECT_API_CALL,
                        masm->isolate());
+  AllowExternalCallThatCantCauseGC scope(masm);
  return masm->TryCallApiFunctionAndReturn(ref, kStackUnwindSpace);
}

class CallInterceptorCompiler BASE_EMBEDDED {
 public:
  CallInterceptorCompiler(StubCompiler* stub_compiler,
                          const ParameterCount& arguments,
                          Register name,
                          Code::ExtraICState extra_ic_state)
      : stub_compiler_(stub_compiler),
(...skipping 158 matching lines...)
                      Register scratch3,
                      String* name,
                      JSObject* interceptor_holder,
                      Label* miss_label) {
    Register holder =
        stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
                                        scratch1, scratch2, scratch3, name,
                                        miss_label);

    // Call a runtime function to load the interceptor property.
-    __ EnterInternalFrame();
+    FrameScope scope(masm, StackFrame::INTERNAL);
    // Save the name_ register across the call.
    __ push(name_);

    PushInterceptorArguments(masm,
                             receiver,
                             holder,
                             name_,
                             interceptor_holder);

    __ CallExternalReference(
        ExternalReference(
            IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
            masm->isolate()),
        5);

    // Restore the name_ register.
    __ pop(name_);
-    __ LeaveInternalFrame();
+
+    // Leave the internal frame.
  }

  void LoadWithInterceptor(MacroAssembler* masm,
                           Register receiver,
                           Register holder,
                           JSObject* holder_obj,
                           Register scratch,
                           Label* interceptor_succeeded) {
-    __ EnterInternalFrame();
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);

    __ Push(holder, name_);

    CompileCallLoadPropertyWithInterceptor(masm,
                                           receiver,
                                           holder,
                                           name_,
                                           holder_obj);

    __ pop(name_);  // Restore the name.
    __ pop(receiver);  // Restore the holder.
-    __ LeaveInternalFrame();
+    }

    // If interceptor returns no-result sentinel, call the constant function.
    __ LoadRoot(scratch, Heap::kNoInterceptorResultSentinelRootIndex);
    __ Branch(interceptor_succeeded, ne, v0, Operand(scratch));
  }

  StubCompiler* stub_compiler_;
  const ParameterCount& arguments_;
  Register name_;
  Code::ExtraICState extra_ic_state_;
(...skipping 392 matching lines...)
  Address getter_address = v8::ToCData<Address>(callback->getter());
  ApiFunction fun(getter_address);

  // NOTE: the O32 abi requires a0 to hold a special pointer when returning a
  // struct from the function (which is currently the case). This means we pass
  // the arguments in a1-a2 instead of a0-a1. TryCallApiFunctionAndReturn
  // will handle setting up a0.

  const int kApiStackSpace = 1;

+  FrameScope frame_scope(masm(), StackFrame::MANUAL);
  __ EnterExitFrame(false, kApiStackSpace);
+
  // Create AccessorInfo instance on the stack above the exit frame with
  // scratch2 (internal::Object **args_) as the data.
  __ sw(a2, MemOperand(sp, kPointerSize));
  // a2 (second argument - see note above) = AccessorInfo&
  __ Addu(a2, sp, kPointerSize);

  // Emitting a stub call may try to allocate (if the code is not
  // already generated). Do not allow the assembler to perform a
  // garbage collection but instead return the allocation failure
  // object.
(...skipping 40 matching lines...)
  // Compile the interceptor call, followed by inline code to load the
  // property from further up the prototype chain if the call fails.
  // Check that the maps haven't changed.
  Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder,
                                        scratch1, scratch2, scratch3,
                                        name, miss);
  ASSERT(holder_reg.is(receiver) || holder_reg.is(scratch1));

  // Save necessary data before invoking an interceptor.
  // Requires a frame to make GC aware of pushed pointers.
-  __ EnterInternalFrame();
+  {
+    FrameScope frame_scope(masm(), StackFrame::INTERNAL);

  if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
    // CALLBACKS case needs a receiver to be passed into C++ callback.
    __ Push(receiver, holder_reg, name_reg);
  } else {
    __ Push(holder_reg, name_reg);
+  }
+
+  // Invoke an interceptor. Note: map checks from receiver to
+  // interceptor's holder has been compiled before (see a caller
+  // of this method).
+  CompileCallLoadPropertyWithInterceptor(masm(),
+                                         receiver,
+                                         holder_reg,
+                                         name_reg,
+                                         interceptor_holder);
+
+  // Check if interceptor provided a value for property. If it's
+  // the case, return immediately.
+  Label interceptor_failed;
+  __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
+  __ Branch(&interceptor_failed, eq, v0, Operand(scratch1));
+  frame_scope.GenerateLeaveFrame();
+  __ Ret();
+
+  __ bind(&interceptor_failed);
+  __ pop(name_reg);
+  __ pop(holder_reg);
+  if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+    __ pop(receiver);
+  }
+
+  // Leave the internal frame.
  }

-  // Invoke an interceptor. Note: map checks from receiver to
-  // interceptor's holder has been compiled before (see a caller
-  // of this method).
-  CompileCallLoadPropertyWithInterceptor(masm(),
-                                         receiver,
-                                         holder_reg,
-                                         name_reg,
-                                         interceptor_holder);
-
-  // Check if interceptor provided a value for property. If it's
-  // the case, return immediately.
-  Label interceptor_failed;
-  __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
-  __ Branch(&interceptor_failed, eq, v0, Operand(scratch1));
-  __ LeaveInternalFrame();
-  __ Ret();
-
-  __ bind(&interceptor_failed);
-  __ pop(name_reg);
-  __ pop(holder_reg);
-  if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
-    __ pop(receiver);
-  }
-
-  __ LeaveInternalFrame();
-
  // Check that the maps from interceptor's holder to lookup's holder
  // haven't changed. And load lookup's holder into |holder| register.
  if (interceptor_holder != lookup->holder()) {
    holder_reg = CheckPrototypes(interceptor_holder,
                                 holder_reg,
                                 lookup->holder(),
                                 scratch1,
                                 scratch2,
                                 scratch3,
                                 name,
(...skipping 233 matching lines...)
    __ sw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));

    // Push the element.
    __ lw(t0, MemOperand(sp, (argc - 1) * kPointerSize));
    // We may need a register containing the address end_elements below,
    // so write back the value in end_elements.
    __ sll(end_elements, v0, kPointerSizeLog2 - kSmiTagSize);
    __ Addu(end_elements, elements, end_elements);
    const int kEndElementsOffset =
        FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize;
-    __ sw(t0, MemOperand(end_elements, kEndElementsOffset));
-    __ Addu(end_elements, end_elements, kPointerSize);
+    __ Addu(end_elements, end_elements, kEndElementsOffset);
+    __ sw(t0, MemOperand(end_elements));

    // Check for a smi.
    __ JumpIfNotSmi(t0, &with_write_barrier);
    __ bind(&exit);
    __ Drop(argc + 1);
    __ Ret();

    __ bind(&with_write_barrier);
    __ InNewSpace(elements, t0, eq, &exit);
    __ RecordWriteHelper(elements, end_elements, t0);
(...skipping 924 matching lines...)
  // Jump to the cached code (tail call).
  Counters* counters = masm()->isolate()->counters();
  __ IncrementCounter(counters->call_global_inline(), 1, a3, t0);
  ASSERT(function->is_compiled());
  Handle<Code> code(function->code());
  ParameterCount expected(function->shared()->formal_parameter_count());
  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
      ? CALL_AS_FUNCTION
      : CALL_AS_METHOD;
  if (V8::UseCrankshaft()) {
-    UNIMPLEMENTED_MIPS();
+    // TODO(kasperl): For now, we always call indirectly through the
+    // code field in the function to allow recompilation to take effect
+    // without changing any of the call sites.
+    __ lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+    __ InvokeCode(a3, expected, arguments(), JUMP_FUNCTION,
+                  NullCallWrapper(), call_kind);
  } else {
    __ InvokeCode(code, expected, arguments(), RelocInfo::CODE_TARGET,
                  JUMP_FUNCTION, call_kind);
  }

  // Handle call cache miss.
  __ bind(&miss);
  __ IncrementCounter(counters->call_global_inline_miss(), 1, a1, a3);
  MaybeObject* maybe_result = GenerateMissBranch();
  if (maybe_result->IsFailure()) return maybe_result;
(...skipping 885 matching lines...)
      return true;

    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
    case EXTERNAL_PIXEL_ELEMENTS:
      return false;

    case EXTERNAL_FLOAT_ELEMENTS:
    case EXTERNAL_DOUBLE_ELEMENTS:
+    case FAST_SMI_ELEMENTS:
    case FAST_ELEMENTS:
    case FAST_DOUBLE_ELEMENTS:
    case DICTIONARY_ELEMENTS:
    case NON_STRICT_ARGUMENTS_ELEMENTS:
      UNREACHABLE();
      return false;
  }
  return false;
}

(...skipping 351 matching lines...)

  // This stub is meant to be tail-jumped to, the receiver must already
  // have been verified by the caller to not be a smi.

  // Check that the key is a smi.
  __ JumpIfNotSmi(key, &miss_force_generic);

  __ lw(a3, FieldMemOperand(receiver, JSObject::kElementsOffset));

  // Check that the index is in range.
-  __ SmiUntag(t0, key);
  __ lw(t1, FieldMemOperand(a3, ExternalArray::kLengthOffset));
  // Unsigned comparison catches both negative and too-large values.
  __ Branch(&miss_force_generic, Ugreater_equal, key, Operand(t1));

  // Handle both smis and HeapNumbers in the fast path. Go to the
  // runtime for all other kinds of values.
  // a3: external array.
-  // t0: key (integer).

  if (elements_kind == EXTERNAL_PIXEL_ELEMENTS) {
    // Double to pixel conversion is only implemented in the runtime for now.
    __ JumpIfNotSmi(value, &slow);
  } else {
    __ JumpIfNotSmi(value, &check_heap_number);
  }
  __ SmiUntag(t1, value);
  __ lw(a3, FieldMemOperand(a3, ExternalArray::kExternalPointerOffset));

  // a3: base pointer of external storage.
-  // t0: key (integer).
  // t1: value (integer).

  switch (elements_kind) {
    case EXTERNAL_PIXEL_ELEMENTS: {
      // Clamp the value to [0..255].
      // v0 is used as a scratch register here.
      Label done;
      __ li(v0, Operand(255));
      // Normal branch: nop in delay slot.
      __ Branch(&done, gt, t1, Operand(v0));
      // Use delay slot in this branch.
      __ Branch(USE_DELAY_SLOT, &done, lt, t1, Operand(zero_reg));
      __ mov(v0, zero_reg);  // In delay slot.
      __ mov(v0, t1);  // Value is in range 0..255.
      __ bind(&done);
      __ mov(t1, v0);
-      __ addu(t8, a3, t0);
+
+      __ srl(t8, key, 1);
+      __ addu(t8, a3, t8);
      __ sb(t1, MemOperand(t8, 0));
      }
      break;
    case EXTERNAL_BYTE_ELEMENTS:
    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
-      __ addu(t8, a3, t0);
+      __ srl(t8, key, 1);
+      __ addu(t8, a3, t8);
      __ sb(t1, MemOperand(t8, 0));
      break;
    case EXTERNAL_SHORT_ELEMENTS:
    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
-      __ sll(t8, t0, 1);
-      __ addu(t8, a3, t8);
+      __ addu(t8, a3, key);
      __ sh(t1, MemOperand(t8, 0));
      break;
    case EXTERNAL_INT_ELEMENTS:
    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
-      __ sll(t8, t0, 2);
+      __ sll(t8, key, 1);
      __ addu(t8, a3, t8);
      __ sw(t1, MemOperand(t8, 0));
      break;
    case EXTERNAL_FLOAT_ELEMENTS:
      // Perform int-to-float conversion and store to memory.
+      __ SmiUntag(t0, key);
      StoreIntAsFloat(masm, a3, t0, t1, t2, t3, t4);
      break;
    case EXTERNAL_DOUBLE_ELEMENTS:
-      __ sll(t8, t0, 3);
+      __ sll(t8, key, 2);
      __ addu(a3, a3, t8);
      // a3: effective address of the double element
      FloatingPointHelper::Destination destination;
      if (CpuFeatures::IsSupported(FPU)) {
        destination = FloatingPointHelper::kFPURegisters;
      } else {
        destination = FloatingPointHelper::kCoreRegisters;
      }
      FloatingPointHelper::ConvertIntToDouble(
          masm, t1, destination,
          f0, t2, t3,  // These are: double_dst, dst1, dst2.
          t0, f2);  // These are: scratch2, single_scratch.
      if (destination == FloatingPointHelper::kFPURegisters) {
        CpuFeatures::Scope scope(FPU);
        __ sdc1(f0, MemOperand(a3, 0));
      } else {
        __ sw(t2, MemOperand(a3, 0));
        __ sw(t3, MemOperand(a3, Register::kSizeInBytes));
      }
      break;
    case FAST_ELEMENTS:
    case FAST_DOUBLE_ELEMENTS:
    case DICTIONARY_ELEMENTS:
    case NON_STRICT_ARGUMENTS_ELEMENTS:
      UNREACHABLE();
      break;
  }

  // Entry registers are intact, a0 holds the value which is the return value.
-  __ mov(v0, value);
+  __ mov(v0, a0);
  __ Ret();

  if (elements_kind != EXTERNAL_PIXEL_ELEMENTS) {
    // a3: external array.
-    // t0: index (integer).
    __ bind(&check_heap_number);
    __ GetObjectType(value, t1, t2);
    __ Branch(&slow, ne, t2, Operand(HEAP_NUMBER_TYPE));

    __ lw(a3, FieldMemOperand(a3, ExternalArray::kExternalPointerOffset));

    // a3: base pointer of external storage.
-    // t0: key (integer).

    // The WebGL specification leaves the behavior of storing NaN and
    // +/-Infinity into integer arrays basically undefined. For more
    // reproducible behavior, convert these to zero.

    if (CpuFeatures::IsSupported(FPU)) {
      CpuFeatures::Scope scope(FPU);

      __ ldc1(f0, FieldMemOperand(a0, HeapNumber::kValueOffset));

      if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
        __ cvt_s_d(f0, f0);
-        __ sll(t8, t0, 2);
+        __ sll(t8, key, 1);
        __ addu(t8, a3, t8);
        __ swc1(f0, MemOperand(t8, 0));
      } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
-        __ sll(t8, t0, 3);
+        __ sll(t8, key, 2);
        __ addu(t8, a3, t8);
        __ sdc1(f0, MemOperand(t8, 0));
      } else {
        __ EmitECMATruncate(t3, f0, f2, t2, t1, t5);

        switch (elements_kind) {
          case EXTERNAL_BYTE_ELEMENTS:
          case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
-            __ addu(t8, a3, t0);
+            __ srl(t8, key, 1);
+            __ addu(t8, a3, t8);
            __ sb(t3, MemOperand(t8, 0));
            break;
          case EXTERNAL_SHORT_ELEMENTS:
          case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
-            __ sll(t8, t0, 1);
-            __ addu(t8, a3, t8);
+            __ addu(t8, a3, key);
            __ sh(t3, MemOperand(t8, 0));
            break;
          case EXTERNAL_INT_ELEMENTS:
          case EXTERNAL_UNSIGNED_INT_ELEMENTS:
-            __ sll(t8, t0, 2);
+            __ sll(t8, key, 1);
            __ addu(t8, a3, t8);
            __ sw(t3, MemOperand(t8, 0));
            break;
          case EXTERNAL_PIXEL_ELEMENTS:
          case EXTERNAL_FLOAT_ELEMENTS:
          case EXTERNAL_DOUBLE_ELEMENTS:
          case FAST_ELEMENTS:
          case FAST_DOUBLE_ELEMENTS:
          case DICTIONARY_ELEMENTS:
          case NON_STRICT_ARGUMENTS_ELEMENTS:
            UNREACHABLE();
            break;
        }
      }

      // Entry registers are intact, a0 holds the value
      // which is the return value.
-      __ mov(v0, value);
+      __ mov(v0, a0);
      __ Ret();
    } else {
      // FPU is not available, do manual conversions.

      __ lw(t3, FieldMemOperand(value, HeapNumber::kExponentOffset));
      __ lw(t4, FieldMemOperand(value, HeapNumber::kMantissaOffset));

      if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
        Label done, nan_or_infinity_or_zero;
        static const int kMantissaInHiWordShift =
(...skipping 34 matching lines...)
        __ And(t7, t3, Operand(HeapNumber::kSignMask));
        __ And(t3, t3, Operand(HeapNumber::kMantissaMask));
        __ sll(t3, t3, kMantissaInHiWordShift);
        __ or_(t7, t7, t3);
        __ srl(t4, t4, kMantissaInLoWordShift);
        __ or_(t7, t7, t4);
        __ sll(t6, t6, kBinary32ExponentShift);
        __ or_(t3, t7, t6);

        __ bind(&done);
-        __ sll(t9, a1, 2);
+        __ sll(t9, key, 1);
        __ addu(t9, a2, t9);
        __ sw(t3, MemOperand(t9, 0));

        // Entry registers are intact, a0 holds the value which is the return
        // value.
-        __ mov(v0, value);
+        __ mov(v0, a0);
        __ Ret();

        __ bind(&nan_or_infinity_or_zero);
        __ And(t7, t3, Operand(HeapNumber::kSignMask));
        __ And(t3, t3, Operand(HeapNumber::kMantissaMask));
        __ or_(t6, t6, t7);
        __ sll(t3, t3, kMantissaInHiWordShift);
        __ or_(t6, t6, t3);
        __ srl(t4, t4, kMantissaInLoWordShift);
        __ or_(t3, t6, t4);
        __ Branch(&done);
      } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
        __ sll(t8, t0, 3);
        __ addu(t8, a3, t8);
        // t8: effective address of destination element.
        __ sw(t4, MemOperand(t8, 0));
        __ sw(t3, MemOperand(t8, Register::kSizeInBytes));
+        __ mov(v0, a0);
        __ Ret();
      } else {
        bool is_signed_type = IsElementTypeSigned(elements_kind);
        int meaningfull_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt;
        int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000;

        Label done, sign;

        // Test for all special exponent values: zeros, subnormal numbers, NaNs
        // and infinities. All these should be converted to 0.
(...skipping 42 matching lines...)
        __ subu(t2, t3, zero_reg);
        __ movz(t3, t2, t5);  // Only if t5 is zero.

        __ bind(&done);

        // Result is in t3.
        // This switch block should be exactly the same as above (FPU mode).
        switch (elements_kind) {
          case EXTERNAL_BYTE_ELEMENTS:
          case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
-            __ addu(t8, a3, t0);
+            __ srl(t8, key, 1);
+            __ addu(t8, a3, t8);
            __ sb(t3, MemOperand(t8, 0));
            break;
          case EXTERNAL_SHORT_ELEMENTS:
          case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
-            __ sll(t8, t0, 1);
-            __ addu(t8, a3, t8);
+            __ addu(t8, a3, key);
            __ sh(t3, MemOperand(t8, 0));
            break;
          case EXTERNAL_INT_ELEMENTS:
          case EXTERNAL_UNSIGNED_INT_ELEMENTS:
-            __ sll(t8, t0, 2);
+            __ sll(t8, key, 1);
            __ addu(t8, a3, t8);
            __ sw(t3, MemOperand(t8, 0));
            break;
          case EXTERNAL_PIXEL_ELEMENTS:
          case EXTERNAL_FLOAT_ELEMENTS:
          case EXTERNAL_DOUBLE_ELEMENTS:
          case FAST_ELEMENTS:
          case FAST_DOUBLE_ELEMENTS:
          case DICTIONARY_ELEMENTS:
          case NON_STRICT_ARGUMENTS_ELEMENTS:
(...skipping 333 matching lines...)
      masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
  __ Jump(ic, RelocInfo::CODE_TARGET);
}


#undef __

} } // namespace v8::internal

#endif // V8_TARGET_ARCH_MIPS