Chromium Code Reviews

Side by Side Diff: src/mips/code-stubs-mips.cc

Issue 7891042: Add asserts to ensure that we: (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: '' Created 9 years, 3 months ago
OLD | NEW
1 // Copyright 2011 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 863 matching lines...)
874 if (!IsMipsSoftFloatABI) { 874 if (!IsMipsSoftFloatABI) {
875 CpuFeatures::Scope scope(FPU); 875 CpuFeatures::Scope scope(FPU);
876 // We are not using MIPS FPU instructions, and parameters for the runtime 876 // We are not using MIPS FPU instructions, and parameters for the runtime
877 // function call are prepared in a0-a3 registers, but the function we are 877 // function call are prepared in a0-a3 registers, but the function we are
878 // calling is compiled with hard-float flag and expecting hard float ABI 878 // calling is compiled with hard-float flag and expecting hard float ABI
879 // (parameters in f12/f14 registers). We need to copy parameters from 879 // (parameters in f12/f14 registers). We need to copy parameters from
880 // a0-a3 registers to f12/f14 register pairs. 880 // a0-a3 registers to f12/f14 register pairs.
881 __ Move(f12, a0, a1); 881 __ Move(f12, a0, a1);
882 __ Move(f14, a2, a3); 882 __ Move(f14, a2, a3);
883 } 883 }
884 // Call C routine that may not cause GC or other trouble. 884 {
885 __ CallCFunction(ExternalReference::double_fp_operation(op, masm->isolate()), 885 AllowExternalCallThatCantCauseGC scope(masm);
886 4); 886 __ CallCFunction(
887 ExternalReference::double_fp_operation(op, masm->isolate()), 4);
888 }
887 // Store answer in the overwritable heap number. 889 // Store answer in the overwritable heap number.
888 if (!IsMipsSoftFloatABI) { 890 if (!IsMipsSoftFloatABI) {
889 CpuFeatures::Scope scope(FPU); 891 CpuFeatures::Scope scope(FPU);
890 // Double returned in register f0. 892 // Double returned in register f0.
891 __ sdc1(f0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset)); 893 __ sdc1(f0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
892 } else { 894 } else {
893 // Double returned in registers v0 and v1. 895 // Double returned in registers v0 and v1.
894 __ sw(v1, FieldMemOperand(heap_number_result, HeapNumber::kExponentOffset)); 896 __ sw(v1, FieldMemOperand(heap_number_result, HeapNumber::kExponentOffset));
895 __ sw(v0, FieldMemOperand(heap_number_result, HeapNumber::kMantissaOffset)); 897 __ sw(v0, FieldMemOperand(heap_number_result, HeapNumber::kMantissaOffset));
896 } 898 }
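
Note on this hunk: the CallCFunction is now wrapped in an AllowExternalCallThatCantCauseGC scope, which is exactly what the new asserts in this CL look for: a call into C code must either happen inside a frame or be explicitly annotated as unable to cause a GC. Below is a minimal standalone sketch of that RAII annotation idea, with hypothetical names (FakeMasm, AllowExternalCallScope, the CallCFunction stand-in); only the AllowExternalCallThatCantCauseGC name and the frame-or-annotation requirement come from the patch itself.

    #include <cassert>

    // Simplified model of an assembler that tracks whether the code being
    // generated is currently allowed to make a GC-unsafe external call.
    struct FakeMasm {
      bool allow_external_call = false;
      bool has_frame = false;
    };

    // RAII annotation: while this scope is alive, external calls are
    // explicitly declared as unable to cause GC (hypothetical stand-in for
    // AllowExternalCallThatCantCauseGC).
    class AllowExternalCallScope {
     public:
      explicit AllowExternalCallScope(FakeMasm* masm)
          : masm_(masm), old_(masm->allow_external_call) {
        masm_->allow_external_call = true;
      }
      ~AllowExternalCallScope() { masm_->allow_external_call = old_; }
     private:
      FakeMasm* masm_;
      bool old_;
    };

    // Stand-in for CallCFunction: the assert models the check this CL adds --
    // either a frame has been set up or the call site is annotated.
    void CallCFunction(FakeMasm* masm) {
      assert(masm->has_frame || masm->allow_external_call);
      // ... emit the actual call here ...
    }

    int main() {
      FakeMasm masm;
      {
        AllowExternalCallScope scope(&masm);  // annotate the call site
        CallCFunction(&masm);                 // passes the assert
      }
      return 0;
    }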
(...skipping 1047 matching lines...)
1944 if (mode_ == UNARY_OVERWRITE) { 1946 if (mode_ == UNARY_OVERWRITE) {
1945 __ lw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset)); 1947 __ lw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
1946 __ Xor(a2, a2, Operand(HeapNumber::kSignMask)); // Flip sign. 1948 __ Xor(a2, a2, Operand(HeapNumber::kSignMask)); // Flip sign.
1947 __ sw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset)); 1949 __ sw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
1948 } else { 1950 } else {
1949 Label slow_allocate_heapnumber, heapnumber_allocated; 1951 Label slow_allocate_heapnumber, heapnumber_allocated;
1950 __ AllocateHeapNumber(a1, a2, a3, t2, &slow_allocate_heapnumber); 1952 __ AllocateHeapNumber(a1, a2, a3, t2, &slow_allocate_heapnumber);
1951 __ jmp(&heapnumber_allocated); 1953 __ jmp(&heapnumber_allocated);
1952 1954
1953 __ bind(&slow_allocate_heapnumber); 1955 __ bind(&slow_allocate_heapnumber);
1954 __ EnterInternalFrame(); 1956 {
1955 __ push(a0); 1957 FrameScope scope(masm, StackFrame::INTERNAL);
1956 __ CallRuntime(Runtime::kNumberAlloc, 0); 1958 __ push(a0);
1957 __ mov(a1, v0); 1959 __ CallRuntime(Runtime::kNumberAlloc, 0);
1958 __ pop(a0); 1960 __ mov(a1, v0);
1959 __ LeaveInternalFrame(); 1961 __ pop(a0);
1962 }
1960 1963
1961 __ bind(&heapnumber_allocated); 1964 __ bind(&heapnumber_allocated);
1962 __ lw(a3, FieldMemOperand(a0, HeapNumber::kMantissaOffset)); 1965 __ lw(a3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
1963 __ lw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset)); 1966 __ lw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
1964 __ sw(a3, FieldMemOperand(a1, HeapNumber::kMantissaOffset)); 1967 __ sw(a3, FieldMemOperand(a1, HeapNumber::kMantissaOffset));
1965 __ Xor(a2, a2, Operand(HeapNumber::kSignMask)); // Flip sign. 1968 __ Xor(a2, a2, Operand(HeapNumber::kSignMask)); // Flip sign.
1966 __ sw(a2, FieldMemOperand(a1, HeapNumber::kExponentOffset)); 1969 __ sw(a2, FieldMemOperand(a1, HeapNumber::kExponentOffset));
1967 __ mov(v0, a1); 1970 __ mov(v0, a1);
1968 } 1971 }
1969 __ Ret(); 1972 __ Ret();
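
Note on this hunk: EnterInternalFrame()/LeaveInternalFrame() are replaced by a block-scoped FrameScope(masm, StackFrame::INTERNAL), so the leave code is tied to the closing brace and the push/CallRuntime/pop sequence is guaranteed to run inside a frame. A minimal sketch of the RAII pairing, using hypothetical names (Masm, FrameScopeModel), not V8's real classes:

    #include <iostream>

    struct Masm { bool has_frame = false; };

    enum class FrameKind { INTERNAL, MANUAL };

    // Scoped frame: the constructor emits the "enter frame" code and the
    // destructor emits the matching "leave frame" code, so the pair cannot
    // get out of sync as the generated sequence grows.
    class FrameScopeModel {
     public:
      FrameScopeModel(Masm* masm, FrameKind kind) : masm_(masm), kind_(kind) {
        masm_->has_frame = true;
        if (kind_ == FrameKind::INTERNAL) std::cout << "EnterInternalFrame\n";
      }
      ~FrameScopeModel() {
        if (kind_ == FrameKind::INTERNAL) std::cout << "LeaveInternalFrame\n";
        masm_->has_frame = false;
      }
     private:
      Masm* masm_;
      FrameKind kind_;
    };

    int main() {
      Masm masm;
      {
        FrameScopeModel scope(&masm, FrameKind::INTERNAL);
        std::cout << "push a0; CallRuntime(kNumberAlloc); mov a1, v0; pop a0\n";
      }  // matching leave emitted here, on every exit path out of the block
      return 0;
    }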
(...skipping 21 matching lines...)
1991 1994
1992 // Try to store the result in a heap number. 1995 // Try to store the result in a heap number.
1993 __ bind(&try_float); 1996 __ bind(&try_float);
1994 if (mode_ == UNARY_NO_OVERWRITE) { 1997 if (mode_ == UNARY_NO_OVERWRITE) {
1995 Label slow_allocate_heapnumber, heapnumber_allocated; 1998 Label slow_allocate_heapnumber, heapnumber_allocated;
1996 // Allocate a new heap number without zapping v0, which we need if it fails. 1999 // Allocate a new heap number without zapping v0, which we need if it fails.
1997 __ AllocateHeapNumber(a2, a3, t0, t2, &slow_allocate_heapnumber); 2000 __ AllocateHeapNumber(a2, a3, t0, t2, &slow_allocate_heapnumber);
1998 __ jmp(&heapnumber_allocated); 2001 __ jmp(&heapnumber_allocated);
1999 2002
2000 __ bind(&slow_allocate_heapnumber); 2003 __ bind(&slow_allocate_heapnumber);
2001 __ EnterInternalFrame(); 2004 {
2002 __ push(v0); // Push the heap number, not the untagged int32. 2005 FrameScope scope(masm, StackFrame::INTERNAL);
2003 __ CallRuntime(Runtime::kNumberAlloc, 0); 2006 __ push(v0); // Push the heap number, not the untagged int32.
2004 __ mov(a2, v0); // Move the new heap number into a2. 2007 __ CallRuntime(Runtime::kNumberAlloc, 0);
2005 // Get the heap number into v0, now that the new heap number is in a2. 2008 __ mov(a2, v0); // Move the new heap number into a2.
2006 __ pop(v0); 2009 // Get the heap number into v0, now that the new heap number is in a2.
2007 __ LeaveInternalFrame(); 2010 __ pop(v0);
2011 }
2008 2012
2009 // Convert the heap number in v0 to an untagged integer in a1. 2013 // Convert the heap number in v0 to an untagged integer in a1.
2010 // This can't go slow-case because it's the same number we already 2014 // This can't go slow-case because it's the same number we already
2011 // converted once again. 2015 // converted once again.
2012 __ ConvertToInt32(v0, a1, a3, t0, f0, &impossible); 2016 __ ConvertToInt32(v0, a1, a3, t0, f0, &impossible);
2013 // Negate the result. 2017 // Negate the result.
2014 __ Xor(a1, a1, -1); 2018 __ Xor(a1, a1, -1);
2015 2019
2016 __ bind(&heapnumber_allocated); 2020 __ bind(&heapnumber_allocated);
2017 __ mov(v0, a2); // Move newly allocated heap number to v0. 2021 __ mov(v0, a2); // Move newly allocated heap number to v0.
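
Aside on the __ Xor(a1, a1, -1) line above: XOR with -1 (all bits set in two's complement) flips every bit of the untagged int32, i.e. x ^ -1 == ~x. A throwaway host-side check of that identity, purely illustrative:

    #include <cassert>
    #include <cstdint>

    int main() {
      // XOR with -1 (all bits set) flips every bit: x ^ -1 == ~x.
      const int32_t samples[] = {0, 1, -1, 42, -123456, INT32_MIN, INT32_MAX};
      for (int32_t x : samples) {
        assert((x ^ -1) == ~x);
      }
      return 0;
    }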
(...skipping 1258 matching lines...)
3276 3280
3277 __ mov(v0, cache_entry); 3281 __ mov(v0, cache_entry);
3278 __ Ret(); 3282 __ Ret();
3279 3283
3280 __ bind(&invalid_cache); 3284 __ bind(&invalid_cache);
3281 // The cache is invalid. Call runtime which will recreate the 3285 // The cache is invalid. Call runtime which will recreate the
3282 // cache. 3286 // cache.
3283 __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex); 3287 __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
3284 __ AllocateHeapNumber(a0, scratch0, scratch1, t1, &skip_cache); 3288 __ AllocateHeapNumber(a0, scratch0, scratch1, t1, &skip_cache);
3285 __ sdc1(f4, FieldMemOperand(a0, HeapNumber::kValueOffset)); 3289 __ sdc1(f4, FieldMemOperand(a0, HeapNumber::kValueOffset));
3286 __ EnterInternalFrame(); 3290 {
3287 __ push(a0); 3291 FrameScope scope(masm, StackFrame::INTERNAL);
3288 __ CallRuntime(RuntimeFunction(), 1); 3292 __ push(a0);
3289 __ LeaveInternalFrame(); 3293 __ CallRuntime(RuntimeFunction(), 1);
3294 }
3290 __ ldc1(f4, FieldMemOperand(v0, HeapNumber::kValueOffset)); 3295 __ ldc1(f4, FieldMemOperand(v0, HeapNumber::kValueOffset));
3291 __ Ret(); 3296 __ Ret();
3292 3297
3293 __ bind(&skip_cache); 3298 __ bind(&skip_cache);
3294 // Call C function to calculate the result and answer directly 3299 // Call C function to calculate the result and answer directly
3295 // without updating the cache. 3300 // without updating the cache.
3296 GenerateCallCFunction(masm, scratch0); 3301 GenerateCallCFunction(masm, scratch0);
3297 __ GetCFunctionDoubleResult(f4); 3302 __ GetCFunctionDoubleResult(f4);
3298 __ bind(&no_update); 3303 __ bind(&no_update);
3299 3304
3300 // We return the value in f4 without adding it to the cache, but 3305 // We return the value in f4 without adding it to the cache, but
3301 // we cause a scavenging GC so that future allocations will succeed. 3306 // we cause a scavenging GC so that future allocations will succeed.
3302 __ EnterInternalFrame(); 3307 {
3308 FrameScope scope(masm, StackFrame::INTERNAL);
3303 3309
3304 // Allocate an aligned object larger than a HeapNumber. 3310 // Allocate an aligned object larger than a HeapNumber.
3305 ASSERT(4 * kPointerSize >= HeapNumber::kSize); 3311 ASSERT(4 * kPointerSize >= HeapNumber::kSize);
3306 __ li(scratch0, Operand(4 * kPointerSize)); 3312 __ li(scratch0, Operand(4 * kPointerSize));
3307 __ push(scratch0); 3313 __ push(scratch0);
3308 __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace); 3314 __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
3309 __ LeaveInternalFrame(); 3315 }
3310 __ Ret(); 3316 __ Ret();
3311 } 3317 }
3312 } 3318 }
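
Note on the no_update path: the stub forces a scavenge by allocating through Runtime::kAllocateInNewSpace (with CallRuntimeSaveDoubles keeping f4 intact), and the ASSERT only checks that the requested 4 * kPointerSize bytes cover a HeapNumber. A host-side restatement of that size check with assumed 32-bit values (the concrete numbers come from V8's object layout, not from this patch):

    #include <cassert>

    int main() {
      // Illustrative 32-bit values (assumptions for this sketch): pointers
      // are 4 bytes, a HeapNumber is a map word plus an 8-byte double.
      const int kPointerSize = 4;
      const int kHeapNumberSize = 4 + 8;
      // The stub pushes 4 * kPointerSize as the allocation size, so the
      // request must be at least as large as a HeapNumber.
      assert(4 * kPointerSize >= kHeapNumberSize);
      return 0;
    }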
3313 3319
3314 3320
3315 void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm, 3321 void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
3316 Register scratch) { 3322 Register scratch) {
3317 __ push(ra); 3323 __ push(ra);
3318 __ PrepareCallCFunction(2, scratch); 3324 __ PrepareCallCFunction(2, scratch);
3319 if (IsMipsSoftFloatABI) { 3325 if (IsMipsSoftFloatABI) {
(...skipping 90 matching lines...)
3410 // C function for integer exponents. The register containing 3416 // C function for integer exponents. The register containing
3411 // the heap number is callee-saved. 3417 // the heap number is callee-saved.
3412 __ AllocateHeapNumber(heapnumber, 3418 __ AllocateHeapNumber(heapnumber,
3413 scratch, 3419 scratch,
3414 scratch2, 3420 scratch2,
3415 heapnumbermap, 3421 heapnumbermap,
3416 &call_runtime); 3422 &call_runtime);
3417 __ push(ra); 3423 __ push(ra);
3418 __ PrepareCallCFunction(3, scratch); 3424 __ PrepareCallCFunction(3, scratch);
3419 __ SetCallCDoubleArguments(double_base, exponent); 3425 __ SetCallCDoubleArguments(double_base, exponent);
3420 __ CallCFunction( 3426 {
3421 ExternalReference::power_double_int_function(masm->isolate()), 3); 3427 AllowExternalCallThatCantCauseGC scope(masm);
3422 __ pop(ra); 3428 __ CallCFunction(
3423 __ GetCFunctionDoubleResult(double_result); 3429 ExternalReference::power_double_int_function(masm->isolate()), 3);
3430 __ pop(ra);
3431 __ GetCFunctionDoubleResult(double_result);
3432 }
3424 __ sdc1(double_result, 3433 __ sdc1(double_result,
3425 FieldMemOperand(heapnumber, HeapNumber::kValueOffset)); 3434 FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
3426 __ mov(v0, heapnumber); 3435 __ mov(v0, heapnumber);
3427 __ DropAndRet(2 * kPointerSize); 3436 __ DropAndRet(2 * kPointerSize);
3428 3437
3429 __ bind(&exponent_not_smi); 3438 __ bind(&exponent_not_smi);
3430 __ lw(scratch, FieldMemOperand(exponent, JSObject::kMapOffset)); 3439 __ lw(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
3431 __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap)); 3440 __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
3432 // Exponent is a heapnumber. Load it into double register. 3441 // Exponent is a heapnumber. Load it into double register.
3433 __ ldc1(double_exponent, 3442 __ ldc1(double_exponent,
3434 FieldMemOperand(exponent, HeapNumber::kValueOffset)); 3443 FieldMemOperand(exponent, HeapNumber::kValueOffset));
3435 3444
3436 // The base and the exponent are in double registers. 3445 // The base and the exponent are in double registers.
3437 // Allocate a heap number and call a C function for 3446 // Allocate a heap number and call a C function for
3438 // double exponents. The register containing 3447 // double exponents. The register containing
3439 // the heap number is callee-saved. 3448 // the heap number is callee-saved.
3440 __ AllocateHeapNumber(heapnumber, 3449 __ AllocateHeapNumber(heapnumber,
3441 scratch, 3450 scratch,
3442 scratch2, 3451 scratch2,
3443 heapnumbermap, 3452 heapnumbermap,
3444 &call_runtime); 3453 &call_runtime);
3445 __ push(ra); 3454 __ push(ra);
3446 __ PrepareCallCFunction(4, scratch); 3455 __ PrepareCallCFunction(4, scratch);
3447 // ABI (o32) for func(double a, double b): a in f12, b in f14. 3456 // ABI (o32) for func(double a, double b): a in f12, b in f14.
3448 ASSERT(double_base.is(f12)); 3457 ASSERT(double_base.is(f12));
3449 ASSERT(double_exponent.is(f14)); 3458 ASSERT(double_exponent.is(f14));
3450 __ SetCallCDoubleArguments(double_base, double_exponent); 3459 __ SetCallCDoubleArguments(double_base, double_exponent);
3451 __ CallCFunction( 3460 {
3452 ExternalReference::power_double_double_function(masm->isolate()), 4); 3461 AllowExternalCallThatCantCauseGC scope(masm);
3453 __ pop(ra); 3462 __ CallCFunction(
3454 __ GetCFunctionDoubleResult(double_result); 3463 ExternalReference::power_double_double_function(masm->isolate()), 4);
3464 __ pop(ra);
3465 __ GetCFunctionDoubleResult(double_result);
3466 }
3455 __ sdc1(double_result, 3467 __ sdc1(double_result,
3456 FieldMemOperand(heapnumber, HeapNumber::kValueOffset)); 3468 FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
3457 __ mov(v0, heapnumber); 3469 __ mov(v0, heapnumber);
3458 __ DropAndRet(2 * kPointerSize); 3470 __ DropAndRet(2 * kPointerSize);
3459 } 3471 }
3460 3472
3461 __ bind(&call_runtime); 3473 __ bind(&call_runtime);
3462 __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1); 3474 __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
3463 } 3475 }
3464 3476
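Note on the MathPowStub hunks: both C calls are now inside AllowExternalCallThatCantCauseGC scopes, and the ASSERTs pin the inputs to f12/f14 because that is where the MIPS o32 hard-float ABI expects double arguments. Below is a standalone stand-in for the helper reached through ExternalReference::power_double_double_function; it only mirrors the signature (the real helper lives in V8's runtime and also handles special cases):

    #include <cmath>
    #include <cstdio>

    // Under the MIPS o32 hard-float ABI the two double parameters arrive in
    // f12 and f14, which is why the stub asserts double_base.is(f12) and
    // double_exponent.is(f14) before SetCallCDoubleArguments.
    double power_double_double(double base, double exponent) {
      return std::pow(base, exponent);
    }

    int main() {
      std::printf("%f\n", power_double_double(2.0, 10.0));  // 1024.000000
      return 0;
    }
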
(...skipping 156 matching lines...)
3621 // instead of a proper result. The builtin entry handles 3633 // instead of a proper result. The builtin entry handles
3622 // this by performing a garbage collection and retrying the 3634 // this by performing a garbage collection and retrying the
3623 // builtin once. 3635 // builtin once.
3624 3636
3625 // Compute the argv pointer in a callee-saved register. 3637 // Compute the argv pointer in a callee-saved register.
3626 __ sll(s1, a0, kPointerSizeLog2); 3638 __ sll(s1, a0, kPointerSizeLog2);
3627 __ Addu(s1, sp, s1); 3639 __ Addu(s1, sp, s1);
3628 __ Subu(s1, s1, Operand(kPointerSize)); 3640 __ Subu(s1, s1, Operand(kPointerSize));
3629 3641
3630 // Enter the exit frame that transitions from JavaScript to C++. 3642 // Enter the exit frame that transitions from JavaScript to C++.
3643 FrameScope scope(masm, StackFrame::MANUAL);
3631 __ EnterExitFrame(save_doubles_); 3644 __ EnterExitFrame(save_doubles_);
3632 3645
3633 // Setup argc and the builtin function in callee-saved registers. 3646 // Setup argc and the builtin function in callee-saved registers.
3634 __ mov(s0, a0); 3647 __ mov(s0, a0);
3635 __ mov(s2, a1); 3648 __ mov(s2, a1);
3636 3649
3637 // s0: number of arguments (C callee-saved) 3650 // s0: number of arguments (C callee-saved)
3638 // s1: pointer to first argument (C callee-saved) 3651 // s1: pointer to first argument (C callee-saved)
3639 // s2: pointer to builtin function (C callee-saved) 3652 // s2: pointer to builtin function (C callee-saved)
3640 3653
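Note on this hunk: the FrameScope is created with StackFrame::MANUAL because the stub sets up the exit frame itself via EnterExitFrame; the scope only provides the frame bookkeeping the new asserts rely on. A hypothetical model of that split (ManualFrameScope, EmitEnterExitFrame and EmitCallToRuntime are illustrative names, not V8 API):

    #include <cassert>
    #include <iostream>

    struct Masm { bool has_frame = false; };

    // MANUAL mode analogue: record that a frame will exist, emit nothing.
    class ManualFrameScope {
     public:
      explicit ManualFrameScope(Masm* masm) : masm_(masm) {
        masm_->has_frame = true;  // bookkeeping only; no frame code emitted
      }
      ~ManualFrameScope() { masm_->has_frame = false; }
     private:
      Masm* masm_;
    };

    void EmitEnterExitFrame() { std::cout << "EnterExitFrame (by the stub)\n"; }
    void EmitCallToRuntime(const Masm& masm) { assert(masm.has_frame); }

    int main() {
      Masm masm;
      ManualFrameScope scope(&masm);  // StackFrame::MANUAL analogue
      EmitEnterExitFrame();           // the stub builds the real frame itself
      EmitCallToRuntime(masm);        // asserts see a frame and stay quiet
      return 0;
    }
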
(...skipping 338 matching lines...)
3979 __ DropAndRet(HasArgsInRegisters() ? 0 : 2); 3992 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
3980 3993
3981 // Slow-case. Tail call builtin. 3994 // Slow-case. Tail call builtin.
3982 __ bind(&slow); 3995 __ bind(&slow);
3983 if (!ReturnTrueFalseObject()) { 3996 if (!ReturnTrueFalseObject()) {
3984 if (HasArgsInRegisters()) { 3997 if (HasArgsInRegisters()) {
3985 __ Push(a0, a1); 3998 __ Push(a0, a1);
3986 } 3999 }
3987 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION); 4000 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
3988 } else { 4001 } else {
3989 __ EnterInternalFrame(); 4002 {
3990 __ Push(a0, a1); 4003 FrameScope scope(masm, StackFrame::INTERNAL);
3991 __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION); 4004 __ Push(a0, a1);
3992 __ LeaveInternalFrame(); 4005 __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
4006 }
3993 __ mov(a0, v0); 4007 __ mov(a0, v0);
3994 __ LoadRoot(v0, Heap::kTrueValueRootIndex); 4008 __ LoadRoot(v0, Heap::kTrueValueRootIndex);
3995 __ DropAndRet(HasArgsInRegisters() ? 0 : 2, eq, a0, Operand(zero_reg)); 4009 __ DropAndRet(HasArgsInRegisters() ? 0 : 2, eq, a0, Operand(zero_reg));
3996 __ LoadRoot(v0, Heap::kFalseValueRootIndex); 4010 __ LoadRoot(v0, Heap::kFalseValueRootIndex);
3997 __ DropAndRet(HasArgsInRegisters() ? 0 : 2); 4011 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
3998 } 4012 }
3999 } 4013 }
4000 4014
4001 4015
4002 Register InstanceofStub::left() { return a0; } 4016 Register InstanceofStub::left() { return a0; }
(...skipping 2636 matching lines...)
6639 } 6653 }
6640 6654
6641 6655
6642 void ICCompareStub::GenerateMiss(MacroAssembler* masm) { 6656 void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
6643 __ Push(a1, a0); 6657 __ Push(a1, a0);
6644 __ push(ra); 6658 __ push(ra);
6645 6659
6646 // Call the runtime system in a fresh internal frame. 6660 // Call the runtime system in a fresh internal frame.
6647 ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss), 6661 ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss),
6648 masm->isolate()); 6662 masm->isolate());
6649 __ EnterInternalFrame(); 6663 {
6650 __ Push(a1, a0); 6664 FrameScope scope(masm, StackFrame::INTERNAL);
6651 __ li(t0, Operand(Smi::FromInt(op_))); 6665 __ Push(a1, a0);
6652 __ push(t0); 6666 __ li(t0, Operand(Smi::FromInt(op_)));
6653 __ CallExternalReference(miss, 3); 6667 __ push(t0);
6654 __ LeaveInternalFrame(); 6668 __ CallExternalReference(miss, 3);
6669 }
6655 // Compute the entry point of the rewritten stub. 6670 // Compute the entry point of the rewritten stub.
6656 __ Addu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag)); 6671 __ Addu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
6657 // Restore registers. 6672 // Restore registers.
6658 __ pop(ra); 6673 __ pop(ra);
6659 __ pop(a0); 6674 __ pop(a0);
6660 __ pop(a1); 6675 __ pop(a1);
6661 __ Jump(a2); 6676 __ Jump(a2);
6662 } 6677 }
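
Note on GenerateMiss: the miss handler returns the rewritten stub's Code object in v0, and the single Addu of Code::kHeaderSize - kHeapObjectTag both strips the heap-object tag and skips past the Code header to reach the entry point. A small arithmetic illustration with assumed constant values (kHeapObjectTag and kCodeHeaderSize here are placeholders for this sketch):

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Illustrative constants (assumptions for this sketch): heap object
      // pointers carry a small tag, and a Code object's instructions begin
      // kCodeHeaderSize bytes into the object.
      const uintptr_t kHeapObjectTag = 1;
      const uintptr_t kCodeHeaderSize = 32;

      const uintptr_t code_start = 0x10000;                 // untagged address
      const uintptr_t tagged = code_start + kHeapObjectTag;  // what v0 holds

      // Mirrors "__ Addu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag))":
      // one add removes the tag and skips the header in a single step.
      const uintptr_t entry = tagged + (kCodeHeaderSize - kHeapObjectTag);

      std::printf("entry == code_start + header: %s\n",
                  entry == code_start + kCodeHeaderSize ? "yes" : "no");
      return 0;
    }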
6663 6678
6664 6679
(...skipping 195 matching lines...)
6860 __ CallStub(&stub); 6875 __ CallStub(&stub);
6861 __ mov(scratch2, a2); 6876 __ mov(scratch2, a2);
6862 __ MultiPop(spill_mask); 6877 __ MultiPop(spill_mask);
6863 6878
6864 __ Branch(done, ne, v0, Operand(zero_reg)); 6879 __ Branch(done, ne, v0, Operand(zero_reg));
6865 __ Branch(miss, eq, v0, Operand(zero_reg)); 6880 __ Branch(miss, eq, v0, Operand(zero_reg));
6866 } 6881 }
6867 6882
6868 6883
6869 void StringDictionaryLookupStub::Generate(MacroAssembler* masm) { 6884 void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
6885 // This stub overrides SometimesSetsUpAFrame() to return false. That means
6886 // we cannot call anything that could cause a GC from this stub.
6870 // Registers: 6887 // Registers:
6871 // result: StringDictionary to probe 6888 // result: StringDictionary to probe
6872 // a1: key 6889 // a1: key
6873 // : StringDictionary to probe. 6890 // : StringDictionary to probe.
6874 // index_: will hold an index of entry if lookup is successful. 6891 // index_: will hold an index of entry if lookup is successful.
6875 // might alias with result_. 6892 // might alias with result_.
6876 // Returns: 6893 // Returns:
6877 // result_ is zero if lookup failed, non zero otherwise. 6894 // result_ is zero if lookup failed, non zero otherwise.
6878 6895
6879 Register result = v0; 6896 Register result = v0;
(...skipping 78 matching lines...)
6958 __ mov(result, zero_reg); 6975 __ mov(result, zero_reg);
6959 __ Ret(); 6976 __ Ret();
6960 } 6977 }
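
Note on the comment added at the top of StringDictionaryLookupStub::Generate: because the stub overrides SometimesSetsUpAFrame() to return false, nothing reachable from it may cause a GC. A hypothetical model of that contract (StubModel and EmitGCCapableCall are illustrative names; only SometimesSetsUpAFrame() comes from the comment):

    #include <cassert>

    // A stub that reports it never sets up a frame must not reach any call
    // that could trigger a GC, because there is no frame for the GC to walk.
    struct StubModel {
      virtual ~StubModel() = default;
      virtual bool SometimesSetsUpAFrame() const { return true; }
    };

    struct StringDictionaryLookupStubModel : StubModel {
      bool SometimesSetsUpAFrame() const override { return false; }
    };

    // Stand-in for emitting a call that may allocate (and therefore GC).
    void EmitGCCapableCall(const StubModel& stub) {
      assert(stub.SometimesSetsUpAFrame() && "GC-capable call in frameless stub");
    }

    int main() {
      StringDictionaryLookupStubModel stub;
      (void)stub;
      // EmitGCCapableCall(stub);  // would trip the assert: stub is frameless
      return 0;
    }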
6961 6978
6962 6979
6963 #undef __ 6980 #undef __
6964 6981
6965 } } // namespace v8::internal 6982 } } // namespace v8::internal
6966 6983
6967 #endif // V8_TARGET_ARCH_MIPS 6984 #endif // V8_TARGET_ARCH_MIPS
