Chromium Code Reviews

Unified Diff: src/arm/code-stubs-arm.cc

Issue 7084032: Add asserts and state tracking to ensure that we do not call (Closed)
Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: '' Created 9 years, 6 months ago
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 1599 matching lines...)

   // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
   // tagged as a small integer.
   __ InvokeBuiltin(native, JUMP_FUNCTION);
 }


 // This stub does not handle the inlined cases (Smis, Booleans, undefined).
 // The stub returns zero for false, and a non-zero value for true.
 void ToBooleanStub::Generate(MacroAssembler* masm) {
+  // This stub overrides SometimesSetsUpAFrame() to return false. That means
+  // we cannot call anything that could cause a GC from this stub.
   // This stub uses VFP3 instructions.
   CpuFeatures::Scope scope(VFP3);

   Label false_result;
   Label not_heap_number;
   Register scratch = r9.is(tos_) ? r7 : r9;

   // undefined -> false
   __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
   __ cmp(tos_, ip);
(...skipping 245 matching lines...)
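A note on the comment added to ToBooleanStub::Generate above: SometimesSetsUpAFrame() appears to be the query the new asserts rely on, and overriding it to return false is a promise that the stub's generated code never builds a frame, so it must not call anything that could trigger a GC. The sketch below is only a toy model of that contract; the real declaration lives in the CodeStub hierarchy and is not part of this hunk, so the class shape shown here is an assumption.

// Toy model (not V8's actual class layout) of the contract documented above.
struct CodeStub {
  virtual ~CodeStub() {}
  // Default: a stub may set up a frame, so GC-triggering calls are allowed.
  virtual bool SometimesSetsUpAFrame() { return true; }
};

struct FramelessStub : CodeStub {
  // Returning false is the promise checked by the new asserts: Generate()
  // must never emit a call that could allocate or otherwise cause a GC.
  virtual bool SometimesSetsUpAFrame() { return false; }
};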
   if (mode_ == UNARY_OVERWRITE) {
     __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
     __ eor(r2, r2, Operand(HeapNumber::kSignMask));  // Flip sign.
     __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
   } else {
     Label slow_allocate_heapnumber, heapnumber_allocated;
     __ AllocateHeapNumber(r1, r2, r3, r6, &slow_allocate_heapnumber);
     __ jmp(&heapnumber_allocated);

     __ bind(&slow_allocate_heapnumber);
-    __ EnterInternalFrame();
-    __ push(r0);
-    __ CallRuntime(Runtime::kNumberAlloc, 0);
-    __ mov(r1, Operand(r0));
-    __ pop(r0);
-    __ LeaveInternalFrame();
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ push(r0);
+      __ CallRuntime(Runtime::kNumberAlloc, 0);
+      __ mov(r1, Operand(r0));
+      __ pop(r0);
+    }

     __ bind(&heapnumber_allocated);
     __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
     __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
     __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset));
     __ eor(r2, r2, Operand(HeapNumber::kSignMask));  // Flip sign.
     __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset));
     __ mov(r0, Operand(r1));
   }
   __ Ret();
(...skipping 20 matching lines...)
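The hunk above is the pattern this patch applies throughout the file: the manual __ EnterInternalFrame() / __ LeaveInternalFrame() pair becomes a brace-delimited block holding a FrameScope, so the frame is entered when the scope object is constructed and left when it is destroyed, and the two halves can no longer get out of sync. Below is a minimal, self-contained sketch of that RAII idea; it is a toy model, not V8's actual FrameScope (the real one also takes the frame type, StackFrame::INTERNAL above, and does more bookkeeping).

// Toy RAII model of FrameScope: constructing it emits frame set-up code,
// destroying it emits tear-down code on every path out of the block.
struct MacroAssembler {
  void EnterFrame() { /* emit frame set-up instructions */ }
  void LeaveFrame() { /* emit frame tear-down instructions */ }
};

class FrameScope {
 public:
  explicit FrameScope(MacroAssembler* masm) : masm_(masm) { masm_->EnterFrame(); }
  ~FrameScope() { masm_->LeaveFrame(); }

 private:
  MacroAssembler* masm_;
};

void GenerateSlowPath(MacroAssembler* masm) {
  {
    FrameScope scope(masm);   // was: __ EnterInternalFrame();
    // ... push arguments and call the runtime here ...
  }                           // was: __ LeaveInternalFrame();
}

Tying the frame to a C++ scope also gives the assembler one natural place to record that a frame is active, which is presumably the state the new asserts check.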

   // Try to store the result in a heap number.
   __ bind(&try_float);
   if (mode_ == UNARY_NO_OVERWRITE) {
     Label slow_allocate_heapnumber, heapnumber_allocated;
     // Allocate a new heap number without zapping r0, which we need if it fails.
     __ AllocateHeapNumber(r2, r3, r4, r6, &slow_allocate_heapnumber);
     __ jmp(&heapnumber_allocated);

     __ bind(&slow_allocate_heapnumber);
-    __ EnterInternalFrame();
-    __ push(r0);  // Push the heap number, not the untagged int32.
-    __ CallRuntime(Runtime::kNumberAlloc, 0);
-    __ mov(r2, r0);  // Move the new heap number into r2.
-    // Get the heap number into r0, now that the new heap number is in r2.
-    __ pop(r0);
-    __ LeaveInternalFrame();
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ push(r0);  // Push the heap number, not the untagged int32.
+      __ CallRuntime(Runtime::kNumberAlloc, 0);
+      __ mov(r2, r0);  // Move the new heap number into r2.
+      // Get the heap number into r0, now that the new heap number is in r2.
+      __ pop(r0);
+    }

     // Convert the heap number in r0 to an untagged integer in r1.
     // This can't go slow-case because it's the same number we already
     // converted once again.
     __ ConvertToInt32(r0, r1, r3, r4, d0, &impossible);
     __ mvn(r1, Operand(r1));

     __ bind(&heapnumber_allocated);
     __ mov(r0, r2);  // Move newly allocated heap number to r0.
   }
(...skipping 1208 matching lines...)
     __ vstr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset));
     __ stm(ia, cache_entry, r2.bit() | r3.bit() | r6.bit());
     __ Ret();

     __ bind(&invalid_cache);
     // The cache is invalid. Call runtime which will recreate the
     // cache.
     __ LoadRoot(r5, Heap::kHeapNumberMapRootIndex);
     __ AllocateHeapNumber(r0, scratch0, scratch1, r5, &skip_cache);
     __ vstr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset));
-    __ EnterInternalFrame();
-    __ push(r0);
-    __ CallRuntime(RuntimeFunction(), 1);
-    __ LeaveInternalFrame();
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ push(r0);
+      __ CallRuntime(RuntimeFunction(), 1);
+    }
     __ vldr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset));
     __ Ret();

     __ bind(&skip_cache);
     // Call C function to calculate the result and answer directly
     // without updating the cache.
     GenerateCallCFunction(masm, scratch0);
     __ GetCFunctionDoubleResult(d2);
     __ bind(&no_update);

     // We return the value in d2 without adding it to the cache, but
     // we cause a scavenging GC so that future allocations will succeed.
-    __ EnterInternalFrame();
-
-    // Allocate an aligned object larger than a HeapNumber.
-    ASSERT(4 * kPointerSize >= HeapNumber::kSize);
-    __ mov(scratch0, Operand(4 * kPointerSize));
-    __ push(scratch0);
-    __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
-    __ LeaveInternalFrame();
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+
+      // Allocate an aligned object larger than a HeapNumber.
+      ASSERT(4 * kPointerSize >= HeapNumber::kSize);
+      __ mov(scratch0, Operand(4 * kPointerSize));
+      __ push(scratch0);
+      __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
+    }
     __ Ret();
   }
 }


 void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
                                                     Register scratch) {
   Isolate* isolate = masm->isolate();

   __ push(lr);
(...skipping 662 matching lines...)
   __ Ret(HasArgsInRegisters() ? 0 : 2);

   // Slow-case. Tail call builtin.
   __ bind(&slow);
   if (!ReturnTrueFalseObject()) {
     if (HasArgsInRegisters()) {
       __ Push(r0, r1);
     }
     __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
   } else {
-    __ EnterInternalFrame();
-    __ Push(r0, r1);
-    __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
-    __ LeaveInternalFrame();
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ Push(r0, r1);
+      __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
+    }
     __ cmp(r0, Operand(0));
     __ LoadRoot(r0, Heap::kTrueValueRootIndex, eq);
     __ LoadRoot(r0, Heap::kFalseValueRootIndex, ne);
     __ Ret(HasArgsInRegisters() ? 0 : 2);
   }
 }


 Register InstanceofStub::left() { return r0; }

(...skipping 2244 matching lines...)
 }


 void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
   __ Push(r1, r0);
   __ push(lr);

   // Call the runtime system in a fresh internal frame.
   ExternalReference miss =
       ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
-  __ EnterInternalFrame();
-  __ Push(r1, r0);
-  __ mov(ip, Operand(Smi::FromInt(op_)));
-  __ push(ip);
-  __ CallExternalReference(miss, 3);
-  __ LeaveInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Push(r1, r0);
+    __ mov(ip, Operand(Smi::FromInt(op_)));
+    __ push(ip);
+    __ CallExternalReference(miss, 3);
+  }
   // Compute the entry point of the rewritten stub.
   __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
   // Restore registers.
   __ pop(lr);
   __ pop(r0);
   __ pop(r1);
   __ Jump(r2);
 }


(...skipping 160 matching lines...)
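The GenerateMiss hunk above shows why the frame matters: CallExternalReference goes out to C++ code that can allocate and collect, which is only safe while the stub has a frame. The issue title mentions "asserts and state tracking"; a plausible reading is that the frame scope records on the assembler that a frame is active and the GC-capable call helpers assert on that record. The snippet below is only an illustrative toy of that mechanism, with made-up names, not the actual MacroAssembler change in this patch.

#include <cassert>

// Toy model of "asserts and state tracking" (illustrative names only).
struct MacroAssembler {
  bool has_frame = false;

  void CallExternalReference(const char* ref, int num_arguments) {
    // A call into the runtime may allocate and trigger a GC, so it is only
    // legal while a frame is active.
    assert(has_frame && "GC-capable call emitted outside a frame");
    (void)ref;
    (void)num_arguments;  // ... emit the actual call sequence here ...
  }
};

// Scope that flips the tracked state for the duration of a block.
class FrameScope {
 public:
  explicit FrameScope(MacroAssembler* masm) : masm_(masm) { masm_->has_frame = true; }
  ~FrameScope() { masm_->has_frame = false; }

 private:
  MacroAssembler* masm_;
};

int main() {
  MacroAssembler masm;
  {
    FrameScope scope(&masm);
    masm.CallExternalReference("CompareIC_Miss", 3);  // fine: inside the scope
  }
  // masm.CallExternalReference("CompareIC_Miss", 3);  // would trip the assert
  return 0;
}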
   __ tst(r0, Operand(r0));
   __ mov(scratch2, Operand(r2));
   __ ldm(ia_w, sp, spill_mask);

   __ b(ne, done);
   __ b(eq, miss);
 }


 void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
+  // This stub overrides SometimesSetsUpAFrame() to return false. That means
+  // we cannot call anything that could cause a GC from this stub.
   // Registers:
   //  result: StringDictionary to probe
   //  r1: key
   //  : StringDictionary to probe.
   //  index_: will hold an index of entry if lookup is successful.
   //          might alias with result_.
   // Returns:
   //  result_ is zero if lookup failed, non zero otherwise.

   Register result = r0;
(...skipping 74 matching lines...)
   __ mov(result, Operand(0));
   __ Ret();
 }


 #undef __

 } }  // namespace v8::internal

 #endif  // V8_TARGET_ARCH_ARM
