Chromium Code Reviews

Unified Diff: src/arm/stub-cache-arm.cc

Issue 12393008: [v8-dev] Split and replace the EmitVFPTruncate routine to only do what is needed. Floor (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: Created 7 years, 9 months ago
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 2117 matching lines...)
   __ ldr(r0, MemOperand(sp, 0 * kPointerSize));
 
   // If the argument is a smi, just return.
   STATIC_ASSERT(kSmiTag == 0);
   __ tst(r0, Operand(kSmiTagMask));
   __ Drop(argc + 1, eq);
   __ Ret(eq);
 
   __ CheckMap(r0, r1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK);
 
-  Label wont_fit_smi, no_vfp_exception, restore_fpscr_and_return;
-
-  // If vfp3 is enabled, we use the fpu rounding with the RM (round towards
-  // minus infinity) mode.
+  Label smi_check, just_return;
 
   // Load the HeapNumber value.
   // We will need access to the value in the core registers, so we load it
   // with ldrd and move it to the fpu. It also spares a sub instruction for
   // updating the HeapNumber value address, as vldr expects a multiple
   // of 4 offset.
   __ Ldrd(r4, r5, FieldMemOperand(r0, HeapNumber::kValueOffset));
   __ vmov(d1, r4, r5);
 
-  // Backup FPSCR.
-  __ vmrs(r3);
-  // Set custom FPCSR:
-  //  - Set rounding mode to "Round towards Minus Infinity"
-  //    (i.e. bits [23:22] = 0b10).
-  //  - Clear vfp cumulative exception flags (bits [3:0]).
-  //  - Make sure Flush-to-zero mode control bit is unset (bit 22).
-  __ bic(r9, r3,
-         Operand(kVFPExceptionMask | kVFPRoundingModeMask |
-                 kVFPFlushToZeroMask));
-  __ orr(r9, r9, Operand(kRoundToMinusInf));
-  __ vmsr(r9);
-
-  // Convert the argument to an integer.
-  __ vcvt_s32_f64(s0, d1, kFPSCRRounding);
-
-  // Use vcvt latency to start checking for special cases.
-  // Get the argument exponent and clear the sign bit.
-  __ bic(r6, r5, Operand(HeapNumber::kSignMask));
-  __ mov(r6, Operand(r6, LSR, HeapNumber::kMantissaBitsInTopWord));
-
-  // Retrieve FPSCR and check for vfp exceptions.
-  __ vmrs(r9);
-  __ tst(r9, Operand(kVFPExceptionMask));
-  __ b(&no_vfp_exception, eq);
-
-  // Check for NaN, Infinity, and -Infinity.
+  // Check for NaN, Infinities and -0.
   // They are invariant through a Math.Floor call, so just
   // return the original argument.
-  __ sub(r7, r6, Operand(HeapNumber::kExponentMask
-        >> HeapNumber::kMantissaBitsInTopWord), SetCC);
-  __ b(&restore_fpscr_and_return, eq);
-  // We had an overflow or underflow in the conversion. Check if we
-  // have a big exponent.
-  __ cmp(r7, Operand(HeapNumber::kMantissaBits));
-  // If greater or equal, the argument is already round and in r0.
-  __ b(&restore_fpscr_and_return, ge);
-  __ b(&wont_fit_smi);
+  __ Sbfx(r3, r5, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
+  __ cmp(r3, Operand(-1));
+  __ b(eq, &just_return);
+  __ eor(r3, r5, Operand(0x80000000u));
+  __ orr(r3, r3, r4, SetCC);
+  __ b(eq, &just_return);
+  // Test for values that can be exactly represented as a
+  // signed 32-bit integer.
+  __ TryDoubleToInt32Exact(r0, d1, d2);
+  // If exact, check smi.
+  __ b(eq, &smi_check);
+  __ cmp(r5, Operand(0));
 
-  __ bind(&no_vfp_exception);
-  // Move the result back to general purpose register r0.
-  __ vmov(r0, s0);
-  // Check if the result fits into a smi.
+  // If input is in ]+0, +inf[, the cmp has clear overflow and negative

    hans 2013/03/01 14:47:54  ultra nit: "has cleared"?

+  // (V=0 and N=0), the two following instructions won't execute and
+  // we fall through to smi_check to check if the result can fit into an smi.
+
+  // If input is in ]-inf, -0[, sub one and go to slow if we have
+  // an overflow. Else we fall through the smi check.
+  // Hint: if x is a negative, non-integer number,
+  // floor(x) <=> round_to_zero(x) - 1.
+  __ sub(r0, r0, Operand(1), SetCC, mi);
+  __ b(vs, &slow);
+
+  __ bind(&smi_check);
+  // Check if the result can fit into an smi. If we had an overflow,
+  // the result is either 0x80000000 or 0x7FFFFFFF and won't fit into an smi.
   __ add(r1, r0, Operand(0x40000000), SetCC);
-  __ b(&wont_fit_smi, mi);
+  // If the result doesn't fit into an smi, branch to slow.
+  __ b(&slow, mi);
   // Tag the result.
-  STATIC_ASSERT(kSmiTag == 0);
   __ mov(r0, Operand(r0, LSL, kSmiTagSize));
 
-  // Check for -0.
-  __ cmp(r0, Operand::Zero());
-  __ b(&restore_fpscr_and_return, ne);
-  // r5 already holds the HeapNumber exponent.
-  __ tst(r5, Operand(HeapNumber::kSignMask));
-  // If our HeapNumber is negative it was -0, so load its address and return.
-  // Else r0 is loaded with 0, so we can also just return.
-  __ ldr(r0, MemOperand(sp, 0 * kPointerSize), ne);
-
-  __ bind(&restore_fpscr_and_return);
-  // Restore FPSCR and return.
-  __ vmsr(r3);
+  __ bind(&just_return);
   __ Drop(argc + 1);
   __ Ret();
 
-  __ bind(&wont_fit_smi);
-  // Restore FPCSR and fall to slow case.
-  __ vmsr(r3);
-
   __ bind(&slow);
   // Tail call the full function. We do not have to patch the receiver
   // because the function makes no use of it.
   __ InvokeFunction(
       function, arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
 
   __ bind(&miss);
   // r2: function name.
   GenerateMissBranch();
 
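A note on what the deleted half of this hunk was doing: the old fast path backed up the FPSCR, installed round-towards-minus-infinity with cleared exception flags, converted with vcvt, then re-read the FPSCR to test the cumulative exception bits, restoring the register on every exit path. A rough portable sketch of that discarded strategy, using <cfenv> in place of the raw vmrs/vmsr accesses (the function name is illustrative, and FE_INVALID is assumed to be the relevant analogue of the stub's kVFPExceptionMask test):

#include <cfenv>
#include <cmath>

// Sketch of the removed strategy (portable analogue, not V8 code).
// Not all compilers honor this pragma; it is required by the standard
// for code that changes the FP environment.
#pragma STDC FENV_ACCESS ON

bool FloorViaRoundingMode(double x, long* out) {
  const int saved = std::fegetround();            // vmrs(r3): back up FPSCR
  std::fesetround(FE_DOWNWARD);                   // vmsr(r9): kRoundToMinusInf
  std::feclearexcept(FE_ALL_EXCEPT);              // clear cumulative flags
  long result = std::lrint(x);                    // vcvt, current rounding mode
  bool bad = std::fetestexcept(FE_INVALID) != 0;  // NaN/Inf/out-of-range
  std::fesetround(saved);                         // vmsr(r3): restore FPSCR
  if (bad) return false;                          // old code: wont_fit_smi/slow
  *out = result;
  return true;
}

The two FPSCR round trips per call, plus the restore bookkeeping on three separate exit paths, are the overhead the replacement code avoids.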
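The new path never touches the FPSCR: it classifies the special cases directly from the HeapNumber's raw words (r5 holds the high word with sign and exponent, r4 the low word of the mantissa) and fixes up negative non-integers after a round-to-zero conversion. A minimal scalar C++ model of the same checks (illustrative only, not the stub itself; the bit tricks mirror the Sbfx/eor/orr/add sequence above):

#include <cstdint>
#include <cstdio>
#include <cstring>

// Illustrative scalar model of the new fast path (not V8 code).
// Returns true and stores floor(x) when the fast path succeeds;
// returns false where the stub would take &just_return or &slow.
bool FastFloor(double x, int32_t* result) {
  uint64_t bits;
  std::memcpy(&bits, &x, sizeof bits);
  uint32_t hi = static_cast<uint32_t>(bits >> 32);  // plays the role of r5
  uint32_t lo = static_cast<uint32_t>(bits);        // plays the role of r4

  // Sbfx + cmp(r3, -1): the stub sign-extends the 11-bit exponent field,
  // so comparing with -1 asks whether all exponent bits are set, i.e.
  // NaN or +/-Infinity. Both are invariant under floor, so the stub
  // returns the original HeapNumber.
  uint32_t biased_exponent = (hi >> 20) & 0x7FF;
  if (biased_exponent == 0x7FF) return false;  // &just_return

  // eor(r3, r5, 0x80000000) then orr(r3, r3, r4, SetCC): flipping the
  // sign bit of the high word and OR-ing in the low word gives zero only
  // for -0, which must also be returned as-is (floor(-0) is -0, not +0).
  if ((hi ^ 0x80000000u) == 0 && lo == 0) return false;  // &just_return

  // |x| >= 2^31 can never convert to int32; the stub ends up in &slow
  // for these. Bailing here also keeps the cast below well-defined.
  if (static_cast<int32_t>(biased_exponent) - 1023 >= 31) return false;

  // TryDoubleToInt32Exact: truncate toward zero, then check whether the
  // conversion round-trips without loss.
  int32_t truncated = static_cast<int32_t>(x);
  bool exact = static_cast<double>(truncated) == x;

  // cmp(r5, 0) plus sub(r0, r0, 1, SetCC, mi): for a negative
  // non-integer, floor(x) == round_to_zero(x) - 1. (The stub's
  // b(vs, &slow) catches the 0x80000000 overflow case; the range guard
  // above already excludes it here.)
  if (!exact && static_cast<int32_t>(hi) < 0) --truncated;

  // add(r1, r0, 0x40000000, SetCC) + b(&slow, mi): the result fits a
  // 31-bit smi iff it lies in [-2^30, 2^30 - 1], exactly when adding
  // 0x40000000 leaves bit 31 clear.
  if (static_cast<int32_t>(static_cast<uint32_t>(truncated) + 0x40000000u) < 0)
    return false;  // &slow
  *result = truncated;  // the stub would now smi-tag: r0 << kSmiTagSize
  return true;
}

int main() {
  for (double x : {3.7, -3.7, 4.0, -0.25, 1073741824.0}) {
    int32_t r;
    if (FastFloor(x, &r)) std::printf("floor(%g) = %d (fast path)\n", x, r);
    else std::printf("floor(%g): just-return or slow path\n", x);
  }
}

Each early return lines up with a branch in the hunk: the exponent and -0 tests go to just_return, an inexact negative result is decremented, and the 0x40000000 trick rejects anything outside the 31-bit smi range.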
(...skipping 1128 matching lines...)
     // number and check if the conversion is exact and fits into the smi
     // range.
     __ JumpIfSmi(key, &key_ok);
     __ CheckMap(key,
                 scratch0,
                 Heap::kHeapNumberMapRootIndex,
                 fail,
                 DONT_DO_SMI_CHECK);
     __ sub(ip, key, Operand(kHeapObjectTag));
     __ vldr(double_scratch0, ip, HeapNumber::kValueOffset);
-    __ EmitVFPTruncate(kRoundToZero,
-                       scratch0,
-                       double_scratch0,
-                       scratch1,
-                       double_scratch1,
-                       kCheckForInexactConversion);
+    __ TryDoubleToInt32Exact(scratch0, double_scratch0, double_scratch1);
     __ b(ne, fail);
     __ TrySmiTag(scratch0, fail, scratch1);
     __ mov(key, scratch0);
     __ bind(&key_ok);
   } else {
     // Check that the key is a smi.
     __ JumpIfNotSmi(key, fail);
   }
 }
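This second caller shows the point of the split: EmitVFPTruncate took six arguments to cover every caller's needs, while TryDoubleToInt32Exact answers the one question asked here, namely whether the double converts to a signed 32-bit integer with nothing lost. A plausible scalar model of that contract, assuming the helper truncates toward zero and reports exactness (the name and shape below are illustrative; the real helper lives in src/arm/macro-assembler-arm.cc):

#include <cmath>
#include <cstdint>

// Illustrative model of the TryDoubleToInt32Exact contract (not the V8
// helper itself): convert toward zero, report whether the conversion
// was exact. Callers branch with b(ne, fail) when it was not.
bool DoubleToInt32Exact(double value, int32_t* out) {
  // NaN and |value| >= 2^31 can never convert exactly; the !(...) form
  // also rejects NaN, for which the comparison is false. (This
  // conservatively rejects exactly -2^31, which could never be
  // smi-tagged anyway.)
  if (!(std::fabs(value) < 2147483648.0)) return false;
  int32_t converted = static_cast<int32_t>(value);  // truncates toward zero
  *out = converted;
  return static_cast<double>(converted) == value;   // exact round-trip?
}

The b(ne, fail) that follows then rejects NaN, out-of-range, and fractional keys in one branch, and TrySmiTag narrows the survivors to the smi range.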
 
(...skipping 672 matching lines...)
       __ Jump(ic_slow, RelocInfo::CODE_TARGET);
     }
   }
 
 
 #undef __
 
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_ARM
