Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 // Copyright 2009 the V8 project authors. All rights reserved. | 1 // Copyright 2009 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 237 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 248 Label* then_label) { | 248 Label* then_label) { |
| 249 Label ok; | 249 Label ok; |
| 250 testl(result, result); | 250 testl(result, result); |
| 251 j(not_zero, &ok); | 251 j(not_zero, &ok); |
| 252 testl(op, op); | 252 testl(op, op); |
| 253 j(sign, then_label); | 253 j(sign, then_label); |
| 254 bind(&ok); | 254 bind(&ok); |
| 255 } | 255 } |
| 256 | 256 |
| 257 | 257 |
| 258 void MacroAssembler::DoubleToInteger32(Register dst, | |
| 259 Register src, | |
| 260 Register scratch) { | |
| 261 Label no_bits_in_range, left_shift, apply_sign, positive_sign, done; | |
| 262 ASSERT(!src.is(scratch)); | |
| 263 ASSERT(!src.is(kScratchRegister)); | |
| 264 ASSERT(!scratch.is(kScratchRegister)); | |
| 265 | |
| 266 movq(scratch, src); | |
| 267 movq(rcx, src); | |
| 268 shr(rcx, Immediate(52)); // Move exponent to bottom of register. | |
| 269 andl(rcx, Immediate(0x7ff)); // sign bit be gone. | |
| 270 subl(rcx, Immediate(0x3ff)); // bias subtracted. | |
| 271 // If exponent in range 0..83, there is a significant bit in | |
| 272 // the 0..2^32 range. | |
| 273 cmpl(rcx, Immediate(52 + 31)); | |
| 274 j(above, &no_bits_in_range); | |
| 275 subl(rcx, Immediate(52)); | |
| 276 j(greater_equal, &left_shift); | |
| 277 // right shift by 52-(e-bias) | |
| 278 negl(rcx); | |
| 279 shr(scratch); | |
| 280 cmpl(rcx, Immediate(20)); | |
| 281 j(less_equal, &apply_sign); | |
| 282 | |
| 283 // Add implicit leading one. | |
| 284 subl(rcx, Immediate(52)); // Get back original exponent. | |
| 285 negl(rcx); | |
| 286 movl(kScratchRegister, Immediate(1)); | |
| 287 shll(kScratchRegister); | |
| 288 orl(scratch, kScratchRegister); | |
| 289 // Mask away exponent and sign bits. | |
| 290 addl(kScratchRegister, kScratchRegister); | |
| 291 subl(kScratchRegister, Immediate(1)); | |
|
Lasse Reichstein
2009/10/07 16:54:24
Just noticed that the previous two lines could be
| |
| 292 andl(scratch, kScratchRegister); | |
| 293 jmp(&apply_sign); | |
| 294 | |
| 295 bind(&no_bits_in_range); | |
| 296 // All significant digits of the number are either below the | |
| 297 // decimal point or at or above 2^32 (or the value is an infinity | |
| 298 // or NaN). Return zero in all these cases. | |
| 299 xor_(dst, dst); | |
| 300 jmp(&done); | |
| 301 | |
| 302 bind(&left_shift); | |
| 303 // Shift left by e-bias-52. | |
| 304 shll(scratch); | |
| 305 | |
| 306 bind(&apply_sign); | |
| 307 testq(src, src); | |
| 308 j(positive, &positive_sign); | |
| 309 neg(scratch); | |
| 310 bind(&positive_sign); | |
| 311 if (!dst.is(scratch)) { | |
| 312 movl(dst, scratch); | |
| 313 } | |
| 314 bind(&done); | |
| 315 } | |
| 316 | |
| 317 | |
| 258 void MacroAssembler::Abort(const char* msg) { | 318 void MacroAssembler::Abort(const char* msg) { |
| 259 // We want to pass the msg string like a smi to avoid GC | 319 // We want to pass the msg string like a smi to avoid GC |
| 260 // problems, however msg is not guaranteed to be aligned | 320 // problems, however msg is not guaranteed to be aligned |
| 261 // properly. Instead, we pass an aligned pointer that is | 321 // properly. Instead, we pass an aligned pointer that is |
| 262 // a proper v8 smi, but also pass the alignment difference | 322 // a proper v8 smi, but also pass the alignment difference |
| 263 // from the real pointer as a smi. | 323 // from the real pointer as a smi. |
| 264 intptr_t p1 = reinterpret_cast<intptr_t>(msg); | 324 intptr_t p1 = reinterpret_cast<intptr_t>(msg); |
| 265 intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag; | 325 intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag; |
| 266 // Note: p0 might not be a valid Smi *value*, but it has a valid Smi tag. | 326 // Note: p0 might not be a valid Smi *value*, but it has a valid Smi tag. |
| 267 ASSERT(reinterpret_cast<Object*>(p0)->IsSmi()); | 327 ASSERT(reinterpret_cast<Object*>(p0)->IsSmi()); |
| (...skipping 1887 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 2155 // Indicate that code has changed. | 2215 // Indicate that code has changed. |
| 2156 CPU::FlushICache(address_, size_); | 2216 CPU::FlushICache(address_, size_); |
| 2157 | 2217 |
| 2158 // Check that the code was patched as expected. | 2218 // Check that the code was patched as expected. |
| 2159 ASSERT(masm_.pc_ == address_ + size_); | 2219 ASSERT(masm_.pc_ == address_ + size_); |
| 2160 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); | 2220 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); |
| 2161 } | 2221 } |
| 2162 | 2222 |
| 2163 | 2223 |
| 2164 } } // namespace v8::internal | 2224 } } // namespace v8::internal |
| OLD | NEW |