| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 1188 matching lines...) |
| 1199 t.sub_result = 0; | 1199 t.sub_result = 0; |
| 1200 t.mul_result = 0; | 1200 t.mul_result = 0; |
| 1201 t.div_result = 0; | 1201 t.div_result = 0; |
| 1202 Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0); | 1202 Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0); |
| 1203 USE(dummy); | 1203 USE(dummy); |
| 1204 const uint32_t kArmNanUpper32 = 0x7ff80000; | 1204 const uint32_t kArmNanUpper32 = 0x7ff80000; |
| 1205 const uint32_t kArmNanLower32 = 0x00000000; | 1205 const uint32_t kArmNanLower32 = 0x00000000; |
| 1206 #ifdef DEBUG | 1206 #ifdef DEBUG |
| 1207 const uint64_t kArmNanInt64 = | 1207 const uint64_t kArmNanInt64 = |
| 1208 (static_cast<uint64_t>(kArmNanUpper32) << 32) | kArmNanLower32; | 1208 (static_cast<uint64_t>(kArmNanUpper32) << 32) | kArmNanLower32; |
| 1209 DCHECK(kArmNanInt64 != kHoleNanInt64); | 1209 CHECK(kArmNanInt64 != kHoleNanInt64); |
| 1210 #endif | 1210 #endif |
| 1211 // With VFP2 the sign of the canonicalized Nan is undefined. So | 1211 // With VFP2 the sign of the canonicalized Nan is undefined. So |
| 1212 // we remove the sign bit for the upper tests. | 1212 // we remove the sign bit for the upper tests. |
| 1213 CHECK_EQ(kArmNanUpper32, | 1213 CHECK_EQ(kArmNanUpper32, |
| 1214 (bit_cast<int64_t>(t.add_result) >> 32) & 0x7fffffff); | 1214 (bit_cast<int64_t>(t.add_result) >> 32) & 0x7fffffff); |
| 1215 CHECK_EQ(kArmNanLower32, bit_cast<int64_t>(t.add_result) & 0xffffffffu); | 1215 CHECK_EQ(kArmNanLower32, bit_cast<int64_t>(t.add_result) & 0xffffffffu); |
| 1216 CHECK_EQ(kArmNanUpper32, | 1216 CHECK_EQ(kArmNanUpper32, |
| 1217 (bit_cast<int64_t>(t.sub_result) >> 32) & 0x7fffffff); | 1217 (bit_cast<int64_t>(t.sub_result) >> 32) & 0x7fffffff); |
| 1218 CHECK_EQ(kArmNanLower32, bit_cast<int64_t>(t.sub_result) & 0xffffffffu); | 1218 CHECK_EQ(kArmNanLower32, bit_cast<int64_t>(t.sub_result) & 0xffffffffu); |
| 1219 CHECK_EQ(kArmNanUpper32, | 1219 CHECK_EQ(kArmNanUpper32, |
| (...skipping 935 matching lines...) |
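The hunk above is the substance of this change: the DCHECK at old line 1209 becomes a CHECK, so the guard that the ARM canonical NaN bit pattern differs from kHoleNanInt64 is evaluated in release test runs as well, not only in debug builds where DCHECK is compiled in. The neighbouring CHECK_EQ lines mask off bit 63 because, as the comment notes, the sign of a canonicalized NaN is undefined on VFP. A minimal standalone sketch of that masking idiom, assuming a hosted C++ toolchain and a platform whose default quiet NaN has a zero payload (helper names here are illustrative, not V8's):

#include <cassert>
#include <cstdint>
#include <cstring>
#include <limits>

// memcpy-based stand-in for V8's bit_cast (std::bit_cast needs C++20).
static uint64_t DoubleBits(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  return bits;
}

int main() {
  const uint32_t kNanUpper32 = 0x7ff80000;  // quiet-NaN exponent + top mantissa bit
  const uint32_t kNanLower32 = 0x00000000;
  double result = std::numeric_limits<double>::quiet_NaN();
  // Drop the sign bit (bit 63) before comparing, since a canonicalized
  // NaN may come back with either sign.
  assert(((DoubleBits(result) >> 32) & 0x7fffffff) == kNanUpper32);
  assert((DoubleBits(result) & 0xffffffffu) == kNanLower32);
  return 0;
}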
| 2155 HandleScope scope(isolate); | 2155 HandleScope scope(isolate); |
| 2156 | 2156 |
| 2157 Assembler assm(isolate, NULL, 0); | 2157 Assembler assm(isolate, NULL, 0); |
| 2158 __ mov(r0, Operand(isolate->factory()->infinity_value())); | 2158 __ mov(r0, Operand(isolate->factory()->infinity_value())); |
| 2159 __ BlockConstPoolFor(1019); | 2159 __ BlockConstPoolFor(1019); |
| 2160 for (int i = 0; i < 1019; ++i) __ nop(); | 2160 for (int i = 0; i < 1019; ++i) __ nop(); |
| 2161 __ vldr(d0, MemOperand(r0, 0)); | 2161 __ vldr(d0, MemOperand(r0, 0)); |
| 2162 } | 2162 } |
| 2163 | 2163 |
| 2164 #undef __ | 2164 #undef __ |
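The unchanged hunk above blocks constant pool emission for 1019 instructions, pads with that many nops, and only then issues a vldr. A rough back-of-the-envelope for the 1019 figure, offered as an assumption about the test's intent rather than anything the diff states:

// Each ARM instruction is 4 bytes, so blocking the pool for 1019
// instructions defers it by 4076 bytes, just under the +/-4095-byte
// offset an ARM pc-relative ldr can encode.
constexpr int kInstrSize = 4;
constexpr int kBlockedInstrs = 1019;
constexpr int kBlockedBytes = kBlockedInstrs * kInstrSize;
static_assert(kBlockedBytes == 4076, "pool deferred to the edge of ldr's reach");

If the pool were pushed out much further, the earlier mov of the infinity_value() handle, which is materialized through a pc-relative load from that pool, would presumably no longer reach its slot.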