| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 176 matching lines...) |
| 187 #else | 187 #else |
| 188 __ stwu(sp, MemOperand(sp, -16)); | 188 __ stwu(sp, MemOperand(sp, -16)); |
| 189 __ stw(fp, MemOperand(sp, 12)); | 189 __ stw(fp, MemOperand(sp, 12)); |
| 190 #endif | 190 #endif |
| 191 __ mr(fp, sp); | 191 __ mr(fp, sp); |
| 192 | 192 |
| 193 // r4 points to our struct | 193 // r4 points to our struct |
| 194 __ mr(r4, r3); | 194 __ mr(r4, r3); |
| 195 | 195 |
| 196 // modify field int i of struct | 196 // modify field int i of struct |
| 197 __ lwz(r3, MemOperand(r4, OFFSET_OF(T, i))); | 197 __ lwz(r3, MemOperand(r4, offsetof(T, i))); |
| 198 __ srwi(r5, r3, Operand(1)); | 198 __ srwi(r5, r3, Operand(1)); |
| 199 __ stw(r5, MemOperand(r4, OFFSET_OF(T, i))); | 199 __ stw(r5, MemOperand(r4, offsetof(T, i))); |
| 200 | 200 |
| 201 // modify field char c of struct | 201 // modify field char c of struct |
| 202 __ lbz(r5, MemOperand(r4, OFFSET_OF(T, c))); | 202 __ lbz(r5, MemOperand(r4, offsetof(T, c))); |
| 203 __ add(r3, r5, r3); | 203 __ add(r3, r5, r3); |
| 204 __ slwi(r5, r5, Operand(2)); | 204 __ slwi(r5, r5, Operand(2)); |
| 205 __ stb(r5, MemOperand(r4, OFFSET_OF(T, c))); | 205 __ stb(r5, MemOperand(r4, offsetof(T, c))); |
| 206 | 206 |
| 207 // modify field int16_t s of struct | 207 // modify field int16_t s of struct |
| 208 __ lhz(r5, MemOperand(r4, OFFSET_OF(T, s))); | 208 __ lhz(r5, MemOperand(r4, offsetof(T, s))); |
| 209 __ add(r3, r5, r3); | 209 __ add(r3, r5, r3); |
| 210 __ srwi(r5, r5, Operand(3)); | 210 __ srwi(r5, r5, Operand(3)); |
| 211 __ sth(r5, MemOperand(r4, OFFSET_OF(T, s))); | 211 __ sth(r5, MemOperand(r4, offsetof(T, s))); |
| 212 | 212 |
| 213 // restore frame | 213 // restore frame |
| 214 #if V8_TARGET_ARCH_PPC64 | 214 #if V8_TARGET_ARCH_PPC64 |
| 215 __ addi(r11, fp, Operand(32)); | 215 __ addi(r11, fp, Operand(32)); |
| 216 __ ld(fp, MemOperand(r11, -8)); | 216 __ ld(fp, MemOperand(r11, -8)); |
| 217 #else | 217 #else |
| 218 __ addi(r11, fp, Operand(16)); | 218 __ addi(r11, fp, Operand(16)); |
| 219 __ lwz(fp, MemOperand(r11, -4)); | 219 __ lwz(fp, MemOperand(r11, -4)); |
| 220 #endif | 220 #endif |
| 221 __ mr(sp, r11); | 221 __ mr(sp, r11); |
| (...skipping 49 matching lines...) |
| 271 Label L, C; | 271 Label L, C; |
| 272 | 272 |
| 273 if (CpuFeatures::IsSupported(VFP3)) { | 273 if (CpuFeatures::IsSupported(VFP3)) { |
| 274 CpuFeatures::Scope scope(VFP3); | 274 CpuFeatures::Scope scope(VFP3); |
| 275 | 275 |
| 276 __ mov(ip, Operand(sp)); | 276 __ mov(ip, Operand(sp)); |
| 277 __ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit()); | 277 __ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit()); |
| 278 __ sub(fp, ip, Operand(4)); | 278 __ sub(fp, ip, Operand(4)); |
| 279 | 279 |
| 280 __ mov(r4, Operand(r0)); | 280 __ mov(r4, Operand(r0)); |
| 281 __ vldr(d6, r4, OFFSET_OF(T, a)); | 281 __ vldr(d6, r4, offsetof(T, a)); |
| 282 __ vldr(d7, r4, OFFSET_OF(T, b)); | 282 __ vldr(d7, r4, offsetof(T, b)); |
| 283 __ vadd(d5, d6, d7); | 283 __ vadd(d5, d6, d7); |
| 284 __ vstr(d5, r4, OFFSET_OF(T, c)); | 284 __ vstr(d5, r4, offsetof(T, c)); |
| 285 | 285 |
| 286 __ vmov(r2, r3, d5); | 286 __ vmov(r2, r3, d5); |
| 287 __ vmov(d4, r2, r3); | 287 __ vmov(d4, r2, r3); |
| 288 __ vstr(d4, r4, OFFSET_OF(T, b)); | 288 __ vstr(d4, r4, offsetof(T, b)); |
| 289 | 289 |
| 290 // Load t.x and t.y, switch values, and store back to the struct. | 290 // Load t.x and t.y, switch values, and store back to the struct. |
| 291 __ vldr(s0, r4, OFFSET_OF(T, x)); | 291 __ vldr(s0, r4, offsetof(T, x)); |
| 292 __ vldr(s31, r4, OFFSET_OF(T, y)); | 292 __ vldr(s31, r4, offsetof(T, y)); |
| 293 __ vmov(s16, s0); | 293 __ vmov(s16, s0); |
| 294 __ vmov(s0, s31); | 294 __ vmov(s0, s31); |
| 295 __ vmov(s31, s16); | 295 __ vmov(s31, s16); |
| 296 __ vstr(s0, r4, OFFSET_OF(T, x)); | 296 __ vstr(s0, r4, offsetof(T, x)); |
| 297 __ vstr(s31, r4, OFFSET_OF(T, y)); | 297 __ vstr(s31, r4, offsetof(T, y)); |
| 298 | 298 |
| 299 // Move a literal into a register that can be encoded in the instruction. | 299 // Move a literal into a register that can be encoded in the instruction. |
| 300 __ vmov(d4, 1.0); | 300 __ vmov(d4, 1.0); |
| 301 __ vstr(d4, r4, OFFSET_OF(T, e)); | 301 __ vstr(d4, r4, offsetof(T, e)); |
| 302 | 302 |
| 303 // Move a literal into a register that requires 64 bits to encode. | 303 // Move a literal into a register that requires 64 bits to encode. |
| 304 // 0x3ff0000010000000 = 1.000000059604644775390625 | 304 // 0x3ff0000010000000 = 1.000000059604644775390625 |
| 305 __ vmov(d4, 1.000000059604644775390625); | 305 __ vmov(d4, 1.000000059604644775390625); |
| 306 __ vstr(d4, r4, OFFSET_OF(T, d)); | 306 __ vstr(d4, r4, offsetof(T, d)); |
| 307 | 307 |
| 308 // Convert from floating point to integer. | 308 // Convert from floating point to integer. |
| 309 __ vmov(d4, 2.0); | 309 __ vmov(d4, 2.0); |
| 310 __ vcvt_s32_f64(s31, d4); | 310 __ vcvt_s32_f64(s31, d4); |
| 311 __ vstr(s31, r4, OFFSET_OF(T, i)); | 311 __ vstr(s31, r4, offsetof(T, i)); |
| 312 | 312 |
| 313 // Convert from integer to floating point. | 313 // Convert from integer to floating point. |
| 314 __ mov(lr, Operand(42)); | 314 __ mov(lr, Operand(42)); |
| 315 __ vmov(s31, lr); | 315 __ vmov(s31, lr); |
| 316 __ vcvt_f64_s32(d4, s31); | 316 __ vcvt_f64_s32(d4, s31); |
| 317 __ vstr(d4, r4, OFFSET_OF(T, f)); | 317 __ vstr(d4, r4, offsetof(T, f)); |
| 318 | 318 |
| 319 // Test vabs. | 319 // Test vabs. |
| 320 __ vldr(d1, r4, OFFSET_OF(T, g)); | 320 __ vldr(d1, r4, offsetof(T, g)); |
| 321 __ vabs(d0, d1); | 321 __ vabs(d0, d1); |
| 322 __ vstr(d0, r4, OFFSET_OF(T, g)); | 322 __ vstr(d0, r4, offsetof(T, g)); |
| 323 __ vldr(d2, r4, OFFSET_OF(T, h)); | 323 __ vldr(d2, r4, offsetof(T, h)); |
| 324 __ vabs(d0, d2); | 324 __ vabs(d0, d2); |
| 325 __ vstr(d0, r4, OFFSET_OF(T, h)); | 325 __ vstr(d0, r4, offsetof(T, h)); |
| 326 | 326 |
| 327 // Test vneg. | 327 // Test vneg. |
| 328 __ vldr(d1, r4, OFFSET_OF(T, m)); | 328 __ vldr(d1, r4, offsetof(T, m)); |
| 329 __ vneg(d0, d1); | 329 __ vneg(d0, d1); |
| 330 __ vstr(d0, r4, OFFSET_OF(T, m)); | 330 __ vstr(d0, r4, offsetof(T, m)); |
| 331 __ vldr(d1, r4, OFFSET_OF(T, n)); | 331 __ vldr(d1, r4, offsetof(T, n)); |
| 332 __ vneg(d0, d1); | 332 __ vneg(d0, d1); |
| 333 __ vstr(d0, r4, OFFSET_OF(T, n)); | 333 __ vstr(d0, r4, offsetof(T, n)); |
| 334 | 334 |
| 335 __ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit()); | 335 __ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit()); |
| 336 | 336 |
| 337 CodeDesc desc; | 337 CodeDesc desc; |
| 338 assm.GetCode(&desc); | 338 assm.GetCode(&desc); |
| 339 Object* code = isolate->heap()->CreateCode( | 339 Object* code = isolate->heap()->CreateCode( |
| 340 desc, | 340 desc, |
| 341 Code::ComputeFlags(Code::STUB), | 341 Code::ComputeFlags(Code::STUB), |
| 342 Handle<Code>())->ToObjectChecked(); | 342 Handle<Code>())->ToObjectChecked(); |
| 343 CHECK(code->IsCode()); | 343 CHECK(code->IsCode()); |
| (...skipping 326 matching lines...) |
| 670 // single precision values around in memory. | 670 // single precision values around in memory. |
| 671 Assembler assm(isolate, NULL, 0); | 671 Assembler assm(isolate, NULL, 0); |
| 672 | 672 |
| 673 if (CpuFeatures::IsSupported(VFP2)) { | 673 if (CpuFeatures::IsSupported(VFP2)) { |
| 674 CpuFeatures::Scope scope(VFP2); | 674 CpuFeatures::Scope scope(VFP2); |
| 675 | 675 |
| 676 __ mov(ip, Operand(sp)); | 676 __ mov(ip, Operand(sp)); |
| 677 __ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit()); | 677 __ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit()); |
| 678 __ sub(fp, ip, Operand(4)); | 678 __ sub(fp, ip, Operand(4)); |
| 679 | 679 |
| 680 __ addi(r4, r0, Operand(OFFSET_OF(D, a))); | 680 __ addi(r4, r0, Operand(offsetof(D, a))); |
| 681 __ vldm(ia_w, r4, d0, d3); | 681 __ vldm(ia_w, r4, d0, d3); |
| 682 __ vldm(ia_w, r4, d4, d7); | 682 __ vldm(ia_w, r4, d4, d7); |
| 683 | 683 |
| 684 __ addi(r4, r0, Operand(OFFSET_OF(D, a))); | 684 __ addi(r4, r0, Operand(offsetof(D, a))); |
| 685 __ vstm(ia_w, r4, d6, d7); | 685 __ vstm(ia_w, r4, d6, d7); |
| 686 __ vstm(ia_w, r4, d0, d5); | 686 __ vstm(ia_w, r4, d0, d5); |
| 687 | 687 |
| 688 __ addi(r4, r1, Operand(OFFSET_OF(F, a))); | 688 __ addi(r4, r1, Operand(offsetof(F, a))); |
| 689 __ vldm(ia_w, r4, s0, s3); | 689 __ vldm(ia_w, r4, s0, s3); |
| 690 __ vldm(ia_w, r4, s4, s7); | 690 __ vldm(ia_w, r4, s4, s7); |
| 691 | 691 |
| 692 __ addi(r4, r1, Operand(OFFSET_OF(F, a))); | 692 __ addi(r4, r1, Operand(offsetof(F, a))); |
| 693 __ vstm(ia_w, r4, s6, s7); | 693 __ vstm(ia_w, r4, s6, s7); |
| 694 __ vstm(ia_w, r4, s0, s5); | 694 __ vstm(ia_w, r4, s0, s5); |
| 695 | 695 |
| 696 __ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit()); | 696 __ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit()); |
| 697 | 697 |
| 698 CodeDesc desc; | 698 CodeDesc desc; |
| 699 assm.GetCode(&desc); | 699 assm.GetCode(&desc); |
| 700 Object* code = isolate->heap()->CreateCode( | 700 Object* code = isolate->heap()->CreateCode( |
| 701 desc, | 701 desc, |
| 702 Code::ComputeFlags(Code::STUB), | 702 Code::ComputeFlags(Code::STUB), |
| (...skipping 79 matching lines...) |
| 782 // single precision values around in memory. | 782 // single precision values around in memory. |
| 783 Assembler assm(isolate, NULL, 0); | 783 Assembler assm(isolate, NULL, 0); |
| 784 | 784 |
| 785 if (CpuFeatures::IsSupported(VFP2)) { | 785 if (CpuFeatures::IsSupported(VFP2)) { |
| 786 CpuFeatures::Scope scope(VFP2); | 786 CpuFeatures::Scope scope(VFP2); |
| 787 | 787 |
| 788 __ mov(ip, Operand(sp)); | 788 __ mov(ip, Operand(sp)); |
| 789 __ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit()); | 789 __ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit()); |
| 790 __ sub(fp, ip, Operand(4)); | 790 __ sub(fp, ip, Operand(4)); |
| 791 | 791 |
| 792 __ addi(r4, r0, Operand(OFFSET_OF(D, a))); | 792 __ addi(r4, r0, Operand(offsetof(D, a))); |
| 793 __ vldm(ia, r4, d0, d3); | 793 __ vldm(ia, r4, d0, d3); |
| 794 __ addi(r4, r4, Operand(4 * 8)); | 794 __ addi(r4, r4, Operand(4 * 8)); |
| 795 __ vldm(ia, r4, d4, d7); | 795 __ vldm(ia, r4, d4, d7); |
| 796 | 796 |
| 797 __ addi(r4, r0, Operand(OFFSET_OF(D, a))); | 797 __ addi(r4, r0, Operand(offsetof(D, a))); |
| 798 __ vstm(ia, r4, d6, d7); | 798 __ vstm(ia, r4, d6, d7); |
| 799 __ addi(r4, r4, Operand(2 * 8)); | 799 __ addi(r4, r4, Operand(2 * 8)); |
| 800 __ vstm(ia, r4, d0, d5); | 800 __ vstm(ia, r4, d0, d5); |
| 801 | 801 |
| 802 __ addi(r4, r1, Operand(OFFSET_OF(F, a))); | 802 __ addi(r4, r1, Operand(offsetof(F, a))); |
| 803 __ vldm(ia, r4, s0, s3); | 803 __ vldm(ia, r4, s0, s3); |
| 804 __ addi(r4, r4, Operand(4 * 4)); | 804 __ addi(r4, r4, Operand(4 * 4)); |
| 805 __ vldm(ia, r4, s4, s7); | 805 __ vldm(ia, r4, s4, s7); |
| 806 | 806 |
| 807 __ addi(r4, r1, Operand(OFFSET_OF(F, a))); | 807 __ addi(r4, r1, Operand(offsetof(F, a))); |
| 808 __ vstm(ia, r4, s6, s7); | 808 __ vstm(ia, r4, s6, s7); |
| 809 __ addi(r4, r4, Operand(2 * 4)); | 809 __ addi(r4, r4, Operand(2 * 4)); |
| 810 __ vstm(ia, r4, s0, s5); | 810 __ vstm(ia, r4, s0, s5); |
| 811 | 811 |
| 812 __ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit()); | 812 __ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit()); |
| 813 | 813 |
| 814 CodeDesc desc; | 814 CodeDesc desc; |
| 815 assm.GetCode(&desc); | 815 assm.GetCode(&desc); |
| 816 Object* code = isolate->heap()->CreateCode( | 816 Object* code = isolate->heap()->CreateCode( |
| 817 desc, | 817 desc, |
| (...skipping 80 matching lines...) |
| 898 // single precision values around in memory. | 898 // single precision values around in memory. |
| 899 Assembler assm(isolate, NULL, 0); | 899 Assembler assm(isolate, NULL, 0); |
| 900 | 900 |
| 901 if (CpuFeatures::IsSupported(VFP2)) { | 901 if (CpuFeatures::IsSupported(VFP2)) { |
| 902 CpuFeatures::Scope scope(VFP2); | 902 CpuFeatures::Scope scope(VFP2); |
| 903 | 903 |
| 904 __ mov(ip, Operand(sp)); | 904 __ mov(ip, Operand(sp)); |
| 905 __ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit()); | 905 __ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit()); |
| 906 __ sub(fp, ip, Operand(4)); | 906 __ sub(fp, ip, Operand(4)); |
| 907 | 907 |
| 908 __ addi(r4, r0, Operand(OFFSET_OF(D, h) + 8)); | 908 __ addi(r4, r0, Operand(offsetof(D, h) + 8)); |
| 909 __ vldm(db_w, r4, d4, d7); | 909 __ vldm(db_w, r4, d4, d7); |
| 910 __ vldm(db_w, r4, d0, d3); | 910 __ vldm(db_w, r4, d0, d3); |
| 911 | 911 |
| 912 __ addi(r4, r0, Operand(OFFSET_OF(D, h) + 8)); | 912 __ addi(r4, r0, Operand(offsetof(D, h) + 8)); |
| 913 __ vstm(db_w, r4, d0, d5); | 913 __ vstm(db_w, r4, d0, d5); |
| 914 __ vstm(db_w, r4, d6, d7); | 914 __ vstm(db_w, r4, d6, d7); |
| 915 | 915 |
| 916 __ addi(r4, r1, Operand(OFFSET_OF(F, h) + 4)); | 916 __ addi(r4, r1, Operand(offsetof(F, h) + 4)); |
| 917 __ vldm(db_w, r4, s4, s7); | 917 __ vldm(db_w, r4, s4, s7); |
| 918 __ vldm(db_w, r4, s0, s3); | 918 __ vldm(db_w, r4, s0, s3); |
| 919 | 919 |
| 920 __ addi(r4, r1, Operand(OFFSET_OF(F, h) + 4)); | 920 __ addi(r4, r1, Operand(offsetof(F, h) + 4)); |
| 921 __ vstm(db_w, r4, s0, s5); | 921 __ vstm(db_w, r4, s0, s5); |
| 922 __ vstm(db_w, r4, s6, s7); | 922 __ vstm(db_w, r4, s6, s7); |
| 923 | 923 |
| 924 __ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit()); | 924 __ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit()); |
| 925 | 925 |
| 926 CodeDesc desc; | 926 CodeDesc desc; |
| 927 assm.GetCode(&desc); | 927 assm.GetCode(&desc); |
| 928 Object* code = isolate->heap()->CreateCode( | 928 Object* code = isolate->heap()->CreateCode( |
| 929 desc, | 929 desc, |
| 930 Code::ComputeFlags(Code::STUB), | 930 Code::ComputeFlags(Code::STUB), |
| (...skipping 58 matching lines...) |
| 989 int32_t d; | 989 int32_t d; |
| 990 } I; | 990 } I; |
| 991 I i; | 991 I i; |
| 992 | 992 |
| 993 i.a = 0xabcd0001; | 993 i.a = 0xabcd0001; |
| 994 i.b = 0xabcd0000; | 994 i.b = 0xabcd0000; |
| 995 | 995 |
| 996 Assembler assm(isolate, NULL, 0); | 996 Assembler assm(isolate, NULL, 0); |
| 997 | 997 |
| 998 // Test HeapObject untagging. | 998 // Test HeapObject untagging. |
| 999 __ ldr(r1, MemOperand(r0, OFFSET_OF(I, a))); | 999 __ ldr(r1, MemOperand(r0, offsetof(I, a))); |
| 1000 __ mov(r1, Operand(r1, ASR, 1), SetCC); | 1000 __ mov(r1, Operand(r1, ASR, 1), SetCC); |
| 1001 __ adc(r1, r1, Operand(r1), LeaveCC, cs); | 1001 __ adc(r1, r1, Operand(r1), LeaveCC, cs); |
| 1002 __ str(r1, MemOperand(r0, OFFSET_OF(I, a))); | 1002 __ str(r1, MemOperand(r0, offsetof(I, a))); |
| 1003 | 1003 |
| 1004 __ ldr(r2, MemOperand(r0, OFFSET_OF(I, b))); | 1004 __ ldr(r2, MemOperand(r0, offsetof(I, b))); |
| 1005 __ mov(r2, Operand(r2, ASR, 1), SetCC); | 1005 __ mov(r2, Operand(r2, ASR, 1), SetCC); |
| 1006 __ adc(r2, r2, Operand(r2), LeaveCC, cs); | 1006 __ adc(r2, r2, Operand(r2), LeaveCC, cs); |
| 1007 __ str(r2, MemOperand(r0, OFFSET_OF(I, b))); | 1007 __ str(r2, MemOperand(r0, offsetof(I, b))); |
| 1008 | 1008 |
| 1009 // Test corner cases. | 1009 // Test corner cases. |
| 1010 __ mov(r1, Operand(0xffffffff)); | 1010 __ mov(r1, Operand(0xffffffff)); |
| 1011 __ mov(r2, Operand::Zero()); | 1011 __ mov(r2, Operand::Zero()); |
| 1012 __ mov(r3, Operand(r1, ASR, 1), SetCC); // Set the carry. | 1012 __ mov(r3, Operand(r1, ASR, 1), SetCC); // Set the carry. |
| 1013 __ adc(r3, r1, Operand(r2)); | 1013 __ adc(r3, r1, Operand(r2)); |
| 1014 __ str(r3, MemOperand(r0, OFFSET_OF(I, c))); | 1014 __ str(r3, MemOperand(r0, offsetof(I, c))); |
| 1015 | 1015 |
| 1016 __ mov(r1, Operand(0xffffffff)); | 1016 __ mov(r1, Operand(0xffffffff)); |
| 1017 __ mov(r2, Operand::Zero()); | 1017 __ mov(r2, Operand::Zero()); |
| 1018 __ mov(r3, Operand(r2, ASR, 1), SetCC); // Unset the carry. | 1018 __ mov(r3, Operand(r2, ASR, 1), SetCC); // Unset the carry. |
| 1019 __ adc(r3, r1, Operand(r2)); | 1019 __ adc(r3, r1, Operand(r2)); |
| 1020 __ str(r3, MemOperand(r0, OFFSET_OF(I, d))); | 1020 __ str(r3, MemOperand(r0, offsetof(I, d))); |
| 1021 | 1021 |
| 1022 __ mov(pc, Operand(lr)); | 1022 __ mov(pc, Operand(lr)); |
| 1023 | 1023 |
| 1024 CodeDesc desc; | 1024 CodeDesc desc; |
| 1025 assm.GetCode(&desc); | 1025 assm.GetCode(&desc); |
| 1026 Object* code = isolate->heap()->CreateCode( | 1026 Object* code = isolate->heap()->CreateCode( |
| 1027 desc, | 1027 desc, |
| 1028 Code::ComputeFlags(Code::STUB), | 1028 Code::ComputeFlags(Code::STUB), |
| 1029 Handle<Code>())->ToObjectChecked(); | 1029 Handle<Code>())->ToObjectChecked(); |
| 1030 CHECK(code->IsCode()); | 1030 CHECK(code->IsCode()); |
| (...skipping 20 matching lines...) |
| 1051 Assembler assm(isolate, NULL, 0); | 1051 Assembler assm(isolate, NULL, 0); |
| 1052 Label target; | 1052 Label target; |
| 1053 __ b(eq, &target); | 1053 __ b(eq, &target); |
| 1054 __ b(ne, &target); | 1054 __ b(ne, &target); |
| 1055 __ bind(&target); | 1055 __ bind(&target); |
| 1056 __ nop(); | 1056 __ nop(); |
| 1057 } | 1057 } |
| 1058 #endif | 1058 #endif |
| 1059 | 1059 |
| 1060 #undef __ | 1060 #undef __ |
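
Note on the change above: the diff mechanically replaces V8's homegrown OFFSET_OF macro with the standard offsetof from <cstddef>; the generated MemOperand/Operand displacements are unchanged, since both expand to the byte offset of the named member. A minimal sketch of the pattern the tests rely on, using a hypothetical stand-in struct (not the exact struct T from this file):

    #include <cstddef>   // offsetof
    #include <cstdint>

    // Stand-in for the test's struct T; the real test struct has more fields.
    struct T {
      int i;
      char c;
      int16_t s;
    };

    // offsetof(T, s) yields the compile-time byte offset of member s within T,
    // which is what operands like MemOperand(r4, offsetof(T, s)) encode.
    static_assert(offsetof(T, i) == 0, "first member starts at offset 0");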