| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 253 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 264 } | 264 } |
| 265 if (shift_down != 0) { | 265 if (shift_down != 0) { |
| 266 mov(dst, Operand(dst, ASR, shift_down), LeaveCC, cond); | 266 mov(dst, Operand(dst, ASR, shift_down), LeaveCC, cond); |
| 267 } | 267 } |
| 268 } else { | 268 } else { |
| 269 sbfx(dst, src1, lsb, width, cond); | 269 sbfx(dst, src1, lsb, width, cond); |
| 270 } | 270 } |
| 271 } | 271 } |
| 272 | 272 |
| 273 | 273 |
// Bit-field insert: copies the low `width` bits of `src` into `dst`
// starting at bit `lsb`; all other bits of `dst` are preserved.
// `scratch` is only used by the pre-ARMv7 fallback and must differ
// from `dst`.
// NOTE(review): the pre-ARMv7 fallback sequence is emitted
// unconditionally -- `cond` is only honoured by the ARMv7 `bfi` path.
void MacroAssembler::Bfi(Register dst,
                         Register src,
                         Register scratch,
                         int lsb,
                         int width,
                         Condition cond) {
  ASSERT(0 <= lsb && lsb < 32);
  ASSERT(0 <= width && width < 32);
  // The mask computed below shifts by (width + lsb), so that sum must
  // stay strictly below 32.
  ASSERT(lsb + width < 32);
  ASSERT(!scratch.is(dst));
  if (width == 0) return;  // Empty field: nothing to insert.
  if (!Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
    // Clear bits [lsb, lsb + width) of dst, then OR in the masked,
    // left-shifted source field.
    int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
    bic(dst, dst, Operand(mask));
    and_(scratch, src, Operand((1 << width) - 1));
    mov(scratch, Operand(scratch, LSL, lsb));
    orr(dst, dst, scratch);
  } else {
    // ARMv7 has a single-instruction bit-field insert.
    bfi(dst, src, lsb, width, cond);
  }
}
| 295 |
| 296 |
// Bit-field clear: zeroes `width` bits of `dst` starting at bit `lsb`.
// NOTE(review): the pre-ARMv7 fallback ignores `cond` (the bic is
// unconditional); only the ARMv7 `bfc` path is conditional.
void MacroAssembler::Bfc(Register dst, int lsb, int width, Condition cond) {
  ASSERT(lsb < 32);
  if (!Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
    // Mask covering bits [lsb, lsb + width); clear them with bic.
    int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
    bic(dst, dst, Operand(mask));
  } else {
    bfc(dst, lsb, width, cond);
  }
}
| 283 | 306 |
| (...skipping 194 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 478 | 501 |
// Reverses PushSafepointRegistersAndDoubles: reloads all allocatable
// VFP double registers from the stack, frees their stack space, then
// pops the core safepoint registers.
void MacroAssembler::PopSafepointRegistersAndDoubles() {
  // The doubles were stored lowest allocation index first, each taking
  // kDoubleSize bytes.
  for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; i++) {
    vldr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize);
  }
  // Release the double-register save area in one adjustment.
  add(sp, sp, Operand(DwVfpRegister::kNumAllocatableRegisters *
                      kDoubleSize));
  PopSafepointRegisters();
}
| 487 | 510 |
// Stores `src` into the safepoint (registers + doubles frame) stack
// slot that corresponds to register `dst`.
void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register src,
                                                             Register dst) {
  str(src, SafepointRegistersAndDoublesSlot(dst));
}
| 491 | 515 |
| 492 | 516 |
// Stores `src` into the safepoint stack slot corresponding to
// register `dst`.
void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
  str(src, SafepointRegisterSlot(dst));
}
| 496 | 520 |
| 497 | 521 |
// Loads `dst` from the safepoint stack slot corresponding to
// register `src`.
void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
  ldr(dst, SafepointRegisterSlot(src));
}
| 501 | 525 |
| 502 | 526 |
// Maps a register code to its index within the safepoint register
// save area on the stack.
int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
  // The registers are pushed starting with the highest encoding,
  // which means that lowest encodings are closest to the stack pointer.
  // Hence the stack index equals the register code itself.
  ASSERT(reg_code >= 0 && reg_code < kNumSafepointRegisters);
  return reg_code;
}
| 509 | 533 |
| (...skipping 197 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 707 #else // defined(V8_HOST_ARCH_ARM) | 731 #else // defined(V8_HOST_ARCH_ARM) |
| 708 // If we are using the simulator then we should always align to the expected | 732 // If we are using the simulator then we should always align to the expected |
| 709 // alignment. As the simulator is used to generate snapshots we do not know | 733 // alignment. As the simulator is used to generate snapshots we do not know |
| 710 // if the target platform will need alignment, so this is controlled from a | 734 // if the target platform will need alignment, so this is controlled from a |
| 711 // flag. | 735 // flag. |
| 712 return FLAG_sim_stack_alignment; | 736 return FLAG_sim_stack_alignment; |
| 713 #endif // defined(V8_HOST_ARCH_ARM) | 737 #endif // defined(V8_HOST_ARCH_ARM) |
| 714 } | 738 } |
| 715 | 739 |
| 716 | 740 |
// Tears down an exit frame set up by EnterExitFrame.  If `save_doubles`
// is set, first restores all VFP double registers saved below the frame.
// If `argument_count` is a valid register, that many pointer-sized
// arguments are popped off the stack after the frame is dismantled.
// NOTE(review): this no longer returns to the caller itself -- call
// sites are expected to emit the return (e.g. mov(pc, lr)) themselves.
void MacroAssembler::LeaveExitFrame(bool save_doubles,
                                    Register argument_count) {
  // Optionally restore all double registers.
  if (save_doubles) {
    for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
      DwVfpRegister reg = DwVfpRegister::from_code(i);
      // Doubles are saved below the two words at fp (caller fp and lr).
      const int offset = -2 * kPointerSize;
      vldr(reg, fp, offset - ((i + 1) * kDoubleSize));
    }
  }

  // Clear top frame.
  mov(r3, Operand(0, RelocInfo::NONE));
  mov(ip, Operand(ExternalReference(Isolate::k_c_entry_fp_address)));
  str(r3, MemOperand(ip));

  // Restore current context from top and clear it in debug mode.
  mov(ip, Operand(ExternalReference(Isolate::k_context_address)));
  ldr(cp, MemOperand(ip));
#ifdef DEBUG
  // Zap the stored context so stale reads are caught in debug builds.
  str(r3, MemOperand(ip));
#endif

  // Tear down the exit frame, pop the arguments, and return.
  mov(sp, Operand(fp));
  ldm(ia_w, sp, fp.bit() | lr.bit());
  if (argument_count.is_valid()) {
    // Drop argument_count pointer-sized slots from the stack.
    add(sp, sp, Operand(argument_count, LSL, kPointerSizeLog2));
  }
}
| 771 |
// Moves the double result of a C function call into `dst`.  Under the
// ARM EABI (soft-float calling convention) a double is returned in the
// core register pair r0:r1; without EABI support this is unreachable.
void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
#if !defined(USE_ARM_EABI)
  UNREACHABLE();
#else
  vmov(dst, r0, r1);
#endif
}
| 746 | 779 |
| 747 | 780 |
| 748 void MacroAssembler::InvokePrologue(const ParameterCount& expected, | 781 void MacroAssembler::InvokePrologue(const ParameterCount& expected, |
| 749 const ParameterCount& actual, | 782 const ParameterCount& actual, |
| 750 Handle<Code> code_constant, | 783 Handle<Code> code_constant, |
| 751 Register code_reg, | 784 Register code_reg, |
| 752 Label* done, | 785 Label* done, |
| 753 InvokeFlag flag, | 786 InvokeFlag flag, |
| 754 PostCallGenerator* post_call_generator) { | 787 PostCallGenerator* post_call_generator) { |
| (...skipping 168 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 923 Label* fail) { | 956 Label* fail) { |
| 924 ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset)); | 957 ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset)); |
| 925 cmp(scratch, Operand(FIRST_JS_OBJECT_TYPE)); | 958 cmp(scratch, Operand(FIRST_JS_OBJECT_TYPE)); |
| 926 b(lt, fail); | 959 b(lt, fail); |
| 927 cmp(scratch, Operand(LAST_JS_OBJECT_TYPE)); | 960 cmp(scratch, Operand(LAST_JS_OBJECT_TYPE)); |
| 928 b(gt, fail); | 961 b(gt, fail); |
| 929 } | 962 } |
| 930 | 963 |
| 931 | 964 |
// Branches to `fail` if `object` is not a string.  Clobbers `scratch`
// with the instance type.
// NOTE(review): `object` is dereferenced for its map, so the caller
// must have established that it is a heap object (not a smi).
void MacroAssembler::IsObjectJSStringType(Register object,
                                          Register scratch,
                                          Label* fail) {
  ASSERT(kNotStringTag != 0);

  ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
  ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
  // String types have the kIsNotStringMask bits clear.
  tst(scratch, Operand(kIsNotStringMask));
  b(ne, fail);
}
| 942 | 975 |
| 943 | 976 |
| 944 #ifdef ENABLE_DEBUGGER_SUPPORT | 977 #ifdef ENABLE_DEBUGGER_SUPPORT |
| (...skipping 54 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 999 | 1032 |
// Unlinks the topmost try handler: pops its next-handler pointer,
// drops the rest of the handler frame from the stack, and makes the
// next handler the new top of the handler chain.  Clobbers r1 and ip.
void MacroAssembler::PopTryHandler() {
  // The next-handler pointer must be the first word of the handler.
  ASSERT_EQ(0, StackHandlerConstants::kNextOffset);
  pop(r1);
  mov(ip, Operand(ExternalReference(Isolate::k_handler_address)));
  // Discard the remaining words of the handler frame.
  add(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
  str(r1, MemOperand(ip));
}
| 1007 | 1040 |
| 1008 | 1041 |
// Throws `value` as an exception: unwinds to the topmost stack handler
// and jumps to its saved pc, with the exception in r0.
// Clobbers r2, r3, sp, fp and cp.
void MacroAssembler::Throw(Register value) {
  // r0 is expected to hold the exception.
  if (!value.is(r0)) {
    mov(r0, value);
  }

  // Adjust this code if not the case.
  STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);

  // Drop the sp to the top of the handler.
  mov(r3, Operand(ExternalReference(Isolate::k_handler_address)));
  ldr(sp, MemOperand(r3));

  // Restore the next handler and frame pointer, discard handler state.
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
  pop(r2);
  str(r2, MemOperand(r3));
  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
  ldm(ia_w, sp, r3.bit() | fp.bit());  // r3: discarded state.

  // Before returning we restore the context from the frame pointer if
  // not NULL.  The frame pointer is NULL in the exception handler of a
  // JS entry frame.
  cmp(fp, Operand(0, RelocInfo::NONE));
  // Set cp to NULL if fp is NULL.
  mov(cp, Operand(0, RelocInfo::NONE), LeaveCC, eq);
  // Restore cp otherwise.
  ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
#ifdef DEBUG
  if (FLAG_debug_code) {
    // Make lr point at the current pc so the destination shows up
    // sensibly in debug traces.
    mov(lr, Operand(pc));
  }
#endif
  // Resume at the pc stored in the handler frame.
  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
  pop(pc);
}
| 1078 |
| 1079 |
// Throws an uncatchable exception: unwinds the handler chain past all
// non-ENTRY handlers up to the nearest JS-entry handler and resumes
// there.  For OUT_OF_MEMORY, also records the out-of-memory failure in
// the isolate's pending-exception slot.  Clobbers r0, r2, r3, sp, fp
// and cp.
void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
                                      Register value) {
  // Adjust this code if not the case.
  STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);

  // r0 is expected to hold the exception.
  if (!value.is(r0)) {
    mov(r0, value);
  }

  // Drop sp to the top stack handler.
  mov(r3, Operand(ExternalReference(Isolate::k_handler_address)));
  ldr(sp, MemOperand(r3));

  // Unwind the handlers until the ENTRY handler is found.
  Label loop, done;
  bind(&loop);
  // Load the type of the current stack handler.
  const int kStateOffset = StackHandlerConstants::kStateOffset;
  ldr(r2, MemOperand(sp, kStateOffset));
  cmp(r2, Operand(StackHandler::ENTRY));
  b(eq, &done);
  // Fetch the next handler in the list.
  const int kNextOffset = StackHandlerConstants::kNextOffset;
  ldr(sp, MemOperand(sp, kNextOffset));
  jmp(&loop);
  bind(&done);

  // Set the top handler address to next handler past the current ENTRY handler.
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
  pop(r2);
  str(r2, MemOperand(r3));

  if (type == OUT_OF_MEMORY) {
    // Set external caught exception to false.
    ExternalReference external_caught(
        Isolate::k_external_caught_exception_address);
    mov(r0, Operand(false, RelocInfo::NONE));
    mov(r2, Operand(external_caught));
    str(r0, MemOperand(r2));

    // Set pending exception and r0 to out of memory exception.
    Failure* out_of_memory = Failure::OutOfMemoryException();
    mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
    mov(r2, Operand(ExternalReference(Isolate::k_pending_exception_address)));
    str(r0, MemOperand(r2));
  }

  // Stack layout at this point. See also StackHandlerConstants.
  // sp ->   state (ENTRY)
  //         fp
  //         lr

  // Discard handler state (r2 is not used) and restore frame pointer.
  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
  ldm(ia_w, sp, r2.bit() | fp.bit());  // r2: discarded state.
  // Before returning we restore the context from the frame pointer if
  // not NULL.  The frame pointer is NULL in the exception handler of a
  // JS entry frame.
  cmp(fp, Operand(0, RelocInfo::NONE));
  // Set cp to NULL if fp is NULL.
  mov(cp, Operand(0, RelocInfo::NONE), LeaveCC, eq);
  // Restore cp otherwise.
  ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
#ifdef DEBUG
  if (FLAG_debug_code) {
    // Make lr point at the current pc for nicer debug traces.
    mov(lr, Operand(pc));
  }
#endif
  // Resume at the pc stored in the ENTRY handler frame.
  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
  pop(pc);
}
| 1152 |
| 1153 |
| 1009 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, | 1154 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, |
| 1010 Register scratch, | 1155 Register scratch, |
| 1011 Label* miss) { | 1156 Label* miss) { |
| 1012 Label same_contexts; | 1157 Label same_contexts; |
| 1013 | 1158 |
| 1014 ASSERT(!holder_reg.is(scratch)); | 1159 ASSERT(!holder_reg.is(scratch)); |
| 1015 ASSERT(!holder_reg.is(ip)); | 1160 ASSERT(!holder_reg.is(ip)); |
| 1016 ASSERT(!scratch.is(ip)); | 1161 ASSERT(!scratch.is(ip)); |
| 1017 | 1162 |
| 1018 // Load current lexical context from the stack frame. | 1163 // Load current lexical context from the stack frame. |
| (...skipping 77 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1096 mov(scratch1, Operand(0x7191)); | 1241 mov(scratch1, Operand(0x7191)); |
| 1097 mov(scratch2, Operand(0x7291)); | 1242 mov(scratch2, Operand(0x7291)); |
| 1098 } | 1243 } |
| 1099 jmp(gc_required); | 1244 jmp(gc_required); |
| 1100 return; | 1245 return; |
| 1101 } | 1246 } |
| 1102 | 1247 |
| 1103 ASSERT(!result.is(scratch1)); | 1248 ASSERT(!result.is(scratch1)); |
| 1104 ASSERT(!result.is(scratch2)); | 1249 ASSERT(!result.is(scratch2)); |
| 1105 ASSERT(!scratch1.is(scratch2)); | 1250 ASSERT(!scratch1.is(scratch2)); |
| 1251 ASSERT(!scratch1.is(ip)); |
| 1252 ASSERT(!scratch2.is(ip)); |
| 1106 | 1253 |
| 1107 // Make object size into bytes. | 1254 // Make object size into bytes. |
| 1108 if ((flags & SIZE_IN_WORDS) != 0) { | 1255 if ((flags & SIZE_IN_WORDS) != 0) { |
| 1109 object_size *= kPointerSize; | 1256 object_size *= kPointerSize; |
| 1110 } | 1257 } |
| 1111 ASSERT_EQ(0, object_size & kObjectAlignmentMask); | 1258 ASSERT_EQ(0, object_size & kObjectAlignmentMask); |
| 1112 | 1259 |
| 1113 // Check relative positions of allocation top and limit addresses. | 1260 // Check relative positions of allocation top and limit addresses. |
| 1114 // The values must be adjacent in memory to allow the use of LDM. | 1261 // The values must be adjacent in memory to allow the use of LDM. |
| 1115 // Also, assert that the registers are numbered such that the values | 1262 // Also, assert that the registers are numbered such that the values |
| (...skipping 375 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1491 return result; | 1638 return result; |
| 1492 } | 1639 } |
| 1493 | 1640 |
| 1494 | 1641 |
| 1495 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) { | 1642 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) { |
| 1496 return ref0.address() - ref1.address(); | 1643 return ref0.address() - ref1.address(); |
| 1497 } | 1644 } |
| 1498 | 1645 |
| 1499 | 1646 |
| 1500 MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn( | 1647 MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn( |
| 1501 ApiFunction* function, int stack_space) { | 1648 ExternalReference function, int stack_space) { |
| 1502 ExternalReference next_address = | 1649 ExternalReference next_address = |
| 1503 ExternalReference::handle_scope_next_address(); | 1650 ExternalReference::handle_scope_next_address(); |
| 1504 const int kNextOffset = 0; | 1651 const int kNextOffset = 0; |
| 1505 const int kLimitOffset = AddressOffset( | 1652 const int kLimitOffset = AddressOffset( |
| 1506 ExternalReference::handle_scope_limit_address(), | 1653 ExternalReference::handle_scope_limit_address(), |
| 1507 next_address); | 1654 next_address); |
| 1508 const int kLevelOffset = AddressOffset( | 1655 const int kLevelOffset = AddressOffset( |
| 1509 ExternalReference::handle_scope_level_address(), | 1656 ExternalReference::handle_scope_level_address(), |
| 1510 next_address); | 1657 next_address); |
| 1511 | 1658 |
| (...skipping 36 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1548 b(ne, &delete_allocated_handles); | 1695 b(ne, &delete_allocated_handles); |
| 1549 | 1696 |
| 1550 // Check if the function scheduled an exception. | 1697 // Check if the function scheduled an exception. |
| 1551 bind(&leave_exit_frame); | 1698 bind(&leave_exit_frame); |
| 1552 LoadRoot(r4, Heap::kTheHoleValueRootIndex); | 1699 LoadRoot(r4, Heap::kTheHoleValueRootIndex); |
| 1553 mov(ip, Operand(ExternalReference::scheduled_exception_address())); | 1700 mov(ip, Operand(ExternalReference::scheduled_exception_address())); |
| 1554 ldr(r5, MemOperand(ip)); | 1701 ldr(r5, MemOperand(ip)); |
| 1555 cmp(r4, r5); | 1702 cmp(r4, r5); |
| 1556 b(ne, &promote_scheduled_exception); | 1703 b(ne, &promote_scheduled_exception); |
| 1557 | 1704 |
| 1558 // LeaveExitFrame expects unwind space to be in r4. | 1705 // LeaveExitFrame expects unwind space to be in a register. |
| 1559 mov(r4, Operand(stack_space)); | 1706 mov(r4, Operand(stack_space)); |
| 1560 LeaveExitFrame(false); | 1707 LeaveExitFrame(false, r4); |
| 1708 mov(pc, lr); |
| 1561 | 1709 |
| 1562 bind(&promote_scheduled_exception); | 1710 bind(&promote_scheduled_exception); |
| 1563 MaybeObject* result = TryTailCallExternalReference( | 1711 MaybeObject* result = TryTailCallExternalReference( |
| 1564 ExternalReference(Runtime::kPromoteScheduledException), 0, 1); | 1712 ExternalReference(Runtime::kPromoteScheduledException), 0, 1); |
| 1565 if (result->IsFailure()) { | 1713 if (result->IsFailure()) { |
| 1566 return result; | 1714 return result; |
| 1567 } | 1715 } |
| 1568 | 1716 |
| 1569 // HandleScope limit has changed. Delete allocated extensions. | 1717 // HandleScope limit has changed. Delete allocated extensions. |
| 1570 bind(&delete_allocated_handles); | 1718 bind(&delete_allocated_handles); |
| (...skipping 119 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1690 // This code is faster for doubles that are in the ranges -0x7fffffff to | 1838 // This code is faster for doubles that are in the ranges -0x7fffffff to |
| 1691 // -0x40000000 or 0x40000000 to 0x7fffffff. This corresponds almost to | 1839 // -0x40000000 or 0x40000000 to 0x7fffffff. This corresponds almost to |
| 1692 // the range of signed int32 values that are not Smis. Jumps to the label | 1840 // the range of signed int32 values that are not Smis. Jumps to the label |
| 1693 // 'not_int32' if the double isn't in the range -0x80000000.0 to | 1841 // 'not_int32' if the double isn't in the range -0x80000000.0 to |
| 1694 // 0x80000000.0 (excluding the endpoints). | 1842 // 0x80000000.0 (excluding the endpoints). |
| 1695 Label right_exponent, done; | 1843 Label right_exponent, done; |
| 1696 // Get exponent word. | 1844 // Get exponent word. |
| 1697 ldr(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset)); | 1845 ldr(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset)); |
| 1698 // Get exponent alone in scratch2. | 1846 // Get exponent alone in scratch2. |
| 1699 Ubfx(scratch2, | 1847 Ubfx(scratch2, |
| 1700 scratch, | 1848 scratch, |
| 1701 HeapNumber::kExponentShift, | 1849 HeapNumber::kExponentShift, |
| 1702 HeapNumber::kExponentBits); | 1850 HeapNumber::kExponentBits); |
| 1703 // Load dest with zero. We use this either for the final shift or | 1851 // Load dest with zero. We use this either for the final shift or |
| 1704 // for the answer. | 1852 // for the answer. |
| 1705 mov(dest, Operand(0, RelocInfo::NONE)); | 1853 mov(dest, Operand(0, RelocInfo::NONE)); |
| 1706 // Check whether the exponent matches a 32 bit signed int that is not a Smi. | 1854 // Check whether the exponent matches a 32 bit signed int that is not a Smi. |
| 1707 // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). This is | 1855 // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). This is |
| 1708 // the exponent that we are fastest at and also the highest exponent we can | 1856 // the exponent that we are fastest at and also the highest exponent we can |
| 1709 // handle here. | 1857 // handle here. |
| 1710 const uint32_t non_smi_exponent = HeapNumber::kExponentBias + 30; | 1858 const uint32_t non_smi_exponent = HeapNumber::kExponentBias + 30; |
| 1711 // The non_smi_exponent, 0x41d, is too big for ARM's immediate field so we | 1859 // The non_smi_exponent, 0x41d, is too big for ARM's immediate field so we |
| 1712 // split it up to avoid a constant pool entry. You can't do that in general | 1860 // split it up to avoid a constant pool entry. You can't do that in general |
| (...skipping 42 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1755 orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance)); | 1903 orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance)); |
| 1756 // Move down according to the exponent. | 1904 // Move down according to the exponent. |
| 1757 mov(dest, Operand(scratch, LSR, dest)); | 1905 mov(dest, Operand(scratch, LSR, dest)); |
| 1758 // Fix sign if sign bit was set. | 1906 // Fix sign if sign bit was set. |
| 1759 rsb(dest, dest, Operand(0, RelocInfo::NONE), LeaveCC, ne); | 1907 rsb(dest, dest, Operand(0, RelocInfo::NONE), LeaveCC, ne); |
| 1760 bind(&done); | 1908 bind(&done); |
| 1761 } | 1909 } |
| 1762 } | 1910 } |
| 1763 | 1911 |
| 1764 | 1912 |
// Converts `double_input` to a signed 32-bit integer in `result` using
// the given VFP rounding mode.  FPSCR is saved in scratch1, modified
// for the conversion, and restored afterwards.  On exit the condition
// flags reflect a tst of the cumulative VFP exception bits (optionally
// including the inexact bit), so callers can branch on `ne` to detect
// a failed/lossy conversion.  Requires VFP3.
void MacroAssembler::EmitVFPTruncate(VFPRoundingMode rounding_mode,
                                     SwVfpRegister result,
                                     DwVfpRegister double_input,
                                     Register scratch1,
                                     Register scratch2,
                                     CheckForInexactConversion check_inexact) {
  ASSERT(Isolate::Current()->cpu_features()->IsSupported(VFP3));
  CpuFeatures::Scope scope(VFP3);
  Register prev_fpscr = scratch1;
  Register scratch = scratch2;

  // Only test the inexact-exception bit when the caller asked for it.
  int32_t check_inexact_conversion =
    (check_inexact == kCheckForInexactConversion) ? kVFPInexactExceptionBit : 0;

  // Set custom FPSCR:
  //  - Set rounding mode.
  //  - Clear vfp cumulative exception flags.
  //  - Make sure Flush-to-zero mode control bit is unset.
  vmrs(prev_fpscr);
  bic(scratch,
      prev_fpscr,
      Operand(kVFPExceptionMask |
              check_inexact_conversion |
              kVFPRoundingModeMask |
              kVFPFlushToZeroMask));
  // 'Round To Nearest' is encoded by 0b00 so no bits need to be set.
  if (rounding_mode != kRoundToNearest) {
    orr(scratch, scratch, Operand(rounding_mode));
  }
  vmsr(scratch);

  // Convert the argument to an integer.  Round-to-zero can use the
  // instruction's own default mode; otherwise honour the FPSCR mode
  // installed above.
  vcvt_s32_f64(result,
               double_input,
               (rounding_mode == kRoundToZero) ? kDefaultRoundToZero
                                               : kFPSCRRounding);

  // Retrieve FPSCR (with the exception flags set by the conversion).
  vmrs(scratch);
  // Restore FPSCR.
  vmsr(prev_fpscr);
  // Check for vfp exceptions; leaves the result in the condition flags.
  tst(scratch, Operand(kVFPExceptionMask | check_inexact_conversion));
}
| 1957 |
| 1958 |
// Extracts the low `num_least_bits` bits of the integer value of the
// smi in `src` into `dst`.
void MacroAssembler::GetLeastBitsFromSmi(Register dst,
                                         Register src,
                                         int num_least_bits) {
  if (Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
    // Single instruction: skip the smi tag bit(s) and extract the field.
    ubfx(dst, src, kSmiTagSize, num_least_bits);
  } else {
    // Fallback: untag with an arithmetic shift, then mask the low bits.
    mov(dst, Operand(src, ASR, kSmiTagSize));
    and_(dst, dst, Operand((1 << num_least_bits) - 1));
  }
}
| 1775 | 1969 |
| 1776 | 1970 |
// Extracts the low `num_least_bits` bits of the (untagged) int32 in
// `src` into `dst`.
void MacroAssembler::GetLeastBitsFromInt32(Register dst,
                                           Register src,
                                           int num_least_bits) {
  and_(dst, src, Operand((1 << num_least_bits) - 1));
}
| 1976 |
| 1977 |
| 1777 void MacroAssembler::CallRuntime(const Runtime::Function* f, | 1978 void MacroAssembler::CallRuntime(const Runtime::Function* f, |
| 1778 int num_arguments) { | 1979 int num_arguments) { |
| 1779 // All parameters are on the stack. r0 has the return value after call. | 1980 // All parameters are on the stack. r0 has the return value after call. |
| 1780 | 1981 |
| 1781 // If the expected number of arguments of the runtime function is | 1982 // If the expected number of arguments of the runtime function is |
| 1782 // constant, we check that the actual number of arguments match the | 1983 // constant, we check that the actual number of arguments match the |
| 1783 // expectation. | 1984 // expectation. |
| 1784 if (f->nargs >= 0 && f->nargs != num_arguments) { | 1985 if (f->nargs >= 0 && f->nargs != num_arguments) { |
| 1785 IllegalOperation(num_arguments); | 1986 IllegalOperation(num_arguments); |
| 1786 return; | 1987 return; |
| (...skipping 242 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
// Loads into `dst` the function context `context_chain_length` levels
// up the context chain from the current context (cp).  A length of
// zero yields the current context itself.
void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
  if (context_chain_length > 0) {
    // Move up the chain of contexts to the context containing the slot.
    ldr(dst, MemOperand(cp, Context::SlotOffset(Context::CLOSURE_INDEX)));
    // Load the function context (which is the incoming, outer context).
    ldr(dst, FieldMemOperand(dst, JSFunction::kContextOffset));
    for (int i = 1; i < context_chain_length; i++) {
      ldr(dst, MemOperand(dst, Context::SlotOffset(Context::CLOSURE_INDEX)));
      ldr(dst, FieldMemOperand(dst, JSFunction::kContextOffset));
    }
  } else {
    // Slot is in the current function context.  Move it into the
    // destination register in case we store into it (the write barrier
    // cannot be allowed to destroy the context register cp).
    mov(dst, cp);
  }

  // We should not have found a 'with' context by walking the context chain
  // (i.e., the static scope chain and runtime context chain do not agree).
  // A variable occurring in such a scope should have slot type LOOKUP and
  // not CONTEXT.  Verified here in debug builds: a function context is its
  // own FCONTEXT.
  if (FLAG_debug_code) {
    ldr(ip, MemOperand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
    cmp(dst, ip);
    Check(eq, "Yo dawg, I heard you liked function contexts "
              "so I put function contexts in all your contexts");
  }
}
| 2046 | 2258 |
| 2047 | 2259 |
| 2048 void MacroAssembler::LoadGlobalFunction(int index, Register function) { | 2260 void MacroAssembler::LoadGlobalFunction(int index, Register function) { |
| 2049 // Load the global or builtins object from the current context. | 2261 // Load the global or builtins object from the current context. |
| 2050 ldr(function, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX))); | 2262 ldr(function, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX))); |
| 2051 // Load the global context from the global or builtins object. | 2263 // Load the global context from the global or builtins object. |
| 2052 ldr(function, FieldMemOperand(function, | 2264 ldr(function, FieldMemOperand(function, |
| 2053 GlobalObject::kGlobalContextOffset)); | 2265 GlobalObject::kGlobalContextOffset)); |
| (...skipping 56 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2110 } | 2322 } |
| 2111 | 2323 |
| 2112 | 2324 |
| 2113 void MacroAssembler::AbortIfNotSmi(Register object) { | 2325 void MacroAssembler::AbortIfNotSmi(Register object) { |
| 2114 STATIC_ASSERT(kSmiTag == 0); | 2326 STATIC_ASSERT(kSmiTag == 0); |
| 2115 tst(object, Operand(kSmiTagMask)); | 2327 tst(object, Operand(kSmiTagMask)); |
| 2116 Assert(eq, "Operand is not smi"); | 2328 Assert(eq, "Operand is not smi"); |
| 2117 } | 2329 } |
| 2118 | 2330 |
| 2119 | 2331 |
| 2332 void MacroAssembler::AbortIfNotString(Register object) { |
| 2333 STATIC_ASSERT(kSmiTag == 0); |
| 2334 tst(object, Operand(kSmiTagMask)); |
| 2335 Assert(ne, "Operand is not a string"); |
| 2336 push(object); |
| 2337 ldr(object, FieldMemOperand(object, HeapObject::kMapOffset)); |
| 2338 CompareInstanceType(object, object, FIRST_NONSTRING_TYPE); |
| 2339 pop(object); |
| 2340 Assert(lo, "Operand is not a string"); |
| 2341 } |
| 2342 |
| 2343 |
| 2344 |
| 2120 void MacroAssembler::AbortIfNotRootValue(Register src, | 2345 void MacroAssembler::AbortIfNotRootValue(Register src, |
| 2121 Heap::RootListIndex root_value_index, | 2346 Heap::RootListIndex root_value_index, |
| 2122 const char* message) { | 2347 const char* message) { |
| 2123 ASSERT(!src.is(ip)); | 2348 ASSERT(!src.is(ip)); |
| 2124 LoadRoot(ip, root_value_index); | 2349 LoadRoot(ip, root_value_index); |
| 2125 cmp(src, ip); | 2350 cmp(src, ip); |
| 2126 Assert(eq, message); | 2351 Assert(eq, message); |
| 2127 } | 2352 } |
| 2128 | 2353 |
| 2129 | 2354 |
| (...skipping 101 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2231 } | 2456 } |
| 2232 ASSERT(!tmp.is(no_reg)); | 2457 ASSERT(!tmp.is(no_reg)); |
| 2233 | 2458 |
| 2234 for (int i = 0; i < field_count; i++) { | 2459 for (int i = 0; i < field_count; i++) { |
| 2235 ldr(tmp, FieldMemOperand(src, i * kPointerSize)); | 2460 ldr(tmp, FieldMemOperand(src, i * kPointerSize)); |
| 2236 str(tmp, FieldMemOperand(dst, i * kPointerSize)); | 2461 str(tmp, FieldMemOperand(dst, i * kPointerSize)); |
| 2237 } | 2462 } |
| 2238 } | 2463 } |
| 2239 | 2464 |
| 2240 | 2465 |
| 2466 void MacroAssembler::CopyBytes(Register src, |
| 2467 Register dst, |
| 2468 Register length, |
| 2469 Register scratch) { |
| 2470 Label align_loop, align_loop_1, word_loop, byte_loop, byte_loop_1, done; |
| 2471 |
| 2472 // Align src before copying in word size chunks. |
| 2473 bind(&align_loop); |
| 2474 cmp(length, Operand(0)); |
| 2475 b(eq, &done); |
| 2476 bind(&align_loop_1); |
| 2477 tst(src, Operand(kPointerSize - 1)); |
| 2478 b(eq, &word_loop); |
| 2479 ldrb(scratch, MemOperand(src, 1, PostIndex)); |
| 2480 strb(scratch, MemOperand(dst, 1, PostIndex)); |
| 2481 sub(length, length, Operand(1), SetCC); |
| 2482 b(ne, &byte_loop_1); |
| 2483 |
| 2484 // Copy bytes in word size chunks. |
| 2485 bind(&word_loop); |
| 2486 if (FLAG_debug_code) { |
| 2487 tst(src, Operand(kPointerSize - 1)); |
| 2488 Assert(eq, "Expecting alignment for CopyBytes"); |
| 2489 } |
| 2490 cmp(length, Operand(kPointerSize)); |
| 2491 b(lt, &byte_loop); |
| 2492 ldr(scratch, MemOperand(src, kPointerSize, PostIndex)); |
| 2493 #if CAN_USE_UNALIGNED_ACCESSES |
| 2494 str(scratch, MemOperand(dst, kPointerSize, PostIndex)); |
| 2495 #else |
| 2496 strb(scratch, MemOperand(dst, 1, PostIndex)); |
| 2497 mov(scratch, Operand(scratch, LSR, 8)); |
| 2498 strb(scratch, MemOperand(dst, 1, PostIndex)); |
| 2499 mov(scratch, Operand(scratch, LSR, 8)); |
| 2500 strb(scratch, MemOperand(dst, 1, PostIndex)); |
| 2501 mov(scratch, Operand(scratch, LSR, 8)); |
| 2502 strb(scratch, MemOperand(dst, 1, PostIndex)); |
| 2503 #endif |
| 2504 sub(length, length, Operand(kPointerSize)); |
| 2505 b(&word_loop); |
| 2506 |
| 2507 // Copy the last bytes if any left. |
| 2508 bind(&byte_loop); |
| 2509 cmp(length, Operand(0)); |
| 2510 b(eq, &done); |
| 2511 bind(&byte_loop_1); |
| 2512 ldrb(scratch, MemOperand(src, 1, PostIndex)); |
| 2513 strb(scratch, MemOperand(dst, 1, PostIndex)); |
| 2514 sub(length, length, Operand(1), SetCC); |
| 2515 b(ne, &byte_loop_1); |
| 2516 bind(&done); |
| 2517 } |
| 2518 |
| 2519 |
| 2241 void MacroAssembler::CountLeadingZeros(Register zeros, // Answer. | 2520 void MacroAssembler::CountLeadingZeros(Register zeros, // Answer. |
| 2242 Register source, // Input. | 2521 Register source, // Input. |
| 2243 Register scratch) { | 2522 Register scratch) { |
| 2244 ASSERT(!zeros.is(source) || !source.is(scratch)); | 2523 ASSERT(!zeros.is(source) || !source.is(scratch)); |
| 2245 ASSERT(!zeros.is(scratch)); | 2524 ASSERT(!zeros.is(scratch)); |
| 2246 ASSERT(!scratch.is(ip)); | 2525 ASSERT(!scratch.is(ip)); |
| 2247 ASSERT(!source.is(ip)); | 2526 ASSERT(!source.is(ip)); |
| 2248 ASSERT(!zeros.is(ip)); | 2527 ASSERT(!zeros.is(ip)); |
| 2249 #ifdef CAN_USE_ARMV5_INSTRUCTIONS | 2528 #ifdef CAN_USE_ARMV5_INSTRUCTIONS |
| 2250 clz(zeros, source); // This instruction is only supported after ARM5. | 2529 clz(zeros, source); // This instruction is only supported after ARM5. |
| (...skipping 160 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2411 // Result was clobbered. Restore it. | 2690 // Result was clobbered. Restore it. |
| 2412 ldr(result, MemOperand(ldr_location)); | 2691 ldr(result, MemOperand(ldr_location)); |
| 2413 } | 2692 } |
| 2414 // Get the address of the constant. | 2693 // Get the address of the constant. |
| 2415 and_(result, result, Operand(kLdrOffsetMask)); | 2694 and_(result, result, Operand(kLdrOffsetMask)); |
| 2416 add(result, ldr_location, Operand(result)); | 2695 add(result, ldr_location, Operand(result)); |
| 2417 add(result, result, Operand(kPCRegOffset)); | 2696 add(result, result, Operand(kPCRegOffset)); |
| 2418 } | 2697 } |
| 2419 | 2698 |
| 2420 | 2699 |
| 2421 #ifdef ENABLE_DEBUGGER_SUPPORT | |
| 2422 CodePatcher::CodePatcher(byte* address, int instructions) | 2700 CodePatcher::CodePatcher(byte* address, int instructions) |
| 2423 : address_(address), | 2701 : address_(address), |
| 2424 instructions_(instructions), | 2702 instructions_(instructions), |
| 2425 size_(instructions * Assembler::kInstrSize), | 2703 size_(instructions * Assembler::kInstrSize), |
| 2426 masm_(address, size_ + Assembler::kGap) { | 2704 masm_(address, size_ + Assembler::kGap) { |
| 2427 // Create a new macro assembler pointing to the address of the code to patch. | 2705 // Create a new macro assembler pointing to the address of the code to patch. |
| 2428 // The size is adjusted with kGap on order for the assembler to generate size | 2706 // The size is adjusted with kGap on order for the assembler to generate size |
| 2429 // bytes of instructions without failing with buffer size constraints. | 2707 // bytes of instructions without failing with buffer size constraints. |
| 2430 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); | 2708 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); |
| 2431 } | 2709 } |
| 2432 | 2710 |
| 2433 | 2711 |
| 2434 CodePatcher::~CodePatcher() { | 2712 CodePatcher::~CodePatcher() { |
| 2435 // Indicate that code has changed. | 2713 // Indicate that code has changed. |
| 2436 CPU::FlushICache(address_, size_); | 2714 CPU::FlushICache(address_, size_); |
| 2437 | 2715 |
| 2438 // Check that the code was patched as expected. | 2716 // Check that the code was patched as expected. |
| 2439 ASSERT(masm_.pc_ == address_ + size_); | 2717 ASSERT(masm_.pc_ == address_ + size_); |
| 2440 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); | 2718 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); |
| 2441 } | 2719 } |
| 2442 | 2720 |
| 2443 | 2721 |
| 2444 void CodePatcher::Emit(Instr x) { | 2722 void CodePatcher::Emit(Instr instr) { |
| 2445 masm()->emit(x); | 2723 masm()->emit(instr); |
| 2446 } | 2724 } |
| 2447 | 2725 |
| 2448 | 2726 |
| 2449 void CodePatcher::Emit(Address addr) { | 2727 void CodePatcher::Emit(Address addr) { |
| 2450 masm()->emit(reinterpret_cast<Instr>(addr)); | 2728 masm()->emit(reinterpret_cast<Instr>(addr)); |
| 2451 } | 2729 } |
| 2452 #endif // ENABLE_DEBUGGER_SUPPORT | 2730 |
| 2731 |
| 2732 void CodePatcher::EmitCondition(Condition cond) { |
| 2733 Instr instr = Assembler::instr_at(masm_.pc_); |
| 2734 instr = (instr & ~kCondMask) | cond; |
| 2735 masm_.emit(instr); |
| 2736 } |
| 2453 | 2737 |
| 2454 | 2738 |
| 2455 } } // namespace v8::internal | 2739 } } // namespace v8::internal |
| 2456 | 2740 |
| 2457 #endif // V8_TARGET_ARCH_ARM | 2741 #endif // V8_TARGET_ARCH_ARM |
| OLD | NEW |