OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include <stdarg.h> | 5 #include <stdarg.h> |
6 #include <stdlib.h> | 6 #include <stdlib.h> |
7 #include <cmath> | 7 #include <cmath> |
8 | 8 |
9 #if V8_TARGET_ARCH_ARM | 9 #if V8_TARGET_ARCH_ARM |
10 | 10 |
11 #include "src/arm/constants-arm.h" | 11 #include "src/arm/constants-arm.h" |
12 #include "src/arm/simulator-arm.h" | 12 #include "src/arm/simulator-arm.h" |
13 #include "src/assembler.h" | 13 #include "src/assembler.h" |
14 #include "src/base/bits.h" | 14 #include "src/base/bits.h" |
15 #include "src/codegen.h" | 15 #include "src/codegen.h" |
16 #include "src/disasm.h" | 16 #include "src/disasm.h" |
17 #include "src/runtime/runtime-utils.h" | 17 #include "src/runtime/runtime-utils.h" |
18 | 18 |
19 #if defined(USE_SIMULATOR) | 19 #if defined(USE_SIMULATOR) |
20 | 20 |
21 // Only build the simulator if not compiling for real ARM hardware. | 21 // Only build the simulator if not compiling for real ARM hardware. |
22 namespace v8 { | 22 namespace v8 { |
23 namespace internal { | 23 namespace internal { |
24 | 24 |
| 25 // static |
| 26 base::LazyInstance<Simulator::GlobalMonitor>::type Simulator::global_monitor_ = |
| 27 LAZY_INSTANCE_INITIALIZER; |
| 28 |
25 // This macro provides a platform independent use of sscanf. The reason for | 29 // This macro provides a platform independent use of sscanf. The reason for |
26 // SScanF not being implemented in a platform independent way through | 30 // SScanF not being implemented in a platform independent way through |
27 // ::v8::internal::OS in the same way as SNPrintF is that the | 31 // ::v8::internal::OS in the same way as SNPrintF is that the |
28 // Windows C Run-Time Library does not provide vsscanf. | 32 // Windows C Run-Time Library does not provide vsscanf. |
29 #define SScanF sscanf // NOLINT | 33 #define SScanF sscanf // NOLINT |
30 | 34 |
31 // The ArmDebugger class is used by the simulator while debugging simulated ARM | 35 // The ArmDebugger class is used by the simulator while debugging simulated ARM |
32 // code. | 36 // code. |
33 class ArmDebugger { | 37 class ArmDebugger { |
34 public: | 38 public: |
(...skipping 668 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
703 // some buffer below. | 707 // some buffer below. |
704 registers_[sp] = reinterpret_cast<int32_t>(stack_) + stack_size - 64; | 708 registers_[sp] = reinterpret_cast<int32_t>(stack_) + stack_size - 64; |
705 // The lr and pc are initialized to a known bad value that will cause an | 709 // The lr and pc are initialized to a known bad value that will cause an |
706 // access violation if the simulator ever tries to execute it. | 710 // access violation if the simulator ever tries to execute it. |
707 registers_[pc] = bad_lr; | 711 registers_[pc] = bad_lr; |
708 registers_[lr] = bad_lr; | 712 registers_[lr] = bad_lr; |
709 | 713 |
710 last_debugger_input_ = NULL; | 714 last_debugger_input_ = NULL; |
711 } | 715 } |
712 | 716 |
713 | 717 Simulator::~Simulator() { |
714 Simulator::~Simulator() { free(stack_); } | 718 global_monitor_.Pointer()->RemoveProcessor(&global_monitor_processor_); |
715 | 719 free(stack_); |
| 720 } |
716 | 721 |
717 // When the generated code calls an external reference we need to catch that in | 722 // When the generated code calls an external reference we need to catch that in |
718 // the simulator. The external reference will be a function compiled for the | 723 // the simulator. The external reference will be a function compiled for the |
719 // host architecture. We need to call that function instead of trying to | 724 // host architecture. We need to call that function instead of trying to |
720 // execute it with the simulator. We do that by redirecting the external | 725 // execute it with the simulator. We do that by redirecting the external |
721 // reference to a svc (Supervisor Call) instruction that is handled by | 726 // reference to a svc (Supervisor Call) instruction that is handled by |
722 // the simulator. We write the original destination of the jump just at a known | 727 // the simulator. We write the original destination of the jump just at a known |
723 // offset from the svc instruction so the simulator knows what to call. | 728 // offset from the svc instruction so the simulator knows what to call. |
724 class Redirection { | 729 class Redirection { |
725 public: | 730 public: |
(...skipping 307 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1033 // We don't trash the registers with the return value. | 1038 // We don't trash the registers with the return value. |
1034 registers_[2] = 0x50Bad4U; | 1039 registers_[2] = 0x50Bad4U; |
1035 registers_[3] = 0x50Bad4U; | 1040 registers_[3] = 0x50Bad4U; |
1036 registers_[12] = 0x50Bad4U; | 1041 registers_[12] = 0x50Bad4U; |
1037 } | 1042 } |
1038 | 1043 |
1039 | 1044 |
1040 int Simulator::ReadW(int32_t addr, Instruction* instr) { | 1045 int Simulator::ReadW(int32_t addr, Instruction* instr) { |
1041 // All supported ARM targets allow unaligned accesses, so we don't need to | 1046 // All supported ARM targets allow unaligned accesses, so we don't need to |
1042 // check the alignment here. | 1047 // check the alignment here. |
| 1048 base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex); |
| 1049 local_monitor_.NotifyLoad(addr); |
1043 intptr_t* ptr = reinterpret_cast<intptr_t*>(addr); | 1050 intptr_t* ptr = reinterpret_cast<intptr_t*>(addr); |
1044 return *ptr; | 1051 return *ptr; |
1045 } | 1052 } |
1046 | 1053 |
| 1054 int Simulator::ReadExW(int32_t addr, Instruction* instr) { |
| 1055 base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex); |
| 1056 local_monitor_.NotifyLoadExcl(addr, TransactionSize::Word); |
| 1057 global_monitor_.Pointer()->NotifyLoadExcl_Locked(addr, |
| 1058 &global_monitor_processor_); |
| 1059 intptr_t* ptr = reinterpret_cast<intptr_t*>(addr); |
| 1060 return *ptr; |
| 1061 } |
1047 | 1062 |
1048 void Simulator::WriteW(int32_t addr, int value, Instruction* instr) { | 1063 void Simulator::WriteW(int32_t addr, int value, Instruction* instr) { |
1049 // All supported ARM targets allow unaligned accesses, so we don't need to | 1064 // All supported ARM targets allow unaligned accesses, so we don't need to |
1050 // check the alignment here. | 1065 // check the alignment here. |
| 1066 base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex); |
| 1067 local_monitor_.NotifyStore(addr); |
| 1068 global_monitor_.Pointer()->NotifyStore_Locked(addr, |
| 1069 &global_monitor_processor_); |
1051 intptr_t* ptr = reinterpret_cast<intptr_t*>(addr); | 1070 intptr_t* ptr = reinterpret_cast<intptr_t*>(addr); |
1052 *ptr = value; | 1071 *ptr = value; |
1053 } | 1072 } |
1054 | 1073 |
| 1074 int Simulator::WriteExW(int32_t addr, int value, Instruction* instr) { |
| 1075 base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex); |
| 1076 if (local_monitor_.NotifyStoreExcl(addr, TransactionSize::Word) && |
| 1077 global_monitor_.Pointer()->NotifyStoreExcl_Locked( |
| 1078 addr, &global_monitor_processor_)) { |
| 1079 intptr_t* ptr = reinterpret_cast<intptr_t*>(addr); |
| 1080 *ptr = value; |
| 1081 return 0; |
| 1082 } else { |
| 1083 return 1; |
| 1084 } |
| 1085 } |
1055 | 1086 |
1056 uint16_t Simulator::ReadHU(int32_t addr, Instruction* instr) { | 1087 uint16_t Simulator::ReadHU(int32_t addr, Instruction* instr) { |
1057 // All supported ARM targets allow unaligned accesses, so we don't need to | 1088 // All supported ARM targets allow unaligned accesses, so we don't need to |
1058 // check the alignment here. | 1089 // check the alignment here. |
| 1090 base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex); |
| 1091 local_monitor_.NotifyLoad(addr); |
1059 uint16_t* ptr = reinterpret_cast<uint16_t*>(addr); | 1092 uint16_t* ptr = reinterpret_cast<uint16_t*>(addr); |
1060 return *ptr; | 1093 return *ptr; |
1061 } | 1094 } |
1062 | 1095 |
1063 | |
1064 int16_t Simulator::ReadH(int32_t addr, Instruction* instr) { | 1096 int16_t Simulator::ReadH(int32_t addr, Instruction* instr) { |
1065 // All supported ARM targets allow unaligned accesses, so we don't need to | 1097 // All supported ARM targets allow unaligned accesses, so we don't need to |
1066 // check the alignment here. | 1098 // check the alignment here. |
| 1099 base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex); |
| 1100 local_monitor_.NotifyLoad(addr); |
1067 int16_t* ptr = reinterpret_cast<int16_t*>(addr); | 1101 int16_t* ptr = reinterpret_cast<int16_t*>(addr); |
1068 return *ptr; | 1102 return *ptr; |
1069 } | 1103 } |
1070 | 1104 |
| 1105 uint16_t Simulator::ReadExHU(int32_t addr, Instruction* instr) { |
| 1106 base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex); |
| 1107 local_monitor_.NotifyLoadExcl(addr, TransactionSize::HalfWord); |
| 1108 global_monitor_.Pointer()->NotifyLoadExcl_Locked(addr, |
| 1109 &global_monitor_processor_); |
| 1110 uint16_t* ptr = reinterpret_cast<uint16_t*>(addr); |
| 1111 return *ptr; |
| 1112 } |
1071 | 1113 |
1072 void Simulator::WriteH(int32_t addr, uint16_t value, Instruction* instr) { | 1114 void Simulator::WriteH(int32_t addr, uint16_t value, Instruction* instr) { |
1073 // All supported ARM targets allow unaligned accesses, so we don't need to | 1115 // All supported ARM targets allow unaligned accesses, so we don't need to |
1074 // check the alignment here. | 1116 // check the alignment here. |
| 1117 base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex); |
| 1118 local_monitor_.NotifyStore(addr); |
| 1119 global_monitor_.Pointer()->NotifyStore_Locked(addr, |
| 1120 &global_monitor_processor_); |
1075 uint16_t* ptr = reinterpret_cast<uint16_t*>(addr); | 1121 uint16_t* ptr = reinterpret_cast<uint16_t*>(addr); |
1076 *ptr = value; | 1122 *ptr = value; |
1077 } | 1123 } |
1078 | 1124 |
1079 | |
1080 void Simulator::WriteH(int32_t addr, int16_t value, Instruction* instr) { | 1125 void Simulator::WriteH(int32_t addr, int16_t value, Instruction* instr) { |
1081 // All supported ARM targets allow unaligned accesses, so we don't need to | 1126 // All supported ARM targets allow unaligned accesses, so we don't need to |
1082 // check the alignment here. | 1127 // check the alignment here. |
| 1128 base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex); |
| 1129 local_monitor_.NotifyStore(addr); |
| 1130 global_monitor_.Pointer()->NotifyStore_Locked(addr, |
| 1131 &global_monitor_processor_); |
1083 int16_t* ptr = reinterpret_cast<int16_t*>(addr); | 1132 int16_t* ptr = reinterpret_cast<int16_t*>(addr); |
1084 *ptr = value; | 1133 *ptr = value; |
1085 } | 1134 } |
1086 | 1135 |
| 1136 int Simulator::WriteExH(int32_t addr, uint16_t value, Instruction* instr) { |
| 1137 base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex); |
| 1138 if (local_monitor_.NotifyStoreExcl(addr, TransactionSize::HalfWord) && |
| 1139 global_monitor_.Pointer()->NotifyStoreExcl_Locked( |
| 1140 addr, &global_monitor_processor_)) { |
| 1141 uint16_t* ptr = reinterpret_cast<uint16_t*>(addr); |
| 1142 *ptr = value; |
| 1143 return 0; |
| 1144 } else { |
| 1145 return 1; |
| 1146 } |
| 1147 } |
1087 | 1148 |
1088 uint8_t Simulator::ReadBU(int32_t addr) { | 1149 uint8_t Simulator::ReadBU(int32_t addr) { |
| 1150 base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex); |
| 1151 local_monitor_.NotifyLoad(addr); |
1089 uint8_t* ptr = reinterpret_cast<uint8_t*>(addr); | 1152 uint8_t* ptr = reinterpret_cast<uint8_t*>(addr); |
1090 return *ptr; | 1153 return *ptr; |
1091 } | 1154 } |
1092 | 1155 |
1093 | |
1094 int8_t Simulator::ReadB(int32_t addr) { | 1156 int8_t Simulator::ReadB(int32_t addr) { |
| 1157 base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex); |
| 1158 local_monitor_.NotifyLoad(addr); |
1095 int8_t* ptr = reinterpret_cast<int8_t*>(addr); | 1159 int8_t* ptr = reinterpret_cast<int8_t*>(addr); |
1096 return *ptr; | 1160 return *ptr; |
1097 } | 1161 } |
1098 | 1162 |
| 1163 uint8_t Simulator::ReadExBU(int32_t addr) { |
| 1164 base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex); |
| 1165 local_monitor_.NotifyLoadExcl(addr, TransactionSize::Byte); |
| 1166 global_monitor_.Pointer()->NotifyLoadExcl_Locked(addr, |
| 1167 &global_monitor_processor_); |
| 1168 uint8_t* ptr = reinterpret_cast<uint8_t*>(addr); |
| 1169 return *ptr; |
| 1170 } |
1099 | 1171 |
1100 void Simulator::WriteB(int32_t addr, uint8_t value) { | 1172 void Simulator::WriteB(int32_t addr, uint8_t value) { |
| 1173 base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex); |
| 1174 local_monitor_.NotifyStore(addr); |
| 1175 global_monitor_.Pointer()->NotifyStore_Locked(addr, |
| 1176 &global_monitor_processor_); |
1101 uint8_t* ptr = reinterpret_cast<uint8_t*>(addr); | 1177 uint8_t* ptr = reinterpret_cast<uint8_t*>(addr); |
1102 *ptr = value; | 1178 *ptr = value; |
1103 } | 1179 } |
1104 | 1180 |
1105 | |
1106 void Simulator::WriteB(int32_t addr, int8_t value) { | 1181 void Simulator::WriteB(int32_t addr, int8_t value) { |
| 1182 base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex); |
| 1183 local_monitor_.NotifyStore(addr); |
| 1184 global_monitor_.Pointer()->NotifyStore_Locked(addr, |
| 1185 &global_monitor_processor_); |
1107 int8_t* ptr = reinterpret_cast<int8_t*>(addr); | 1186 int8_t* ptr = reinterpret_cast<int8_t*>(addr); |
1108 *ptr = value; | 1187 *ptr = value; |
1109 } | 1188 } |
1110 | 1189 |
| 1190 int Simulator::WriteExB(int32_t addr, uint8_t value) { |
| 1191 base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex); |
| 1192 if (local_monitor_.NotifyStoreExcl(addr, TransactionSize::Byte) && |
| 1193 global_monitor_.Pointer()->NotifyStoreExcl_Locked( |
| 1194 addr, &global_monitor_processor_)) { |
| 1195 uint8_t* ptr = reinterpret_cast<uint8_t*>(addr); |
| 1196 *ptr = value; |
| 1197 return 0; |
| 1198 } else { |
| 1199 return 1; |
| 1200 } |
| 1201 } |
1111 | 1202 |
1112 int32_t* Simulator::ReadDW(int32_t addr) { | 1203 int32_t* Simulator::ReadDW(int32_t addr) { |
1113 // All supported ARM targets allow unaligned accesses, so we don't need to | 1204 // All supported ARM targets allow unaligned accesses, so we don't need to |
1114 // check the alignment here. | 1205 // check the alignment here. |
| 1206 base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex); |
| 1207 local_monitor_.NotifyLoad(addr); |
1115 int32_t* ptr = reinterpret_cast<int32_t*>(addr); | 1208 int32_t* ptr = reinterpret_cast<int32_t*>(addr); |
1116 return ptr; | 1209 return ptr; |
1117 } | 1210 } |
1118 | 1211 |
1119 | 1212 |
1120 void Simulator::WriteDW(int32_t addr, int32_t value1, int32_t value2) { | 1213 void Simulator::WriteDW(int32_t addr, int32_t value1, int32_t value2) { |
1121 // All supported ARM targets allow unaligned accesses, so we don't need to | 1214 // All supported ARM targets allow unaligned accesses, so we don't need to |
1122 // check the alignment here. | 1215 // check the alignment here. |
| 1216 base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex); |
| 1217 local_monitor_.NotifyStore(addr); |
| 1218 global_monitor_.Pointer()->NotifyStore_Locked(addr, |
| 1219 &global_monitor_processor_); |
1123 int32_t* ptr = reinterpret_cast<int32_t*>(addr); | 1220 int32_t* ptr = reinterpret_cast<int32_t*>(addr); |
1124 *ptr++ = value1; | 1221 *ptr++ = value1; |
1125 *ptr = value2; | 1222 *ptr = value2; |
1126 } | 1223 } |
1127 | 1224 |
1128 | 1225 |
1129 // Returns the limit of the stack area to enable checking for stack overflows. | 1226 // Returns the limit of the stack area to enable checking for stack overflows. |
1130 uintptr_t Simulator::StackLimit(uintptr_t c_limit) const { | 1227 uintptr_t Simulator::StackLimit(uintptr_t c_limit) const { |
1131 // The simulator uses a separate JS stack. If we have exhausted the C stack, | 1228 // The simulator uses a separate JS stack. If we have exhausted the C stack, |
1132 // we also drop down the JS limit to reflect the exhaustion on the JS stack. | 1229 // we also drop down the JS limit to reflect the exhaustion on the JS stack. |
(...skipping 933 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2066 hi_res = static_cast<int32_t>(result >> 32); | 2163 hi_res = static_cast<int32_t>(result >> 32); |
2067 lo_res = static_cast<int32_t>(result & 0xffffffff); | 2164 lo_res = static_cast<int32_t>(result & 0xffffffff); |
2068 } | 2165 } |
2069 set_register(rd_lo, lo_res); | 2166 set_register(rd_lo, lo_res); |
2070 set_register(rd_hi, hi_res); | 2167 set_register(rd_hi, hi_res); |
2071 if (instr->HasS()) { | 2168 if (instr->HasS()) { |
2072 UNIMPLEMENTED(); | 2169 UNIMPLEMENTED(); |
2073 } | 2170 } |
2074 } | 2171 } |
2075 } else { | 2172 } else { |
2076 UNIMPLEMENTED(); // Not used by V8. | 2173 if (instr->Bits(24, 23) == 3) { |
| 2174 if (instr->Bit(20) == 1) { |
| 2175 // ldrex |
| 2176 int rt = instr->RtValue(); |
| 2177 int rn = instr->RnValue(); |
| 2178 int32_t addr = get_register(rn); |
| 2179 switch (instr->Bits(22, 21)) { |
| 2180 case 0: { |
| 2181 // Format(instr, "ldrex'cond 'rt, ['rn]"); |
| 2182 int value = ReadExW(addr, instr); |
| 2183 set_register(rt, value); |
| 2184 break; |
| 2185 } |
| 2186 case 2: { |
| 2187 // Format(instr, "ldrexb'cond 'rt, ['rn]"); |
| 2188 uint8_t value = ReadExBU(addr); |
| 2189 set_register(rt, value); |
| 2190 break; |
| 2191 } |
| 2192 case 3: { |
| 2193 // Format(instr, "ldrexh'cond 'rt, ['rn]"); |
| 2194 uint16_t value = ReadExHU(addr, instr); |
| 2195 set_register(rt, value); |
| 2196 break; |
| 2197 } |
| 2198 default: |
| 2199 UNREACHABLE(); |
| 2200 break; |
| 2201 } |
| 2202 } else { |
| 2203 // The instruction is documented as strex rd, rt, [rn], but the |
| 2204 // "rt" register is using the rm bits. |
| 2205 int rd = instr->RdValue(); |
| 2206 int rt = instr->RmValue(); |
| 2207 int rn = instr->RnValue(); |
| 2208 int32_t addr = get_register(rn); |
| 2209 switch (instr->Bits(22, 21)) { |
| 2210 case 0: { |
| 2211 // Format(instr, "strex'cond 'rd, 'rm, ['rn]"); |
| 2212 int value = get_register(rt); |
| 2213 int status = WriteExW(addr, value, instr); |
| 2214 set_register(rd, status); |
| 2215 break; |
| 2216 } |
| 2217 case 2: { |
| 2218 // Format(instr, "strexb'cond 'rd, 'rm, ['rn]"); |
| 2219 uint8_t value = get_register(rt); |
| 2220 int status = WriteExB(addr, value); |
| 2221 set_register(rd, status); |
| 2222 break; |
| 2223 } |
| 2224 case 3: { |
| 2225 // Format(instr, "strexh'cond 'rd, 'rm, ['rn]"); |
| 2226 uint16_t value = get_register(rt); |
| 2227 int status = WriteExH(addr, value, instr); |
| 2228 set_register(rd, status); |
| 2229 break; |
| 2230 } |
| 2231 default: |
| 2232 UNREACHABLE(); |
| 2233 break; |
| 2234 } |
| 2235 } |
| 2236 } else { |
| 2237 UNIMPLEMENTED(); // Not used by V8. |
| 2238 } |
2077 } | 2239 } |
2078 } else { | 2240 } else { |
2079 // extra load/store instructions | 2241 // extra load/store instructions |
2080 int rd = instr->RdValue(); | 2242 int rd = instr->RdValue(); |
2081 int rn = instr->RnValue(); | 2243 int rn = instr->RnValue(); |
2082 int32_t rn_val = get_register(rn); | 2244 int32_t rn_val = get_register(rn); |
2083 int32_t addr = 0; | 2245 int32_t addr = 0; |
2084 if (instr->Bit(22) == 0) { | 2246 if (instr->Bit(22) == 0) { |
2085 int rm = instr->RmValue(); | 2247 int rm = instr->RmValue(); |
2086 int32_t rm_val = get_register(rm); | 2248 int32_t rm_val = get_register(rm); |
(...skipping 3251 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
5338 | 5500 |
5339 | 5501 |
5340 uintptr_t Simulator::PopAddress() { | 5502 uintptr_t Simulator::PopAddress() { |
5341 int current_sp = get_register(sp); | 5503 int current_sp = get_register(sp); |
5342 uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(current_sp); | 5504 uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(current_sp); |
5343 uintptr_t address = *stack_slot; | 5505 uintptr_t address = *stack_slot; |
5344 set_register(sp, current_sp + sizeof(uintptr_t)); | 5506 set_register(sp, current_sp + sizeof(uintptr_t)); |
5345 return address; | 5507 return address; |
5346 } | 5508 } |
5347 | 5509 |
| 5510 Simulator::LocalMonitor::LocalMonitor() |
| 5511 : access_state_(MonitorAccess::Open), |
| 5512 tagged_addr_(0), |
| 5513 size_(TransactionSize::None) {} |
| 5514 |
| 5515 void Simulator::LocalMonitor::Clear() { |
| 5516 access_state_ = MonitorAccess::Open; |
| 5517 tagged_addr_ = 0; |
| 5518 size_ = TransactionSize::None; |
| 5519 } |
| 5520 |
| 5521 void Simulator::LocalMonitor::NotifyLoad(int32_t addr) { |
| 5522 if (access_state_ == MonitorAccess::Exclusive) { |
| 5523 // A load could cause a cache eviction which will affect the monitor. As a |
| 5524 // result, it's most strict to unconditionally clear the local monitor on |
| 5525 // load. |
| 5526 Clear(); |
| 5527 } |
| 5528 } |
| 5529 |
| 5530 void Simulator::LocalMonitor::NotifyLoadExcl(int32_t addr, |
| 5531 TransactionSize size) { |
| 5532 access_state_ = MonitorAccess::Exclusive; |
| 5533 tagged_addr_ = addr; |
| 5534 size_ = size; |
| 5535 } |
| 5536 |
| 5537 void Simulator::LocalMonitor::NotifyStore(int32_t addr) { |
| 5538 if (access_state_ == MonitorAccess::Exclusive) { |
| 5539 // It is implementation-defined whether a non-exclusive store to an address |
| 5540 // covered by the local monitor during exclusive access transitions to open |
| 5541 // or exclusive access. See ARM DDI 0406C.b, A3.4.1. |
| 5542 // |
| 5543 // However, a store could cause a cache eviction which will affect the |
| 5544 // monitor. As a result, it's most strict to unconditionally clear the |
| 5545 // local monitor on store. |
| 5546 Clear(); |
| 5547 } |
| 5548 } |
| 5549 |
| 5550 bool Simulator::LocalMonitor::NotifyStoreExcl(int32_t addr, |
| 5551 TransactionSize size) { |
| 5552 if (access_state_ == MonitorAccess::Exclusive) { |
| 5553 // It is allowed for a processor to require that the address matches |
| 5554 // exactly (A3.4.5), so this comparison does not mask addr. |
| 5555 if (addr == tagged_addr_ && size_ == size) { |
| 5556 Clear(); |
| 5557 return true; |
| 5558 } else { |
| 5559 // It is implementation-defined whether an exclusive store to a |
| 5560 // non-tagged address will update memory. Behavior is unpredictable if |
| 5561 // the transaction size of the exclusive store differs from that of the |
| 5562 // exclusive load. See ARM DDI 0406C.b, A3.4.5. |
| 5563 Clear(); |
| 5564 return false; |
| 5565 } |
| 5566 } else { |
| 5567 DCHECK(access_state_ == MonitorAccess::Open); |
| 5568 return false; |
| 5569 } |
| 5570 } |
| 5571 |
| 5572 Simulator::GlobalMonitor::Processor::Processor() |
| 5573 : access_state_(MonitorAccess::Open), |
| 5574 tagged_addr_(0), |
| 5575 next_(nullptr), |
| 5576 prev_(nullptr), |
| 5577 failure_counter_(0) {} |
| 5578 |
| 5579 void Simulator::GlobalMonitor::Processor::Clear_Locked() { |
| 5580 access_state_ = MonitorAccess::Open; |
| 5581 tagged_addr_ = 0; |
| 5582 } |
| 5583 |
| 5584 void Simulator::GlobalMonitor::Processor::NotifyLoadExcl_Locked(int32_t addr) { |
| 5585 access_state_ = MonitorAccess::Exclusive; |
| 5586 tagged_addr_ = addr; |
| 5587 } |
| 5588 |
| 5589 void Simulator::GlobalMonitor::Processor::NotifyStore_Locked( |
| 5590 int32_t addr, bool is_requesting_processor) { |
| 5591 if (access_state_ == MonitorAccess::Exclusive) { |
| 5592 // It is implementation-defined whether a non-exclusive store by the |
| 5593 // requesting processor to an address covered by the global monitor |
| 5594 // during exclusive access transitions to open or exclusive access. |
| 5595 // |
| 5596 // For any other processor, the access state always transitions to open |
| 5597 // access. |
| 5598 // |
| 5599 // See ARM DDI 0406C.b, A3.4.2. |
| 5600 // |
| 5601 // However, similar to the local monitor, it is possible that a store |
| 5602 // caused a cache eviction, which can affect the monitor, so |
| 5603 // conservatively, we always clear the monitor. |
| 5604 Clear_Locked(); |
| 5605 } |
| 5606 } |
| 5607 |
| 5608 bool Simulator::GlobalMonitor::Processor::NotifyStoreExcl_Locked( |
| 5609 int32_t addr, bool is_requesting_processor) { |
| 5610 if (access_state_ == MonitorAccess::Exclusive) { |
| 5611 if (is_requesting_processor) { |
| 5612 // It is allowed for a processor to require that the address matches |
| 5613 // exactly (A3.4.5), so this comparison does not mask addr. |
| 5614 if (addr == tagged_addr_) { |
| 5615 // The access state for the requesting processor after a successful |
| 5616 // exclusive store is implementation-defined, but according to the ARM |
| 5617 // DDI, this has no effect on the subsequent operation of the global |
| 5618 // monitor. |
| 5619 Clear_Locked(); |
| 5620 // Introduce occasional strex failures. This is to simulate the |
| 5621 // behavior of hardware, which can randomly fail due to background |
| 5622 // cache evictions. |
| 5623 if (failure_counter_++ >= kMaxFailureCounter) { |
| 5624 failure_counter_ = 0; |
| 5625 return false; |
| 5626 } else { |
| 5627 return true; |
| 5628 } |
| 5629 } |
| 5630 } else if ((addr & kExclusiveTaggedAddrMask) == |
| 5631 (tagged_addr_ & kExclusiveTaggedAddrMask)) { |
| 5632 // Check the masked addresses when responding to a successful lock by |
| 5633 // another processor so the implementation is more conservative (i.e. the |
| 5634 // granularity of locking is as large as possible.) |
| 5635 Clear_Locked(); |
| 5636 return false; |
| 5637 } |
| 5638 } |
| 5639 return false; |
| 5640 } |
| 5641 |
| 5642 Simulator::GlobalMonitor::GlobalMonitor() : head_(nullptr) {} |
| 5643 |
| 5644 void Simulator::GlobalMonitor::NotifyLoadExcl_Locked(int32_t addr, |
| 5645 Processor* processor) { |
| 5646 processor->NotifyLoadExcl_Locked(addr); |
| 5647 PrependProcessor_Locked(processor); |
| 5648 } |
| 5649 |
| 5650 void Simulator::GlobalMonitor::NotifyStore_Locked(int32_t addr, |
| 5651 Processor* processor) { |
| 5652 // Notify each processor of the store operation. |
| 5653 for (Processor* iter = head_; iter; iter = iter->next_) { |
| 5654 bool is_requesting_processor = iter == processor; |
| 5655 iter->NotifyStore_Locked(addr, is_requesting_processor); |
| 5656 } |
| 5657 } |
| 5658 |
| 5659 bool Simulator::GlobalMonitor::NotifyStoreExcl_Locked(int32_t addr, |
| 5660 Processor* processor) { |
| 5661 DCHECK(IsProcessorInLinkedList_Locked(processor)); |
| 5662 if (processor->NotifyStoreExcl_Locked(addr, true)) { |
| 5663 // Notify the other processors that this StoreExcl succeeded. |
| 5664 for (Processor* iter = head_; iter; iter = iter->next_) { |
| 5665 if (iter != processor) { |
| 5666 iter->NotifyStoreExcl_Locked(addr, false); |
| 5667 } |
| 5668 } |
| 5669 return true; |
| 5670 } else { |
| 5671 return false; |
| 5672 } |
| 5673 } |
| 5674 |
| 5675 bool Simulator::GlobalMonitor::IsProcessorInLinkedList_Locked( |
| 5676 Processor* processor) const { |
| 5677 return head_ == processor || processor->next_ || processor->prev_; |
| 5678 } |
| 5679 |
| 5680 void Simulator::GlobalMonitor::PrependProcessor_Locked(Processor* processor) { |
| 5681 if (IsProcessorInLinkedList_Locked(processor)) { |
| 5682 return; |
| 5683 } |
| 5684 |
| 5685 if (head_) { |
| 5686 head_->prev_ = processor; |
| 5687 } |
| 5688 processor->prev_ = nullptr; |
| 5689 processor->next_ = head_; |
| 5690 head_ = processor; |
| 5691 } |
| 5692 |
| 5693 void Simulator::GlobalMonitor::RemoveProcessor(Processor* processor) { |
| 5694 base::LockGuard<base::Mutex> lock_guard(&mutex); |
| 5695 if (!IsProcessorInLinkedList_Locked(processor)) { |
| 5696 return; |
| 5697 } |
| 5698 |
| 5699 if (processor->prev_) { |
| 5700 processor->prev_->next_ = processor->next_; |
| 5701 } else { |
| 5702 head_ = processor->next_; |
| 5703 } |
| 5704 if (processor->next_) { |
| 5705 processor->next_->prev_ = processor->prev_; |
| 5706 } |
| 5707 processor->prev_ = nullptr; |
| 5708 processor->next_ = nullptr; |
| 5709 } |
| 5710 |
5348 } // namespace internal | 5711 } // namespace internal |
5349 } // namespace v8 | 5712 } // namespace v8 |
5350 | 5713 |
5351 #endif // USE_SIMULATOR | 5714 #endif // USE_SIMULATOR |
5352 | 5715 |
5353 #endif // V8_TARGET_ARCH_ARM | 5716 #endif // V8_TARGET_ARCH_ARM |
OLD | NEW |