Chromium Code Reviews

Diff: src/arm/simulator-arm.cc (lines added by the patch are prefixed with +, removed lines with -)

Issue 2006183004: Implement ldrex and strex in ARM simulator (Closed) Base URL: http://chromium.googlesource.com/v8/v8.git@master
Patch Set: fixes Created 3 years, 11 months ago
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include <stdarg.h>
 #include <stdlib.h>
 #include <cmath>
 
 #if V8_TARGET_ARCH_ARM
 
 #include "src/arm/constants-arm.h"
 #include "src/arm/simulator-arm.h"
 #include "src/assembler.h"
 #include "src/base/bits.h"
 #include "src/codegen.h"
 #include "src/disasm.h"
 #include "src/runtime/runtime-utils.h"
 
 #if defined(USE_SIMULATOR)
 
 // Only build the simulator if not compiling for real ARM hardware.
 namespace v8 {
 namespace internal {
 
+// static
+base::LazyInstance<Simulator::GlobalMonitor>::type Simulator::global_monitor_ =
+    LAZY_INSTANCE_INITIALIZER;
+
 // This macro provides a platform independent use of sscanf. The reason for
 // SScanF not being implemented in a platform independent way through
 // ::v8::internal::OS in the same way as SNPrintF is that the
 // Windows C Run-Time Library does not provide vsscanf.
 #define SScanF sscanf  // NOLINT
 
 // The ArmDebugger class is used by the simulator while debugging simulated ARM
 // code.
 class ArmDebugger {
  public:
(...skipping 668 matching lines...)
   // some buffer below.
   registers_[sp] = reinterpret_cast<int32_t>(stack_) + stack_size - 64;
   // The lr and pc are initialized to a known bad value that will cause an
   // access violation if the simulator ever tries to execute it.
   registers_[pc] = bad_lr;
   registers_[lr] = bad_lr;
 
   last_debugger_input_ = NULL;
 }
 
-
-Simulator::~Simulator() { free(stack_); }
-
+Simulator::~Simulator() {
+  global_monitor_.Pointer()->RemoveProcessor(&global_monitor_processor_);
+  free(stack_);
+}
 
 // When the generated code calls an external reference we need to catch that in
 // the simulator. The external reference will be a function compiled for the
 // host architecture. We need to call that function instead of trying to
 // execute it with the simulator. We do that by redirecting the external
 // reference to a svc (Supervisor Call) instruction that is handled by
 // the simulator. We write the original destination of the jump just at a known
 // offset from the svc instruction so the simulator knows what to call.
 class Redirection {
  public:
(...skipping 319 matching lines...)
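
The redirection scheme described above boils down to a tiny trampoline: an svc instruction the simulator traps, with the host function pointer stored at a known offset next to it. A minimal sketch of that layout, with placeholder names rather than the real Redirection class (whose body is elided above):

    #include <stdint.h>

    // Illustrative layout only. Generated ARM code jumps here; the simulator
    // recognizes the special svc, then reads the host entry point stored
    // immediately after it and calls that function natively.
    struct RedirectionTrampolineSketch {
      uint32_t svc_instruction;    // svc with a reserved immediate (placeholder for the real constant)
      uint32_t external_function;  // host-architecture function address to invoke instead
    };
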
   // We don't trash the registers with the return value.
   registers_[2] = 0x50Bad4U;
   registers_[3] = 0x50Bad4U;
   registers_[12] = 0x50Bad4U;
 }
 
 
 int Simulator::ReadW(int32_t addr, Instruction* instr) {
   // All supported ARM targets allow unaligned accesses, so we don't need to
   // check the alignment here.
+  local_monitor_.NotifyLoad(addr);
   intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
   return *ptr;
Jarin 2017/01/13 07:56:29 Would not this be a data race with writes in other
binji 2017/01/17 22:22:00 Done.
 }
 
+int Simulator::ReadExW(int32_t addr, Instruction* instr) {
+  base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+  local_monitor_.NotifyLoadExcl(addr, TransactionSize::Word);
+  global_monitor_.Pointer()->NotifyLoadExcl_Locked(addr,
+                                                   &global_monitor_processor_);
+  intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
+  return *ptr;
+}
 
 void Simulator::WriteW(int32_t addr, int value, Instruction* instr) {
   // All supported ARM targets allow unaligned accesses, so we don't need to
   // check the alignment here.
+  base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+  local_monitor_.NotifyStore(addr);
+  global_monitor_.Pointer()->NotifyStore_Locked(addr,
+                                                &global_monitor_processor_);
   intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
   *ptr = value;
 }
 
+int Simulator::WriteExW(int32_t addr, int value, Instruction* instr) {
+  base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+  if (local_monitor_.NotifyStoreExcl(addr, TransactionSize::Word) &&
+      global_monitor_.Pointer()->NotifyStoreExcl_Locked(
+          addr, &global_monitor_processor_)) {
+    intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
+    *ptr = value;
+    return 0;
+  } else {
+    return 1;
+  }
+}
 
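Note the return convention of the WriteEx* helpers: 0 for a successful exclusive store and 1 for a failed one, which is the status value a real strex writes into its destination register. Generated code is expected to loop until the store succeeds, because the monitors can drop the reservation at any time. As a rough, portable analogue (not the simulator API), the idiom is the same as a compare_exchange_weak retry loop:

    #include <atomic>

    // Sketch: an ldrex/strex pair behaves like compare_exchange_weak, which may
    // fail spuriously; callers therefore always retry in a loop.
    int AtomicAdd(std::atomic<int>* cell, int delta) {
      int old_value = cell->load(std::memory_order_relaxed);
      // A failed strex (status 1) corresponds to this loop taking another turn.
      while (!cell->compare_exchange_weak(old_value, old_value + delta)) {
        // compare_exchange_weak refreshed old_value with the current contents.
      }
      return old_value + delta;
    }
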
 uint16_t Simulator::ReadHU(int32_t addr, Instruction* instr) {
   // All supported ARM targets allow unaligned accesses, so we don't need to
   // check the alignment here.
+  local_monitor_.NotifyLoad(addr);
   uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
   return *ptr;
 }
 
-
 int16_t Simulator::ReadH(int32_t addr, Instruction* instr) {
   // All supported ARM targets allow unaligned accesses, so we don't need to
   // check the alignment here.
+  local_monitor_.NotifyLoad(addr);
   int16_t* ptr = reinterpret_cast<int16_t*>(addr);
   return *ptr;
 }
 
+uint16_t Simulator::ReadExHU(int32_t addr, Instruction* instr) {
+  base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+  local_monitor_.NotifyLoadExcl(addr, TransactionSize::HalfWord);
+  global_monitor_.Pointer()->NotifyLoadExcl_Locked(addr,
+                                                   &global_monitor_processor_);
+  uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
+  return *ptr;
+}
 
 void Simulator::WriteH(int32_t addr, uint16_t value, Instruction* instr) {
   // All supported ARM targets allow unaligned accesses, so we don't need to
   // check the alignment here.
+  base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+  local_monitor_.NotifyStore(addr);
+  global_monitor_.Pointer()->NotifyStore_Locked(addr,
+                                                &global_monitor_processor_);
   uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
   *ptr = value;
 }
 
-
 void Simulator::WriteH(int32_t addr, int16_t value, Instruction* instr) {
   // All supported ARM targets allow unaligned accesses, so we don't need to
   // check the alignment here.
+  base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+  local_monitor_.NotifyStore(addr);
+  global_monitor_.Pointer()->NotifyStore_Locked(addr,
+                                                &global_monitor_processor_);
   int16_t* ptr = reinterpret_cast<int16_t*>(addr);
   *ptr = value;
 }
 
+int Simulator::WriteExH(int32_t addr, uint16_t value, Instruction* instr) {
+  base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+  if (local_monitor_.NotifyStoreExcl(addr, TransactionSize::HalfWord) &&
+      global_monitor_.Pointer()->NotifyStoreExcl_Locked(
+          addr, &global_monitor_processor_)) {
+    uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
+    *ptr = value;
+    return 0;
+  } else {
+    return 1;
+  }
+}
 
 uint8_t Simulator::ReadBU(int32_t addr) {
+  local_monitor_.NotifyLoad(addr);
   uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
   return *ptr;
 }
 
-
 int8_t Simulator::ReadB(int32_t addr) {
+  local_monitor_.NotifyLoad(addr);
   int8_t* ptr = reinterpret_cast<int8_t*>(addr);
   return *ptr;
 }
 
+uint8_t Simulator::ReadExBU(int32_t addr) {
+  base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+  local_monitor_.NotifyLoadExcl(addr, TransactionSize::Byte);
+  global_monitor_.Pointer()->NotifyLoadExcl_Locked(addr,
+                                                   &global_monitor_processor_);
+  uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
+  return *ptr;
+}
 
 void Simulator::WriteB(int32_t addr, uint8_t value) {
+  base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+  local_monitor_.NotifyStore(addr);
+  global_monitor_.Pointer()->NotifyStore_Locked(addr,
+                                                &global_monitor_processor_);
   uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
   *ptr = value;
 }
 
-
 void Simulator::WriteB(int32_t addr, int8_t value) {
+  base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+  local_monitor_.NotifyStore(addr);
+  global_monitor_.Pointer()->NotifyStore_Locked(addr,
+                                                &global_monitor_processor_);
   int8_t* ptr = reinterpret_cast<int8_t*>(addr);
   *ptr = value;
 }
 
+int Simulator::WriteExB(int32_t addr, uint8_t value) {
+  base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+  if (local_monitor_.NotifyStoreExcl(addr, TransactionSize::Byte) &&
+      global_monitor_.Pointer()->NotifyStoreExcl_Locked(
+          addr, &global_monitor_processor_)) {
+    uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
+    *ptr = value;
+    return 0;
+  } else {
+    return 1;
+  }
+}
 
 int32_t* Simulator::ReadDW(int32_t addr) {
   // All supported ARM targets allow unaligned accesses, so we don't need to
   // check the alignment here.
+  local_monitor_.NotifyLoad(addr);
   int32_t* ptr = reinterpret_cast<int32_t*>(addr);
   return ptr;
 }
 
 
 void Simulator::WriteDW(int32_t addr, int32_t value1, int32_t value2) {
   // All supported ARM targets allow unaligned accesses, so we don't need to
   // check the alignment here.
+  base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+  local_monitor_.NotifyStore(addr);
+  global_monitor_.Pointer()->NotifyStore_Locked(addr,
+                                                &global_monitor_processor_);
   int32_t* ptr = reinterpret_cast<int32_t*>(addr);
   *ptr++ = value1;
   *ptr = value2;
 }
 
 
 // Returns the limit of the stack area to enable checking for stack overflows.
 uintptr_t Simulator::StackLimit(uintptr_t c_limit) const {
   // The simulator uses a separate JS stack. If we have exhausted the C stack,
   // we also drop down the JS limit to reflect the exhaustion on the JS stack.
(...skipping 933 matching lines...)
             hi_res = static_cast<int32_t>(result >> 32);
             lo_res = static_cast<int32_t>(result & 0xffffffff);
           }
           set_register(rd_lo, lo_res);
           set_register(rd_hi, hi_res);
           if (instr->HasS()) {
             UNIMPLEMENTED();
           }
         }
       } else {
-        UNIMPLEMENTED();  // Not used by V8.
+        if (instr->Bits(24, 23) == 3) {
+          if (instr->Bit(20) == 1) {
+            // ldrex
+            int rt = instr->RtValue();
+            int rn = instr->RnValue();
+            int32_t addr = get_register(rn);
+            switch (instr->Bits(22, 21)) {
+              case 0: {
+                // Format(instr, "ldrex'cond 'rt, ['rn]");
+                int value = ReadExW(addr, instr);
+                set_register(rt, value);
+                break;
+              }
+              case 2: {
+                // Format(instr, "ldrexb'cond 'rt, ['rn]");
+                uint8_t value = ReadExBU(addr);
+                set_register(rt, value);
+                break;
+              }
+              case 3: {
+                // Format(instr, "ldrexh'cond 'rt, ['rn]");
+                uint16_t value = ReadExHU(addr, instr);
+                set_register(rt, value);
+                break;
+              }
+              default:
+                UNREACHABLE();
+                break;
+            }
+          } else {
+            // The instruction is documented as strex rd, rt, [rn], but the
+            // "rt" register is using the rm bits.
+            int rd = instr->RdValue();
+            int rt = instr->RmValue();
+            int rn = instr->RnValue();
+            int32_t addr = get_register(rn);
+            switch (instr->Bits(22, 21)) {
+              case 0: {
+                // Format(instr, "strex'cond 'rd, 'rm, ['rn]");
+                int value = get_register(rt);
+                int status = WriteExW(addr, value, instr);
+                set_register(rd, status);
+                break;
+              }
+              case 2: {
+                // Format(instr, "strexb'cond 'rd, 'rm, ['rn]");
+                uint8_t value = get_register(rt);
+                int status = WriteExB(addr, value);
+                set_register(rd, status);
+                break;
+              }
+              case 3: {
+                // Format(instr, "strexh'cond 'rd, 'rm, ['rn]");
+                uint16_t value = get_register(rt);
+                int status = WriteExH(addr, value, instr);
+                set_register(rd, status);
+                break;
+              }
+              default:
+                UNREACHABLE();
+                break;
+            }
+          }
+        } else {
+          UNIMPLEMENTED();  // Not used by V8.
+        }
       }
     } else {
       // extra load/store instructions
       int rd = instr->RdValue();
       int rn = instr->RnValue();
       int32_t rn_val = get_register(rn);
       int32_t addr = 0;
       if (instr->Bit(22) == 0) {
         int rm = instr->RmValue();
         int32_t rm_val = get_register(rm);
(...skipping 2918 matching lines...)
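
For reference, the bit fields tested by the new decode branch above can be summarized as a small standalone classifier. This is illustrative only: it mirrors the switch in the patch and the encodings in ARM DDI 0406C, and it skips the outer decode context the simulator has already established by the time that branch is reached.

    #include <stdint.h>

    enum class ExclusiveOp { LdrEx, LdrExB, LdrExH, StrEx, StrExB, StrExH, Other };

    ExclusiveOp ClassifyExclusive(uint32_t instr) {
      // Load/store-exclusive space: bits 27..25 == 000, bits 24..23 == 11,
      // bits 7..4 == 1001.
      if (((instr >> 25) & 0x7) != 0 || ((instr >> 23) & 0x3) != 0x3 ||
          ((instr >> 4) & 0xF) != 0x9) {
        return ExclusiveOp::Other;
      }
      bool is_load = ((instr >> 20) & 1) != 0;  // Bit(20): 1 = ldrex*, 0 = strex*
      switch ((instr >> 21) & 0x3) {            // Bits(22, 21): transaction size
        case 0:
          return is_load ? ExclusiveOp::LdrEx : ExclusiveOp::StrEx;
        case 2:
          return is_load ? ExclusiveOp::LdrExB : ExclusiveOp::StrExB;
        case 3:
          return is_load ? ExclusiveOp::LdrExH : ExclusiveOp::StrExH;
        default:
          return ExclusiveOp::Other;  // case 1: ldrexd/strexd, not emitted by V8
      }
    }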
 
 
 uintptr_t Simulator::PopAddress() {
   int current_sp = get_register(sp);
   uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(current_sp);
   uintptr_t address = *stack_slot;
   set_register(sp, current_sp + sizeof(uintptr_t));
   return address;
 }
 
+Simulator::LocalMonitor::LocalMonitor()
+    : access_state_(MonitorAccess::Open),
+      tagged_addr_(0),
+      size_(TransactionSize::None) {}
+
+void Simulator::LocalMonitor::NotifyLoad(int32_t addr) {
+  if (access_state_ == MonitorAccess::Exclusive) {
+    // A load could cause a cache eviction which will affect the monitor. As a
+    // result, it's most strict to unconditionally clear the local monitor on
+    // load.
+    access_state_ = MonitorAccess::Open;
+    tagged_addr_ = 0;
+    size_ = TransactionSize::None;
Jarin 2017/01/13 07:56:29 Perhaps it would be better to introduce void Sim
binji 2017/01/17 22:22:00 Done.
+  }
+}
+
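The review comment above suggests factoring the repeated three-field reset into a helper, and the "Done" reply indicates this happens in a later patch set. Presumably it looks roughly like the following (hypothetical sketch; the final name and placement may differ):

    void Simulator::LocalMonitor::Clear() {
      access_state_ = MonitorAccess::Open;
      tagged_addr_ = 0;
      size_ = TransactionSize::None;
    }
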
+void Simulator::LocalMonitor::NotifyLoadExcl(int32_t addr,
+                                             TransactionSize size) {
+  access_state_ = MonitorAccess::Exclusive;
+  tagged_addr_ = addr;
+  size_ = size;
+}
+
+void Simulator::LocalMonitor::NotifyStore(int32_t addr) {
+  if (access_state_ == MonitorAccess::Exclusive) {
+    // It is implementation-defined whether a non-exclusive store to an address
+    // covered by the local monitor during exclusive access transitions to open
+    // or exclusive access. See ARM DDI 0406C.b, A3.4.1.
+    //
+    // However, a store could cause a cache eviction which will affect the
+    // monitor. As a result, it's most strict to unconditionally clear the
+    // local monitor on store.
+    access_state_ = MonitorAccess::Open;
+    tagged_addr_ = 0;
+    size_ = TransactionSize::None;
+  }
+}
+
+bool Simulator::LocalMonitor::NotifyStoreExcl(int32_t addr,
+                                              TransactionSize size) {
+  if (access_state_ == MonitorAccess::Exclusive) {
+    // It is allowed for a processor to require that the address matches
+    // exactly (A3.4.5), so this comparison does not mask addr.
+    if (addr == tagged_addr_ && size_ == size) {
+      access_state_ = MonitorAccess::Open;
+      tagged_addr_ = 0;
+      size_ = TransactionSize::None;
+      return true;
+    } else {
+      // It is implementation-defined whether an exclusive store to a
+      // non-tagged address will update memory. Behavior is unpredictable if
+      // the transaction size of the exclusive store differs from that of the
+      // exclusive load. See ARM DDI 0406C.b, A3.4.5.
+      access_state_ = MonitorAccess::Open;
+      tagged_addr_ = 0;
+      size_ = TransactionSize::None;
+      return false;
+    }
+  } else {
+    DCHECK(access_state_ == MonitorAccess::Open);
+    return false;
+  }
+}
+
+Simulator::GlobalMonitor::Processor::Processor()
+    : access_state_(MonitorAccess::Open),
+      tagged_addr_(0),
+      next_(nullptr),
+      prev_(nullptr),
+      failure_counter_(0) {}
+
+void Simulator::GlobalMonitor::Processor::NotifyLoadExcl_Locked(int32_t addr) {
+  access_state_ = MonitorAccess::Exclusive;
+  tagged_addr_ = addr;
+}
+
+void Simulator::GlobalMonitor::Processor::NotifyStore_Locked(
+    int32_t addr, bool is_requesting_processor) {
+  if (access_state_ == MonitorAccess::Exclusive) {
+    // It is implementation-defined whether a non-exclusive store by the
+    // requesting processor to an address covered by the global monitor
+    // during exclusive access transitions to open or exclusive access.
+    //
+    // For any other processor, the access state always transitions to open
+    // access.
+    //
+    // See ARM DDI 0406C.b, A3.4.2.
+    //
+    // However, similar to the local monitor, it is possible that a store
+    // caused a cache eviction, which can affect the monitor, so
+    // conservatively, we always clear the monitor.
+    access_state_ = MonitorAccess::Open;
+    tagged_addr_ = 0;
+  }
+}
+
+bool Simulator::GlobalMonitor::Processor::NotifyStoreExcl_Locked(
+    int32_t addr, bool is_requesting_processor) {
+  if (access_state_ == MonitorAccess::Exclusive) {
+    if (is_requesting_processor) {
+      // It is allowed for a processor to require that the address matches
+      // exactly (A3.4.5), so this comparison does not mask addr.
+      if (addr == tagged_addr_) {
+        // The access state for the requesting processor after a successful
+        // exclusive store is implementation-defined, but according to the ARM
+        // DDI, this has no effect on the subsequent operation of the global
+        // monitor.
+        access_state_ = MonitorAccess::Open;
+        tagged_addr_ = 0;
+        // Introduce occasional strex failures. This is to simulate the
+        // behavior of hardware, which can randomly fail due to background
+        // cache evictions.
+        if (failure_counter_++ >= kMaxFailureCounter) {
+          failure_counter_ = 0;
+          return false;
+        } else {
+          return true;
+        }
+      }
+    } else if ((addr & kExclusiveTaggedAddrMask) ==
+               (tagged_addr_ & kExclusiveTaggedAddrMask)) {
+      // Check the masked addresses when responding to a successful lock by
+      // another processor so the implementation is more conservative (i.e. the
+      // granularity of locking is as large as possible).
+      access_state_ = MonitorAccess::Open;
+      tagged_addr_ = 0;
+      return false;
+    }
+  }
+  return false;
+}
+
+Simulator::GlobalMonitor::GlobalMonitor() : head_(nullptr) {}
+
+void Simulator::GlobalMonitor::NotifyLoadExcl_Locked(int32_t addr,
+                                                     Processor* processor) {
+  processor->NotifyLoadExcl_Locked(addr);
+  PrependProcessor_Locked(processor);
+}
+
+void Simulator::GlobalMonitor::NotifyStore_Locked(int32_t addr,
+                                                  Processor* processor) {
+  // Notify each processor of the store operation.
+  for (Processor* iter = head_; iter; iter = iter->next_) {
+    bool is_requesting_processor = iter == processor;
+    iter->NotifyStore_Locked(addr, is_requesting_processor);
+  }
+}
+
+bool Simulator::GlobalMonitor::NotifyStoreExcl_Locked(int32_t addr,
+                                                      Processor* processor) {
+  DCHECK(IsProcessorInLinkedList_Locked(processor));
+  if (processor->NotifyStoreExcl_Locked(addr, true)) {
+    // Notify the other processors that this StoreExcl succeeded.
+    for (Processor* iter = head_; iter; iter = iter->next_) {
+      if (iter != processor) {
+        iter->NotifyStoreExcl_Locked(addr, false);
+      }
+    }
+    return true;
+  } else {
+    return false;
+  }
+}
+
+bool Simulator::GlobalMonitor::IsProcessorInLinkedList_Locked(
+    Processor* processor) const {
+  return head_ == processor || processor->next_ || processor->prev_;
+}
+
+void Simulator::GlobalMonitor::PrependProcessor_Locked(Processor* processor) {
+  if (IsProcessorInLinkedList_Locked(processor)) {
+    return;
+  }
+
+  if (head_) {
+    head_->prev_ = processor;
+  }
+  processor->prev_ = nullptr;
+  processor->next_ = head_;
+  head_ = processor;
+}
+
+void Simulator::GlobalMonitor::RemoveProcessor(Processor* processor) {
+  base::LockGuard<base::Mutex> lock_guard(&mutex);
+  if (!IsProcessorInLinkedList_Locked(processor)) {
+    return;
+  }
+
+  if (processor->prev_) {
+    processor->prev_->next_ = processor->next_;
+  } else {
+    head_ = processor->next_;
+  }
+  if (processor->next_) {
+    processor->next_->prev_ = processor->prev_;
+  }
+  processor->prev_ = nullptr;
+  processor->next_ = nullptr;
+}
+
 }  // namespace internal
 }  // namespace v8
 
 #endif  // USE_SIMULATOR
 
 #endif  // V8_TARGET_ARCH_ARM
