Chromium Code Reviews

Side by Side Diff: src/arm64/simulator-arm64.cc

Issue 2819093002: Revert "Reland "ARM64: Add NEON support"" (Closed)
Patch Set: Created 3 years, 8 months ago
1 // Copyright 2013 the V8 project authors. All rights reserved. 1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include <stdlib.h> 5 #include <stdlib.h>
6 #include <cmath> 6 #include <cmath>
7 #include <cstdarg> 7 #include <cstdarg>
8 8
9 #if V8_TARGET_ARCH_ARM64 9 #if V8_TARGET_ARCH_ARM64
10 10
(...skipping 25 matching lines...)
36 #define COLOUR_BOLD(colour_code) "\033[1;" colour_code "m" 36 #define COLOUR_BOLD(colour_code) "\033[1;" colour_code "m"
37 #define NORMAL "" 37 #define NORMAL ""
38 #define GREY "30" 38 #define GREY "30"
39 #define RED "31" 39 #define RED "31"
40 #define GREEN "32" 40 #define GREEN "32"
41 #define YELLOW "33" 41 #define YELLOW "33"
42 #define BLUE "34" 42 #define BLUE "34"
43 #define MAGENTA "35" 43 #define MAGENTA "35"
44 #define CYAN "36" 44 #define CYAN "36"
45 #define WHITE "37" 45 #define WHITE "37"
46
47 typedef char const * const TEXT_COLOUR; 46 typedef char const * const TEXT_COLOUR;
48 TEXT_COLOUR clr_normal = FLAG_log_colour ? COLOUR(NORMAL) : ""; 47 TEXT_COLOUR clr_normal = FLAG_log_colour ? COLOUR(NORMAL) : "";
49 TEXT_COLOUR clr_flag_name = FLAG_log_colour ? COLOUR_BOLD(WHITE) : ""; 48 TEXT_COLOUR clr_flag_name = FLAG_log_colour ? COLOUR_BOLD(WHITE) : "";
50 TEXT_COLOUR clr_flag_value = FLAG_log_colour ? COLOUR(NORMAL) : ""; 49 TEXT_COLOUR clr_flag_value = FLAG_log_colour ? COLOUR(NORMAL) : "";
51 TEXT_COLOUR clr_reg_name = FLAG_log_colour ? COLOUR_BOLD(CYAN) : ""; 50 TEXT_COLOUR clr_reg_name = FLAG_log_colour ? COLOUR_BOLD(CYAN) : "";
52 TEXT_COLOUR clr_reg_value = FLAG_log_colour ? COLOUR(CYAN) : ""; 51 TEXT_COLOUR clr_reg_value = FLAG_log_colour ? COLOUR(CYAN) : "";
53 TEXT_COLOUR clr_vreg_name = FLAG_log_colour ? COLOUR_BOLD(MAGENTA) : ""; 52 TEXT_COLOUR clr_fpreg_name = FLAG_log_colour ? COLOUR_BOLD(MAGENTA) : "";
54 TEXT_COLOUR clr_vreg_value = FLAG_log_colour ? COLOUR(MAGENTA) : ""; 53 TEXT_COLOUR clr_fpreg_value = FLAG_log_colour ? COLOUR(MAGENTA) : "";
55 TEXT_COLOUR clr_memory_address = FLAG_log_colour ? COLOUR_BOLD(BLUE) : ""; 54 TEXT_COLOUR clr_memory_address = FLAG_log_colour ? COLOUR_BOLD(BLUE) : "";
56 TEXT_COLOUR clr_debug_number = FLAG_log_colour ? COLOUR_BOLD(YELLOW) : ""; 55 TEXT_COLOUR clr_debug_number = FLAG_log_colour ? COLOUR_BOLD(YELLOW) : "";
57 TEXT_COLOUR clr_debug_message = FLAG_log_colour ? COLOUR(YELLOW) : ""; 56 TEXT_COLOUR clr_debug_message = FLAG_log_colour ? COLOUR(YELLOW) : "";
58 TEXT_COLOUR clr_printf = FLAG_log_colour ? COLOUR(GREEN) : ""; 57 TEXT_COLOUR clr_printf = FLAG_log_colour ? COLOUR(GREEN) : "";
59 58
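The clr_* strings above are either ANSI escape sequences or empty strings, so the same fprintf calls produce coloured or plain traces depending on FLAG_log_colour. A minimal standalone sketch of the mechanism; the COLOUR macro body and the flag are assumed here, since their original definitions sit in the elided lines above:

#include <cstdio>

#define COLOUR(colour_code) "\033[0;" colour_code "m"
#define COLOUR_BOLD(colour_code) "\033[1;" colour_code "m"
#define CYAN "36"

static const bool log_colour = true;  // stand-in for FLAG_log_colour
static const char* const clr_reg_name = log_colour ? COLOUR_BOLD(CYAN) : "";
static const char* const clr_normal = log_colour ? COLOUR("") : "";

int main() {
  // Prints the register name in bold cyan, then resets the colour; with
  // log_colour == false both prefixes collapse to "" and the line stays plain.
  std::printf("# %s%5s%s: 0x%016llx\n", clr_reg_name, "x0", clr_normal,
              0x123456789abcdef0ULL);
  return 0;
}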
60 // static 59 // static
61 base::LazyInstance<Simulator::GlobalMonitor>::type Simulator::global_monitor_ = 60 base::LazyInstance<Simulator::GlobalMonitor>::type Simulator::global_monitor_ =
62 LAZY_INSTANCE_INITIALIZER; 61 LAZY_INSTANCE_INITIALIZER;
63 62
64 // This is basically the same as PrintF, with a guard for FLAG_trace_sim. 63 // This is basically the same as PrintF, with a guard for FLAG_trace_sim.
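The helper introduced by this comment is elided below; its shape is a variadic wrapper that only forwards to the output stream when tracing is enabled. A hedged sketch of that shape only — the flag and stream handling are simplified stand-ins, not the simulator's actual member function:

#include <cstdarg>
#include <cstdio>

static bool trace_sim = false;  // stand-in for FLAG_trace_sim

// Forwards to vfprintf only when tracing is enabled; otherwise a no-op.
void TraceSim(const char* format, ...) {
  if (trace_sim) {
    va_list arguments;
    va_start(arguments, format);
    std::vfprintf(stderr, format, arguments);
    va_end(arguments);
  }
}

int main() {
  trace_sim = true;
  TraceSim("# simulated %d instructions\n", 42);
  return 0;
}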
(...skipping 162 matching lines...)
227 return CallInt64(entry, args); 226 return CallInt64(entry, args);
228 } 227 }
229 228
230 229
231 void Simulator::CheckPCSComplianceAndRun() { 230 void Simulator::CheckPCSComplianceAndRun() {
232 // Adjust JS-based stack limit to C-based stack limit. 231 // Adjust JS-based stack limit to C-based stack limit.
233 isolate_->stack_guard()->AdjustStackLimitForSimulator(); 232 isolate_->stack_guard()->AdjustStackLimitForSimulator();
234 233
235 #ifdef DEBUG 234 #ifdef DEBUG
236 CHECK_EQ(kNumberOfCalleeSavedRegisters, kCalleeSaved.Count()); 235 CHECK_EQ(kNumberOfCalleeSavedRegisters, kCalleeSaved.Count());
237 CHECK_EQ(kNumberOfCalleeSavedVRegisters, kCalleeSavedV.Count()); 236 CHECK_EQ(kNumberOfCalleeSavedFPRegisters, kCalleeSavedFP.Count());
238 237
239 int64_t saved_registers[kNumberOfCalleeSavedRegisters]; 238 int64_t saved_registers[kNumberOfCalleeSavedRegisters];
240 uint64_t saved_fpregisters[kNumberOfCalleeSavedVRegisters]; 239 uint64_t saved_fpregisters[kNumberOfCalleeSavedFPRegisters];
241 240
242 CPURegList register_list = kCalleeSaved; 241 CPURegList register_list = kCalleeSaved;
243 CPURegList fpregister_list = kCalleeSavedV; 242 CPURegList fpregister_list = kCalleeSavedFP;
244 243
245 for (int i = 0; i < kNumberOfCalleeSavedRegisters; i++) { 244 for (int i = 0; i < kNumberOfCalleeSavedRegisters; i++) {
246 // x31 is not a caller saved register, so no need to specify if we want 245 // x31 is not a caller saved register, so no need to specify if we want
247 // the stack or zero. 246 // the stack or zero.
248 saved_registers[i] = xreg(register_list.PopLowestIndex().code()); 247 saved_registers[i] = xreg(register_list.PopLowestIndex().code());
249 } 248 }
250 for (int i = 0; i < kNumberOfCalleeSavedVRegisters; i++) { 249 for (int i = 0; i < kNumberOfCalleeSavedFPRegisters; i++) {
251 saved_fpregisters[i] = 250 saved_fpregisters[i] =
252 dreg_bits(fpregister_list.PopLowestIndex().code()); 251 dreg_bits(fpregister_list.PopLowestIndex().code());
253 } 252 }
254 int64_t original_stack = sp(); 253 int64_t original_stack = sp();
255 #endif 254 #endif
256 // Start the simulation! 255 // Start the simulation!
257 Run(); 256 Run();
258 #ifdef DEBUG 257 #ifdef DEBUG
259 CHECK_EQ(original_stack, sp()); 258 CHECK_EQ(original_stack, sp());
260 // Check that callee-saved registers have been preserved. 259 // Check that callee-saved registers have been preserved.
261 register_list = kCalleeSaved; 260 register_list = kCalleeSaved;
262 fpregister_list = kCalleeSavedV; 261 fpregister_list = kCalleeSavedFP;
263 for (int i = 0; i < kNumberOfCalleeSavedRegisters; i++) { 262 for (int i = 0; i < kNumberOfCalleeSavedRegisters; i++) {
264 CHECK_EQ(saved_registers[i], xreg(register_list.PopLowestIndex().code())); 263 CHECK_EQ(saved_registers[i], xreg(register_list.PopLowestIndex().code()));
265 } 264 }
266 for (int i = 0; i < kNumberOfCalleeSavedVRegisters; i++) { 265 for (int i = 0; i < kNumberOfCalleeSavedFPRegisters; i++) {
267 DCHECK(saved_fpregisters[i] == 266 DCHECK(saved_fpregisters[i] ==
268 dreg_bits(fpregister_list.PopLowestIndex().code())); 267 dreg_bits(fpregister_list.PopLowestIndex().code()));
269 } 268 }
270 269
271 // Corrupt caller saved registers minus the return registers. 270
272 271
273 // In theory x0 to x7 can be used for return values, but V8 only uses x0, x1 272 // In theory x0 to x7 can be used for return values, but V8 only uses x0, x1
274 // for now. 273
275 register_list = kCallerSaved; 274 register_list = kCallerSaved;
276 register_list.Remove(x0); 275 register_list.Remove(x0);
277 register_list.Remove(x1); 276 register_list.Remove(x1);
278 277
279 // In theory d0 to d7 can be used for return values, but V8 only uses d0 278 // In theory d0 to d7 can be used for return values, but V8 only uses d0
280 // for now. 279
281 fpregister_list = kCallerSavedV; 280 fpregister_list = kCallerSavedFP;
282 fpregister_list.Remove(d0); 281 fpregister_list.Remove(d0);
283 282
284 CorruptRegisters(&register_list, kCallerSavedRegisterCorruptionValue); 283 CorruptRegisters(&register_list, kCallerSavedRegisterCorruptionValue);
285 CorruptRegisters(&fpregister_list, kCallerSavedVRegisterCorruptionValue); 284 CorruptRegisters(&fpregister_list, kCallerSavedFPRegisterCorruptionValue);
286 #endif 285 #endif
287 } 286 }
288 287
289 288
290 #ifdef DEBUG 289 #ifdef DEBUG
291 // The least significant byte of the corruption value holds the corresponding 290
292 // register's code. 291 // register's code.
293 void Simulator::CorruptRegisters(CPURegList* list, uint64_t value) { 292 void Simulator::CorruptRegisters(CPURegList* list, uint64_t value) {
294 if (list->type() == CPURegister::kRegister) { 293 if (list->type() == CPURegister::kRegister) {
295 while (!list->IsEmpty()) { 294 while (!list->IsEmpty()) {
296 unsigned code = list->PopLowestIndex().code(); 295 unsigned code = list->PopLowestIndex().code();
297 set_xreg(code, value | code); 296 set_xreg(code, value | code);
298 } 297 }
299 } else { 298 } else {
300 DCHECK_EQ(list->type(), CPURegister::kVRegister); 299 DCHECK(list->type() == CPURegister::kFPRegister);
301 while (!list->IsEmpty()) { 300 while (!list->IsEmpty()) {
302 unsigned code = list->PopLowestIndex().code(); 301 unsigned code = list->PopLowestIndex().code();
303 set_dreg_bits(code, value | code); 302 set_dreg_bits(code, value | code);
304 } 303 }
305 } 304 }
306 } 305 }
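CorruptRegisters ORs each register's code into the low byte of the poison value, which is what the comment above means: a stray value read later can be traced back to the register it came from. A small standalone illustration of that encoding; the poison constant here is made up, not V8's kCallerSavedRegisterCorruptionValue:

#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t corruption_value = 0xca11e12e96570000ULL;  // illustrative only
  for (unsigned code = 0; code < 4; code++) {
    // The least significant byte now identifies the corrupted register.
    const uint64_t poisoned = corruption_value | code;
    std::printf("x%-2u <- 0x%016llx\n", code,
                static_cast<unsigned long long>(poisoned));
  }
  return 0;
}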
307 306
308 307
309 void Simulator::CorruptAllCallerSavedCPURegisters() { 308 void Simulator::CorruptAllCallerSavedCPURegisters() {
310 // Corrupt alters its parameters, so copy them first. 309
311 CPURegList register_list = kCallerSaved; 310 CPURegList register_list = kCallerSaved;
312 CPURegList fpregister_list = kCallerSavedV; 311 CPURegList fpregister_list = kCallerSavedFP;
313 312
314 CorruptRegisters(&register_list, kCallerSavedRegisterCorruptionValue); 313 CorruptRegisters(&register_list, kCallerSavedRegisterCorruptionValue);
315 CorruptRegisters(&fpregister_list, kCallerSavedVRegisterCorruptionValue); 314 CorruptRegisters(&fpregister_list, kCallerSavedFPRegisterCorruptionValue);
316 } 315 }
317 #endif 316 #endif
318 317
319 318
320 // Extending the stack by 2 * 64 bits is required for stack alignment purposes. 319 // Extending the stack by 2 * 64 bits is required for stack alignment purposes.
321 uintptr_t Simulator::PushAddress(uintptr_t address) { 320 uintptr_t Simulator::PushAddress(uintptr_t address) {
322 DCHECK(sizeof(uintptr_t) < 2 * kXRegSize); 321 DCHECK(sizeof(uintptr_t) < 2 * kXRegSize);
323 intptr_t new_sp = sp() - 2 * kXRegSize; 322 intptr_t new_sp = sp() - 2 * kXRegSize;
324 uintptr_t* alignment_slot = 323 uintptr_t* alignment_slot =
325 reinterpret_cast<uintptr_t*>(new_sp + kXRegSize); 324 reinterpret_cast<uintptr_t*>(new_sp + kXRegSize);
(...skipping 87 matching lines...)
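PushAddress, whose first lines are shown above, drops the stack pointer by two X-register slots so it stays 16-byte aligned, stores the pushed address in the lower slot and leaves the upper slot as padding. A standalone sketch of the same layout; it assumes a 64-bit host and omits the simulator's own stack bookkeeping:

#include <cstdint>
#include <cstdio>

constexpr int kXRegSize = 8;  // bytes per X register

int main() {
  // A tiny simulated stack; sp starts just past the last slot.
  uintptr_t stack[4] = {0, 0, 0, 0};
  uintptr_t sp = reinterpret_cast<uintptr_t>(&stack[4]);

  const uintptr_t address = 0xdeadbeef;
  uintptr_t new_sp = sp - 2 * kXRegSize;                   // keep 16-byte alignment
  *reinterpret_cast<uintptr_t*>(new_sp + kXRegSize) = 0;   // unused alignment slot
  *reinterpret_cast<uintptr_t*>(new_sp) = address;         // the pushed value
  sp = new_sp;

  std::printf("sp dropped %d bytes, top of stack = 0x%llx\n", 2 * kXRegSize,
              static_cast<unsigned long long>(*reinterpret_cast<uintptr_t*>(sp)));
  return 0;
}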
413 void Simulator::ResetState() { 412 void Simulator::ResetState() {
414 // Reset the system registers. 413 // Reset the system registers.
415 nzcv_ = SimSystemRegister::DefaultValueFor(NZCV); 414 nzcv_ = SimSystemRegister::DefaultValueFor(NZCV);
416 fpcr_ = SimSystemRegister::DefaultValueFor(FPCR); 415 fpcr_ = SimSystemRegister::DefaultValueFor(FPCR);
417 416
418 // Reset registers to 0. 417 // Reset registers to 0.
419 pc_ = NULL; 418 pc_ = NULL;
420 for (unsigned i = 0; i < kNumberOfRegisters; i++) { 419 for (unsigned i = 0; i < kNumberOfRegisters; i++) {
421 set_xreg(i, 0xbadbeef); 420 set_xreg(i, 0xbadbeef);
422 } 421 }
423 for (unsigned i = 0; i < kNumberOfVRegisters; i++) { 422 for (unsigned i = 0; i < kNumberOfFPRegisters; i++) {
424 // Set FP registers to a value that is NaN in both 32-bit and 64-bit FP. 423 // Set FP registers to a value that is NaN in both 32-bit and 64-bit FP.
425 set_dreg_bits(i, 0x7ff000007f800001UL); 424 set_dreg_bits(i, 0x7ff000007f800001UL);
426 } 425 }
427 // Returning to address 0 exits the Simulator. 426 // Returning to address 0 exits the Simulator.
428 set_lr(kEndOfSimAddress); 427 set_lr(kEndOfSimAddress);
429 428
430 // Reset debug helpers. 429 // Reset debug helpers.
431 breakpoints_.empty(); 430 breakpoints_.empty();
432 break_on_next_ = false; 431 break_on_next_ = false;
433 } 432 }
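ResetState fills the general registers with 0xbadbeef and the FP/SIMD registers with 0x7ff000007f800001, a pattern chosen (per the comment above) to be NaN whether the register is later read as a 64-bit or a 32-bit float, so use of an uninitialised register shows up immediately. A quick standalone check of that property:

#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  const uint64_t bits = 0x7ff000007f800001ULL;

  double d;
  std::memcpy(&d, &bits, sizeof(d));           // whole 64-bit pattern as a double

  const uint32_t low = static_cast<uint32_t>(bits);
  float f;
  std::memcpy(&f, &low, sizeof(f));            // low 32 bits as a float

  std::printf("as double: isnan=%d, as float: isnan=%d\n",
              std::isnan(d) ? 1 : 0, std::isnan(f) ? 1 : 0);  // both print 1
  return 0;
}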
434 433
435 434
436 Simulator::~Simulator() { 435 Simulator::~Simulator() {
437 global_monitor_.Pointer()->RemoveProcessor(&global_monitor_processor_); 436 global_monitor_.Pointer()->RemoveProcessor(&global_monitor_processor_);
438 delete[] reinterpret_cast<byte*>(stack_); 437 delete[] reinterpret_cast<byte*>(stack_);
439 if (FLAG_log_instruction_stats) { 438 if (FLAG_log_instruction_stats) {
440 delete instrument_; 439 delete instrument_;
441 } 440 }
442 delete disassembler_decoder_; 441 delete disassembler_decoder_;
443 delete print_disasm_; 442 delete print_disasm_;
444 DeleteArray(last_debugger_input_); 443 DeleteArray(last_debugger_input_);
445 delete decoder_; 444 delete decoder_;
446 } 445 }
447 446
448 447
449 void Simulator::Run() { 448 void Simulator::Run() {
450 // Flush any written registers before executing anything, so that
451 // manually-set registers are logged _before_ the first instruction.
452 LogAllWrittenRegisters();
453
454 pc_modified_ = false; 449 pc_modified_ = false;
455 while (pc_ != kEndOfSimAddress) { 450 while (pc_ != kEndOfSimAddress) {
456 ExecuteInstruction(); 451 ExecuteInstruction();
457 } 452 }
458 } 453 }
459 454
460 455
461 void Simulator::RunFrom(Instruction* start) { 456 void Simulator::RunFrom(Instruction* start) {
462 set_pc(start); 457 set_pc(start);
463 Run(); 458 Run();
(...skipping 357 matching lines...)
821 "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31"}; 816 "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31"};
822 817
823 const char* Simulator::vreg_names[] = { 818 const char* Simulator::vreg_names[] = {
824 "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", 819 "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7",
825 "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", 820 "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15",
826 "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", 821 "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23",
827 "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"}; 822 "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"};
828 823
829 824
830 const char* Simulator::WRegNameForCode(unsigned code, Reg31Mode mode) { 825 const char* Simulator::WRegNameForCode(unsigned code, Reg31Mode mode) {
831 static_assert(arraysize(Simulator::wreg_names) == (kNumberOfRegisters + 1), 826 STATIC_ASSERT(arraysize(Simulator::wreg_names) == (kNumberOfRegisters + 1));
832 "Array must be large enough to hold all register names."); 827 DCHECK(code < kNumberOfRegisters);
833 DCHECK_LT(code, static_cast<unsigned>(kNumberOfRegisters));
834 // The modulo operator has no effect here, but it silences a broken GCC 828 // The modulo operator has no effect here, but it silences a broken GCC
835 // warning about out-of-bounds array accesses. 829 // warning about out-of-bounds array accesses.
836 code %= kNumberOfRegisters; 830 code %= kNumberOfRegisters;
837 831
838 // If the code represents the stack pointer, index the name after zr. 832 // If the code represents the stack pointer, index the name after zr.
839 if ((code == kZeroRegCode) && (mode == Reg31IsStackPointer)) { 833 if ((code == kZeroRegCode) && (mode == Reg31IsStackPointer)) {
840 code = kZeroRegCode + 1; 834 code = kZeroRegCode + 1;
841 } 835 }
842 return wreg_names[code]; 836 return wreg_names[code];
843 } 837 }
844 838
845 839
846 const char* Simulator::XRegNameForCode(unsigned code, Reg31Mode mode) { 840 const char* Simulator::XRegNameForCode(unsigned code, Reg31Mode mode) {
847 static_assert(arraysize(Simulator::xreg_names) == (kNumberOfRegisters + 1), 841 STATIC_ASSERT(arraysize(Simulator::xreg_names) == (kNumberOfRegisters + 1));
848 "Array must be large enough to hold all register names."); 842 DCHECK(code < kNumberOfRegisters);
849 DCHECK_LT(code, static_cast<unsigned>(kNumberOfRegisters));
850 code %= kNumberOfRegisters; 843 code %= kNumberOfRegisters;
851 844
852 // If the code represents the stack pointer, index the name after zr. 845 // If the code represents the stack pointer, index the name after zr.
853 if ((code == kZeroRegCode) && (mode == Reg31IsStackPointer)) { 846 if ((code == kZeroRegCode) && (mode == Reg31IsStackPointer)) {
854 code = kZeroRegCode + 1; 847 code = kZeroRegCode + 1;
855 } 848 }
856 return xreg_names[code]; 849 return xreg_names[code];
857 } 850 }
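Both name lookups above rely on the name tables holding kNumberOfRegisters + 1 entries: code 31 normally resolves to the zero register, and only when Reg31IsStackPointer is requested does the lookup step one slot past it. A compact standalone illustration of that indexing trick; the abbreviated table and the spelling of the 33rd entry are assumptions, since the real arrays sit in the elided part of this file:

#include <cstdio>

enum Reg31Mode { Reg31IsStackPointer, Reg31IsZeroRegister };
constexpr unsigned kNumberOfRegisters = 32;
constexpr unsigned kZeroRegCode = 31;

// Codes 0..30 map to x0..x30, 31 maps to "xzr", and the extra slot holds the
// stack-pointer name used when Reg31IsStackPointer is requested.
const char* xreg_names[kNumberOfRegisters + 1] = {
    "x0",  "x1",  "x2",  "x3",  "x4",  "x5",  "x6",  "x7",
    "x8",  "x9",  "x10", "x11", "x12", "x13", "x14", "x15",
    "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23",
    "x24", "x25", "x26", "x27", "x28", "x29", "x30", "xzr", "csp"};

const char* XRegNameForCode(unsigned code, Reg31Mode mode) {
  code %= kNumberOfRegisters;
  if ((code == kZeroRegCode) && (mode == Reg31IsStackPointer)) {
    code = kZeroRegCode + 1;  // index the name after zr
  }
  return xreg_names[code];
}

int main() {
  std::printf("%s %s\n", XRegNameForCode(31, Reg31IsZeroRegister),
              XRegNameForCode(31, Reg31IsStackPointer));  // prints "xzr csp"
  return 0;
}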
858 851
859 852
860 const char* Simulator::SRegNameForCode(unsigned code) { 853 const char* Simulator::SRegNameForCode(unsigned code) {
861 static_assert(arraysize(Simulator::sreg_names) == kNumberOfVRegisters, 854 STATIC_ASSERT(arraysize(Simulator::sreg_names) == kNumberOfFPRegisters);
862 "Array must be large enough to hold all register names."); 855 DCHECK(code < kNumberOfFPRegisters);
863 DCHECK_LT(code, static_cast<unsigned>(kNumberOfVRegisters)); 856 return sreg_names[code % kNumberOfFPRegisters];
864 return sreg_names[code % kNumberOfVRegisters];
865 } 857 }
866 858
867 859
868 const char* Simulator::DRegNameForCode(unsigned code) { 860 const char* Simulator::DRegNameForCode(unsigned code) {
869 static_assert(arraysize(Simulator::dreg_names) == kNumberOfVRegisters, 861 STATIC_ASSERT(arraysize(Simulator::dreg_names) == kNumberOfFPRegisters);
870 "Array must be large enough to hold all register names."); 862 DCHECK(code < kNumberOfFPRegisters);
871 DCHECK_LT(code, static_cast<unsigned>(kNumberOfVRegisters)); 863 return dreg_names[code % kNumberOfFPRegisters];
872 return dreg_names[code % kNumberOfVRegisters];
873 } 864 }
874 865
875 866
876 const char* Simulator::VRegNameForCode(unsigned code) { 867 const char* Simulator::VRegNameForCode(unsigned code) {
877 static_assert(arraysize(Simulator::vreg_names) == kNumberOfVRegisters, 868 STATIC_ASSERT(arraysize(Simulator::vreg_names) == kNumberOfFPRegisters);
878 "Array must be large enough to hold all register names."); 869 DCHECK(code < kNumberOfFPRegisters);
879 DCHECK_LT(code, static_cast<unsigned>(kNumberOfVRegisters)); 870 return vreg_names[code % kNumberOfFPRegisters];
880 return vreg_names[code % kNumberOfVRegisters];
881 }
882
883 void LogicVRegister::ReadUintFromMem(VectorFormat vform, int index,
884 uint64_t addr) const {
885 switch (LaneSizeInBitsFromFormat(vform)) {
886 case 8:
887 register_.Insert(index, SimMemory::Read<uint8_t>(addr));
888 break;
889 case 16:
890 register_.Insert(index, SimMemory::Read<uint16_t>(addr));
891 break;
892 case 32:
893 register_.Insert(index, SimMemory::Read<uint32_t>(addr));
894 break;
895 case 64:
896 register_.Insert(index, SimMemory::Read<uint64_t>(addr));
897 break;
898 default:
899 UNREACHABLE();
900 return;
901 }
902 }
903
904 void LogicVRegister::WriteUintToMem(VectorFormat vform, int index,
905 uint64_t addr) const {
906 switch (LaneSizeInBitsFromFormat(vform)) {
907 case 8:
908 SimMemory::Write<uint8_t>(addr, static_cast<uint8_t>(Uint(vform, index)));
909 break;
910 case 16:
911 SimMemory::Write<uint16_t>(addr,
912 static_cast<uint16_t>(Uint(vform, index)));
913 break;
914 case 32:
915 SimMemory::Write<uint32_t>(addr,
916 static_cast<uint32_t>(Uint(vform, index)));
917 break;
918 case 64:
919 SimMemory::Write<uint64_t>(addr, Uint(vform, index));
920 break;
921 default:
922 UNREACHABLE();
923 return;
924 }
925 } 871 }
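ReadUintFromMem and WriteUintToMem above are the lane-granular memory helpers being removed with NEON: they switch on the lane width reported by the vector format and move exactly one lane. A minimal standalone sketch of that dispatch, using memcpy in place of the simulator's SimMemory wrapper:

#include <cstdint>
#include <cstdio>
#include <cstring>

// Reads one lane of the given width (in bits) from src into a 64-bit container.
uint64_t ReadLane(const void* src, int lane_size_in_bits) {
  switch (lane_size_in_bits) {
    case 8:  { uint8_t v;  std::memcpy(&v, src, sizeof(v)); return v; }
    case 16: { uint16_t v; std::memcpy(&v, src, sizeof(v)); return v; }
    case 32: { uint32_t v; std::memcpy(&v, src, sizeof(v)); return v; }
    case 64: { uint64_t v; std::memcpy(&v, src, sizeof(v)); return v; }
    default: return 0;  // unreachable for valid lane sizes
  }
}

int main() {
  const uint8_t buffer[8] = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08};
  std::printf("8-bit lane: 0x%llx, 32-bit lane: 0x%llx\n",
              static_cast<unsigned long long>(ReadLane(buffer, 8)),
              static_cast<unsigned long long>(ReadLane(buffer, 32)));
  return 0;
}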
926 872
927 873
928 int Simulator::CodeFromName(const char* name) { 874 int Simulator::CodeFromName(const char* name) {
929 for (unsigned i = 0; i < kNumberOfRegisters; i++) { 875 for (unsigned i = 0; i < kNumberOfRegisters; i++) {
930 if ((strcmp(xreg_names[i], name) == 0) || 876 if ((strcmp(xreg_names[i], name) == 0) ||
931 (strcmp(wreg_names[i], name) == 0)) { 877 (strcmp(wreg_names[i], name) == 0)) {
932 return i; 878 return i;
933 } 879 }
934 } 880 }
935 for (unsigned i = 0; i < kNumberOfVRegisters; i++) { 881 for (unsigned i = 0; i < kNumberOfFPRegisters; i++) {
936 if ((strcmp(vreg_names[i], name) == 0) || 882 if ((strcmp(vreg_names[i], name) == 0) ||
937 (strcmp(dreg_names[i], name) == 0) || 883 (strcmp(dreg_names[i], name) == 0) ||
938 (strcmp(sreg_names[i], name) == 0)) { 884 (strcmp(sreg_names[i], name) == 0)) {
939 return i; 885 return i;
940 } 886 }
941 } 887 }
942 if ((strcmp("csp", name) == 0) || (strcmp("wcsp", name) == 0)) { 888 if ((strcmp("csp", name) == 0) || (strcmp("wcsp", name) == 0)) {
943 return kSPRegInternalCode; 889 return kSPRegInternalCode;
944 } 890 }
945 return -1; 891 return -1;
(...skipping 122 matching lines...)
1068 T result = op2; 1014 T result = op2;
1069 1015
1070 if (lsb) { 1016 if (lsb) {
1071 T op1 = reg<T>(instr->Rn()); 1017 T op1 = reg<T>(instr->Rn());
1072 result = op2 >> lsb | (op1 << ((sizeof(T) * 8) - lsb)); 1018 result = op2 >> lsb | (op1 << ((sizeof(T) * 8) - lsb));
1073 } 1019 }
1074 set_reg<T>(instr->Rd(), result); 1020 set_reg<T>(instr->Rd(), result);
1075 } 1021 }
1076 1022
1077 1023
1024 template<> double Simulator::FPDefaultNaN<double>() const {
1025 return kFP64DefaultNaN;
1026 }
1027
1028
1029 template<> float Simulator::FPDefaultNaN<float>() const {
1030 return kFP32DefaultNaN;
1031 }
1032
1033
1078 void Simulator::FPCompare(double val0, double val1) { 1034 void Simulator::FPCompare(double val0, double val1) {
1079 AssertSupportedFPCR(); 1035 AssertSupportedFPCR();
1080 1036
1081 // TODO(jbramley): This assumes that the C++ implementation handles 1037 // TODO(jbramley): This assumes that the C++ implementation handles
1082 // comparisons in the way that we expect (as per AssertSupportedFPCR()). 1038 // comparisons in the way that we expect (as per AssertSupportedFPCR()).
1083 if ((std::isnan(val0) != 0) || (std::isnan(val1) != 0)) { 1039 if ((std::isnan(val0) != 0) || (std::isnan(val1) != 0)) {
1084 nzcv().SetRawValue(FPUnorderedFlag); 1040 nzcv().SetRawValue(FPUnorderedFlag);
1085 } else if (val0 < val1) { 1041 } else if (val0 < val1) {
1086 nzcv().SetRawValue(FPLessThanFlag); 1042 nzcv().SetRawValue(FPLessThanFlag);
1087 } else if (val0 > val1) { 1043 } else if (val0 > val1) {
1088 nzcv().SetRawValue(FPGreaterThanFlag); 1044 nzcv().SetRawValue(FPGreaterThanFlag);
1089 } else if (val0 == val1) { 1045 } else if (val0 == val1) {
1090 nzcv().SetRawValue(FPEqualFlag); 1046 nzcv().SetRawValue(FPEqualFlag);
1091 } else { 1047 } else {
1092 UNREACHABLE(); 1048 UNREACHABLE();
1093 } 1049 }
1094 LogSystemRegister(NZCV); 1050 LogSystemRegister(NZCV);
1095 } 1051 }
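FPCompare reduces every floating-point comparison to one of four NZCV encodings, matching what an A64 FCMP would set: unordered when either operand is NaN, otherwise less-than, greater-than or equal. A standalone sketch of the same mapping; the numeric flag values below are illustrative stand-ins for V8's FP*Flag constants:

#include <cmath>
#include <cstdio>

// Illustrative NZCV nibbles (bit order N Z C V), as FCMP would leave them.
enum FPCompareResult {
  kFPEqualFlag       = 0x6,  // Z and C set
  kFPLessThanFlag    = 0x8,  // N set
  kFPGreaterThanFlag = 0x2,  // C set
  kFPUnorderedFlag   = 0x3   // C and V set
};

unsigned FPCompare(double val0, double val1) {
  if (std::isnan(val0) || std::isnan(val1)) return kFPUnorderedFlag;
  if (val0 < val1) return kFPLessThanFlag;
  if (val0 > val1) return kFPGreaterThanFlag;
  return kFPEqualFlag;  // val0 == val1
}

int main() {
  std::printf("%x %x %x %x\n", FPCompare(1.0, 2.0), FPCompare(2.0, 1.0),
              FPCompare(1.0, 1.0), FPCompare(1.0, NAN));  // prints "8 2 6 3"
  return 0;
}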
1096 1052
1097 Simulator::PrintRegisterFormat Simulator::GetPrintRegisterFormatForSize(
1098 size_t reg_size, size_t lane_size) {
1099 DCHECK_GE(reg_size, lane_size);
1100
1101 uint32_t format = 0;
1102 if (reg_size != lane_size) {
1103 switch (reg_size) {
1104 default:
1105 UNREACHABLE();
1106 break;
1107 case kQRegSize:
1108 format = kPrintRegAsQVector;
1109 break;
1110 case kDRegSize:
1111 format = kPrintRegAsDVector;
1112 break;
1113 }
1114 }
1115
1116 switch (lane_size) {
1117 default:
1118 UNREACHABLE();
1119 case kQRegSize:
1120 format |= kPrintReg1Q;
1121 break;
1122 case kDRegSize:
1123 format |= kPrintReg1D;
1124 break;
1125 case kSRegSize:
1126 format |= kPrintReg1S;
1127 break;
1128 case kHRegSize:
1129 format |= kPrintReg1H;
1130 break;
1131 case kBRegSize:
1132 format |= kPrintReg1B;
1133 break;
1134 }
1135
1136 // These sizes would be duplicate case labels.
1137 static_assert(kXRegSize == kDRegSize, "X and D registers must be same size.");
1138 static_assert(kWRegSize == kSRegSize, "W and S registers must be same size.");
1139 static_assert(kPrintXReg == kPrintReg1D,
1140 "X and D register printing code is shared.");
1141 static_assert(kPrintWReg == kPrintReg1S,
1142 "W and S register printing code is shared.");
1143
1144 return static_cast<PrintRegisterFormat>(format);
1145 }
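GetPrintRegisterFormatForSize packs two facts into one value: a vector-shape flag (whole Q or D register) and a lane-size code in the low bits, which PrintVRegister later unpacks to decide how many lanes to show. A tiny standalone round trip of that encoding; the bit values are made up for illustration, as the real kPrintReg* constants live in simulator-arm64.h:

#include <cstdio>

// Made-up format bits: low bits hold log2(lane size), high bits the vector shape.
constexpr unsigned kPrintRegLaneSizeMask = 0x07;
constexpr unsigned kPrintRegAsQVector = 0x10;
constexpr unsigned kPrintRegAsDVector = 0x20;

unsigned FormatForSize(unsigned reg_size, unsigned lane_size_log2) {
  unsigned format = lane_size_log2;
  if (reg_size == 16) format |= kPrintRegAsQVector;                     // Q register
  else if (reg_size == 8 && lane_size_log2 != 3) format |= kPrintRegAsDVector;
  return format;
}

int main() {
  const unsigned format = FormatForSize(/*reg_size=*/16, /*lane=4 bytes*/ 2);
  const unsigned lane_size = 1u << (format & kPrintRegLaneSizeMask);
  const unsigned reg_size = (format & kPrintRegAsQVector) ? 16
                            : (format & kPrintRegAsDVector) ? 8 : lane_size;
  std::printf("%u lanes of %u bytes\n", reg_size / lane_size, lane_size);  // 4 lanes of 4 bytes
  return 0;
}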
1146
1147 Simulator::PrintRegisterFormat Simulator::GetPrintRegisterFormat(
1148 VectorFormat vform) {
1149 switch (vform) {
1150 default:
1151 UNREACHABLE();
1152 return kPrintReg16B;
1153 case kFormat16B:
1154 return kPrintReg16B;
1155 case kFormat8B:
1156 return kPrintReg8B;
1157 case kFormat8H:
1158 return kPrintReg8H;
1159 case kFormat4H:
1160 return kPrintReg4H;
1161 case kFormat4S:
1162 return kPrintReg4S;
1163 case kFormat2S:
1164 return kPrintReg2S;
1165 case kFormat2D:
1166 return kPrintReg2D;
1167 case kFormat1D:
1168 return kPrintReg1D;
1169
1170 case kFormatB:
1171 return kPrintReg1B;
1172 case kFormatH:
1173 return kPrintReg1H;
1174 case kFormatS:
1175 return kPrintReg1S;
1176 case kFormatD:
1177 return kPrintReg1D;
1178 }
1179 }
1180
1181 Simulator::PrintRegisterFormat Simulator::GetPrintRegisterFormatFP(
1182 VectorFormat vform) {
1183 switch (vform) {
1184 default:
1185 UNREACHABLE();
1186 return kPrintReg16B;
1187 case kFormat4S:
1188 return kPrintReg4SFP;
1189 case kFormat2S:
1190 return kPrintReg2SFP;
1191 case kFormat2D:
1192 return kPrintReg2DFP;
1193 case kFormat1D:
1194 return kPrintReg1DFP;
1195
1196 case kFormatS:
1197 return kPrintReg1SFP;
1198 case kFormatD:
1199 return kPrintReg1DFP;
1200 }
1201 }
1202 1053
1203 void Simulator::SetBreakpoint(Instruction* location) { 1054 void Simulator::SetBreakpoint(Instruction* location) {
1204 for (unsigned i = 0; i < breakpoints_.size(); i++) { 1055 for (unsigned i = 0; i < breakpoints_.size(); i++) {
1205 if (breakpoints_.at(i).location == location) { 1056 if (breakpoints_.at(i).location == location) {
1206 PrintF(stream_, 1057 PrintF(stream_,
1207 "Existing breakpoint at %p was %s\n", 1058 "Existing breakpoint at %p was %s\n",
1208 reinterpret_cast<void*>(location), 1059 reinterpret_cast<void*>(location),
1209 breakpoints_.at(i).enabled ? "disabled" : "enabled"); 1060 breakpoints_.at(i).enabled ? "disabled" : "enabled");
1210 breakpoints_.at(i).enabled = !breakpoints_.at(i).enabled; 1061 breakpoints_.at(i).enabled = !breakpoints_.at(i).enabled;
1211 return; 1062 return;
(...skipping 43 matching lines...)
1255 } 1106 }
1256 1107
1257 1108
1258 void Simulator::PrintInstructionsAt(Instruction* start, uint64_t count) { 1109 void Simulator::PrintInstructionsAt(Instruction* start, uint64_t count) {
1259 Instruction* end = start->InstructionAtOffset(count * kInstructionSize); 1110 Instruction* end = start->InstructionAtOffset(count * kInstructionSize);
1260 for (Instruction* pc = start; pc < end; pc = pc->following()) { 1111 for (Instruction* pc = start; pc < end; pc = pc->following()) {
1261 disassembler_decoder_->Decode(pc); 1112 disassembler_decoder_->Decode(pc);
1262 } 1113 }
1263 } 1114 }
1264 1115
1265 void Simulator::PrintWrittenRegisters() {
1266 for (unsigned i = 0; i < kNumberOfRegisters; i++) {
1267 if (registers_[i].WrittenSinceLastLog()) PrintRegister(i);
1268 }
1269 }
1270
1271 void Simulator::PrintWrittenVRegisters() {
1272 for (unsigned i = 0; i < kNumberOfVRegisters; i++) {
1273 // At this point there is no type information, so print as a raw 1Q.
1274 if (vregisters_[i].WrittenSinceLastLog()) PrintVRegister(i, kPrintReg1Q);
1275 }
1276 }
1277 1116
1278 void Simulator::PrintSystemRegisters() { 1117 void Simulator::PrintSystemRegisters() {
1279 PrintSystemRegister(NZCV); 1118 PrintSystemRegister(NZCV);
1280 PrintSystemRegister(FPCR); 1119 PrintSystemRegister(FPCR);
1281 } 1120 }
1282 1121
1283 1122
1284 void Simulator::PrintRegisters() { 1123 void Simulator::PrintRegisters() {
1285 for (unsigned i = 0; i < kNumberOfRegisters; i++) { 1124 for (unsigned i = 0; i < kNumberOfRegisters; i++) {
1286 PrintRegister(i); 1125 PrintRegister(i);
1287 } 1126 }
1288 } 1127 }
1289 1128
1290 void Simulator::PrintVRegisters() { 1129
1291 for (unsigned i = 0; i < kNumberOfVRegisters; i++) { 1130 void Simulator::PrintFPRegisters() {
1292 // At this point there is no type information, so print as a raw 1Q. 1131 for (unsigned i = 0; i < kNumberOfFPRegisters; i++) {
1293 PrintVRegister(i, kPrintReg1Q); 1132 PrintFPRegister(i);
1294 } 1133 }
1295 } 1134 }
1296 1135
1297 1136
1298 void Simulator::PrintRegister(unsigned code, Reg31Mode r31mode) { 1137 void Simulator::PrintRegister(unsigned code, Reg31Mode r31mode) {
1299 registers_[code].NotifyRegisterLogged();
1300
1301 // Don't print writes into xzr. 1138 // Don't print writes into xzr.
1302 if ((code == kZeroRegCode) && (r31mode == Reg31IsZeroRegister)) { 1139 if ((code == kZeroRegCode) && (r31mode == Reg31IsZeroRegister)) {
1303 return; 1140 return;
1304 } 1141 }
1305 1142
1306 // The template for all x and w registers: 1143 // The template is "# x<code>:value".
1307 // "# x{code}: 0x{value}" 1144 fprintf(stream_, "# %s%5s: %s0x%016" PRIx64 "%s\n",
1308 // "# w{code}: 0x{value}" 1145 clr_reg_name, XRegNameForCode(code, r31mode),
1309 1146 clr_reg_value, reg<uint64_t>(code, r31mode), clr_normal);
1310 PrintRegisterRawHelper(code, r31mode);
1311 fprintf(stream_, "\n");
1312 }
1313
1314 // Print a register's name and raw value.
1315 //
1316 // The `bytes` and `lsb` arguments can be used to limit the bytes that are
1317 // printed. These arguments are intended for use in cases where the register hasn't
1318 // actually been updated (such as in PrintVWrite).
1319 //
1320 // No newline is printed. This allows the caller to print more details (such as
1321 // a floating-point interpretation or a memory access annotation).
1322 void Simulator::PrintVRegisterRawHelper(unsigned code, int bytes, int lsb) {
1323 // The template for vector types:
1324 // "# v{code}: 0xffeeddccbbaa99887766554433221100".
1325 // An example with bytes=4 and lsb=8:
1326 // "# v{code}: 0xbbaa9988 ".
1327 fprintf(stream_, "# %s%5s: %s", clr_vreg_name, VRegNameForCode(code),
1328 clr_vreg_value);
1329
1330 int msb = lsb + bytes - 1;
1331 int byte = kQRegSize - 1;
1332
1333 // Print leading padding spaces. (Two spaces per byte.)
1334 while (byte > msb) {
1335 fprintf(stream_, " ");
1336 byte--;
1337 }
1338
1339 // Print the specified part of the value, byte by byte.
1340 qreg_t rawbits = qreg(code);
1341 fprintf(stream_, "0x");
1342 while (byte >= lsb) {
1343 fprintf(stream_, "%02x", rawbits.val[byte]);
1344 byte--;
1345 }
1346
1347 // Print trailing padding spaces.
1348 while (byte >= 0) {
1349 fprintf(stream_, " ");
1350 byte--;
1351 }
1352 fprintf(stream_, "%s", clr_normal);
1353 }
1354
1355 // Print each of the specified lanes of a register as a float or double value.
1356 //
1357 // The `lane_count` and `rightmost_lane` arguments can be used to limit the lanes that
1358 // are printed. These arguments are intended for use in cases where the register
1359 // hasn't actually been updated (such as in PrintVWrite).
1360 //
1361 // No newline is printed. This allows the caller to print more details (such as
1362 // a memory access annotation).
1363 void Simulator::PrintVRegisterFPHelper(unsigned code,
1364 unsigned lane_size_in_bytes,
1365 int lane_count, int rightmost_lane) {
1366 DCHECK((lane_size_in_bytes == kSRegSize) ||
1367 (lane_size_in_bytes == kDRegSize));
1368
1369 unsigned msb = (lane_count + rightmost_lane) * lane_size_in_bytes;
1370 DCHECK_LE(msb, static_cast<unsigned>(kQRegSize));
1371
1372 // For scalar types ((lane_count == 1) && (rightmost_lane == 0)), a register
1373 // name is used:
1374 // " (s{code}: {value})"
1375 // " (d{code}: {value})"
1376 // For vector types, "..." is used to represent one or more omitted lanes.
1377 // " (..., {value}, {value}, ...)"
1378 if ((lane_count == 1) && (rightmost_lane == 0)) {
1379 const char* name = (lane_size_in_bytes == kSRegSize)
1380 ? SRegNameForCode(code)
1381 : DRegNameForCode(code);
1382 fprintf(stream_, " (%s%s: ", clr_vreg_name, name);
1383 } else {
1384 if (msb < (kQRegSize - 1)) {
1385 fprintf(stream_, " (..., ");
1386 } else {
1387 fprintf(stream_, " (");
1388 }
1389 }
1390
1391 // Print the list of values.
1392 const char* separator = "";
1393 int leftmost_lane = rightmost_lane + lane_count - 1;
1394 for (int lane = leftmost_lane; lane >= rightmost_lane; lane--) {
1395 double value = (lane_size_in_bytes == kSRegSize)
1396 ? vreg(code).Get<float>(lane)
1397 : vreg(code).Get<double>(lane);
1398 fprintf(stream_, "%s%s%#g%s", separator, clr_vreg_value, value, clr_normal);
1399 separator = ", ";
1400 }
1401
1402 if (rightmost_lane > 0) {
1403 fprintf(stream_, ", ...");
1404 }
1405 fprintf(stream_, ")");
1406 }
1407
1408 // Print a register's name and raw value.
1409 //
1410 // Only the least-significant `size_in_bytes` bytes of the register are printed,
1411 // but the value is aligned as if the whole register had been printed.
1412 //
1413 // For typical register updates, size_in_bytes should be set to kXRegSize
1414 // -- the default -- so that the whole register is printed. Other values of
1415 // size_in_bytes are intended for use when the register hasn't actually been
1416 // updated (such as in PrintWrite).
1417 //
1418 // No newline is printed. This allows the caller to print more details (such as
1419 // a memory access annotation).
1420 void Simulator::PrintRegisterRawHelper(unsigned code, Reg31Mode r31mode,
1421 int size_in_bytes) {
1422 // The template for all supported sizes.
1423 // "# x{code}: 0xffeeddccbbaa9988"
1424 // "# w{code}: 0xbbaa9988"
1425 // "# w{code}<15:0>: 0x9988"
1426 // "# w{code}<7:0>: 0x88"
1427 unsigned padding_chars = (kXRegSize - size_in_bytes) * 2;
1428
1429 const char* name = "";
1430 const char* suffix = "";
1431 switch (size_in_bytes) {
1432 case kXRegSize:
1433 name = XRegNameForCode(code, r31mode);
1434 break;
1435 case kWRegSize:
1436 name = WRegNameForCode(code, r31mode);
1437 break;
1438 case 2:
1439 name = WRegNameForCode(code, r31mode);
1440 suffix = "<15:0>";
1441 padding_chars -= strlen(suffix);
1442 break;
1443 case 1:
1444 name = WRegNameForCode(code, r31mode);
1445 suffix = "<7:0>";
1446 padding_chars -= strlen(suffix);
1447 break;
1448 default:
1449 UNREACHABLE();
1450 }
1451 fprintf(stream_, "# %s%5s%s: ", clr_reg_name, name, suffix);
1452
1453 // Print leading padding spaces.
1454 DCHECK_LT(padding_chars, kXRegSize * 2U);
1455 for (unsigned i = 0; i < padding_chars; i++) {
1456 putc(' ', stream_);
1457 }
1458
1459 // Print the specified bits in hexadecimal format.
1460 uint64_t bits = reg<uint64_t>(code, r31mode);
1461 bits &= kXRegMask >> ((kXRegSize - size_in_bytes) * 8);
1462 static_assert(sizeof(bits) == kXRegSize,
1463 "X registers and uint64_t must be the same size.");
1464
1465 int chars = size_in_bytes * 2;
1466 fprintf(stream_, "%s0x%0*" PRIx64 "%s", clr_reg_value, chars, bits,
1467 clr_normal);
1468 }
1469
1470 void Simulator::PrintVRegister(unsigned code, PrintRegisterFormat format) {
1471 vregisters_[code].NotifyRegisterLogged();
1472
1473 int lane_size_log2 = format & kPrintRegLaneSizeMask;
1474
1475 int reg_size_log2;
1476 if (format & kPrintRegAsQVector) {
1477 reg_size_log2 = kQRegSizeLog2;
1478 } else if (format & kPrintRegAsDVector) {
1479 reg_size_log2 = kDRegSizeLog2;
1480 } else {
1481 // Scalar types.
1482 reg_size_log2 = lane_size_log2;
1483 }
1484
1485 int lane_count = 1 << (reg_size_log2 - lane_size_log2);
1486 int lane_size = 1 << lane_size_log2;
1487
1488 // The template for vector types:
1489 // "# v{code}: 0x{rawbits} (..., {value}, ...)".
1490 // The template for scalar types:
1491 // "# v{code}: 0x{rawbits} ({reg}:{value})".
1492 // The values in parentheses after the bit representations are floating-point
1493 // interpretations. They are displayed only if the kPrintVRegAsFP bit is set.
1494
1495 PrintVRegisterRawHelper(code);
1496 if (format & kPrintRegAsFP) {
1497 PrintVRegisterFPHelper(code, lane_size, lane_count);
1498 }
1499
1500 fprintf(stream_, "\n");
1501 } 1147 }
1502 1148
1503 1149
1150 void Simulator::PrintFPRegister(unsigned code, PrintFPRegisterSizes sizes) {
1151 // The template is "# v<code>:bits (d<code>:value, ...)".
1152
1153 DCHECK(sizes != 0);
1154 DCHECK((sizes & kPrintAllFPRegValues) == sizes);
1155
1156 // Print the raw bits.
1157 fprintf(stream_, "# %s%5s: %s0x%016" PRIx64 "%s (",
1158 clr_fpreg_name, VRegNameForCode(code),
1159 clr_fpreg_value, fpreg<uint64_t>(code), clr_normal);
1160
1161 // Print all requested value interpretations.
1162 bool need_separator = false;
1163 if (sizes & kPrintDRegValue) {
1164 fprintf(stream_, "%s%s%s: %s%g%s",
1165 need_separator ? ", " : "",
1166 clr_fpreg_name, DRegNameForCode(code),
1167 clr_fpreg_value, fpreg<double>(code), clr_normal);
1168 need_separator = true;
1169 }
1170
1171 if (sizes & kPrintSRegValue) {
1172 fprintf(stream_, "%s%s%s: %s%g%s",
1173 need_separator ? ", " : "",
1174 clr_fpreg_name, SRegNameForCode(code),
1175 clr_fpreg_value, fpreg<float>(code), clr_normal);
1176 need_separator = true;
1177 }
1178
1179 // End the value list.
1180 fprintf(stream_, ")\n");
1181 }
1182
1183
1504 void Simulator::PrintSystemRegister(SystemRegister id) { 1184 void Simulator::PrintSystemRegister(SystemRegister id) {
1505 switch (id) { 1185 switch (id) {
1506 case NZCV: 1186 case NZCV:
1507 fprintf(stream_, "# %sNZCV: %sN:%d Z:%d C:%d V:%d%s\n", 1187 fprintf(stream_, "# %sNZCV: %sN:%d Z:%d C:%d V:%d%s\n",
1508 clr_flag_name, clr_flag_value, 1188 clr_flag_name, clr_flag_value,
1509 nzcv().N(), nzcv().Z(), nzcv().C(), nzcv().V(), 1189 nzcv().N(), nzcv().Z(), nzcv().C(), nzcv().V(),
1510 clr_normal); 1190 clr_normal);
1511 break; 1191 break;
1512 case FPCR: { 1192 case FPCR: {
1513 static const char * rmode[] = { 1193 static const char * rmode[] = {
1514 "0b00 (Round to Nearest)", 1194 "0b00 (Round to Nearest)",
1515 "0b01 (Round towards Plus Infinity)", 1195 "0b01 (Round towards Plus Infinity)",
1516 "0b10 (Round towards Minus Infinity)", 1196 "0b10 (Round towards Minus Infinity)",
1517 "0b11 (Round towards Zero)" 1197 "0b11 (Round towards Zero)"
1518 }; 1198 };
1519 DCHECK(fpcr().RMode() < arraysize(rmode)); 1199 DCHECK(fpcr().RMode() < arraysize(rmode));
1520 fprintf(stream_, 1200 fprintf(stream_,
1521 "# %sFPCR: %sAHP:%d DN:%d FZ:%d RMode:%s%s\n", 1201 "# %sFPCR: %sAHP:%d DN:%d FZ:%d RMode:%s%s\n",
1522 clr_flag_name, clr_flag_value, 1202 clr_flag_name, clr_flag_value,
1523 fpcr().AHP(), fpcr().DN(), fpcr().FZ(), rmode[fpcr().RMode()], 1203 fpcr().AHP(), fpcr().DN(), fpcr().FZ(), rmode[fpcr().RMode()],
1524 clr_normal); 1204 clr_normal);
1525 break; 1205 break;
1526 } 1206 }
1527 default: 1207 default:
1528 UNREACHABLE(); 1208 UNREACHABLE();
1529 } 1209 }
1530 } 1210 }
1531 1211
1532 void Simulator::PrintRead(uintptr_t address, unsigned reg_code,
1533 PrintRegisterFormat format) {
1534 registers_[reg_code].NotifyRegisterLogged();
1535 1212
1536 USE(format); 1213 void Simulator::PrintRead(uintptr_t address,
1214 size_t size,
1215 unsigned reg_code) {
1216 USE(size); // Size is unused here.
1537 1217
1538 // The template is "# {reg}: 0x{value} <- {address}". 1218 // The template is "# x<code>:value <- address".
1539 PrintRegisterRawHelper(reg_code, Reg31IsZeroRegister); 1219 fprintf(stream_, "# %s%5s: %s0x%016" PRIx64 "%s",
1220 clr_reg_name, XRegNameForCode(reg_code),
1221 clr_reg_value, reg<uint64_t>(reg_code), clr_normal);
1222
1540 fprintf(stream_, " <- %s0x%016" PRIxPTR "%s\n", 1223 fprintf(stream_, " <- %s0x%016" PRIxPTR "%s\n",
1541 clr_memory_address, address, clr_normal); 1224 clr_memory_address, address, clr_normal);
1542 } 1225 }
1543 1226
1544 void Simulator::PrintVRead(uintptr_t address, unsigned reg_code,
1545 PrintRegisterFormat format, unsigned lane) {
1546 vregisters_[reg_code].NotifyRegisterLogged();
1547 1227
1548 // The template is "# v{code}: 0x{rawbits} <- address". 1228 void Simulator::PrintReadFP(uintptr_t address,
1549 PrintVRegisterRawHelper(reg_code); 1229 size_t size,
1550 if (format & kPrintRegAsFP) { 1230 unsigned reg_code) {
1551 PrintVRegisterFPHelper(reg_code, GetPrintRegLaneSizeInBytes(format), 1231 // The template is "# reg:bits (reg:value) <- address".
1552 GetPrintRegLaneCount(format), lane); 1232 switch (size) {
1233 case kSRegSize:
1234 fprintf(stream_, "# %s%5s: %s0x%016" PRIx64 "%s (%s%s: %s%gf%s)",
1235 clr_fpreg_name, VRegNameForCode(reg_code),
1236 clr_fpreg_value, fpreg<uint64_t>(reg_code), clr_normal,
1237 clr_fpreg_name, SRegNameForCode(reg_code),
1238 clr_fpreg_value, fpreg<float>(reg_code), clr_normal);
1239 break;
1240 case kDRegSize:
1241 fprintf(stream_, "# %s%5s: %s0x%016" PRIx64 "%s (%s%s: %s%g%s)",
1242 clr_fpreg_name, VRegNameForCode(reg_code),
1243 clr_fpreg_value, fpreg<uint64_t>(reg_code), clr_normal,
1244 clr_fpreg_name, DRegNameForCode(reg_code),
1245 clr_fpreg_value, fpreg<double>(reg_code), clr_normal);
1246 break;
1247 default:
1248 UNREACHABLE();
1553 } 1249 }
1250
1554 fprintf(stream_, " <- %s0x%016" PRIxPTR "%s\n", 1251 fprintf(stream_, " <- %s0x%016" PRIxPTR "%s\n",
1555 clr_memory_address, address, clr_normal); 1252 clr_memory_address, address, clr_normal);
1556 } 1253 }
1557 1254
1558 void Simulator::PrintWrite(uintptr_t address, unsigned reg_code,
1559 PrintRegisterFormat format) {
1560 DCHECK_EQ(GetPrintRegLaneCount(format), 1U);
1561 1255
1562 // The template is "# v{code}: 0x{value} -> {address}". To keep the trace tidy 1256 void Simulator::PrintWrite(uintptr_t address,
1563 // and readable, the value is aligned with the values in the register trace. 1257 size_t size,
1564 PrintRegisterRawHelper(reg_code, Reg31IsZeroRegister, 1258 unsigned reg_code) {
1565 GetPrintRegSizeInBytes(format)); 1259 // The template is "# reg:value -> address". To keep the trace tidy and
1260 // readable, the value is aligned with the values in the register trace.
1261 switch (size) {
1262 case kByteSizeInBytes:
1263 fprintf(stream_, "# %s%5s<7:0>: %s0x%02" PRIx8 "%s",
1264 clr_reg_name, WRegNameForCode(reg_code),
1265 clr_reg_value, reg<uint8_t>(reg_code), clr_normal);
1266 break;
1267 case kHalfWordSizeInBytes:
1268 fprintf(stream_, "# %s%5s<15:0>: %s0x%04" PRIx16 "%s",
1269 clr_reg_name, WRegNameForCode(reg_code),
1270 clr_reg_value, reg<uint16_t>(reg_code), clr_normal);
1271 break;
1272 case kWRegSize:
1273 fprintf(stream_, "# %s%5s: %s0x%08" PRIx32 "%s",
1274 clr_reg_name, WRegNameForCode(reg_code),
1275 clr_reg_value, reg<uint32_t>(reg_code), clr_normal);
1276 break;
1277 case kXRegSize:
1278 fprintf(stream_, "# %s%5s: %s0x%016" PRIx64 "%s",
1279 clr_reg_name, XRegNameForCode(reg_code),
1280 clr_reg_value, reg<uint64_t>(reg_code), clr_normal);
1281 break;
1282 default:
1283 UNREACHABLE();
1284 }
1285
1566 fprintf(stream_, " -> %s0x%016" PRIxPTR "%s\n", 1286 fprintf(stream_, " -> %s0x%016" PRIxPTR "%s\n",
1567 clr_memory_address, address, clr_normal); 1287 clr_memory_address, address, clr_normal);
1568 } 1288 }
1569 1289
1570 void Simulator::PrintVWrite(uintptr_t address, unsigned reg_code, 1290
1571 PrintRegisterFormat format, unsigned lane) { 1291 void Simulator::PrintWriteFP(uintptr_t address,
1572 // The templates: 1292 size_t size,
1573 // "# v{code}: 0x{rawbits} -> {address}" 1293 unsigned reg_code) {
1574 // "# v{code}: 0x{rawbits} (..., {value}, ...) -> {address}". 1294 // The template is "# reg:bits (reg:value) -> address". To keep the trace tidy
1575 // "# v{code}: 0x{rawbits} ({reg}:{value}) -> {address}" 1295 // and readable, the value is aligned with the values in the register trace.
1576 // Because this trace doesn't represent a change to the source register's 1296 switch (size) {
1577 // value, only the relevant part of the value is printed. To keep the trace 1297 case kSRegSize:
1578 // tidy and readable, the raw value is aligned with the other values in the 1298 fprintf(stream_, "# %s%5s<31:0>: %s0x%08" PRIx32 "%s (%s%s: %s%gf%s)",
1579 // register trace. 1299 clr_fpreg_name, VRegNameForCode(reg_code),
1580 int lane_count = GetPrintRegLaneCount(format); 1300 clr_fpreg_value, fpreg<uint32_t>(reg_code), clr_normal,
1581 int lane_size = GetPrintRegLaneSizeInBytes(format); 1301 clr_fpreg_name, SRegNameForCode(reg_code),
1582 int reg_size = GetPrintRegSizeInBytes(format); 1302 clr_fpreg_value, fpreg<float>(reg_code), clr_normal);
1583 PrintVRegisterRawHelper(reg_code, reg_size, lane_size * lane); 1303 break;
1584 if (format & kPrintRegAsFP) { 1304 case kDRegSize:
1585 PrintVRegisterFPHelper(reg_code, lane_size, lane_count, lane); 1305 fprintf(stream_, "# %s%5s: %s0x%016" PRIx64 "%s (%s%s: %s%g%s)",
1306 clr_fpreg_name, VRegNameForCode(reg_code),
1307 clr_fpreg_value, fpreg<uint64_t>(reg_code), clr_normal,
1308 clr_fpreg_name, DRegNameForCode(reg_code),
1309 clr_fpreg_value, fpreg<double>(reg_code), clr_normal);
1310 break;
1311 default:
1312 UNREACHABLE();
1586 } 1313 }
1314
1587 fprintf(stream_, " -> %s0x%016" PRIxPTR "%s\n", 1315 fprintf(stream_, " -> %s0x%016" PRIxPTR "%s\n",
1588 clr_memory_address, address, clr_normal); 1316 clr_memory_address, address, clr_normal);
1589 } 1317 }
1590 1318
1591 1319
1592 // Visitors--------------------------------------------------------------------- 1320 // Visitors---------------------------------------------------------------------
1593 1321
1594 void Simulator::VisitUnimplemented(Instruction* instr) { 1322 void Simulator::VisitUnimplemented(Instruction* instr) {
1595 fprintf(stream_, "Unimplemented instruction at %p: 0x%08" PRIx32 "\n", 1323 fprintf(stream_, "Unimplemented instruction at %p: 0x%08" PRIx32 "\n",
1596 reinterpret_cast<void*>(instr), instr->InstructionBits()); 1324 reinterpret_cast<void*>(instr), instr->InstructionBits());
(...skipping 325 matching lines...)
1922 // below ensures that push operations are safe even when interrupted: the 1650 // below ensures that push operations are safe even when interrupted: the
1923 // stack pointer will be decremented before adding an element to the stack. 1651 // stack pointer will be decremented before adding an element to the stack.
1924 if (instr->IsStore()) { 1652 if (instr->IsStore()) {
1925 LoadStoreWriteBack(addr_reg, offset, addrmode); 1653 LoadStoreWriteBack(addr_reg, offset, addrmode);
1926 1654
1927 // For store the address post writeback is used to check access below the 1655 // For store the address post writeback is used to check access below the
1928 // stack. 1656 // stack.
1929 stack = sp(); 1657 stack = sp();
1930 } 1658 }
1931 1659
1932 LoadStoreOp op = static_cast<LoadStoreOp>(instr->Mask(LoadStoreMask)); 1660 LoadStoreOp op = static_cast<LoadStoreOp>(instr->Mask(LoadStoreOpMask));
1933 switch (op) { 1661 switch (op) {
1934 // Use _no_log variants to suppress the register trace (LOG_REGS, 1662 // Use _no_log variants to suppress the register trace (LOG_REGS,
1935 // LOG_VREGS). We will print a more detailed log. 1663 // LOG_FP_REGS). We will print a more detailed log.
1936 case LDRB_w: set_wreg_no_log(srcdst, MemoryRead<uint8_t>(address)); break; 1664 case LDRB_w: set_wreg_no_log(srcdst, MemoryRead<uint8_t>(address)); break;
1937 case LDRH_w: set_wreg_no_log(srcdst, MemoryRead<uint16_t>(address)); break; 1665 case LDRH_w: set_wreg_no_log(srcdst, MemoryRead<uint16_t>(address)); break;
1938 case LDR_w: set_wreg_no_log(srcdst, MemoryRead<uint32_t>(address)); break; 1666 case LDR_w: set_wreg_no_log(srcdst, MemoryRead<uint32_t>(address)); break;
1939 case LDR_x: set_xreg_no_log(srcdst, MemoryRead<uint64_t>(address)); break; 1667 case LDR_x: set_xreg_no_log(srcdst, MemoryRead<uint64_t>(address)); break;
1940 case LDRSB_w: set_wreg_no_log(srcdst, MemoryRead<int8_t>(address)); break; 1668 case LDRSB_w: set_wreg_no_log(srcdst, MemoryRead<int8_t>(address)); break;
1941 case LDRSH_w: set_wreg_no_log(srcdst, MemoryRead<int16_t>(address)); break; 1669 case LDRSH_w: set_wreg_no_log(srcdst, MemoryRead<int16_t>(address)); break;
1942 case LDRSB_x: set_xreg_no_log(srcdst, MemoryRead<int8_t>(address)); break; 1670 case LDRSB_x: set_xreg_no_log(srcdst, MemoryRead<int8_t>(address)); break;
1943 case LDRSH_x: set_xreg_no_log(srcdst, MemoryRead<int16_t>(address)); break; 1671 case LDRSH_x: set_xreg_no_log(srcdst, MemoryRead<int16_t>(address)); break;
1944 case LDRSW_x: set_xreg_no_log(srcdst, MemoryRead<int32_t>(address)); break; 1672 case LDRSW_x: set_xreg_no_log(srcdst, MemoryRead<int32_t>(address)); break;
1945 case LDR_b:
1946 set_breg_no_log(srcdst, MemoryRead<uint8_t>(address));
1947 break;
1948 case LDR_h:
1949 set_hreg_no_log(srcdst, MemoryRead<uint16_t>(address));
1950 break;
1951 case LDR_s: set_sreg_no_log(srcdst, MemoryRead<float>(address)); break; 1673 case LDR_s: set_sreg_no_log(srcdst, MemoryRead<float>(address)); break;
1952 case LDR_d: set_dreg_no_log(srcdst, MemoryRead<double>(address)); break; 1674 case LDR_d: set_dreg_no_log(srcdst, MemoryRead<double>(address)); break;
1953 case LDR_q:
1954 set_qreg_no_log(srcdst, MemoryRead<qreg_t>(address));
1955 break;
1956 1675
1957 case STRB_w: MemoryWrite<uint8_t>(address, wreg(srcdst)); break; 1676 case STRB_w: MemoryWrite<uint8_t>(address, wreg(srcdst)); break;
1958 case STRH_w: MemoryWrite<uint16_t>(address, wreg(srcdst)); break; 1677 case STRH_w: MemoryWrite<uint16_t>(address, wreg(srcdst)); break;
1959 case STR_w: MemoryWrite<uint32_t>(address, wreg(srcdst)); break; 1678 case STR_w: MemoryWrite<uint32_t>(address, wreg(srcdst)); break;
1960 case STR_x: MemoryWrite<uint64_t>(address, xreg(srcdst)); break; 1679 case STR_x: MemoryWrite<uint64_t>(address, xreg(srcdst)); break;
1961 case STR_b:
1962 MemoryWrite<uint8_t>(address, breg(srcdst));
1963 break;
1964 case STR_h:
1965 MemoryWrite<uint16_t>(address, hreg(srcdst));
1966 break;
1967 case STR_s: MemoryWrite<float>(address, sreg(srcdst)); break; 1680 case STR_s: MemoryWrite<float>(address, sreg(srcdst)); break;
1968 case STR_d: MemoryWrite<double>(address, dreg(srcdst)); break; 1681 case STR_d: MemoryWrite<double>(address, dreg(srcdst)); break;
1969 case STR_q:
1970 MemoryWrite<qreg_t>(address, qreg(srcdst));
1971 break;
1972 1682
1973 default: UNIMPLEMENTED(); 1683 default: UNIMPLEMENTED();
1974 } 1684 }
1975 1685
1976 // Print a detailed trace (including the memory address) instead of the basic 1686 // Print a detailed trace (including the memory address) instead of the basic
1977 // register:value trace generated by set_*reg(). 1687 // register:value trace generated by set_*reg().
1978 unsigned access_size = 1 << instr->SizeLS(); 1688 size_t access_size = 1 << instr->SizeLS();
1979 if (instr->IsLoad()) { 1689 if (instr->IsLoad()) {
1980 if ((op == LDR_s) || (op == LDR_d)) { 1690 if ((op == LDR_s) || (op == LDR_d)) {
1981 LogVRead(address, srcdst, GetPrintRegisterFormatForSizeFP(access_size)); 1691 LogReadFP(address, access_size, srcdst);
1982 } else if ((op == LDR_b) || (op == LDR_h) || (op == LDR_q)) {
1983 LogVRead(address, srcdst, GetPrintRegisterFormatForSize(access_size));
1984 } else { 1692 } else {
1985 LogRead(address, srcdst, GetPrintRegisterFormatForSize(access_size)); 1693 LogRead(address, access_size, srcdst);
1986 } 1694 }
1987 } else { 1695 } else {
1988 if ((op == STR_s) || (op == STR_d)) { 1696 if ((op == STR_s) || (op == STR_d)) {
1989 LogVWrite(address, srcdst, GetPrintRegisterFormatForSizeFP(access_size)); 1697 LogWriteFP(address, access_size, srcdst);
1990 } else if ((op == STR_b) || (op == STR_h) || (op == STR_q)) {
1991 LogVWrite(address, srcdst, GetPrintRegisterFormatForSize(access_size));
1992 } else { 1698 } else {
1993 LogWrite(address, srcdst, GetPrintRegisterFormatForSize(access_size)); 1699 LogWrite(address, access_size, srcdst);
1994 } 1700 }
1995 } 1701 }
1996 1702
1997 // Handle the writeback for loads after the load to ensure safe pop 1703 // Handle the writeback for loads after the load to ensure safe pop
1998 // operation even when interrupted in the middle of it. The stack pointer 1704 // operation even when interrupted in the middle of it. The stack pointer
1999 // is only updated after the load so pop(fp) will never break the invariant 1705 // is only updated after the load so pop(fp) will never break the invariant
2000 // sp <= fp expected while walking the stack in the sampler. 1706 // sp <= fp expected while walking the stack in the sampler.
2001 if (instr->IsLoad()) { 1707 if (instr->IsLoad()) {
2002 // For loads the address pre writeback is used to check access below the 1708 // For loads the address pre writeback is used to check access below the
2003 // stack. 1709 // stack.
(...skipping 63 matching lines...)
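The ordering spelled out in the comments above is deliberate: stores do the base-register writeback before the memory access so an interrupted push never leaves live data above the stack pointer, while loads do the writeback after the access so an interrupted pop never moves sp past data it still needs, preserving the sampler's sp <= fp invariant. A compressed standalone sketch of that ordering, not the simulator's real helpers:

#include <cstdint>
#include <cstdio>

// Index-based stack growing downwards from the end of the array.
void SimulatePush(unsigned* sp, uint64_t* stack, uint64_t value) {
  *sp -= 1;            // 1. writeback first: the slot is reserved before use
  stack[*sp] = value;  // 2. then store; an interrupt in between sees only an
                       //    already-reserved slot, never stranded data
}

uint64_t SimulatePop(unsigned* sp, const uint64_t* stack) {
  uint64_t value = stack[*sp];  // 1. load first, while the slot is still below sp
  *sp += 1;                     // 2. writeback after, so sp never points above
                                //    data the pop still needs
  return value;
}

int main() {
  uint64_t stack[4] = {0, 0, 0, 0};
  unsigned sp = 4;
  SimulatePush(&sp, stack, 42);
  std::printf("popped %llu\n",
              static_cast<unsigned long long>(SimulatePop(&sp, stack)));
  return 0;
}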
2067 } 1773 }
2068 1774
2069 LoadStorePairOp op = 1775 LoadStorePairOp op =
2070 static_cast<LoadStorePairOp>(instr->Mask(LoadStorePairMask)); 1776 static_cast<LoadStorePairOp>(instr->Mask(LoadStorePairMask));
2071 1777
2072 // 'rt' and 'rt2' can only be aliased for stores. 1778 // 'rt' and 'rt2' can only be aliased for stores.
2073 DCHECK(((op & LoadStorePairLBit) == 0) || (rt != rt2)); 1779 DCHECK(((op & LoadStorePairLBit) == 0) || (rt != rt2));
2074 1780
2075 switch (op) { 1781 switch (op) {
2076 // Use _no_log variants to suppress the register trace (LOG_REGS, 1782 // Use _no_log variants to suppress the register trace (LOG_REGS,
2077 // LOG_VREGS). We will print a more detailed log. 1783 // LOG_FP_REGS). We will print a more detailed log.
2078 case LDP_w: { 1784 case LDP_w: {
2079 DCHECK_EQ(access_size, static_cast<unsigned>(kWRegSize)); 1785 DCHECK(access_size == kWRegSize);
2080 set_wreg_no_log(rt, MemoryRead<uint32_t>(address)); 1786 set_wreg_no_log(rt, MemoryRead<uint32_t>(address));
2081 set_wreg_no_log(rt2, MemoryRead<uint32_t>(address2)); 1787 set_wreg_no_log(rt2, MemoryRead<uint32_t>(address2));
2082 break; 1788 break;
2083 } 1789 }
2084 case LDP_s: { 1790 case LDP_s: {
2085 DCHECK_EQ(access_size, static_cast<unsigned>(kSRegSize)); 1791 DCHECK(access_size == kSRegSize);
2086 set_sreg_no_log(rt, MemoryRead<float>(address)); 1792 set_sreg_no_log(rt, MemoryRead<float>(address));
2087 set_sreg_no_log(rt2, MemoryRead<float>(address2)); 1793 set_sreg_no_log(rt2, MemoryRead<float>(address2));
2088 break; 1794 break;
2089 } 1795 }
2090 case LDP_x: { 1796 case LDP_x: {
2091 DCHECK_EQ(access_size, static_cast<unsigned>(kXRegSize)); 1797 DCHECK(access_size == kXRegSize);
2092 set_xreg_no_log(rt, MemoryRead<uint64_t>(address)); 1798 set_xreg_no_log(rt, MemoryRead<uint64_t>(address));
2093 set_xreg_no_log(rt2, MemoryRead<uint64_t>(address2)); 1799 set_xreg_no_log(rt2, MemoryRead<uint64_t>(address2));
2094 break; 1800 break;
2095 } 1801 }
2096 case LDP_d: { 1802 case LDP_d: {
2097 DCHECK_EQ(access_size, static_cast<unsigned>(kDRegSize)); 1803 DCHECK(access_size == kDRegSize);
2098 set_dreg_no_log(rt, MemoryRead<double>(address)); 1804 set_dreg_no_log(rt, MemoryRead<double>(address));
2099 set_dreg_no_log(rt2, MemoryRead<double>(address2)); 1805 set_dreg_no_log(rt2, MemoryRead<double>(address2));
2100 break; 1806 break;
2101 } 1807 }
2102 case LDP_q: {
2103 DCHECK_EQ(access_size, static_cast<unsigned>(kQRegSize));
2104 set_qreg(rt, MemoryRead<qreg_t>(address), NoRegLog);
2105 set_qreg(rt2, MemoryRead<qreg_t>(address2), NoRegLog);
2106 break;
2107 }
2108 case LDPSW_x: { 1808 case LDPSW_x: {
2109 DCHECK_EQ(access_size, static_cast<unsigned>(kWRegSize)); 1809 DCHECK(access_size == kWRegSize);
2110 set_xreg_no_log(rt, MemoryRead<int32_t>(address)); 1810 set_xreg_no_log(rt, MemoryRead<int32_t>(address));
2111 set_xreg_no_log(rt2, MemoryRead<int32_t>(address2)); 1811 set_xreg_no_log(rt2, MemoryRead<int32_t>(address2));
2112 break; 1812 break;
2113 } 1813 }
2114 case STP_w: { 1814 case STP_w: {
2115 DCHECK_EQ(access_size, static_cast<unsigned>(kWRegSize)); 1815 DCHECK(access_size == kWRegSize);
2116 MemoryWrite<uint32_t>(address, wreg(rt)); 1816 MemoryWrite<uint32_t>(address, wreg(rt));
2117 MemoryWrite<uint32_t>(address2, wreg(rt2)); 1817 MemoryWrite<uint32_t>(address2, wreg(rt2));
2118 break; 1818 break;
2119 } 1819 }
2120 case STP_s: { 1820 case STP_s: {
2121 DCHECK_EQ(access_size, static_cast<unsigned>(kSRegSize)); 1821 DCHECK(access_size == kSRegSize);
2122 MemoryWrite<float>(address, sreg(rt)); 1822 MemoryWrite<float>(address, sreg(rt));
2123 MemoryWrite<float>(address2, sreg(rt2)); 1823 MemoryWrite<float>(address2, sreg(rt2));
2124 break; 1824 break;
2125 } 1825 }
2126 case STP_x: { 1826 case STP_x: {
2127 DCHECK_EQ(access_size, static_cast<unsigned>(kXRegSize)); 1827 DCHECK(access_size == kXRegSize);
2128 MemoryWrite<uint64_t>(address, xreg(rt)); 1828 MemoryWrite<uint64_t>(address, xreg(rt));
2129 MemoryWrite<uint64_t>(address2, xreg(rt2)); 1829 MemoryWrite<uint64_t>(address2, xreg(rt2));
2130 break; 1830 break;
2131 } 1831 }
2132 case STP_d: { 1832 case STP_d: {
2133 DCHECK_EQ(access_size, static_cast<unsigned>(kDRegSize)); 1833 DCHECK(access_size == kDRegSize);
2134 MemoryWrite<double>(address, dreg(rt)); 1834 MemoryWrite<double>(address, dreg(rt));
2135 MemoryWrite<double>(address2, dreg(rt2)); 1835 MemoryWrite<double>(address2, dreg(rt2));
2136 break; 1836 break;
2137 } 1837 }
2138 case STP_q: {
2139 DCHECK_EQ(access_size, static_cast<unsigned>(kQRegSize));
2140 MemoryWrite<qreg_t>(address, qreg(rt));
2141 MemoryWrite<qreg_t>(address2, qreg(rt2));
2142 break;
2143 }
2144 default: UNREACHABLE(); 1838 default: UNREACHABLE();
2145 } 1839 }
2146 1840
2147 // Print a detailed trace (including the memory address) instead of the basic 1841 // Print a detailed trace (including the memory address) instead of the basic
2148 // register:value trace generated by set_*reg(). 1842 // register:value trace generated by set_*reg().
2149 if (instr->IsLoad()) { 1843 if (instr->IsLoad()) {
2150 if ((op == LDP_s) || (op == LDP_d)) { 1844 if ((op == LDP_s) || (op == LDP_d)) {
2151 LogVRead(address, rt, GetPrintRegisterFormatForSizeFP(access_size)); 1845 LogReadFP(address, access_size, rt);
2152 LogVRead(address2, rt2, GetPrintRegisterFormatForSizeFP(access_size)); 1846 LogReadFP(address2, access_size, rt2);
2153 } else if (op == LDP_q) {
2154 LogVRead(address, rt, GetPrintRegisterFormatForSize(access_size));
2155 LogVRead(address2, rt2, GetPrintRegisterFormatForSize(access_size));
2156 } else { 1847 } else {
2157 LogRead(address, rt, GetPrintRegisterFormatForSize(access_size)); 1848 LogRead(address, access_size, rt);
2158 LogRead(address2, rt2, GetPrintRegisterFormatForSize(access_size)); 1849 LogRead(address2, access_size, rt2);
2159 } 1850 }
2160 } else { 1851 } else {
2161 if ((op == STP_s) || (op == STP_d)) { 1852 if ((op == STP_s) || (op == STP_d)) {
2162 LogVWrite(address, rt, GetPrintRegisterFormatForSizeFP(access_size)); 1853 LogWriteFP(address, access_size, rt);
2163 LogVWrite(address2, rt2, GetPrintRegisterFormatForSizeFP(access_size)); 1854 LogWriteFP(address2, access_size, rt2);
2164 } else if (op == STP_q) {
2165 LogVWrite(address, rt, GetPrintRegisterFormatForSize(access_size));
2166 LogVWrite(address2, rt2, GetPrintRegisterFormatForSize(access_size));
2167 } else { 1855 } else {
2168 LogWrite(address, rt, GetPrintRegisterFormatForSize(access_size)); 1856 LogWrite(address, access_size, rt);
2169 LogWrite(address2, rt2, GetPrintRegisterFormatForSize(access_size)); 1857 LogWrite(address2, access_size, rt2);
2170 } 1858 }
2171 } 1859 }
2172 1860
2173 // Handle the writeback for loads after the load to ensure safe pop 1861 // Handle the writeback for loads after the load to ensure safe pop
2174 // operation even when interrupted in the middle of it. The stack pointer 1862 // operation even when interrupted in the middle of it. The stack pointer
2175 // is only updated after the load so pop(fp) will never break the invariant 1863 // is only updated after the load so pop(fp) will never break the invariant
2176 // sp <= fp expected while walking the stack in the sampler. 1864 // sp <= fp expected while walking the stack in the sampler.
2177 if (instr->IsLoad()) { 1865 if (instr->IsLoad()) {
2178 // For loads the address pre writeback is used to check access below the 1866 // For loads the address pre writeback is used to check access below the
2179 // stack. 1867 // stack.
(...skipping 10 matching lines...)
2190 1878
2191 void Simulator::VisitLoadLiteral(Instruction* instr) { 1879 void Simulator::VisitLoadLiteral(Instruction* instr) {
2192 uintptr_t address = instr->LiteralAddress(); 1880 uintptr_t address = instr->LiteralAddress();
2193 unsigned rt = instr->Rt(); 1881 unsigned rt = instr->Rt();
2194 1882
2195 base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex); 1883 base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
2196 local_monitor_.NotifyLoad(address); 1884 local_monitor_.NotifyLoad(address);
2197 1885
2198 switch (instr->Mask(LoadLiteralMask)) { 1886 switch (instr->Mask(LoadLiteralMask)) {
2199 // Use _no_log variants to suppress the register trace (LOG_REGS, 1887 // Use _no_log variants to suppress the register trace (LOG_REGS,
2200 // LOG_VREGS), then print a more detailed log. 1888 // LOG_FP_REGS), then print a more detailed log.
2201 case LDR_w_lit: 1889 case LDR_w_lit:
2202 set_wreg_no_log(rt, MemoryRead<uint32_t>(address)); 1890 set_wreg_no_log(rt, MemoryRead<uint32_t>(address));
2203 LogRead(address, rt, kPrintWReg); 1891 LogRead(address, kWRegSize, rt);
2204 break; 1892 break;
2205 case LDR_x_lit: 1893 case LDR_x_lit:
2206 set_xreg_no_log(rt, MemoryRead<uint64_t>(address)); 1894 set_xreg_no_log(rt, MemoryRead<uint64_t>(address));
2207 LogRead(address, rt, kPrintXReg); 1895 LogRead(address, kXRegSize, rt);
2208 break; 1896 break;
2209 case LDR_s_lit: 1897 case LDR_s_lit:
2210 set_sreg_no_log(rt, MemoryRead<float>(address)); 1898 set_sreg_no_log(rt, MemoryRead<float>(address));
2211 LogVRead(address, rt, kPrintSReg); 1899 LogReadFP(address, kSRegSize, rt);
2212 break; 1900 break;
2213 case LDR_d_lit: 1901 case LDR_d_lit:
2214 set_dreg_no_log(rt, MemoryRead<double>(address)); 1902 set_dreg_no_log(rt, MemoryRead<double>(address));
2215 LogVRead(address, rt, kPrintDReg); 1903 LogReadFP(address, kDRegSize, rt);
2216 break; 1904 break;
2217 default: UNREACHABLE(); 1905 default: UNREACHABLE();
2218 } 1906 }
2219 } 1907 }
2220 1908
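The literal address above is PC-relative: LDR (literal) encodes a signed 19-bit word offset in bits 23:5 of the instruction and scales it by 4. A minimal sketch of that decoding, using a hypothetical standalone helper rather than the V8 Instruction API:

#include <cstdint>

// Sketch: compute the target of an LDR (literal). The imm19 field sits in
// bits 23:5 and is a signed offset measured in 4-byte words from the PC.
uint64_t LdrLiteralAddress(uint32_t instruction_bits, uint64_t pc) {
  int64_t imm19 = (instruction_bits >> 5) & 0x7FFFF;  // extract bits 23:5
  if (imm19 & (1 << 18)) imm19 -= (1 << 19);          // sign-extend 19 bits
  return pc + imm19 * 4;                              // scale by the word size
}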
2221 1909
2222 uintptr_t Simulator::LoadStoreAddress(unsigned addr_reg, int64_t offset, 1910 uintptr_t Simulator::LoadStoreAddress(unsigned addr_reg, int64_t offset,
2223 AddrMode addrmode) { 1911 AddrMode addrmode) {
2224 const unsigned kSPRegCode = kSPRegInternalCode & kRegCodeMask; 1912 const unsigned kSPRegCode = kSPRegInternalCode & kRegCodeMask;
2225 uint64_t address = xreg(addr_reg, Reg31IsStackPointer); 1913 uint64_t address = xreg(addr_reg, Reg31IsStackPointer);
(...skipping 72 matching lines...)
2298 case LDAXR_h: 1986 case LDAXR_h:
2299 set_wreg_no_log(rt, MemoryRead<uint16_t>(address)); 1987 set_wreg_no_log(rt, MemoryRead<uint16_t>(address));
2300 break; 1988 break;
2301 case LDAR_w: 1989 case LDAR_w:
2302 case LDAXR_w: 1990 case LDAXR_w:
2303 set_wreg_no_log(rt, MemoryRead<uint32_t>(address)); 1991 set_wreg_no_log(rt, MemoryRead<uint32_t>(address));
2304 break; 1992 break;
2305 default: 1993 default:
2306 UNIMPLEMENTED(); 1994 UNIMPLEMENTED();
2307 } 1995 }
2308 LogRead(address, rt, GetPrintRegisterFormatForSize(access_size)); 1996 LogRead(address, access_size, rt);
2309 } else { 1997 } else {
2310 if (is_exclusive) { 1998 if (is_exclusive) {
2311 unsigned rs = instr->Rs(); 1999 unsigned rs = instr->Rs();
2312 if (local_monitor_.NotifyStoreExcl(address, 2000 if (local_monitor_.NotifyStoreExcl(address,
2313 get_transaction_size(access_size)) && 2001 get_transaction_size(access_size)) &&
2314 global_monitor_.Pointer()->NotifyStoreExcl_Locked( 2002 global_monitor_.Pointer()->NotifyStoreExcl_Locked(
2315 address, &global_monitor_processor_)) { 2003 address, &global_monitor_processor_)) {
2316 switch (op) { 2004 switch (op) {
2317 case STLXR_b: 2005 case STLXR_b:
2318 MemoryWrite<uint8_t>(address, wreg(rt)); 2006 MemoryWrite<uint8_t>(address, wreg(rt));
2319 break; 2007 break;
2320 case STLXR_h: 2008 case STLXR_h:
2321 MemoryWrite<uint16_t>(address, wreg(rt)); 2009 MemoryWrite<uint16_t>(address, wreg(rt));
2322 break; 2010 break;
2323 case STLXR_w: 2011 case STLXR_w:
2324 MemoryWrite<uint32_t>(address, wreg(rt)); 2012 MemoryWrite<uint32_t>(address, wreg(rt));
2325 break; 2013 break;
2326 default: 2014 default:
2327 UNIMPLEMENTED(); 2015 UNIMPLEMENTED();
2328 } 2016 }
2329 LogWrite(address, rt, GetPrintRegisterFormatForSize(access_size)); 2017 LogWrite(address, access_size, rt);
2330 set_wreg(rs, 0); 2018 set_wreg(rs, 0);
2331 } else { 2019 } else {
2332 set_wreg(rs, 1); 2020 set_wreg(rs, 1);
2333 } 2021 }
2334 } else { 2022 } else {
2335 local_monitor_.NotifyStore(address); 2023 local_monitor_.NotifyStore(address);
2336 global_monitor_.Pointer()->NotifyStore_Locked(address, 2024 global_monitor_.Pointer()->NotifyStore_Locked(address,
2337 &global_monitor_processor_); 2025 &global_monitor_processor_);
2338 switch (op) { 2026 switch (op) {
2339 case STLR_b: 2027 case STLR_b:
(...skipping 476 matching lines...)
2816 case UCVTF_sw_fixed: { 2504 case UCVTF_sw_fixed: {
2817 set_sreg(dst, 2505 set_sreg(dst,
2818 UFixedToFloat(reg<uint32_t>(src), fbits, round)); 2506 UFixedToFloat(reg<uint32_t>(src), fbits, round));
2819 break; 2507 break;
2820 } 2508 }
2821 default: UNREACHABLE(); 2509 default: UNREACHABLE();
2822 } 2510 }
2823 } 2511 }
2824 2512
2825 2513
2514 int32_t Simulator::FPToInt32(double value, FPRounding rmode) {
2515 value = FPRoundInt(value, rmode);
2516 if (value >= kWMaxInt) {
2517 return kWMaxInt;
2518 } else if (value < kWMinInt) {
2519 return kWMinInt;
2520 }
2521 return std::isnan(value) ? 0 : static_cast<int32_t>(value);
2522 }
2523
2524
2525 int64_t Simulator::FPToInt64(double value, FPRounding rmode) {
2526 value = FPRoundInt(value, rmode);
2527 if (value >= kXMaxInt) {
2528 return kXMaxInt;
2529 } else if (value < kXMinInt) {
2530 return kXMinInt;
2531 }
2532 return std::isnan(value) ? 0 : static_cast<int64_t>(value);
2533 }
2534
2535
2536 uint32_t Simulator::FPToUInt32(double value, FPRounding rmode) {
2537 value = FPRoundInt(value, rmode);
2538 if (value >= kWMaxUInt) {
2539 return kWMaxUInt;
2540 } else if (value < 0.0) {
2541 return 0;
2542 }
2543 return std::isnan(value) ? 0 : static_cast<uint32_t>(value);
2544 }
2545
2546
2547 uint64_t Simulator::FPToUInt64(double value, FPRounding rmode) {
2548 value = FPRoundInt(value, rmode);
2549 if (value >= kXMaxUInt) {
2550 return kXMaxUInt;
2551 } else if (value < 0.0) {
2552 return 0;
2553 }
2554 return std::isnan(value) ? 0 : static_cast<uint64_t>(value);
2555 }
2556
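These conversions saturate instead of wrapping: anything at or beyond the representable range clamps to the type's limit, and NaN becomes zero. A standalone illustration of the same clamping policy (plain C++, not the simulator's kWMaxInt-style constants):

#include <cmath>
#include <cstdint>
#include <cstdio>
#include <limits>

// Sketch of the saturating double -> int32_t policy used by FPToInt32 above.
int32_t SaturatingToInt32(double value) {
  if (std::isnan(value)) return 0;
  if (value >= static_cast<double>(std::numeric_limits<int32_t>::max()))
    return std::numeric_limits<int32_t>::max();
  if (value <= static_cast<double>(std::numeric_limits<int32_t>::min()))
    return std::numeric_limits<int32_t>::min();
  return static_cast<int32_t>(value);
}

int main() {
  std::printf("%d\n", SaturatingToInt32(3.0e12));   // 2147483647 (clamped)
  std::printf("%d\n", SaturatingToInt32(-3.0e12));  // -2147483648 (clamped)
  std::printf("%d\n", SaturatingToInt32(NAN));      // 0
}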
2557
2826 void Simulator::VisitFPCompare(Instruction* instr) { 2558 void Simulator::VisitFPCompare(Instruction* instr) {
2827 AssertSupportedFPCR(); 2559 AssertSupportedFPCR();
2828 2560
2561 unsigned reg_size = (instr->Mask(FP64) == FP64) ? kDRegSizeInBits
2562 : kSRegSizeInBits;
2563 double fn_val = fpreg(reg_size, instr->Rn());
2564
2829 switch (instr->Mask(FPCompareMask)) { 2565 switch (instr->Mask(FPCompareMask)) {
2830 case FCMP_s: 2566 case FCMP_s:
2831 FPCompare(sreg(instr->Rn()), sreg(instr->Rm())); 2567 case FCMP_d: FPCompare(fn_val, fpreg(reg_size, instr->Rm())); break;
2832 break;
2833 case FCMP_d:
2834 FPCompare(dreg(instr->Rn()), dreg(instr->Rm()));
2835 break;
2836 case FCMP_s_zero: 2568 case FCMP_s_zero:
2837 FPCompare(sreg(instr->Rn()), 0.0f); 2569 case FCMP_d_zero: FPCompare(fn_val, 0.0); break;
2838 break;
2839 case FCMP_d_zero:
2840 FPCompare(dreg(instr->Rn()), 0.0);
2841 break;
2842 default: UNIMPLEMENTED(); 2570 default: UNIMPLEMENTED();
2843 } 2571 }
2844 } 2572 }
2845 2573
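FPCompare (not shown in this hunk) sets the NZCV flags following the architectural FCMP mapping: equal gives ZC, less-than gives N, greater-than gives C, and an unordered comparison (either operand NaN) gives CV. A small standalone sketch of that mapping, with the flags packed as N:Z:C:V from bit 3 down:

#include <cmath>
#include <cstdint>

// Sketch: the NZCV result of an FCMP-style comparison (N in bit 3, V in bit 0).
uint32_t FcmpFlags(double a, double b) {
  if (std::isnan(a) || std::isnan(b)) return 0x3;  // unordered: C and V
  if (a == b) return 0x6;                          // equal: Z and C
  if (a < b) return 0x8;                           // less than: N
  return 0x2;                                      // greater than: C
}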
2846 2574
2847 void Simulator::VisitFPConditionalCompare(Instruction* instr) { 2575 void Simulator::VisitFPConditionalCompare(Instruction* instr) {
2848 AssertSupportedFPCR(); 2576 AssertSupportedFPCR();
2849 2577
2850 switch (instr->Mask(FPConditionalCompareMask)) { 2578 switch (instr->Mask(FPConditionalCompareMask)) {
2851 case FCCMP_s: 2579 case FCCMP_s:
2852 if (ConditionPassed(static_cast<Condition>(instr->Condition()))) {
2853 FPCompare(sreg(instr->Rn()), sreg(instr->Rm()));
2854 } else {
2855 nzcv().SetFlags(instr->Nzcv());
2856 LogSystemRegister(NZCV);
2857 }
2858 break;
2859 case FCCMP_d: { 2580 case FCCMP_d: {
2860 if (ConditionPassed(static_cast<Condition>(instr->Condition()))) { 2581 if (ConditionPassed(static_cast<Condition>(instr->Condition()))) {
2861 FPCompare(dreg(instr->Rn()), dreg(instr->Rm())); 2582 // If the condition passes, set the status flags to the result of
2583 // comparing the operands.
2584 unsigned reg_size = (instr->Mask(FP64) == FP64) ? kDRegSizeInBits
2585 : kSRegSizeInBits;
2586 FPCompare(fpreg(reg_size, instr->Rn()), fpreg(reg_size, instr->Rm()));
2862 } else { 2587 } else {
2863 // If the condition fails, set the status flags to the nzcv immediate. 2588 // If the condition fails, set the status flags to the nzcv immediate.
2864 nzcv().SetFlags(instr->Nzcv()); 2589 nzcv().SetFlags(instr->Nzcv());
2865 LogSystemRegister(NZCV); 2590 LogSystemRegister(NZCV);
2866 } 2591 }
2867 break; 2592 break;
2868 } 2593 }
2869 default: UNIMPLEMENTED(); 2594 default: UNIMPLEMENTED();
2870 } 2595 }
2871 } 2596 }
(...skipping 13 matching lines...)
2885 case FCSEL_s: set_sreg(instr->Rd(), sreg(selected)); break; 2610 case FCSEL_s: set_sreg(instr->Rd(), sreg(selected)); break;
2886 case FCSEL_d: set_dreg(instr->Rd(), dreg(selected)); break; 2611 case FCSEL_d: set_dreg(instr->Rd(), dreg(selected)); break;
2887 default: UNIMPLEMENTED(); 2612 default: UNIMPLEMENTED();
2888 } 2613 }
2889 } 2614 }
2890 2615
2891 2616
2892 void Simulator::VisitFPDataProcessing1Source(Instruction* instr) { 2617 void Simulator::VisitFPDataProcessing1Source(Instruction* instr) {
2893 AssertSupportedFPCR(); 2618 AssertSupportedFPCR();
2894 2619
2895 FPRounding fpcr_rounding = static_cast<FPRounding>(fpcr().RMode());
2896 VectorFormat vform = (instr->Mask(FP64) == FP64) ? kFormatD : kFormatS;
2897 SimVRegister& rd = vreg(instr->Rd());
2898 SimVRegister& rn = vreg(instr->Rn());
2899 bool inexact_exception = false;
2900
2901 unsigned fd = instr->Rd(); 2620 unsigned fd = instr->Rd();
2902 unsigned fn = instr->Rn(); 2621 unsigned fn = instr->Rn();
2903 2622
2904 switch (instr->Mask(FPDataProcessing1SourceMask)) { 2623 switch (instr->Mask(FPDataProcessing1SourceMask)) {
2905 case FMOV_s: 2624 case FMOV_s: set_sreg(fd, sreg(fn)); break;
2906 set_sreg(fd, sreg(fn)); 2625 case FMOV_d: set_dreg(fd, dreg(fn)); break;
2907 return; 2626 case FABS_s: set_sreg(fd, std::fabs(sreg(fn))); break;
2908 case FMOV_d: 2627 case FABS_d: set_dreg(fd, std::fabs(dreg(fn))); break;
2909 set_dreg(fd, dreg(fn)); 2628 case FNEG_s: set_sreg(fd, -sreg(fn)); break;
2910 return; 2629 case FNEG_d: set_dreg(fd, -dreg(fn)); break;
2911 case FABS_s: 2630 case FSQRT_s: set_sreg(fd, FPSqrt(sreg(fn))); break;
2912 case FABS_d: 2631 case FSQRT_d: set_dreg(fd, FPSqrt(dreg(fn))); break;
2913 fabs_(vform, vreg(fd), vreg(fn)); 2632 case FRINTA_s: set_sreg(fd, FPRoundInt(sreg(fn), FPTieAway)); break;
2914 // Explicitly log the register update whilst we have type information. 2633 case FRINTA_d: set_dreg(fd, FPRoundInt(dreg(fn), FPTieAway)); break;
2915 LogVRegister(fd, GetPrintRegisterFormatFP(vform));
2916 return;
2917 case FNEG_s:
2918 case FNEG_d:
2919 fneg(vform, vreg(fd), vreg(fn));
2920 // Explicitly log the register update whilst we have type information.
2921 LogVRegister(fd, GetPrintRegisterFormatFP(vform));
2922 return;
2923 case FCVT_ds:
2924 set_dreg(fd, FPToDouble(sreg(fn)));
2925 return;
2926 case FCVT_sd:
2927 set_sreg(fd, FPToFloat(dreg(fn), FPTieEven));
2928 return;
2929 case FCVT_hs:
2930 set_hreg(fd, FPToFloat16(sreg(fn), FPTieEven));
2931 return;
2932 case FCVT_sh:
2933 set_sreg(fd, FPToFloat(hreg(fn)));
2934 return;
2935 case FCVT_dh:
2936 set_dreg(fd, FPToDouble(FPToFloat(hreg(fn))));
2937 return;
2938 case FCVT_hd:
2939 set_hreg(fd, FPToFloat16(dreg(fn), FPTieEven));
2940 return;
2941 case FSQRT_s:
2942 case FSQRT_d:
2943 fsqrt(vform, rd, rn);
2944 // Explicitly log the register update whilst we have type information.
2945 LogVRegister(fd, GetPrintRegisterFormatFP(vform));
2946 return;
2947 case FRINTI_s:
2948 case FRINTI_d:
2949 break; // Use FPCR rounding mode.
2950 case FRINTX_s:
2951 case FRINTX_d:
2952 inexact_exception = true;
2953 break;
2954 case FRINTA_s:
2955 case FRINTA_d:
2956 fpcr_rounding = FPTieAway;
2957 break;
2958 case FRINTM_s: 2634 case FRINTM_s:
2635 set_sreg(fd, FPRoundInt(sreg(fn), FPNegativeInfinity)); break;
2959 case FRINTM_d: 2636 case FRINTM_d:
2960 fpcr_rounding = FPNegativeInfinity; 2637 set_dreg(fd, FPRoundInt(dreg(fn), FPNegativeInfinity)); break;
2961 break;
2962 case FRINTN_s:
2963 case FRINTN_d:
2964 fpcr_rounding = FPTieEven;
2965 break;
2966 case FRINTP_s: 2638 case FRINTP_s:
2639 set_sreg(fd, FPRoundInt(sreg(fn), FPPositiveInfinity));
2640 break;
2967 case FRINTP_d: 2641 case FRINTP_d:
2968 fpcr_rounding = FPPositiveInfinity; 2642 set_dreg(fd, FPRoundInt(dreg(fn), FPPositiveInfinity));
2969 break; 2643 break;
2970 case FRINTZ_s: 2644 case FRINTN_s: set_sreg(fd, FPRoundInt(sreg(fn), FPTieEven)); break;
2971 case FRINTZ_d: 2645 case FRINTN_d: set_dreg(fd, FPRoundInt(dreg(fn), FPTieEven)); break;
2972 fpcr_rounding = FPZero; 2646 case FRINTZ_s: set_sreg(fd, FPRoundInt(sreg(fn), FPZero)); break;
2973 break; 2647 case FRINTZ_d: set_dreg(fd, FPRoundInt(dreg(fn), FPZero)); break;
2974 default: 2648 case FCVT_ds: set_dreg(fd, FPToDouble(sreg(fn))); break;
2975 UNIMPLEMENTED(); 2649 case FCVT_sd: set_sreg(fd, FPToFloat(dreg(fn), FPTieEven)); break;
2976 } 2650 default: UNIMPLEMENTED();
2977 2651 }
2978 // Only FRINT* instructions fall through the switch above. 2652 }
2979 frint(vform, rd, rn, fpcr_rounding, inexact_exception); 2653
 2980 // Explicitly log the register update whilst we have type information. 2654
2981 LogVRegister(fd, GetPrintRegisterFormatFP(vform)); 2655 // Assemble the specified IEEE-754 components into the target type and apply
2982 } 2656 // appropriate rounding.
2657 // sign: 0 = positive, 1 = negative
2658 // exponent: Unbiased IEEE-754 exponent.
2659 // mantissa: The mantissa of the input. The top bit (which is not encoded for
2660 // normal IEEE-754 values) must not be omitted. This bit has the
2661 // value 'pow(2, exponent)'.
2662 //
2663 // The input value is assumed to be a normalized value. That is, the input may
2664 // not be infinity or NaN. If the source value is subnormal, it must be
2665 // normalized before calling this function such that the highest set bit in the
2666 // mantissa has the value 'pow(2, exponent)'.
2667 //
2668 // Callers should use FPRoundToFloat or FPRoundToDouble directly, rather than
2669 // calling a templated FPRound.
2670 template <class T, int ebits, int mbits>
2671 static T FPRound(int64_t sign, int64_t exponent, uint64_t mantissa,
2672 FPRounding round_mode) {
2673 DCHECK((sign == 0) || (sign == 1));
2674
2675 // Only the FPTieEven rounding mode is implemented.
2676 DCHECK(round_mode == FPTieEven);
2677 USE(round_mode);
2678
2679 // Rounding can promote subnormals to normals, and normals to infinities. For
2680 // example, a double with exponent 127 (FLT_MAX_EXP) would appear to be
2681 // encodable as a float, but rounding based on the low-order mantissa bits
2682 // could make it overflow. With ties-to-even rounding, this value would become
2683 // an infinity.
2684
2685 // ---- Rounding Method ----
2686 //
2687 // The exponent is irrelevant in the rounding operation, so we treat the
2688 // lowest-order bit that will fit into the result ('onebit') as having
2689 // the value '1'. Similarly, the highest-order bit that won't fit into
2690 // the result ('halfbit') has the value '0.5'. The 'point' sits between
2691 // 'onebit' and 'halfbit':
2692 //
2693 // These bits fit into the result.
2694 // |---------------------|
2695 // mantissa = 0bxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
2696 // ||
2697 // / |
2698 // / halfbit
2699 // onebit
2700 //
2701 // For subnormal outputs, the range of representable bits is smaller and
2702 // the position of onebit and halfbit depends on the exponent of the
2703 // input, but the method is otherwise similar.
2704 //
2705 // onebit(frac)
2706 // |
2707 // | halfbit(frac) halfbit(adjusted)
2708 // | / /
2709 // | | |
2710 // 0b00.0 (exact) -> 0b00.0 (exact) -> 0b00
2711 // 0b00.0... -> 0b00.0... -> 0b00
2712 // 0b00.1 (exact) -> 0b00.0111..111 -> 0b00
2713 // 0b00.1... -> 0b00.1... -> 0b01
2714 // 0b01.0 (exact) -> 0b01.0 (exact) -> 0b01
2715 // 0b01.0... -> 0b01.0... -> 0b01
2716 // 0b01.1 (exact) -> 0b01.1 (exact) -> 0b10
2717 // 0b01.1... -> 0b01.1... -> 0b10
2718 // 0b10.0 (exact) -> 0b10.0 (exact) -> 0b10
2719 // 0b10.0... -> 0b10.0... -> 0b10
2720 // 0b10.1 (exact) -> 0b10.0111..111 -> 0b10
2721 // 0b10.1... -> 0b10.1... -> 0b11
2722 // 0b11.0 (exact) -> 0b11.0 (exact) -> 0b11
2723 // ... / | / |
2724 // / | / |
2725 // / |
2726 // adjusted = frac - (halfbit(mantissa) & ~onebit(frac)); / |
2727 //
2728 // mantissa = (mantissa >> shift) + halfbit(adjusted);
2729
2730 static const int mantissa_offset = 0;
2731 static const int exponent_offset = mantissa_offset + mbits;
2732 static const int sign_offset = exponent_offset + ebits;
2733 STATIC_ASSERT(sign_offset == (sizeof(T) * kByteSize - 1));
2734
2735 // Bail out early for zero inputs.
2736 if (mantissa == 0) {
2737 return static_cast<T>(sign << sign_offset);
2738 }
2739
2740 // If all bits in the exponent are set, the value is infinite or NaN.
2741 // This is true for all binary IEEE-754 formats.
2742 static const int infinite_exponent = (1 << ebits) - 1;
2743 static const int max_normal_exponent = infinite_exponent - 1;
2744
2745 // Apply the exponent bias to encode it for the result. Doing this early makes
2746 // it easy to detect values that will be infinite or subnormal.
2747 exponent += max_normal_exponent >> 1;
2748
2749 if (exponent > max_normal_exponent) {
2750 // Overflow: The input is too large for the result type to represent. The
2751 // FPTieEven rounding mode handles overflows using infinities.
2752 exponent = infinite_exponent;
2753 mantissa = 0;
2754 return static_cast<T>((sign << sign_offset) |
2755 (exponent << exponent_offset) |
2756 (mantissa << mantissa_offset));
2757 }
2758
2759 // Calculate the shift required to move the top mantissa bit to the proper
2760 // place in the destination type.
2761 const int highest_significant_bit = 63 - CountLeadingZeros(mantissa, 64);
2762 int shift = highest_significant_bit - mbits;
2763
2764 if (exponent <= 0) {
2765 // The output will be subnormal (before rounding).
2766
2767 // For subnormal outputs, the shift must be adjusted by the exponent. The +1
2768 // is necessary because the exponent of a subnormal value (encoded as 0) is
2769 // the same as the exponent of the smallest normal value (encoded as 1).
2770 shift += -exponent + 1;
2771
2772 // Handle inputs that would produce a zero output.
2773 //
2774 // Shifts higher than highest_significant_bit+1 will always produce a zero
2775 // result. A shift of exactly highest_significant_bit+1 might produce a
2776 // non-zero result after rounding.
2777 if (shift > (highest_significant_bit + 1)) {
2778 // The result will always be +/-0.0.
2779 return static_cast<T>(sign << sign_offset);
2780 }
2781
2782 // Properly encode the exponent for a subnormal output.
2783 exponent = 0;
2784 } else {
2785 // Clear the topmost mantissa bit, since this is not encoded in IEEE-754
2786 // normal values.
2787 mantissa &= ~(1UL << highest_significant_bit);
2788 }
2789
2790 if (shift > 0) {
2791 // We have to shift the mantissa to the right. Some precision is lost, so we
2792 // need to apply rounding.
2793 uint64_t onebit_mantissa = (mantissa >> (shift)) & 1;
2794 uint64_t halfbit_mantissa = (mantissa >> (shift-1)) & 1;
2795 uint64_t adjusted = mantissa - (halfbit_mantissa & ~onebit_mantissa);
2796 T halfbit_adjusted = (adjusted >> (shift-1)) & 1;
2797
2798 T result =
2799 static_cast<T>((sign << sign_offset) | (exponent << exponent_offset) |
2800 ((mantissa >> shift) << mantissa_offset));
2801
2802 // A very large mantissa can overflow during rounding. If this happens, the
2803 // exponent should be incremented and the mantissa set to 1.0 (encoded as
2804 // 0). Applying halfbit_adjusted after assembling the float has the nice
2805 // side-effect that this case is handled for free.
2806 //
2807 // This also handles cases where a very large finite value overflows to
2808 // infinity, or where a very large subnormal value overflows to become
2809 // normal.
2810 return result + halfbit_adjusted;
2811 } else {
2812 // We have to shift the mantissa to the left (or not at all). The input
2813 // mantissa is exactly representable in the output mantissa, so apply no
2814 // rounding correction.
2815 return static_cast<T>((sign << sign_offset) |
2816 (exponent << exponent_offset) |
2817 ((mantissa << -shift) << mantissa_offset));
2818 }
2819 }
2820
2821
2822 // See FPRound for a description of this function.
2823 static inline double FPRoundToDouble(int64_t sign, int64_t exponent,
2824 uint64_t mantissa, FPRounding round_mode) {
2825 int64_t bits =
2826 FPRound<int64_t, kDoubleExponentBits, kDoubleMantissaBits>(sign,
2827 exponent,
2828 mantissa,
2829 round_mode);
2830 return rawbits_to_double(bits);
2831 }
2832
2833
2834 // See FPRound for a description of this function.
2835 static inline float FPRoundToFloat(int64_t sign, int64_t exponent,
2836 uint64_t mantissa, FPRounding round_mode) {
2837 int32_t bits =
2838 FPRound<int32_t, kFloatExponentBits, kFloatMantissaBits>(sign,
2839 exponent,
2840 mantissa,
2841 round_mode);
2842 return rawbits_to_float(bits);
2843 }
2844
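FPRound assembles its result by placing the sign, exponent and mantissa at fixed offsets (mantissa at 0, exponent at mbits, sign at mbits + ebits); for a float those are bits 0, 23 and 31. A tiny sketch of the same assembly for the value 1.5f, applying the exponent bias by hand:

#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  // 1.5 = 1.1b * 2^0: sign 0, unbiased exponent 0, fraction 0.5.
  uint32_t sign = 0;
  uint32_t exponent = 0 + 127;   // single-precision exponent bias
  uint32_t fraction = 1u << 22;  // 0.5 in the 23-bit fraction field
  uint32_t bits = (sign << 31) | (exponent << 23) | fraction;

  float f;
  std::memcpy(&f, &bits, sizeof(f));  // the moral equivalent of rawbits_to_float()
  std::printf("%f\n", f);             // prints 1.500000
}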
2845
2846 double Simulator::FixedToDouble(int64_t src, int fbits, FPRounding round) {
2847 if (src >= 0) {
2848 return UFixedToDouble(src, fbits, round);
2849 } else {
2850 // This works for all negative values, including INT64_MIN.
2851 return -UFixedToDouble(-src, fbits, round);
2852 }
2853 }
2854
2855
2856 double Simulator::UFixedToDouble(uint64_t src, int fbits, FPRounding round) {
2857 // An input of 0 is a special case because the result is effectively
2858 // subnormal: The exponent is encoded as 0 and there is no implicit 1 bit.
2859 if (src == 0) {
2860 return 0.0;
2861 }
2862
2863 // Calculate the exponent. The highest significant bit will have the value
2864 // 2^exponent.
2865 const int highest_significant_bit = 63 - CountLeadingZeros(src, 64);
2866 const int64_t exponent = highest_significant_bit - fbits;
2867
2868 return FPRoundToDouble(0, exponent, src, round);
2869 }
2870
2871
2872 float Simulator::FixedToFloat(int64_t src, int fbits, FPRounding round) {
2873 if (src >= 0) {
2874 return UFixedToFloat(src, fbits, round);
2875 } else {
2876 // This works for all negative values, including INT64_MIN.
2877 return -UFixedToFloat(-src, fbits, round);
2878 }
2879 }
2880
2881
2882 float Simulator::UFixedToFloat(uint64_t src, int fbits, FPRounding round) {
2883 // An input of 0 is a special case because the result is effectively
2884 // subnormal: The exponent is encoded as 0 and there is no implicit 1 bit.
2885 if (src == 0) {
2886 return 0.0f;
2887 }
2888
2889 // Calculate the exponent. The highest significant bit will have the value
2890 // 2^exponent.
2891 const int highest_significant_bit = 63 - CountLeadingZeros(src, 64);
2892 const int32_t exponent = highest_significant_bit - fbits;
2893
2894 return FPRoundToFloat(0, exponent, src, round);
2895 }
2896
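As a worked instance of the exponent calculation above: a raw fixed-point input of 6 (0b110) with fbits == 1 represents 3.0; its highest set bit is bit 2, so the exponent is 2 - 1 = 1 and the value packed is 1.1b * 2^1. A quick host-side check of the same arithmetic with std::ldexp (an illustration, not the simulator path):

#include <cmath>
#include <cstdint>
#include <cstdio>

int main() {
  uint64_t src = 6;  // raw fixed-point bits
  int fbits = 1;     // one fractional bit, so the value is 6 / 2^1 = 3.0
  double value = std::ldexp(static_cast<double>(src), -fbits);
  std::printf("%g\n", value);  // prints 3
}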
2897
2898 double Simulator::FPRoundInt(double value, FPRounding round_mode) {
2899 if ((value == 0.0) || (value == kFP64PositiveInfinity) ||
2900 (value == kFP64NegativeInfinity)) {
2901 return value;
2902 } else if (std::isnan(value)) {
2903 return FPProcessNaN(value);
2904 }
2905
2906 double int_result = floor(value);
2907 double error = value - int_result;
2908 switch (round_mode) {
2909 case FPTieAway: {
2910 // Take care of correctly handling the range ]-0.5, -0.0], which must
2911 // yield -0.0.
2912 if ((-0.5 < value) && (value < 0.0)) {
2913 int_result = -0.0;
2914
2915 } else if ((error > 0.5) || ((error == 0.5) && (int_result >= 0.0))) {
2916 // If the error is greater than 0.5, or is equal to 0.5 and the integer
2917 // result is positive, round up.
2918 int_result++;
2919 }
2920 break;
2921 }
2922 case FPTieEven: {
2923 // Take care of correctly handling the range [-0.5, -0.0], which must
2924 // yield -0.0.
2925 if ((-0.5 <= value) && (value < 0.0)) {
2926 int_result = -0.0;
2927
2928 // If the error is greater than 0.5, or is equal to 0.5 and the integer
2929 // result is odd, round up.
2930 } else if ((error > 0.5) ||
2931 ((error == 0.5) && (modulo(int_result, 2) != 0))) {
2932 int_result++;
2933 }
2934 break;
2935 }
2936 case FPZero: {
2937 // If value > 0 then we take floor(value)
2938 // otherwise, ceil(value)
2939 if (value < 0) {
2940 int_result = ceil(value);
2941 }
2942 break;
2943 }
2944 case FPNegativeInfinity: {
2945 // We always use floor(value).
2946 break;
2947 }
2948 case FPPositiveInfinity: {
2949 int_result = ceil(value);
2950 break;
2951 }
2952 default: UNIMPLEMENTED();
2953 }
2954 return int_result;
2955 }
2956
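The tie handling is where the rounding modes differ: FPTieAway moves an exact .5 fraction away from zero, while FPTieEven moves it to the nearest even integer. A self-contained sketch of the two tie rules, built on floor() the same way as FPRoundInt but using std::fmod in place of the simulator's modulo() (the -0.0 special-casing is left out of this sketch):

#include <cmath>
#include <cstdio>

double RoundTieAway(double value) {
  double result = std::floor(value);
  double error = value - result;
  if (error > 0.5 || (error == 0.5 && result >= 0.0)) result++;
  return result;
}

double RoundTieEven(double value) {
  double result = std::floor(value);
  double error = value - result;
  if (error > 0.5 || (error == 0.5 && std::fmod(result, 2.0) != 0.0)) result++;
  return result;
}

int main() {
  std::printf("%g %g\n", RoundTieAway(2.5), RoundTieEven(2.5));    // 3 2
  std::printf("%g %g\n", RoundTieAway(-2.5), RoundTieEven(-2.5));  // -3 -2
  std::printf("%g %g\n", RoundTieAway(3.5), RoundTieEven(3.5));    // 4 4
}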
2957
2958 double Simulator::FPToDouble(float value) {
2959 switch (std::fpclassify(value)) {
2960 case FP_NAN: {
2961 if (fpcr().DN()) return kFP64DefaultNaN;
2962
2963 // Convert NaNs as the processor would:
2964 // - The sign is propagated.
2965 // - The payload (mantissa) is transferred entirely, except that the top
2966 // bit is forced to '1', making the result a quiet NaN. The unused
2967 // (low-order) payload bits are set to 0.
2968 uint32_t raw = float_to_rawbits(value);
2969
2970 uint64_t sign = raw >> 31;
2971 uint64_t exponent = (1 << 11) - 1;
2972 uint64_t payload = unsigned_bitextract_64(21, 0, raw);
2973 payload <<= (52 - 23); // The unused low-order bits should be 0.
2974 payload |= (1L << 51); // Force a quiet NaN.
2975
2976 return rawbits_to_double((sign << 63) | (exponent << 52) | payload);
2977 }
2978
2979 case FP_ZERO:
2980 case FP_NORMAL:
2981 case FP_SUBNORMAL:
2982 case FP_INFINITE: {
2983 // All other inputs are preserved in a standard cast, because every value
2984 // representable using an IEEE-754 float is also representable using an
2985 // IEEE-754 double.
2986 return static_cast<double>(value);
2987 }
2988 }
2989
2990 UNREACHABLE();
2991 return static_cast<double>(value);
2992 }
2993
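The NaN path widens the 23-bit single-precision payload into the top of the 52-bit double field and forces the quiet bit, exactly as the comment describes. A small sketch performing the same bit surgery on a hand-built float NaN (the bit pattern 0x7FC01234 is an arbitrary example):

#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  uint32_t raw32 = 0x7FC01234u;  // a quiet float NaN with payload 0x1234

  uint64_t sign = raw32 >> 31;
  uint64_t exponent = (1ull << 11) - 1;  // all-ones exponent marks a NaN
  uint64_t payload = raw32 & 0x3FFFFFu;  // low 22 payload bits
  payload <<= (52 - 23);                 // align to the double mantissa
  payload |= (1ull << 51);               // force a quiet NaN

  uint64_t raw64 = (sign << 63) | (exponent << 52) | payload;
  std::printf("0x%016llx\n", static_cast<unsigned long long>(raw64));
  // prints 0x7ff8024680000000
}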
2994
2995 float Simulator::FPToFloat(double value, FPRounding round_mode) {
2996 // Only the FPTieEven rounding mode is implemented.
2997 DCHECK(round_mode == FPTieEven);
2998 USE(round_mode);
2999
3000 switch (std::fpclassify(value)) {
3001 case FP_NAN: {
3002 if (fpcr().DN()) return kFP32DefaultNaN;
3003
3004 // Convert NaNs as the processor would:
3005 // - The sign is propagated.
3006 // - The payload (mantissa) is transferred as much as possible, except
3007 // that the top bit is forced to '1', making the result a quiet NaN.
3008 uint64_t raw = double_to_rawbits(value);
3009
3010 uint32_t sign = raw >> 63;
3011 uint32_t exponent = (1 << 8) - 1;
3012 uint32_t payload =
3013 static_cast<uint32_t>(unsigned_bitextract_64(50, 52 - 23, raw));
3014 payload |= (1 << 22); // Force a quiet NaN.
3015
3016 return rawbits_to_float((sign << 31) | (exponent << 23) | payload);
3017 }
3018
3019 case FP_ZERO:
3020 case FP_INFINITE: {
3021 // In a C++ cast, any value representable in the target type will be
3022 // unchanged. This is always the case for +/-0.0 and infinities.
3023 return static_cast<float>(value);
3024 }
3025
3026 case FP_NORMAL:
3027 case FP_SUBNORMAL: {
3028 // Convert double-to-float as the processor would, assuming that FPCR.FZ
3029 // (flush-to-zero) is not set.
3030 uint64_t raw = double_to_rawbits(value);
3031 // Extract the IEEE-754 double components.
3032 uint32_t sign = raw >> 63;
3033 // Extract the exponent and remove the IEEE-754 encoding bias.
3034 int32_t exponent =
3035 static_cast<int32_t>(unsigned_bitextract_64(62, 52, raw)) - 1023;
3036 // Extract the mantissa and add the implicit '1' bit.
3037 uint64_t mantissa = unsigned_bitextract_64(51, 0, raw);
3038 if (std::fpclassify(value) == FP_NORMAL) {
3039 mantissa |= (1UL << 52);
3040 }
3041 return FPRoundToFloat(sign, exponent, mantissa, round_mode);
3042 }
3043 }
3044
3045 UNREACHABLE();
3046 return value;
3047 }
3048
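For normal and subnormal inputs the conversion reduces to an FPRoundToFloat call, so halfway cases follow ties-to-even rather than truncation. For example 1 + 2^-24 sits exactly between 1.0f and the next float up and rounds to the even candidate, 1.0f. A host-side check, assuming the host's default round-to-nearest-even cast (which matches FPTieEven):

#include <cmath>
#include <cstdio>

int main() {
  double halfway = 1.0 + std::ldexp(1.0, -24);  // exactly between two floats
  float narrowed = static_cast<float>(halfway);
  std::printf("%d\n", narrowed == 1.0f);        // prints 1: rounded to even
}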
2983 3049
2984 void Simulator::VisitFPDataProcessing2Source(Instruction* instr) { 3050 void Simulator::VisitFPDataProcessing2Source(Instruction* instr) {
2985 AssertSupportedFPCR(); 3051 AssertSupportedFPCR();
2986 3052
2987 VectorFormat vform = (instr->Mask(FP64) == FP64) ? kFormatD : kFormatS; 3053 unsigned fd = instr->Rd();
2988 SimVRegister& rd = vreg(instr->Rd()); 3054 unsigned fn = instr->Rn();
2989 SimVRegister& rn = vreg(instr->Rn()); 3055 unsigned fm = instr->Rm();
2990 SimVRegister& rm = vreg(instr->Rm()); 3056
2991 3057 // Fmaxnm and Fminnm have special NaN handling.
2992 switch (instr->Mask(FPDataProcessing2SourceMask)) { 3058 switch (instr->Mask(FPDataProcessing2SourceMask)) {
2993 case FADD_s: 3059 case FMAXNM_s: set_sreg(fd, FPMaxNM(sreg(fn), sreg(fm))); return;
2994 case FADD_d: 3060 case FMAXNM_d: set_dreg(fd, FPMaxNM(dreg(fn), dreg(fm))); return;
2995 fadd(vform, rd, rn, rm); 3061 case FMINNM_s: set_sreg(fd, FPMinNM(sreg(fn), sreg(fm))); return;
2996 break; 3062 case FMINNM_d: set_dreg(fd, FPMinNM(dreg(fn), dreg(fm))); return;
2997 case FSUB_s: 3063 default:
2998 case FSUB_d: 3064 break; // Fall through.
2999 fsub(vform, rd, rn, rm); 3065 }
3000 break; 3066
3001 case FMUL_s: 3067 if (FPProcessNaNs(instr)) return;
3002 case FMUL_d: 3068
3003 fmul(vform, rd, rn, rm); 3069 switch (instr->Mask(FPDataProcessing2SourceMask)) {
3004 break; 3070 case FADD_s: set_sreg(fd, FPAdd(sreg(fn), sreg(fm))); break;
3005 case FNMUL_s: 3071 case FADD_d: set_dreg(fd, FPAdd(dreg(fn), dreg(fm))); break;
3006 case FNMUL_d: 3072 case FSUB_s: set_sreg(fd, FPSub(sreg(fn), sreg(fm))); break;
3007 fnmul(vform, rd, rn, rm); 3073 case FSUB_d: set_dreg(fd, FPSub(dreg(fn), dreg(fm))); break;
3008 break; 3074 case FMUL_s: set_sreg(fd, FPMul(sreg(fn), sreg(fm))); break;
3009 case FDIV_s: 3075 case FMUL_d: set_dreg(fd, FPMul(dreg(fn), dreg(fm))); break;
3010 case FDIV_d: 3076 case FDIV_s: set_sreg(fd, FPDiv(sreg(fn), sreg(fm))); break;
3011 fdiv(vform, rd, rn, rm); 3077 case FDIV_d: set_dreg(fd, FPDiv(dreg(fn), dreg(fm))); break;
3012 break; 3078 case FMAX_s: set_sreg(fd, FPMax(sreg(fn), sreg(fm))); break;
3013 case FMAX_s: 3079 case FMAX_d: set_dreg(fd, FPMax(dreg(fn), dreg(fm))); break;
3014 case FMAX_d: 3080 case FMIN_s: set_sreg(fd, FPMin(sreg(fn), sreg(fm))); break;
3015 fmax(vform, rd, rn, rm); 3081 case FMIN_d: set_dreg(fd, FPMin(dreg(fn), dreg(fm))); break;
3016 break;
3017 case FMIN_s:
3018 case FMIN_d:
3019 fmin(vform, rd, rn, rm);
3020 break;
3021 case FMAXNM_s: 3082 case FMAXNM_s:
3022 case FMAXNM_d: 3083 case FMAXNM_d:
3023 fmaxnm(vform, rd, rn, rm);
3024 break;
3025 case FMINNM_s: 3084 case FMINNM_s:
3026 case FMINNM_d: 3085 case FMINNM_d:
3027 fminnm(vform, rd, rn, rm); 3086 // These were handled before the standard FPProcessNaNs() stage.
3028 break;
3029 default:
3030 UNREACHABLE(); 3087 UNREACHABLE();
3031 } 3088 default: UNIMPLEMENTED();
3032 // Explicitly log the register update whilst we have type information. 3089 }
3033 LogVRegister(instr->Rd(), GetPrintRegisterFormatFP(vform)); 3090 }
3034 } 3091
3035 3092
3036 void Simulator::VisitFPDataProcessing3Source(Instruction* instr) { 3093 void Simulator::VisitFPDataProcessing3Source(Instruction* instr) {
3037 AssertSupportedFPCR(); 3094 AssertSupportedFPCR();
3038 3095
3039 unsigned fd = instr->Rd(); 3096 unsigned fd = instr->Rd();
3040 unsigned fn = instr->Rn(); 3097 unsigned fn = instr->Rn();
3041 unsigned fm = instr->Rm(); 3098 unsigned fm = instr->Rm();
3042 unsigned fa = instr->Ra(); 3099 unsigned fa = instr->Ra();
3043 3100
3044 switch (instr->Mask(FPDataProcessing3SourceMask)) { 3101 switch (instr->Mask(FPDataProcessing3SourceMask)) {
3045 // fd = fa +/- (fn * fm) 3102 // fd = fa +/- (fn * fm)
3046 case FMADD_s: 3103 case FMADD_s: set_sreg(fd, FPMulAdd(sreg(fa), sreg(fn), sreg(fm))); break;
3047 set_sreg(fd, FPMulAdd(sreg(fa), sreg(fn), sreg(fm))); 3104 case FMSUB_s: set_sreg(fd, FPMulAdd(sreg(fa), -sreg(fn), sreg(fm))); break;
3048 break; 3105 case FMADD_d: set_dreg(fd, FPMulAdd(dreg(fa), dreg(fn), dreg(fm))); break;
3049 case FMSUB_s: 3106 case FMSUB_d: set_dreg(fd, FPMulAdd(dreg(fa), -dreg(fn), dreg(fm))); break;
3050 set_sreg(fd, FPMulAdd(sreg(fa), -sreg(fn), sreg(fm)));
3051 break;
3052 case FMADD_d:
3053 set_dreg(fd, FPMulAdd(dreg(fa), dreg(fn), dreg(fm)));
3054 break;
3055 case FMSUB_d:
3056 set_dreg(fd, FPMulAdd(dreg(fa), -dreg(fn), dreg(fm)));
3057 break;
3058 // Negated variants of the above. 3107 // Negated variants of the above.
3059 case FNMADD_s: 3108 case FNMADD_s:
3060 set_sreg(fd, FPMulAdd(-sreg(fa), -sreg(fn), sreg(fm))); 3109 set_sreg(fd, FPMulAdd(-sreg(fa), -sreg(fn), sreg(fm)));
3061 break; 3110 break;
3062 case FNMSUB_s: 3111 case FNMSUB_s:
3063 set_sreg(fd, FPMulAdd(-sreg(fa), sreg(fn), sreg(fm))); 3112 set_sreg(fd, FPMulAdd(-sreg(fa), sreg(fn), sreg(fm)));
3064 break; 3113 break;
3065 case FNMADD_d: 3114 case FNMADD_d:
3066 set_dreg(fd, FPMulAdd(-dreg(fa), -dreg(fn), dreg(fm))); 3115 set_dreg(fd, FPMulAdd(-dreg(fa), -dreg(fn), dreg(fm)));
3067 break; 3116 break;
3068 case FNMSUB_d: 3117 case FNMSUB_d:
3069 set_dreg(fd, FPMulAdd(-dreg(fa), dreg(fn), dreg(fm))); 3118 set_dreg(fd, FPMulAdd(-dreg(fa), dreg(fn), dreg(fm)));
3070 break; 3119 break;
3071 default: 3120 default: UNIMPLEMENTED();
3072 UNIMPLEMENTED(); 3121 }
3073 } 3122 }
3074 } 3123
3124
3125 template <typename T>
3126 T Simulator::FPAdd(T op1, T op2) {
3127 // NaNs should be handled elsewhere.
3128 DCHECK(!std::isnan(op1) && !std::isnan(op2));
3129
3130 if (std::isinf(op1) && std::isinf(op2) && (op1 != op2)) {
3131 // inf + -inf returns the default NaN.
3132 return FPDefaultNaN<T>();
3133 } else {
3134 // Other cases should be handled by standard arithmetic.
3135 return op1 + op2;
3136 }
3137 }
3138
3139
3140 template <typename T>
3141 T Simulator::FPDiv(T op1, T op2) {
3142 // NaNs should be handled elsewhere.
3143 DCHECK(!std::isnan(op1) && !std::isnan(op2));
3144
3145 if ((std::isinf(op1) && std::isinf(op2)) || ((op1 == 0.0) && (op2 == 0.0))) {
3146 // inf / inf and 0.0 / 0.0 return the default NaN.
3147 return FPDefaultNaN<T>();
3148 } else {
3149 // Other cases should be handled by standard arithmetic.
3150 return op1 / op2;
3151 }
3152 }
3153
3154
3155 template <typename T>
3156 T Simulator::FPMax(T a, T b) {
3157 // NaNs should be handled elsewhere.
3158 DCHECK(!std::isnan(a) && !std::isnan(b));
3159
3160 if ((a == 0.0) && (b == 0.0) &&
3161 (copysign(1.0, a) != copysign(1.0, b))) {
3162 // a and b are zero, and the sign differs: return +0.0.
3163 return 0.0;
3164 } else {
3165 return (a > b) ? a : b;
3166 }
3167 }
3168
3169
3170 template <typename T>
3171 T Simulator::FPMaxNM(T a, T b) {
3172 if (IsQuietNaN(a) && !IsQuietNaN(b)) {
3173 a = kFP64NegativeInfinity;
3174 } else if (!IsQuietNaN(a) && IsQuietNaN(b)) {
3175 b = kFP64NegativeInfinity;
3176 }
3177
3178 T result = FPProcessNaNs(a, b);
3179 return std::isnan(result) ? result : FPMax(a, b);
3180 }
3181
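FMAXNM (and FMINNM below) treat a single quiet NaN operand as "missing": the quiet NaN is swapped for -infinity (or +infinity for the min variant) so the other operand wins, while signalling NaNs still propagate through FPProcessNaNs. A standalone sketch of the max-number rule for quiet NaNs, ignoring the signalling-NaN and signed-zero details:

#include <cmath>
#include <cstdio>

double MaxNM(double a, double b) {
  if (std::isnan(a) && std::isnan(b)) return a + b;  // still NaN if both are NaN
  if (std::isnan(a)) return b;                       // ignore the NaN operand
  if (std::isnan(b)) return a;
  return a > b ? a : b;
}

int main() {
  std::printf("%g\n", MaxNM(NAN, 2.5));   // prints 2.5
  std::printf("%g\n", MaxNM(-1.0, NAN));  // prints -1
}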
3182 template <typename T>
3183 T Simulator::FPMin(T a, T b) {
3184 // NaNs should be handled elsewhere.
3185 DCHECK(!std::isnan(a) && !std::isnan(b));
3186
3187 if ((a == 0.0) && (b == 0.0) &&
3188 (copysign(1.0, a) != copysign(1.0, b))) {
3189 // a and b are zero, and the sign differs: return -0.0.
3190 return -0.0;
3191 } else {
3192 return (a < b) ? a : b;
3193 }
3194 }
3195
3196
3197 template <typename T>
3198 T Simulator::FPMinNM(T a, T b) {
3199 if (IsQuietNaN(a) && !IsQuietNaN(b)) {
3200 a = kFP64PositiveInfinity;
3201 } else if (!IsQuietNaN(a) && IsQuietNaN(b)) {
3202 b = kFP64PositiveInfinity;
3203 }
3204
3205 T result = FPProcessNaNs(a, b);
3206 return std::isnan(result) ? result : FPMin(a, b);
3207 }
3208
3209
3210 template <typename T>
3211 T Simulator::FPMul(T op1, T op2) {
3212 // NaNs should be handled elsewhere.
3213 DCHECK(!std::isnan(op1) && !std::isnan(op2));
3214
3215 if ((std::isinf(op1) && (op2 == 0.0)) || (std::isinf(op2) && (op1 == 0.0))) {
3216 // inf * 0.0 returns the default NaN.
3217 return FPDefaultNaN<T>();
3218 } else {
3219 // Other cases should be handled by standard arithmetic.
3220 return op1 * op2;
3221 }
3222 }
3223
3224
3225 template<typename T>
3226 T Simulator::FPMulAdd(T a, T op1, T op2) {
3227 T result = FPProcessNaNs3(a, op1, op2);
3228
3229 T sign_a = copysign(1.0, a);
3230 T sign_prod = copysign(1.0, op1) * copysign(1.0, op2);
3231 bool isinf_prod = std::isinf(op1) || std::isinf(op2);
3232 bool operation_generates_nan =
3233 (std::isinf(op1) && (op2 == 0.0)) || // inf * 0.0
3234 (std::isinf(op2) && (op1 == 0.0)) || // 0.0 * inf
3235 (std::isinf(a) && isinf_prod && (sign_a != sign_prod)); // inf - inf
3236
3237 if (std::isnan(result)) {
3238 // Generated NaNs override quiet NaNs propagated from a.
3239 if (operation_generates_nan && IsQuietNaN(a)) {
3240 return FPDefaultNaN<T>();
3241 } else {
3242 return result;
3243 }
3244 }
3245
3246 // If the operation would produce a NaN, return the default NaN.
3247 if (operation_generates_nan) {
3248 return FPDefaultNaN<T>();
3249 }
3250
3251 // Work around broken fma implementations for exact zero results: The sign of
3252 // exact 0.0 results is positive unless both a and op1 * op2 are negative.
3253 if (((op1 == 0.0) || (op2 == 0.0)) && (a == 0.0)) {
3254 return ((sign_a < 0) && (sign_prod < 0)) ? -0.0 : 0.0;
3255 }
3256
3257 result = FusedMultiplyAdd(op1, op2, a);
3258 DCHECK(!std::isnan(result));
3259
3260 // Work around broken fma implementations for rounded zero results: If a is
3261 // 0.0, the sign of the result is the sign of op1 * op2 before rounding.
3262 if ((a == 0.0) && (result == 0.0)) {
3263 return copysign(0.0, sign_prod);
3264 }
3265
3266 return result;
3267 }
3268
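Two of the branches above exist only to work around host fma() quirks with signed zeros: an exactly-zero result is +0.0 unless both the addend and the product are negative, and a result that rounds to zero takes the sign of the product. A tiny sketch of the exact-zero rule on its own (standalone, no host fma involved):

#include <cmath>
#include <cstdio>

// Sketch: sign of an exactly-zero a + op1 * op2 result, per the rule above.
double ExactZeroSign(double a, double op1, double op2) {
  double sign_a = std::copysign(1.0, a);
  double sign_prod = std::copysign(1.0, op1) * std::copysign(1.0, op2);
  return (sign_a < 0 && sign_prod < 0) ? -0.0 : 0.0;
}

int main() {
  std::printf("%g\n", ExactZeroSign(-0.0, 2.0, 0.0));   // prints 0  (+0.0)
  std::printf("%g\n", ExactZeroSign(-0.0, -2.0, 0.0));  // prints -0 (-0.0)
}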
3269
3270 template <typename T>
3271 T Simulator::FPSqrt(T op) {
3272 if (std::isnan(op)) {
3273 return FPProcessNaN(op);
3274 } else if (op < 0.0) {
3275 return FPDefaultNaN<T>();
3276 } else {
3277 lazily_initialize_fast_sqrt(isolate_);
3278 return fast_sqrt(op, isolate_);
3279 }
3280 }
3281
3282
3283 template <typename T>
3284 T Simulator::FPSub(T op1, T op2) {
3285 // NaNs should be handled elsewhere.
3286 DCHECK(!std::isnan(op1) && !std::isnan(op2));
3287
3288 if (std::isinf(op1) && std::isinf(op2) && (op1 == op2)) {
3289 // inf - inf returns the default NaN.
3290 return FPDefaultNaN<T>();
3291 } else {
3292 // Other cases should be handled by standard arithmetic.
3293 return op1 - op2;
3294 }
3295 }
3296
3297
3298 template <typename T>
3299 T Simulator::FPProcessNaN(T op) {
3300 DCHECK(std::isnan(op));
3301 return fpcr().DN() ? FPDefaultNaN<T>() : ToQuietNaN(op);
3302 }
3303
3304
3305 template <typename T>
3306 T Simulator::FPProcessNaNs(T op1, T op2) {
3307 if (IsSignallingNaN(op1)) {
3308 return FPProcessNaN(op1);
3309 } else if (IsSignallingNaN(op2)) {
3310 return FPProcessNaN(op2);
3311 } else if (std::isnan(op1)) {
3312 DCHECK(IsQuietNaN(op1));
3313 return FPProcessNaN(op1);
3314 } else if (std::isnan(op2)) {
3315 DCHECK(IsQuietNaN(op2));
3316 return FPProcessNaN(op2);
3317 } else {
3318 return 0.0;
3319 }
3320 }
3321
3322
3323 template <typename T>
3324 T Simulator::FPProcessNaNs3(T op1, T op2, T op3) {
3325 if (IsSignallingNaN(op1)) {
3326 return FPProcessNaN(op1);
3327 } else if (IsSignallingNaN(op2)) {
3328 return FPProcessNaN(op2);
3329 } else if (IsSignallingNaN(op3)) {
3330 return FPProcessNaN(op3);
3331 } else if (std::isnan(op1)) {
3332 DCHECK(IsQuietNaN(op1));
3333 return FPProcessNaN(op1);
3334 } else if (std::isnan(op2)) {
3335 DCHECK(IsQuietNaN(op2));
3336 return FPProcessNaN(op2);
3337 } else if (std::isnan(op3)) {
3338 DCHECK(IsQuietNaN(op3));
3339 return FPProcessNaN(op3);
3340 } else {
3341 return 0.0;
3342 }
3343 }
3344
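The priority here is fixed: a signalling NaN in any operand beats a quiet NaN, and operands are checked left to right. In IEEE-754 binary formats the two kinds differ only in the top fraction bit (bit 51 for a double), so they can be told apart from the raw bits. A minimal sketch of that classification; the IsSignallingNaN/IsQuietNaN helpers used above are the simulator's own, this only illustrates the bit layout:

#include <cstdint>
#include <cstdio>
#include <cstring>

// Sketch: a double is a quiet NaN if the exponent is all ones, the fraction is
// non-zero, and the top fraction bit (bit 51) is set.
bool IsQuietNaNBits(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  bool is_nan =
      (((bits >> 52) & 0x7FF) == 0x7FF) && ((bits & 0xFFFFFFFFFFFFFull) != 0);
  return is_nan && ((bits & (1ull << 51)) != 0);
}

int main() {
  uint64_t quiet_bits = 0x7FF8000000000001ull;       // quiet NaN
  uint64_t signalling_bits = 0x7FF0000000000001ull;  // signalling NaN
  double q, s;
  std::memcpy(&q, &quiet_bits, sizeof(q));
  std::memcpy(&s, &signalling_bits, sizeof(s));
  std::printf("%d %d\n", IsQuietNaNBits(q), IsQuietNaNBits(s));  // prints 1 0
}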
3075 3345
3076 bool Simulator::FPProcessNaNs(Instruction* instr) { 3346 bool Simulator::FPProcessNaNs(Instruction* instr) {
3077 unsigned fd = instr->Rd(); 3347 unsigned fd = instr->Rd();
3078 unsigned fn = instr->Rn(); 3348 unsigned fn = instr->Rn();
3079 unsigned fm = instr->Rm(); 3349 unsigned fm = instr->Rm();
3080 bool done = false; 3350 bool done = false;
3081 3351
3082 if (instr->Mask(FP64) == FP64) { 3352 if (instr->Mask(FP64) == FP64) {
3083 double result = FPProcessNaNs(dreg(fn), dreg(fm)); 3353 double result = FPProcessNaNs(dreg(fn), dreg(fm));
3084 if (std::isnan(result)) { 3354 if (std::isnan(result)) {
(...skipping 90 matching lines...)
3175 clr_reg_name, clr_reg_value, xreg(31, Reg31IsStackPointer), clr_normal); 3445 clr_reg_name, clr_reg_value, xreg(31, Reg31IsStackPointer), clr_normal);
3176 return true; 3446 return true;
3177 } else if (strcmp(desc, "wcsp") == 0) { 3447 } else if (strcmp(desc, "wcsp") == 0) {
3178 DCHECK(CodeFromName(desc) == static_cast<int>(kSPRegInternalCode)); 3448 DCHECK(CodeFromName(desc) == static_cast<int>(kSPRegInternalCode));
3179 PrintF(stream_, "%s wcsp:%s 0x%08" PRIx32 "%s\n", 3449 PrintF(stream_, "%s wcsp:%s 0x%08" PRIx32 "%s\n",
3180 clr_reg_name, clr_reg_value, wreg(31, Reg31IsStackPointer), clr_normal); 3450 clr_reg_name, clr_reg_value, wreg(31, Reg31IsStackPointer), clr_normal);
3181 return true; 3451 return true;
3182 } 3452 }
3183 3453
3184 int i = CodeFromName(desc); 3454 int i = CodeFromName(desc);
3185 static_assert(kNumberOfRegisters == kNumberOfVRegisters, 3455 STATIC_ASSERT(kNumberOfRegisters == kNumberOfFPRegisters);
3186 "Must be same number of Registers as VRegisters."); 3456 if (i < 0 || static_cast<unsigned>(i) >= kNumberOfFPRegisters) return false;
3187 if (i < 0 || static_cast<unsigned>(i) >= kNumberOfVRegisters) return false;
3188 3457
3189 if (desc[0] == 'v') { 3458 if (desc[0] == 'v') {
3190 PrintF(stream_, "%s %s:%s 0x%016" PRIx64 "%s (%s%s:%s %g%s %s:%s %g%s)\n", 3459 PrintF(stream_, "%s %s:%s 0x%016" PRIx64 "%s (%s%s:%s %g%s %s:%s %g%s)\n",
3191 clr_vreg_name, VRegNameForCode(i), clr_vreg_value, 3460 clr_fpreg_name, VRegNameForCode(i),
3192 bit_cast<uint64_t>(dreg(i)), clr_normal, clr_vreg_name, 3461 clr_fpreg_value, double_to_rawbits(dreg(i)),
3193 DRegNameForCode(i), clr_vreg_value, dreg(i), clr_vreg_name, 3462 clr_normal,
3194 SRegNameForCode(i), clr_vreg_value, sreg(i), clr_normal); 3463 clr_fpreg_name, DRegNameForCode(i),
3464 clr_fpreg_value, dreg(i),
3465 clr_fpreg_name, SRegNameForCode(i),
3466 clr_fpreg_value, sreg(i),
3467 clr_normal);
3195 return true; 3468 return true;
3196 } else if (desc[0] == 'd') { 3469 } else if (desc[0] == 'd') {
3197 PrintF(stream_, "%s %s:%s %g%s\n", clr_vreg_name, DRegNameForCode(i), 3470 PrintF(stream_, "%s %s:%s %g%s\n",
3198 clr_vreg_value, dreg(i), clr_normal); 3471 clr_fpreg_name, DRegNameForCode(i),
3472 clr_fpreg_value, dreg(i),
3473 clr_normal);
3199 return true; 3474 return true;
3200 } else if (desc[0] == 's') { 3475 } else if (desc[0] == 's') {
3201 PrintF(stream_, "%s %s:%s %g%s\n", clr_vreg_name, SRegNameForCode(i), 3476 PrintF(stream_, "%s %s:%s %g%s\n",
3202 clr_vreg_value, sreg(i), clr_normal); 3477 clr_fpreg_name, SRegNameForCode(i),
3478 clr_fpreg_value, sreg(i),
3479 clr_normal);
3203 return true; 3480 return true;
3204 } else if (desc[0] == 'w') { 3481 } else if (desc[0] == 'w') {
3205 PrintF(stream_, "%s %s:%s 0x%08" PRIx32 "%s\n", 3482 PrintF(stream_, "%s %s:%s 0x%08" PRIx32 "%s\n",
3206 clr_reg_name, WRegNameForCode(i), clr_reg_value, wreg(i), clr_normal); 3483 clr_reg_name, WRegNameForCode(i), clr_reg_value, wreg(i), clr_normal);
3207 return true; 3484 return true;
3208 } else { 3485 } else {
3209 // X register names have a wide variety of starting characters, but anything 3486 // X register names have a wide variety of starting characters, but anything
3210 // else will be an X register. 3487 // else will be an X register.
3211 PrintF(stream_, "%s %s:%s 0x%016" PRIx64 "%s\n", 3488 PrintF(stream_, "%s %s:%s 0x%016" PRIx64 "%s\n",
3212 clr_reg_name, XRegNameForCode(i), clr_reg_value, xreg(i), clr_normal); 3489 clr_reg_name, XRegNameForCode(i), clr_reg_value, xreg(i), clr_normal);
(...skipping 105 matching lines...)
3318 // Disassemble. 3595 // Disassemble.
3319 PrintInstructionsAt(reinterpret_cast<Instruction*>(address), 3596 PrintInstructionsAt(reinterpret_cast<Instruction*>(address),
3320 n_of_instrs_to_disasm); 3597 n_of_instrs_to_disasm);
3321 PrintF("\n"); 3598 PrintF("\n");
3322 3599
3323 // print / p ------------------------------------------------------------- 3600 // print / p -------------------------------------------------------------
3324 } else if ((strcmp(cmd, "print") == 0) || (strcmp(cmd, "p") == 0)) { 3601 } else if ((strcmp(cmd, "print") == 0) || (strcmp(cmd, "p") == 0)) {
3325 if (argc == 2) { 3602 if (argc == 2) {
3326 if (strcmp(arg1, "all") == 0) { 3603 if (strcmp(arg1, "all") == 0) {
3327 PrintRegisters(); 3604 PrintRegisters();
3328 PrintVRegisters(); 3605 PrintFPRegisters();
3329 } else { 3606 } else {
3330 if (!PrintValue(arg1)) { 3607 if (!PrintValue(arg1)) {
3331 PrintF("%s unrecognized\n", arg1); 3608 PrintF("%s unrecognized\n", arg1);
3332 } 3609 }
3333 } 3610 }
3334 } else { 3611 } else {
3335 PrintF( 3612 PrintF(
3336 "print <register>\n" 3613 "print <register>\n"
3337 " Print the content of a register. (alias 'p')\n" 3614 " Print the content of a register. (alias 'p')\n"
3338 " 'print all' will print all registers.\n" 3615 " 'print all' will print all registers.\n"
(...skipping 205 matching lines...)
3544 clr_normal); 3821 clr_normal);
3545 } 3822 }
3546 } 3823 }
3547 3824
3548 // Other options. 3825 // Other options.
3549 switch (parameters & kDebuggerTracingDirectivesMask) { 3826 switch (parameters & kDebuggerTracingDirectivesMask) {
3550 case TRACE_ENABLE: 3827 case TRACE_ENABLE:
3551 set_log_parameters(log_parameters() | parameters); 3828 set_log_parameters(log_parameters() | parameters);
3552 if (parameters & LOG_SYS_REGS) { PrintSystemRegisters(); } 3829 if (parameters & LOG_SYS_REGS) { PrintSystemRegisters(); }
3553 if (parameters & LOG_REGS) { PrintRegisters(); } 3830 if (parameters & LOG_REGS) { PrintRegisters(); }
3554 if (parameters & LOG_VREGS) { 3831 if (parameters & LOG_FP_REGS) { PrintFPRegisters(); }
3555 PrintVRegisters();
3556 }
3557 break; 3832 break;
3558 case TRACE_DISABLE: 3833 case TRACE_DISABLE:
3559 set_log_parameters(log_parameters() & ~parameters); 3834 set_log_parameters(log_parameters() & ~parameters);
3560 break; 3835 break;
3561 case TRACE_OVERRIDE: 3836 case TRACE_OVERRIDE:
3562 set_log_parameters(parameters); 3837 set_log_parameters(parameters);
3563 break; 3838 break;
3564 default: 3839 default:
3565 // We don't support a one-shot LOG_DISASM. 3840 // We don't support a one-shot LOG_DISASM.
3566 DCHECK((parameters & LOG_DISASM) == 0); 3841 DCHECK((parameters & LOG_DISASM) == 0);
3567 // Don't print information that is already being traced. 3842 // Don't print information that is already being traced.
3568 parameters &= ~log_parameters(); 3843 parameters &= ~log_parameters();
3569 // Print the requested information. 3844 // Print the requested information.
3570 if (parameters & LOG_SYS_REGS) PrintSystemRegisters(); 3845 if (parameters & LOG_SYS_REGS) PrintSystemRegisters();
3571 if (parameters & LOG_REGS) PrintRegisters(); 3846 if (parameters & LOG_REGS) PrintRegisters();
3572 if (parameters & LOG_VREGS) PrintVRegisters(); 3847 if (parameters & LOG_FP_REGS) PrintFPRegisters();
3573 } 3848 }
3574 3849
3575 // The stop parameters are inlined in the code. Skip them: 3850 // The stop parameters are inlined in the code. Skip them:
3576 // - Skip to the end of the message string. 3851 // - Skip to the end of the message string.
3577 size_t size = kDebugMessageOffset + strlen(message) + 1; 3852 size_t size = kDebugMessageOffset + strlen(message) + 1;
3578 pc_ = pc_->InstructionAtOffset(RoundUp(size, kInstructionSize)); 3853 pc_ = pc_->InstructionAtOffset(RoundUp(size, kInstructionSize));
3579 // - Verify that the unreachable marker is present. 3854 // - Verify that the unreachable marker is present.
3580 DCHECK(pc_->Mask(ExceptionMask) == HLT); 3855 DCHECK(pc_->Mask(ExceptionMask) == HLT);
3581 DCHECK(pc_->ImmException() == kImmExceptionIsUnreachable); 3856 DCHECK(pc_->ImmException() == kImmExceptionIsUnreachable);
3582 // - Skip past the unreachable marker. 3857 // - Skip past the unreachable marker.
(...skipping 16 matching lines...)
3599 base::OS::DebugBreak(); 3874 base::OS::DebugBreak();
3600 } 3875 }
3601 break; 3876 break;
3602 } 3877 }
3603 3878
3604 default: 3879 default:
3605 UNIMPLEMENTED(); 3880 UNIMPLEMENTED();
3606 } 3881 }
3607 } 3882 }
3608 3883
3609 void Simulator::VisitNEON2RegMisc(Instruction* instr) {
3610 NEONFormatDecoder nfd(instr);
3611 VectorFormat vf = nfd.GetVectorFormat();
3612
3613 // Format mapping for "long pair" instructions, [su]addlp, [su]adalp.
3614 static const NEONFormatMap map_lp = {
3615 {23, 22, 30}, {NF_4H, NF_8H, NF_2S, NF_4S, NF_1D, NF_2D}};
3616 VectorFormat vf_lp = nfd.GetVectorFormat(&map_lp);
3617
3618 static const NEONFormatMap map_fcvtl = {{22}, {NF_4S, NF_2D}};
3619 VectorFormat vf_fcvtl = nfd.GetVectorFormat(&map_fcvtl);
3620
3621 static const NEONFormatMap map_fcvtn = {{22, 30},
3622 {NF_4H, NF_8H, NF_2S, NF_4S}};
3623 VectorFormat vf_fcvtn = nfd.GetVectorFormat(&map_fcvtn);
3624
3625 SimVRegister& rd = vreg(instr->Rd());
3626 SimVRegister& rn = vreg(instr->Rn());
3627
3628 if (instr->Mask(NEON2RegMiscOpcode) <= NEON_NEG_opcode) {
3629 // These instructions all use a two bit size field, except NOT and RBIT,
3630 // which use the field to encode the operation.
3631 switch (instr->Mask(NEON2RegMiscMask)) {
3632 case NEON_REV64:
3633 rev64(vf, rd, rn);
3634 break;
3635 case NEON_REV32:
3636 rev32(vf, rd, rn);
3637 break;
3638 case NEON_REV16:
3639 rev16(vf, rd, rn);
3640 break;
3641 case NEON_SUQADD:
3642 suqadd(vf, rd, rn);
3643 break;
3644 case NEON_USQADD:
3645 usqadd(vf, rd, rn);
3646 break;
3647 case NEON_CLS:
3648 cls(vf, rd, rn);
3649 break;
3650 case NEON_CLZ:
3651 clz(vf, rd, rn);
3652 break;
3653 case NEON_CNT:
3654 cnt(vf, rd, rn);
3655 break;
3656 case NEON_SQABS:
3657 abs(vf, rd, rn).SignedSaturate(vf);
3658 break;
3659 case NEON_SQNEG:
3660 neg(vf, rd, rn).SignedSaturate(vf);
3661 break;
3662 case NEON_CMGT_zero:
3663 cmp(vf, rd, rn, 0, gt);
3664 break;
3665 case NEON_CMGE_zero:
3666 cmp(vf, rd, rn, 0, ge);
3667 break;
3668 case NEON_CMEQ_zero:
3669 cmp(vf, rd, rn, 0, eq);
3670 break;
3671 case NEON_CMLE_zero:
3672 cmp(vf, rd, rn, 0, le);
3673 break;
3674 case NEON_CMLT_zero:
3675 cmp(vf, rd, rn, 0, lt);
3676 break;
3677 case NEON_ABS:
3678 abs(vf, rd, rn);
3679 break;
3680 case NEON_NEG:
3681 neg(vf, rd, rn);
3682 break;
3683 case NEON_SADDLP:
3684 saddlp(vf_lp, rd, rn);
3685 break;
3686 case NEON_UADDLP:
3687 uaddlp(vf_lp, rd, rn);
3688 break;
3689 case NEON_SADALP:
3690 sadalp(vf_lp, rd, rn);
3691 break;
3692 case NEON_UADALP:
3693 uadalp(vf_lp, rd, rn);
3694 break;
3695 case NEON_RBIT_NOT:
3696 vf = nfd.GetVectorFormat(nfd.LogicalFormatMap());
3697 switch (instr->FPType()) {
3698 case 0:
3699 not_(vf, rd, rn);
3700 break;
3701 case 1:
3702 rbit(vf, rd, rn);
3703 break;
3704 default:
3705 UNIMPLEMENTED();
3706 }
3707 break;
3708 }
3709 } else {
3710 VectorFormat fpf = nfd.GetVectorFormat(nfd.FPFormatMap());
3711 FPRounding fpcr_rounding = static_cast<FPRounding>(fpcr().RMode());
3712 bool inexact_exception = false;
3713
3714 // These instructions all use a one bit size field, except XTN, SQXTUN,
3715 // SHLL, SQXTN and UQXTN, which use a two bit size field.
3716 switch (instr->Mask(NEON2RegMiscFPMask)) {
3717 case NEON_FABS:
3718 fabs_(fpf, rd, rn);
3719 return;
3720 case NEON_FNEG:
3721 fneg(fpf, rd, rn);
3722 return;
3723 case NEON_FSQRT:
3724 fsqrt(fpf, rd, rn);
3725 return;
3726 case NEON_FCVTL:
3727 if (instr->Mask(NEON_Q)) {
3728 fcvtl2(vf_fcvtl, rd, rn);
3729 } else {
3730 fcvtl(vf_fcvtl, rd, rn);
3731 }
3732 return;
3733 case NEON_FCVTN:
3734 if (instr->Mask(NEON_Q)) {
3735 fcvtn2(vf_fcvtn, rd, rn);
3736 } else {
3737 fcvtn(vf_fcvtn, rd, rn);
3738 }
3739 return;
3740 case NEON_FCVTXN:
3741 if (instr->Mask(NEON_Q)) {
3742 fcvtxn2(vf_fcvtn, rd, rn);
3743 } else {
3744 fcvtxn(vf_fcvtn, rd, rn);
3745 }
3746 return;
3747
3748 // The following instructions break from the switch statement, rather
3749 // than return.
3750 case NEON_FRINTI:
3751 break; // Use FPCR rounding mode.
3752 case NEON_FRINTX:
3753 inexact_exception = true;
3754 break;
3755 case NEON_FRINTA:
3756 fpcr_rounding = FPTieAway;
3757 break;
3758 case NEON_FRINTM:
3759 fpcr_rounding = FPNegativeInfinity;
3760 break;
3761 case NEON_FRINTN:
3762 fpcr_rounding = FPTieEven;
3763 break;
3764 case NEON_FRINTP:
3765 fpcr_rounding = FPPositiveInfinity;
3766 break;
3767 case NEON_FRINTZ:
3768 fpcr_rounding = FPZero;
3769 break;
3770
3771 // The remaining cases return to the caller.
3772 case NEON_FCVTNS:
3773 fcvts(fpf, rd, rn, FPTieEven);
3774 return;
3775 case NEON_FCVTNU:
3776 fcvtu(fpf, rd, rn, FPTieEven);
3777 return;
3778 case NEON_FCVTPS:
3779 fcvts(fpf, rd, rn, FPPositiveInfinity);
3780 return;
3781 case NEON_FCVTPU:
3782 fcvtu(fpf, rd, rn, FPPositiveInfinity);
3783 return;
3784 case NEON_FCVTMS:
3785 fcvts(fpf, rd, rn, FPNegativeInfinity);
3786 return;
3787 case NEON_FCVTMU:
3788 fcvtu(fpf, rd, rn, FPNegativeInfinity);
3789 return;
3790 case NEON_FCVTZS:
3791 fcvts(fpf, rd, rn, FPZero);
3792 return;
3793 case NEON_FCVTZU:
3794 fcvtu(fpf, rd, rn, FPZero);
3795 return;
3796 case NEON_FCVTAS:
3797 fcvts(fpf, rd, rn, FPTieAway);
3798 return;
3799 case NEON_FCVTAU:
3800 fcvtu(fpf, rd, rn, FPTieAway);
3801 return;
3802 case NEON_SCVTF:
3803 scvtf(fpf, rd, rn, 0, fpcr_rounding);
3804 return;
3805 case NEON_UCVTF:
3806 ucvtf(fpf, rd, rn, 0, fpcr_rounding);
3807 return;
3808 case NEON_URSQRTE:
3809 ursqrte(fpf, rd, rn);
3810 return;
3811 case NEON_URECPE:
3812 urecpe(fpf, rd, rn);
3813 return;
3814 case NEON_FRSQRTE:
3815 frsqrte(fpf, rd, rn);
3816 return;
3817 case NEON_FRECPE:
3818 frecpe(fpf, rd, rn, fpcr_rounding);
3819 return;
3820 case NEON_FCMGT_zero:
3821 fcmp_zero(fpf, rd, rn, gt);
3822 return;
3823 case NEON_FCMGE_zero:
3824 fcmp_zero(fpf, rd, rn, ge);
3825 return;
3826 case NEON_FCMEQ_zero:
3827 fcmp_zero(fpf, rd, rn, eq);
3828 return;
3829 case NEON_FCMLE_zero:
3830 fcmp_zero(fpf, rd, rn, le);
3831 return;
3832 case NEON_FCMLT_zero:
3833 fcmp_zero(fpf, rd, rn, lt);
3834 return;
3835 default:
3836 if ((NEON_XTN_opcode <= instr->Mask(NEON2RegMiscOpcode)) &&
3837 (instr->Mask(NEON2RegMiscOpcode) <= NEON_UQXTN_opcode)) {
3838 switch (instr->Mask(NEON2RegMiscMask)) {
3839 case NEON_XTN:
3840 xtn(vf, rd, rn);
3841 return;
3842 case NEON_SQXTN:
3843 sqxtn(vf, rd, rn);
3844 return;
3845 case NEON_UQXTN:
3846 uqxtn(vf, rd, rn);
3847 return;
3848 case NEON_SQXTUN:
3849 sqxtun(vf, rd, rn);
3850 return;
3851 case NEON_SHLL:
3852 vf = nfd.GetVectorFormat(nfd.LongIntegerFormatMap());
3853 if (instr->Mask(NEON_Q)) {
3854 shll2(vf, rd, rn);
3855 } else {
3856 shll(vf, rd, rn);
3857 }
3858 return;
3859 default:
3860 UNIMPLEMENTED();
3861 }
3862 } else {
3863 UNIMPLEMENTED();
3864 }
3865 }
3866
3867 // Only FRINT* instructions fall through the switch above.
3868 frint(fpf, rd, rn, fpcr_rounding, inexact_exception);
3869 }
3870 }
3871
3872 void Simulator::VisitNEON3Same(Instruction* instr) {
3873 NEONFormatDecoder nfd(instr);
3874 SimVRegister& rd = vreg(instr->Rd());
3875 SimVRegister& rn = vreg(instr->Rn());
3876 SimVRegister& rm = vreg(instr->Rm());
3877
3878 if (instr->Mask(NEON3SameLogicalFMask) == NEON3SameLogicalFixed) {
3879 VectorFormat vf = nfd.GetVectorFormat(nfd.LogicalFormatMap());
3880 switch (instr->Mask(NEON3SameLogicalMask)) {
3881 case NEON_AND:
3882 and_(vf, rd, rn, rm);
3883 break;
3884 case NEON_ORR:
3885 orr(vf, rd, rn, rm);
3886 break;
3887 case NEON_ORN:
3888 orn(vf, rd, rn, rm);
3889 break;
3890 case NEON_EOR:
3891 eor(vf, rd, rn, rm);
3892 break;
3893 case NEON_BIC:
3894 bic(vf, rd, rn, rm);
3895 break;
3896 case NEON_BIF:
3897 bif(vf, rd, rn, rm);
3898 break;
3899 case NEON_BIT:
3900 bit(vf, rd, rn, rm);
3901 break;
3902 case NEON_BSL:
3903 bsl(vf, rd, rn, rm);
3904 break;
3905 default:
3906 UNIMPLEMENTED();
3907 }
3908 } else if (instr->Mask(NEON3SameFPFMask) == NEON3SameFPFixed) {
3909 VectorFormat vf = nfd.GetVectorFormat(nfd.FPFormatMap());
3910 switch (instr->Mask(NEON3SameFPMask)) {
3911 case NEON_FADD:
3912 fadd(vf, rd, rn, rm);
3913 break;
3914 case NEON_FSUB:
3915 fsub(vf, rd, rn, rm);
3916 break;
3917 case NEON_FMUL:
3918 fmul(vf, rd, rn, rm);
3919 break;
3920 case NEON_FDIV:
3921 fdiv(vf, rd, rn, rm);
3922 break;
3923 case NEON_FMAX:
3924 fmax(vf, rd, rn, rm);
3925 break;
3926 case NEON_FMIN:
3927 fmin(vf, rd, rn, rm);
3928 break;
3929 case NEON_FMAXNM:
3930 fmaxnm(vf, rd, rn, rm);
3931 break;
3932 case NEON_FMINNM:
3933 fminnm(vf, rd, rn, rm);
3934 break;
3935 case NEON_FMLA:
3936 fmla(vf, rd, rn, rm);
3937 break;
3938 case NEON_FMLS:
3939 fmls(vf, rd, rn, rm);
3940 break;
3941 case NEON_FMULX:
3942 fmulx(vf, rd, rn, rm);
3943 break;
3944 case NEON_FACGE:
3945 fabscmp(vf, rd, rn, rm, ge);
3946 break;
3947 case NEON_FACGT:
3948 fabscmp(vf, rd, rn, rm, gt);
3949 break;
3950 case NEON_FCMEQ:
3951 fcmp(vf, rd, rn, rm, eq);
3952 break;
3953 case NEON_FCMGE:
3954 fcmp(vf, rd, rn, rm, ge);
3955 break;
3956 case NEON_FCMGT:
3957 fcmp(vf, rd, rn, rm, gt);
3958 break;
3959 case NEON_FRECPS:
3960 frecps(vf, rd, rn, rm);
3961 break;
3962 case NEON_FRSQRTS:
3963 frsqrts(vf, rd, rn, rm);
3964 break;
3965 case NEON_FABD:
3966 fabd(vf, rd, rn, rm);
3967 break;
3968 case NEON_FADDP:
3969 faddp(vf, rd, rn, rm);
3970 break;
3971 case NEON_FMAXP:
3972 fmaxp(vf, rd, rn, rm);
3973 break;
3974 case NEON_FMAXNMP:
3975 fmaxnmp(vf, rd, rn, rm);
3976 break;
3977 case NEON_FMINP:
3978 fminp(vf, rd, rn, rm);
3979 break;
3980 case NEON_FMINNMP:
3981 fminnmp(vf, rd, rn, rm);
3982 break;
3983 default:
3984 UNIMPLEMENTED();
3985 }
3986 } else {
3987 VectorFormat vf = nfd.GetVectorFormat();
3988 switch (instr->Mask(NEON3SameMask)) {
3989 case NEON_ADD:
3990 add(vf, rd, rn, rm);
3991 break;
3992 case NEON_ADDP:
3993 addp(vf, rd, rn, rm);
3994 break;
3995 case NEON_CMEQ:
3996 cmp(vf, rd, rn, rm, eq);
3997 break;
3998 case NEON_CMGE:
3999 cmp(vf, rd, rn, rm, ge);
4000 break;
4001 case NEON_CMGT:
4002 cmp(vf, rd, rn, rm, gt);
4003 break;
4004 case NEON_CMHI:
4005 cmp(vf, rd, rn, rm, hi);
4006 break;
4007 case NEON_CMHS:
4008 cmp(vf, rd, rn, rm, hs);
4009 break;
4010 case NEON_CMTST:
4011 cmptst(vf, rd, rn, rm);
4012 break;
4013 case NEON_MLS:
4014 mls(vf, rd, rn, rm);
4015 break;
4016 case NEON_MLA:
4017 mla(vf, rd, rn, rm);
4018 break;
4019 case NEON_MUL:
4020 mul(vf, rd, rn, rm);
4021 break;
4022 case NEON_PMUL:
4023 pmul(vf, rd, rn, rm);
4024 break;
4025 case NEON_SMAX:
4026 smax(vf, rd, rn, rm);
4027 break;
4028 case NEON_SMAXP:
4029 smaxp(vf, rd, rn, rm);
4030 break;
4031 case NEON_SMIN:
4032 smin(vf, rd, rn, rm);
4033 break;
4034 case NEON_SMINP:
4035 sminp(vf, rd, rn, rm);
4036 break;
4037 case NEON_SUB:
4038 sub(vf, rd, rn, rm);
4039 break;
4040 case NEON_UMAX:
4041 umax(vf, rd, rn, rm);
4042 break;
4043 case NEON_UMAXP:
4044 umaxp(vf, rd, rn, rm);
4045 break;
4046 case NEON_UMIN:
4047 umin(vf, rd, rn, rm);
4048 break;
4049 case NEON_UMINP:
4050 uminp(vf, rd, rn, rm);
4051 break;
4052 case NEON_SSHL:
4053 sshl(vf, rd, rn, rm);
4054 break;
4055 case NEON_USHL:
4056 ushl(vf, rd, rn, rm);
4057 break;
4058 case NEON_SABD:
4059 AbsDiff(vf, rd, rn, rm, true);
4060 break;
4061 case NEON_UABD:
4062 AbsDiff(vf, rd, rn, rm, false);
4063 break;
4064 case NEON_SABA:
4065 saba(vf, rd, rn, rm);
4066 break;
4067 case NEON_UABA:
4068 uaba(vf, rd, rn, rm);
4069 break;
4070 case NEON_UQADD:
4071 add(vf, rd, rn, rm).UnsignedSaturate(vf);
4072 break;
4073 case NEON_SQADD:
4074 add(vf, rd, rn, rm).SignedSaturate(vf);
4075 break;
4076 case NEON_UQSUB:
4077 sub(vf, rd, rn, rm).UnsignedSaturate(vf);
4078 break;
4079 case NEON_SQSUB:
4080 sub(vf, rd, rn, rm).SignedSaturate(vf);
4081 break;
4082 case NEON_SQDMULH:
4083 sqdmulh(vf, rd, rn, rm);
4084 break;
4085 case NEON_SQRDMULH:
4086 sqrdmulh(vf, rd, rn, rm);
4087 break;
4088 case NEON_UQSHL:
4089 ushl(vf, rd, rn, rm).UnsignedSaturate(vf);
4090 break;
4091 case NEON_SQSHL:
4092 sshl(vf, rd, rn, rm).SignedSaturate(vf);
4093 break;
4094 case NEON_URSHL:
4095 ushl(vf, rd, rn, rm).Round(vf);
4096 break;
4097 case NEON_SRSHL:
4098 sshl(vf, rd, rn, rm).Round(vf);
4099 break;
4100 case NEON_UQRSHL:
4101 ushl(vf, rd, rn, rm).Round(vf).UnsignedSaturate(vf);
4102 break;
4103 case NEON_SQRSHL:
4104 sshl(vf, rd, rn, rm).Round(vf).SignedSaturate(vf);
4105 break;
4106 case NEON_UHADD:
4107 add(vf, rd, rn, rm).Uhalve(vf);
4108 break;
4109 case NEON_URHADD:
4110 add(vf, rd, rn, rm).Uhalve(vf).Round(vf);
4111 break;
4112 case NEON_SHADD:
4113 add(vf, rd, rn, rm).Halve(vf);
4114 break;
4115 case NEON_SRHADD:
4116 add(vf, rd, rn, rm).Halve(vf).Round(vf);
4117 break;
4118 case NEON_UHSUB:
4119 sub(vf, rd, rn, rm).Uhalve(vf);
4120 break;
4121 case NEON_SHSUB:
4122 sub(vf, rd, rn, rm).Halve(vf);
4123 break;
4124 default:
4125 UNIMPLEMENTED();
4126 }
4127 }
4128 }
4129
4130 void Simulator::VisitNEON3Different(Instruction* instr) {
4131 NEONFormatDecoder nfd(instr);
4132 VectorFormat vf = nfd.GetVectorFormat();
4133 VectorFormat vf_l = nfd.GetVectorFormat(nfd.LongIntegerFormatMap());
4134
4135 SimVRegister& rd = vreg(instr->Rd());
4136 SimVRegister& rn = vreg(instr->Rn());
4137 SimVRegister& rm = vreg(instr->Rm());
4138
4139 switch (instr->Mask(NEON3DifferentMask)) {
4140 case NEON_PMULL:
4141 pmull(vf_l, rd, rn, rm);
4142 break;
4143 case NEON_PMULL2:
4144 pmull2(vf_l, rd, rn, rm);
4145 break;
4146 case NEON_UADDL:
4147 uaddl(vf_l, rd, rn, rm);
4148 break;
4149 case NEON_UADDL2:
4150 uaddl2(vf_l, rd, rn, rm);
4151 break;
4152 case NEON_SADDL:
4153 saddl(vf_l, rd, rn, rm);
4154 break;
4155 case NEON_SADDL2:
4156 saddl2(vf_l, rd, rn, rm);
4157 break;
4158 case NEON_USUBL:
4159 usubl(vf_l, rd, rn, rm);
4160 break;
4161 case NEON_USUBL2:
4162 usubl2(vf_l, rd, rn, rm);
4163 break;
4164 case NEON_SSUBL:
4165 ssubl(vf_l, rd, rn, rm);
4166 break;
4167 case NEON_SSUBL2:
4168 ssubl2(vf_l, rd, rn, rm);
4169 break;
4170 case NEON_SABAL:
4171 sabal(vf_l, rd, rn, rm);
4172 break;
4173 case NEON_SABAL2:
4174 sabal2(vf_l, rd, rn, rm);
4175 break;
4176 case NEON_UABAL:
4177 uabal(vf_l, rd, rn, rm);
4178 break;
4179 case NEON_UABAL2:
4180 uabal2(vf_l, rd, rn, rm);
4181 break;
4182 case NEON_SABDL:
4183 sabdl(vf_l, rd, rn, rm);
4184 break;
4185 case NEON_SABDL2:
4186 sabdl2(vf_l, rd, rn, rm);
4187 break;
4188 case NEON_UABDL:
4189 uabdl(vf_l, rd, rn, rm);
4190 break;
4191 case NEON_UABDL2:
4192 uabdl2(vf_l, rd, rn, rm);
4193 break;
4194 case NEON_SMLAL:
4195 smlal(vf_l, rd, rn, rm);
4196 break;
4197 case NEON_SMLAL2:
4198 smlal2(vf_l, rd, rn, rm);
4199 break;
4200 case NEON_UMLAL:
4201 umlal(vf_l, rd, rn, rm);
4202 break;
4203 case NEON_UMLAL2:
4204 umlal2(vf_l, rd, rn, rm);
4205 break;
4206 case NEON_SMLSL:
4207 smlsl(vf_l, rd, rn, rm);
4208 break;
4209 case NEON_SMLSL2:
4210 smlsl2(vf_l, rd, rn, rm);
4211 break;
4212 case NEON_UMLSL:
4213 umlsl(vf_l, rd, rn, rm);
4214 break;
4215 case NEON_UMLSL2:
4216 umlsl2(vf_l, rd, rn, rm);
4217 break;
4218 case NEON_SMULL:
4219 smull(vf_l, rd, rn, rm);
4220 break;
4221 case NEON_SMULL2:
4222 smull2(vf_l, rd, rn, rm);
4223 break;
4224 case NEON_UMULL:
4225 umull(vf_l, rd, rn, rm);
4226 break;
4227 case NEON_UMULL2:
4228 umull2(vf_l, rd, rn, rm);
4229 break;
4230 case NEON_SQDMLAL:
4231 sqdmlal(vf_l, rd, rn, rm);
4232 break;
4233 case NEON_SQDMLAL2:
4234 sqdmlal2(vf_l, rd, rn, rm);
4235 break;
4236 case NEON_SQDMLSL:
4237 sqdmlsl(vf_l, rd, rn, rm);
4238 break;
4239 case NEON_SQDMLSL2:
4240 sqdmlsl2(vf_l, rd, rn, rm);
4241 break;
4242 case NEON_SQDMULL:
4243 sqdmull(vf_l, rd, rn, rm);
4244 break;
4245 case NEON_SQDMULL2:
4246 sqdmull2(vf_l, rd, rn, rm);
4247 break;
4248 case NEON_UADDW:
4249 uaddw(vf_l, rd, rn, rm);
4250 break;
4251 case NEON_UADDW2:
4252 uaddw2(vf_l, rd, rn, rm);
4253 break;
4254 case NEON_SADDW:
4255 saddw(vf_l, rd, rn, rm);
4256 break;
4257 case NEON_SADDW2:
4258 saddw2(vf_l, rd, rn, rm);
4259 break;
4260 case NEON_USUBW:
4261 usubw(vf_l, rd, rn, rm);
4262 break;
4263 case NEON_USUBW2:
4264 usubw2(vf_l, rd, rn, rm);
4265 break;
4266 case NEON_SSUBW:
4267 ssubw(vf_l, rd, rn, rm);
4268 break;
4269 case NEON_SSUBW2:
4270 ssubw2(vf_l, rd, rn, rm);
4271 break;
4272 case NEON_ADDHN:
4273 addhn(vf, rd, rn, rm);
4274 break;
4275 case NEON_ADDHN2:
4276 addhn2(vf, rd, rn, rm);
4277 break;
4278 case NEON_RADDHN:
4279 raddhn(vf, rd, rn, rm);
4280 break;
4281 case NEON_RADDHN2:
4282 raddhn2(vf, rd, rn, rm);
4283 break;
4284 case NEON_SUBHN:
4285 subhn(vf, rd, rn, rm);
4286 break;
4287 case NEON_SUBHN2:
4288 subhn2(vf, rd, rn, rm);
4289 break;
4290 case NEON_RSUBHN:
4291 rsubhn(vf, rd, rn, rm);
4292 break;
4293 case NEON_RSUBHN2:
4294 rsubhn2(vf, rd, rn, rm);
4295 break;
4296 default:
4297 UNIMPLEMENTED();
4298 }
4299 }
4300
4301 void Simulator::VisitNEONAcrossLanes(Instruction* instr) {
4302 NEONFormatDecoder nfd(instr);
4303
4304 SimVRegister& rd = vreg(instr->Rd());
4305 SimVRegister& rn = vreg(instr->Rn());
4306
4307 // The input operand's VectorFormat is passed for these instructions.
4308 if (instr->Mask(NEONAcrossLanesFPFMask) == NEONAcrossLanesFPFixed) {
4309 VectorFormat vf = nfd.GetVectorFormat(nfd.FPFormatMap());
4310
4311 switch (instr->Mask(NEONAcrossLanesFPMask)) {
4312 case NEON_FMAXV:
4313 fmaxv(vf, rd, rn);
4314 break;
4315 case NEON_FMINV:
4316 fminv(vf, rd, rn);
4317 break;
4318 case NEON_FMAXNMV:
4319 fmaxnmv(vf, rd, rn);
4320 break;
4321 case NEON_FMINNMV:
4322 fminnmv(vf, rd, rn);
4323 break;
4324 default:
4325 UNIMPLEMENTED();
4326 }
4327 } else {
4328 VectorFormat vf = nfd.GetVectorFormat();
4329
4330 switch (instr->Mask(NEONAcrossLanesMask)) {
4331 case NEON_ADDV:
4332 addv(vf, rd, rn);
4333 break;
4334 case NEON_SMAXV:
4335 smaxv(vf, rd, rn);
4336 break;
4337 case NEON_SMINV:
4338 sminv(vf, rd, rn);
4339 break;
4340 case NEON_UMAXV:
4341 umaxv(vf, rd, rn);
4342 break;
4343 case NEON_UMINV:
4344 uminv(vf, rd, rn);
4345 break;
4346 case NEON_SADDLV:
4347 saddlv(vf, rd, rn);
4348 break;
4349 case NEON_UADDLV:
4350 uaddlv(vf, rd, rn);
4351 break;
4352 default:
4353 UNIMPLEMENTED();
4354 }
4355 }
4356 }
4357
4358 void Simulator::VisitNEONByIndexedElement(Instruction* instr) {
4359 NEONFormatDecoder nfd(instr);
4360 VectorFormat vf_r = nfd.GetVectorFormat();
4361 VectorFormat vf = nfd.GetVectorFormat(nfd.LongIntegerFormatMap());
4362
4363 SimVRegister& rd = vreg(instr->Rd());
4364 SimVRegister& rn = vreg(instr->Rn());
4365
4366 ByElementOp Op = NULL;
4367
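  // The lane index comes from the H and L bits; for 16-bit lanes the M bit
  // extends the index and Rm is limited to V0-V15, hence the 0xf mask.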
4368 int rm_reg = instr->Rm();
4369 int index = (instr->NEONH() << 1) | instr->NEONL();
4370 if (instr->NEONSize() == 1) {
4371 rm_reg &= 0xf;
4372 index = (index << 1) | instr->NEONM();
4373 }
4374
4375 switch (instr->Mask(NEONByIndexedElementMask)) {
4376 case NEON_MUL_byelement:
4377 Op = &Simulator::mul;
4378 vf = vf_r;
4379 break;
4380 case NEON_MLA_byelement:
4381 Op = &Simulator::mla;
4382 vf = vf_r;
4383 break;
4384 case NEON_MLS_byelement:
4385 Op = &Simulator::mls;
4386 vf = vf_r;
4387 break;
4388 case NEON_SQDMULH_byelement:
4389 Op = &Simulator::sqdmulh;
4390 vf = vf_r;
4391 break;
4392 case NEON_SQRDMULH_byelement:
4393 Op = &Simulator::sqrdmulh;
4394 vf = vf_r;
4395 break;
4396 case NEON_SMULL_byelement:
4397 if (instr->Mask(NEON_Q)) {
4398 Op = &Simulator::smull2;
4399 } else {
4400 Op = &Simulator::smull;
4401 }
4402 break;
4403 case NEON_UMULL_byelement:
4404 if (instr->Mask(NEON_Q)) {
4405 Op = &Simulator::umull2;
4406 } else {
4407 Op = &Simulator::umull;
4408 }
4409 break;
4410 case NEON_SMLAL_byelement:
4411 if (instr->Mask(NEON_Q)) {
4412 Op = &Simulator::smlal2;
4413 } else {
4414 Op = &Simulator::smlal;
4415 }
4416 break;
4417 case NEON_UMLAL_byelement:
4418 if (instr->Mask(NEON_Q)) {
4419 Op = &Simulator::umlal2;
4420 } else {
4421 Op = &Simulator::umlal;
4422 }
4423 break;
4424 case NEON_SMLSL_byelement:
4425 if (instr->Mask(NEON_Q)) {
4426 Op = &Simulator::smlsl2;
4427 } else {
4428 Op = &Simulator::smlsl;
4429 }
4430 break;
4431 case NEON_UMLSL_byelement:
4432 if (instr->Mask(NEON_Q)) {
4433 Op = &Simulator::umlsl2;
4434 } else {
4435 Op = &Simulator::umlsl;
4436 }
4437 break;
4438 case NEON_SQDMULL_byelement:
4439 if (instr->Mask(NEON_Q)) {
4440 Op = &Simulator::sqdmull2;
4441 } else {
4442 Op = &Simulator::sqdmull;
4443 }
4444 break;
4445 case NEON_SQDMLAL_byelement:
4446 if (instr->Mask(NEON_Q)) {
4447 Op = &Simulator::sqdmlal2;
4448 } else {
4449 Op = &Simulator::sqdmlal;
4450 }
4451 break;
4452 case NEON_SQDMLSL_byelement:
4453 if (instr->Mask(NEON_Q)) {
4454 Op = &Simulator::sqdmlsl2;
4455 } else {
4456 Op = &Simulator::sqdmlsl;
4457 }
4458 break;
4459 default:
4460 index = instr->NEONH();
4461 if ((instr->FPType() & 1) == 0) {
4462 index = (index << 1) | instr->NEONL();
4463 }
4464
4465 vf = nfd.GetVectorFormat(nfd.FPFormatMap());
4466
4467 switch (instr->Mask(NEONByIndexedElementFPMask)) {
4468 case NEON_FMUL_byelement:
4469 Op = &Simulator::fmul;
4470 break;
4471 case NEON_FMLA_byelement:
4472 Op = &Simulator::fmla;
4473 break;
4474 case NEON_FMLS_byelement:
4475 Op = &Simulator::fmls;
4476 break;
4477 case NEON_FMULX_byelement:
4478 Op = &Simulator::fmulx;
4479 break;
4480 default:
4481 UNIMPLEMENTED();
4482 }
4483 }
4484
4485 (this->*Op)(vf, rd, rn, vreg(rm_reg), index);
4486 }
4487
4488 void Simulator::VisitNEONCopy(Instruction* instr) {
4489 NEONFormatDecoder nfd(instr, NEONFormatDecoder::TriangularFormatMap());
4490 VectorFormat vf = nfd.GetVectorFormat();
4491
4492 SimVRegister& rd = vreg(instr->Rd());
4493 SimVRegister& rn = vreg(instr->Rn());
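  // imm5 encodes both the lane size (the position of its lowest set bit) and
  // the lane index (the bits above that bit).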
4494 int imm5 = instr->ImmNEON5();
4495 int lsb = LowestSetBitPosition(imm5);
4496 int reg_index = imm5 >> lsb;
4497
4498 if (instr->Mask(NEONCopyInsElementMask) == NEON_INS_ELEMENT) {
4499 int imm4 = instr->ImmNEON4();
4500 DCHECK_GE(lsb, 1);
4501 int rn_index = imm4 >> (lsb - 1);
4502 ins_element(vf, rd, reg_index, rn, rn_index);
4503 } else if (instr->Mask(NEONCopyInsGeneralMask) == NEON_INS_GENERAL) {
4504 ins_immediate(vf, rd, reg_index, xreg(instr->Rn()));
4505 } else if (instr->Mask(NEONCopyUmovMask) == NEON_UMOV) {
4506 uint64_t value = LogicVRegister(rn).Uint(vf, reg_index);
4507 value &= MaxUintFromFormat(vf);
4508 set_xreg(instr->Rd(), value);
4509 } else if (instr->Mask(NEONCopyUmovMask) == NEON_SMOV) {
4510 int64_t value = LogicVRegister(rn).Int(vf, reg_index);
4511 if (instr->NEONQ()) {
4512 set_xreg(instr->Rd(), value);
4513 } else {
4514 DCHECK(is_int32(value));
4515 set_wreg(instr->Rd(), static_cast<int32_t>(value));
4516 }
4517 } else if (instr->Mask(NEONCopyDupElementMask) == NEON_DUP_ELEMENT) {
4518 dup_element(vf, rd, rn, reg_index);
4519 } else if (instr->Mask(NEONCopyDupGeneralMask) == NEON_DUP_GENERAL) {
4520 dup_immediate(vf, rd, xreg(instr->Rn()));
4521 } else {
4522 UNIMPLEMENTED();
4523 }
4524 }
4525
4526 void Simulator::VisitNEONExtract(Instruction* instr) {
4527 NEONFormatDecoder nfd(instr, NEONFormatDecoder::LogicalFormatMap());
4528 VectorFormat vf = nfd.GetVectorFormat();
4529 SimVRegister& rd = vreg(instr->Rd());
4530 SimVRegister& rn = vreg(instr->Rn());
4531 SimVRegister& rm = vreg(instr->Rm());
4532 if (instr->Mask(NEONExtractMask) == NEON_EXT) {
4533 int index = instr->ImmNEONExt();
4534 ext(vf, rd, rn, rm, index);
4535 } else {
4536 UNIMPLEMENTED();
4537 }
4538 }
4539
4540 void Simulator::NEONLoadStoreMultiStructHelper(const Instruction* instr,
4541 AddrMode addr_mode) {
4542 NEONFormatDecoder nfd(instr, NEONFormatDecoder::LoadStoreFormatMap());
4543 VectorFormat vf = nfd.GetVectorFormat();
4544
4545 uint64_t addr_base = xreg(instr->Rn(), Reg31IsStackPointer);
4546 int reg_size = RegisterSizeInBytesFromFormat(vf);
4547
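  // Up to four consecutive registers take part, wrapping at V31. The
  // per-register addresses are only needed by LD1/ST1, which access one
  // consecutive block of memory per register.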
4548 int reg[4];
4549 uint64_t addr[4];
4550 for (int i = 0; i < 4; i++) {
4551 reg[i] = (instr->Rt() + i) % kNumberOfVRegisters;
4552 addr[i] = addr_base + (i * reg_size);
4553 }
4554 int count = 1;
4555 bool log_read = true;
4556
4557 // Bit 23 determines whether this is an offset or post-index addressing mode.
4558 // In offset mode, bits 20 to 16 should be zero; these bits encode the
4559   // register or immediate in post-index mode.
4560 if ((instr->Bit(23) == 0) && (instr->Bits(20, 16) != 0)) {
4561 UNREACHABLE();
4562 }
4563
4564 // We use the PostIndex mask here, as it works in this case for both Offset
4565 // and PostIndex addressing.
4566 switch (instr->Mask(NEONLoadStoreMultiStructPostIndexMask)) {
4567 case NEON_LD1_4v:
4568 case NEON_LD1_4v_post:
4569 ld1(vf, vreg(reg[3]), addr[3]);
4570 count++; // Fall through.
4571 case NEON_LD1_3v:
4572 case NEON_LD1_3v_post:
4573 ld1(vf, vreg(reg[2]), addr[2]);
4574 count++; // Fall through.
4575 case NEON_LD1_2v:
4576 case NEON_LD1_2v_post:
4577 ld1(vf, vreg(reg[1]), addr[1]);
4578 count++; // Fall through.
4579 case NEON_LD1_1v:
4580 case NEON_LD1_1v_post:
4581 ld1(vf, vreg(reg[0]), addr[0]);
4582 break;
4583 case NEON_ST1_4v:
4584 case NEON_ST1_4v_post:
4585 st1(vf, vreg(reg[3]), addr[3]);
4586 count++; // Fall through.
4587 case NEON_ST1_3v:
4588 case NEON_ST1_3v_post:
4589 st1(vf, vreg(reg[2]), addr[2]);
4590 count++; // Fall through.
4591 case NEON_ST1_2v:
4592 case NEON_ST1_2v_post:
4593 st1(vf, vreg(reg[1]), addr[1]);
4594 count++; // Fall through.
4595 case NEON_ST1_1v:
4596 case NEON_ST1_1v_post:
4597 st1(vf, vreg(reg[0]), addr[0]);
4598 log_read = false;
4599 break;
4600 case NEON_LD2_post:
4601 case NEON_LD2:
4602 ld2(vf, vreg(reg[0]), vreg(reg[1]), addr[0]);
4603 count = 2;
4604 break;
4605 case NEON_ST2:
4606 case NEON_ST2_post:
4607 st2(vf, vreg(reg[0]), vreg(reg[1]), addr[0]);
4608 count = 2;
4609 log_read = false;
4610 break;
4611 case NEON_LD3_post:
4612 case NEON_LD3:
4613 ld3(vf, vreg(reg[0]), vreg(reg[1]), vreg(reg[2]), addr[0]);
4614 count = 3;
4615 break;
4616 case NEON_ST3:
4617 case NEON_ST3_post:
4618 st3(vf, vreg(reg[0]), vreg(reg[1]), vreg(reg[2]), addr[0]);
4619 count = 3;
4620 log_read = false;
4621 break;
4622 case NEON_LD4_post:
4623 case NEON_LD4:
4624 ld4(vf, vreg(reg[0]), vreg(reg[1]), vreg(reg[2]), vreg(reg[3]), addr[0]);
4625 count = 4;
4626 break;
4627 case NEON_ST4:
4628 case NEON_ST4_post:
4629 st4(vf, vreg(reg[0]), vreg(reg[1]), vreg(reg[2]), vreg(reg[3]), addr[0]);
4630 count = 4;
4631 log_read = false;
4632 break;
4633 default:
4634 UNIMPLEMENTED();
4635 }
4636
4637 // Explicitly log the register update whilst we have type information.
4638 for (int i = 0; i < count; i++) {
4639 // For de-interleaving loads, only print the base address.
4640 int lane_size = LaneSizeInBytesFromFormat(vf);
4641 PrintRegisterFormat format = GetPrintRegisterFormatTryFP(
4642 GetPrintRegisterFormatForSize(reg_size, lane_size));
4643 if (log_read) {
4644 LogVRead(addr_base, reg[i], format);
4645 } else {
4646 LogVWrite(addr_base, reg[i], format);
4647 }
4648 }
4649
4650 if (addr_mode == PostIndex) {
4651 int rm = instr->Rm();
4652 // The immediate post index addressing mode is indicated by rm = 31.
4653 // The immediate is implied by the number of vector registers used.
4654 addr_base +=
4655 (rm == 31) ? RegisterSizeInBytesFromFormat(vf) * count : xreg(rm);
4656 set_xreg(instr->Rn(), addr_base);
4657 } else {
4658 DCHECK_EQ(addr_mode, Offset);
4659 }
4660 }
4661
4662 void Simulator::VisitNEONLoadStoreMultiStruct(Instruction* instr) {
4663 NEONLoadStoreMultiStructHelper(instr, Offset);
4664 }
4665
4666 void Simulator::VisitNEONLoadStoreMultiStructPostIndex(Instruction* instr) {
4667 NEONLoadStoreMultiStructHelper(instr, PostIndex);
4668 }
4669
4670 void Simulator::NEONLoadStoreSingleStructHelper(const Instruction* instr,
4671 AddrMode addr_mode) {
4672 uint64_t addr = xreg(instr->Rn(), Reg31IsStackPointer);
4673 int rt = instr->Rt();
4674
4675 // Bit 23 determines whether this is an offset or post-index addressing mode.
4676 // In offset mode, bits 20 to 16 should be zero; these bits encode the
4677   // register or immediate in post-index mode.
4678 DCHECK_IMPLIES(instr->Bit(23) == 0, instr->Bits(20, 16) == 0);
4679
4680 bool do_load = false;
4681
4682 NEONFormatDecoder nfd(instr, NEONFormatDecoder::LoadStoreFormatMap());
4683 VectorFormat vf_t = nfd.GetVectorFormat();
4684
4685 VectorFormat vf = kFormat16B;
4686 // We use the PostIndex mask here, as it works in this case for both Offset
4687 // and PostIndex addressing.
4688 switch (instr->Mask(NEONLoadStoreSingleStructPostIndexMask)) {
4689 case NEON_LD1_b:
4690 case NEON_LD1_b_post:
4691 case NEON_LD2_b:
4692 case NEON_LD2_b_post:
4693 case NEON_LD3_b:
4694 case NEON_LD3_b_post:
4695 case NEON_LD4_b:
4696 case NEON_LD4_b_post:
4697 do_load = true; // Fall through.
4698 case NEON_ST1_b:
4699 case NEON_ST1_b_post:
4700 case NEON_ST2_b:
4701 case NEON_ST2_b_post:
4702 case NEON_ST3_b:
4703 case NEON_ST3_b_post:
4704 case NEON_ST4_b:
4705 case NEON_ST4_b_post:
4706 break;
4707
4708 case NEON_LD1_h:
4709 case NEON_LD1_h_post:
4710 case NEON_LD2_h:
4711 case NEON_LD2_h_post:
4712 case NEON_LD3_h:
4713 case NEON_LD3_h_post:
4714 case NEON_LD4_h:
4715 case NEON_LD4_h_post:
4716 do_load = true; // Fall through.
4717 case NEON_ST1_h:
4718 case NEON_ST1_h_post:
4719 case NEON_ST2_h:
4720 case NEON_ST2_h_post:
4721 case NEON_ST3_h:
4722 case NEON_ST3_h_post:
4723 case NEON_ST4_h:
4724 case NEON_ST4_h_post:
4725 vf = kFormat8H;
4726 break;
4727
4728 case NEON_LD1_s:
4729 case NEON_LD1_s_post:
4730 case NEON_LD2_s:
4731 case NEON_LD2_s_post:
4732 case NEON_LD3_s:
4733 case NEON_LD3_s_post:
4734 case NEON_LD4_s:
4735 case NEON_LD4_s_post:
4736 do_load = true; // Fall through.
4737 case NEON_ST1_s:
4738 case NEON_ST1_s_post:
4739 case NEON_ST2_s:
4740 case NEON_ST2_s_post:
4741 case NEON_ST3_s:
4742 case NEON_ST3_s_post:
4743 case NEON_ST4_s:
4744 case NEON_ST4_s_post: {
4745 static_assert((NEON_LD1_s | (1 << NEONLSSize_offset)) == NEON_LD1_d,
4746 "LSB of size distinguishes S and D registers.");
4747 static_assert(
4748 (NEON_LD1_s_post | (1 << NEONLSSize_offset)) == NEON_LD1_d_post,
4749 "LSB of size distinguishes S and D registers.");
4750 static_assert((NEON_ST1_s | (1 << NEONLSSize_offset)) == NEON_ST1_d,
4751 "LSB of size distinguishes S and D registers.");
4752 static_assert(
4753 (NEON_ST1_s_post | (1 << NEONLSSize_offset)) == NEON_ST1_d_post,
4754 "LSB of size distinguishes S and D registers.");
4755 vf = ((instr->NEONLSSize() & 1) == 0) ? kFormat4S : kFormat2D;
4756 break;
4757 }
4758
4759 case NEON_LD1R:
4760 case NEON_LD1R_post: {
4761 vf = vf_t;
4762 ld1r(vf, vreg(rt), addr);
4763 do_load = true;
4764 break;
4765 }
4766
4767 case NEON_LD2R:
4768 case NEON_LD2R_post: {
4769 vf = vf_t;
4770 int rt2 = (rt + 1) % kNumberOfVRegisters;
4771 ld2r(vf, vreg(rt), vreg(rt2), addr);
4772 do_load = true;
4773 break;
4774 }
4775
4776 case NEON_LD3R:
4777 case NEON_LD3R_post: {
4778 vf = vf_t;
4779 int rt2 = (rt + 1) % kNumberOfVRegisters;
4780 int rt3 = (rt2 + 1) % kNumberOfVRegisters;
4781 ld3r(vf, vreg(rt), vreg(rt2), vreg(rt3), addr);
4782 do_load = true;
4783 break;
4784 }
4785
4786 case NEON_LD4R:
4787 case NEON_LD4R_post: {
4788 vf = vf_t;
4789 int rt2 = (rt + 1) % kNumberOfVRegisters;
4790 int rt3 = (rt2 + 1) % kNumberOfVRegisters;
4791 int rt4 = (rt3 + 1) % kNumberOfVRegisters;
4792 ld4r(vf, vreg(rt), vreg(rt2), vreg(rt3), vreg(rt4), addr);
4793 do_load = true;
4794 break;
4795 }
4796 default:
4797 UNIMPLEMENTED();
4798 }
4799
4800 PrintRegisterFormat print_format =
4801 GetPrintRegisterFormatTryFP(GetPrintRegisterFormat(vf));
4802 // Make sure that the print_format only includes a single lane.
4803 print_format =
4804 static_cast<PrintRegisterFormat>(print_format & ~kPrintRegAsVectorMask);
4805
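  // The lane index is spread across the Q, S and size fields; NEONLSIndex()
  // recovers it using the log2 lane size computed below.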
4806 int esize = LaneSizeInBytesFromFormat(vf);
4807 int index_shift = LaneSizeInBytesLog2FromFormat(vf);
4808 int lane = instr->NEONLSIndex(index_shift);
4809 int scale = 0;
4810 int rt2 = (rt + 1) % kNumberOfVRegisters;
4811 int rt3 = (rt2 + 1) % kNumberOfVRegisters;
4812 int rt4 = (rt3 + 1) % kNumberOfVRegisters;
4813 switch (instr->Mask(NEONLoadStoreSingleLenMask)) {
4814 case NEONLoadStoreSingle1:
4815 scale = 1;
4816 if (do_load) {
4817 ld1(vf, vreg(rt), lane, addr);
4818 LogVRead(addr, rt, print_format, lane);
4819 } else {
4820 st1(vf, vreg(rt), lane, addr);
4821 LogVWrite(addr, rt, print_format, lane);
4822 }
4823 break;
4824 case NEONLoadStoreSingle2:
4825 scale = 2;
4826 if (do_load) {
4827 ld2(vf, vreg(rt), vreg(rt2), lane, addr);
4828 LogVRead(addr, rt, print_format, lane);
4829 LogVRead(addr + esize, rt2, print_format, lane);
4830 } else {
4831 st2(vf, vreg(rt), vreg(rt2), lane, addr);
4832 LogVWrite(addr, rt, print_format, lane);
4833 LogVWrite(addr + esize, rt2, print_format, lane);
4834 }
4835 break;
4836 case NEONLoadStoreSingle3:
4837 scale = 3;
4838 if (do_load) {
4839 ld3(vf, vreg(rt), vreg(rt2), vreg(rt3), lane, addr);
4840 LogVRead(addr, rt, print_format, lane);
4841 LogVRead(addr + esize, rt2, print_format, lane);
4842 LogVRead(addr + (2 * esize), rt3, print_format, lane);
4843 } else {
4844 st3(vf, vreg(rt), vreg(rt2), vreg(rt3), lane, addr);
4845 LogVWrite(addr, rt, print_format, lane);
4846 LogVWrite(addr + esize, rt2, print_format, lane);
4847 LogVWrite(addr + (2 * esize), rt3, print_format, lane);
4848 }
4849 break;
4850 case NEONLoadStoreSingle4:
4851 scale = 4;
4852 if (do_load) {
4853 ld4(vf, vreg(rt), vreg(rt2), vreg(rt3), vreg(rt4), lane, addr);
4854 LogVRead(addr, rt, print_format, lane);
4855 LogVRead(addr + esize, rt2, print_format, lane);
4856 LogVRead(addr + (2 * esize), rt3, print_format, lane);
4857 LogVRead(addr + (3 * esize), rt4, print_format, lane);
4858 } else {
4859 st4(vf, vreg(rt), vreg(rt2), vreg(rt3), vreg(rt4), lane, addr);
4860 LogVWrite(addr, rt, print_format, lane);
4861 LogVWrite(addr + esize, rt2, print_format, lane);
4862 LogVWrite(addr + (2 * esize), rt3, print_format, lane);
4863 LogVWrite(addr + (3 * esize), rt4, print_format, lane);
4864 }
4865 break;
4866 default:
4867 UNIMPLEMENTED();
4868 }
4869
4870 if (addr_mode == PostIndex) {
4871 int rm = instr->Rm();
4872 int lane_size = LaneSizeInBytesFromFormat(vf);
4873 set_xreg(instr->Rn(), addr + ((rm == 31) ? (scale * lane_size) : xreg(rm)));
4874 }
4875 }
4876
4877 void Simulator::VisitNEONLoadStoreSingleStruct(Instruction* instr) {
4878 NEONLoadStoreSingleStructHelper(instr, Offset);
4879 }
4880
4881 void Simulator::VisitNEONLoadStoreSingleStructPostIndex(Instruction* instr) {
4882 NEONLoadStoreSingleStructHelper(instr, PostIndex);
4883 }
4884
4885 void Simulator::VisitNEONModifiedImmediate(Instruction* instr) {
4886 SimVRegister& rd = vreg(instr->Rd());
4887 int cmode = instr->NEONCmode();
4888 int cmode_3_1 = (cmode >> 1) & 7;
4889 int cmode_3 = (cmode >> 3) & 1;
4890 int cmode_2 = (cmode >> 2) & 1;
4891 int cmode_1 = (cmode >> 1) & 1;
4892 int cmode_0 = cmode & 1;
4893 int q = instr->NEONQ();
4894 int op_bit = instr->NEONModImmOp();
4895 uint64_t imm8 = instr->ImmNEONabcdefgh();
4896
4897 // Find the format and immediate value
4898 uint64_t imm = 0;
4899 VectorFormat vform = kFormatUndefined;
4900 switch (cmode_3_1) {
4901 case 0x0:
4902 case 0x1:
4903 case 0x2:
4904 case 0x3:
4905 vform = (q == 1) ? kFormat4S : kFormat2S;
4906 imm = imm8 << (8 * cmode_3_1);
4907 break;
4908 case 0x4:
4909 case 0x5:
4910 vform = (q == 1) ? kFormat8H : kFormat4H;
4911 imm = imm8 << (8 * cmode_1);
4912 break;
4913 case 0x6:
4914 vform = (q == 1) ? kFormat4S : kFormat2S;
4915 if (cmode_0 == 0) {
4916 imm = imm8 << 8 | 0x000000ff;
4917 } else {
4918 imm = imm8 << 16 | 0x0000ffff;
4919 }
4920 break;
4921 case 0x7:
4922 if (cmode_0 == 0 && op_bit == 0) {
4923 vform = q ? kFormat16B : kFormat8B;
4924 imm = imm8;
4925 } else if (cmode_0 == 0 && op_bit == 1) {
4926 vform = q ? kFormat2D : kFormat1D;
4927 imm = 0;
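        // Expand each bit of imm8 into a full byte (0x00 or 0xff) of the
        // 64-bit immediate.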
4928 for (int i = 0; i < 8; ++i) {
4929 if (imm8 & (1 << i)) {
4930 imm |= (UINT64_C(0xff) << (8 * i));
4931 }
4932 }
4933 } else { // cmode_0 == 1, cmode == 0xf.
4934 if (op_bit == 0) {
4935 vform = q ? kFormat4S : kFormat2S;
4936 imm = bit_cast<uint32_t>(instr->ImmNEONFP32());
4937 } else if (q == 1) {
4938 vform = kFormat2D;
4939 imm = bit_cast<uint64_t>(instr->ImmNEONFP64());
4940 } else {
4941 DCHECK((q == 0) && (op_bit == 1) && (cmode == 0xf));
4942 VisitUnallocated(instr);
4943 }
4944 }
4945 break;
4946 default:
4947 UNREACHABLE();
4948 break;
4949 }
4950
4951 // Find the operation.
4952 NEONModifiedImmediateOp op;
4953 if (cmode_3 == 0) {
4954 if (cmode_0 == 0) {
4955 op = op_bit ? NEONModifiedImmediate_MVNI : NEONModifiedImmediate_MOVI;
4956 } else { // cmode<0> == '1'
4957 op = op_bit ? NEONModifiedImmediate_BIC : NEONModifiedImmediate_ORR;
4958 }
4959 } else { // cmode<3> == '1'
4960 if (cmode_2 == 0) {
4961 if (cmode_0 == 0) {
4962 op = op_bit ? NEONModifiedImmediate_MVNI : NEONModifiedImmediate_MOVI;
4963 } else { // cmode<0> == '1'
4964 op = op_bit ? NEONModifiedImmediate_BIC : NEONModifiedImmediate_ORR;
4965 }
4966 } else { // cmode<2> == '1'
4967 if (cmode_1 == 0) {
4968 op = op_bit ? NEONModifiedImmediate_MVNI : NEONModifiedImmediate_MOVI;
4969 } else { // cmode<1> == '1'
4970 if (cmode_0 == 0) {
4971 op = NEONModifiedImmediate_MOVI;
4972 } else { // cmode<0> == '1'
4973 op = NEONModifiedImmediate_MOVI;
4974 }
4975 }
4976 }
4977 }
4978
4979 // Call the logic function.
4980 switch (op) {
4981 case NEONModifiedImmediate_ORR:
4982 orr(vform, rd, rd, imm);
4983 break;
4984 case NEONModifiedImmediate_BIC:
4985 bic(vform, rd, rd, imm);
4986 break;
4987 case NEONModifiedImmediate_MOVI:
4988 movi(vform, rd, imm);
4989 break;
4990 case NEONModifiedImmediate_MVNI:
4991 mvni(vform, rd, imm);
4992 break;
4993 default:
4994 VisitUnimplemented(instr);
4995 }
4996 }
4997
4998 void Simulator::VisitNEONScalar2RegMisc(Instruction* instr) {
4999 NEONFormatDecoder nfd(instr, NEONFormatDecoder::ScalarFormatMap());
5000 VectorFormat vf = nfd.GetVectorFormat();
5001
5002 SimVRegister& rd = vreg(instr->Rd());
5003 SimVRegister& rn = vreg(instr->Rn());
5004
5005 if (instr->Mask(NEON2RegMiscOpcode) <= NEON_NEG_scalar_opcode) {
5006     // These instructions all use a two-bit size field, except NOT and RBIT,
5007 // which use the field to encode the operation.
5008 switch (instr->Mask(NEONScalar2RegMiscMask)) {
5009 case NEON_CMEQ_zero_scalar:
5010 cmp(vf, rd, rn, 0, eq);
5011 break;
5012 case NEON_CMGE_zero_scalar:
5013 cmp(vf, rd, rn, 0, ge);
5014 break;
5015 case NEON_CMGT_zero_scalar:
5016 cmp(vf, rd, rn, 0, gt);
5017 break;
5018 case NEON_CMLT_zero_scalar:
5019 cmp(vf, rd, rn, 0, lt);
5020 break;
5021 case NEON_CMLE_zero_scalar:
5022 cmp(vf, rd, rn, 0, le);
5023 break;
5024 case NEON_ABS_scalar:
5025 abs(vf, rd, rn);
5026 break;
5027 case NEON_SQABS_scalar:
5028 abs(vf, rd, rn).SignedSaturate(vf);
5029 break;
5030 case NEON_NEG_scalar:
5031 neg(vf, rd, rn);
5032 break;
5033 case NEON_SQNEG_scalar:
5034 neg(vf, rd, rn).SignedSaturate(vf);
5035 break;
5036 case NEON_SUQADD_scalar:
5037 suqadd(vf, rd, rn);
5038 break;
5039 case NEON_USQADD_scalar:
5040 usqadd(vf, rd, rn);
5041 break;
5042 default:
5043 UNIMPLEMENTED();
5044 break;
5045 }
5046 } else {
5047 VectorFormat fpf = nfd.GetVectorFormat(nfd.FPScalarFormatMap());
5048 FPRounding fpcr_rounding = static_cast<FPRounding>(fpcr().RMode());
5049
5050     // These instructions all use a one-bit size field, except SQXTUN, SQXTN
5051     // and UQXTN, which use a two-bit size field.
5052 switch (instr->Mask(NEONScalar2RegMiscFPMask)) {
5053 case NEON_FRECPE_scalar:
5054 frecpe(fpf, rd, rn, fpcr_rounding);
5055 break;
5056 case NEON_FRECPX_scalar:
5057 frecpx(fpf, rd, rn);
5058 break;
5059 case NEON_FRSQRTE_scalar:
5060 frsqrte(fpf, rd, rn);
5061 break;
5062 case NEON_FCMGT_zero_scalar:
5063 fcmp_zero(fpf, rd, rn, gt);
5064 break;
5065 case NEON_FCMGE_zero_scalar:
5066 fcmp_zero(fpf, rd, rn, ge);
5067 break;
5068 case NEON_FCMEQ_zero_scalar:
5069 fcmp_zero(fpf, rd, rn, eq);
5070 break;
5071 case NEON_FCMLE_zero_scalar:
5072 fcmp_zero(fpf, rd, rn, le);
5073 break;
5074 case NEON_FCMLT_zero_scalar:
5075 fcmp_zero(fpf, rd, rn, lt);
5076 break;
5077 case NEON_SCVTF_scalar:
5078 scvtf(fpf, rd, rn, 0, fpcr_rounding);
5079 break;
5080 case NEON_UCVTF_scalar:
5081 ucvtf(fpf, rd, rn, 0, fpcr_rounding);
5082 break;
5083 case NEON_FCVTNS_scalar:
5084 fcvts(fpf, rd, rn, FPTieEven);
5085 break;
5086 case NEON_FCVTNU_scalar:
5087 fcvtu(fpf, rd, rn, FPTieEven);
5088 break;
5089 case NEON_FCVTPS_scalar:
5090 fcvts(fpf, rd, rn, FPPositiveInfinity);
5091 break;
5092 case NEON_FCVTPU_scalar:
5093 fcvtu(fpf, rd, rn, FPPositiveInfinity);
5094 break;
5095 case NEON_FCVTMS_scalar:
5096 fcvts(fpf, rd, rn, FPNegativeInfinity);
5097 break;
5098 case NEON_FCVTMU_scalar:
5099 fcvtu(fpf, rd, rn, FPNegativeInfinity);
5100 break;
5101 case NEON_FCVTZS_scalar:
5102 fcvts(fpf, rd, rn, FPZero);
5103 break;
5104 case NEON_FCVTZU_scalar:
5105 fcvtu(fpf, rd, rn, FPZero);
5106 break;
5107 case NEON_FCVTAS_scalar:
5108 fcvts(fpf, rd, rn, FPTieAway);
5109 break;
5110 case NEON_FCVTAU_scalar:
5111 fcvtu(fpf, rd, rn, FPTieAway);
5112 break;
5113 case NEON_FCVTXN_scalar:
5114 // Unlike all of the other FP instructions above, fcvtxn encodes dest
5115 // size S as size<0>=1. There's only one case, so we ignore the form.
5116 DCHECK_EQ(instr->Bit(22), 1);
5117 fcvtxn(kFormatS, rd, rn);
5118 break;
5119 default:
5120 switch (instr->Mask(NEONScalar2RegMiscMask)) {
5121 case NEON_SQXTN_scalar:
5122 sqxtn(vf, rd, rn);
5123 break;
5124 case NEON_UQXTN_scalar:
5125 uqxtn(vf, rd, rn);
5126 break;
5127 case NEON_SQXTUN_scalar:
5128 sqxtun(vf, rd, rn);
5129 break;
5130 default:
5131 UNIMPLEMENTED();
5132 }
5133 }
5134 }
5135 }
5136
5137 void Simulator::VisitNEONScalar3Diff(Instruction* instr) {
5138 NEONFormatDecoder nfd(instr, NEONFormatDecoder::LongScalarFormatMap());
5139 VectorFormat vf = nfd.GetVectorFormat();
5140
5141 SimVRegister& rd = vreg(instr->Rd());
5142 SimVRegister& rn = vreg(instr->Rn());
5143 SimVRegister& rm = vreg(instr->Rm());
5144 switch (instr->Mask(NEONScalar3DiffMask)) {
5145 case NEON_SQDMLAL_scalar:
5146 sqdmlal(vf, rd, rn, rm);
5147 break;
5148 case NEON_SQDMLSL_scalar:
5149 sqdmlsl(vf, rd, rn, rm);
5150 break;
5151 case NEON_SQDMULL_scalar:
5152 sqdmull(vf, rd, rn, rm);
5153 break;
5154 default:
5155 UNIMPLEMENTED();
5156 }
5157 }
5158
5159 void Simulator::VisitNEONScalar3Same(Instruction* instr) {
5160 NEONFormatDecoder nfd(instr, NEONFormatDecoder::ScalarFormatMap());
5161 VectorFormat vf = nfd.GetVectorFormat();
5162
5163 SimVRegister& rd = vreg(instr->Rd());
5164 SimVRegister& rn = vreg(instr->Rn());
5165 SimVRegister& rm = vreg(instr->Rm());
5166
5167 if (instr->Mask(NEONScalar3SameFPFMask) == NEONScalar3SameFPFixed) {
5168 vf = nfd.GetVectorFormat(nfd.FPScalarFormatMap());
5169 switch (instr->Mask(NEONScalar3SameFPMask)) {
5170 case NEON_FMULX_scalar:
5171 fmulx(vf, rd, rn, rm);
5172 break;
5173 case NEON_FACGE_scalar:
5174 fabscmp(vf, rd, rn, rm, ge);
5175 break;
5176 case NEON_FACGT_scalar:
5177 fabscmp(vf, rd, rn, rm, gt);
5178 break;
5179 case NEON_FCMEQ_scalar:
5180 fcmp(vf, rd, rn, rm, eq);
5181 break;
5182 case NEON_FCMGE_scalar:
5183 fcmp(vf, rd, rn, rm, ge);
5184 break;
5185 case NEON_FCMGT_scalar:
5186 fcmp(vf, rd, rn, rm, gt);
5187 break;
5188 case NEON_FRECPS_scalar:
5189 frecps(vf, rd, rn, rm);
5190 break;
5191 case NEON_FRSQRTS_scalar:
5192 frsqrts(vf, rd, rn, rm);
5193 break;
5194 case NEON_FABD_scalar:
5195 fabd(vf, rd, rn, rm);
5196 break;
5197 default:
5198 UNIMPLEMENTED();
5199 }
5200 } else {
5201 switch (instr->Mask(NEONScalar3SameMask)) {
5202 case NEON_ADD_scalar:
5203 add(vf, rd, rn, rm);
5204 break;
5205 case NEON_SUB_scalar:
5206 sub(vf, rd, rn, rm);
5207 break;
5208 case NEON_CMEQ_scalar:
5209 cmp(vf, rd, rn, rm, eq);
5210 break;
5211 case NEON_CMGE_scalar:
5212 cmp(vf, rd, rn, rm, ge);
5213 break;
5214 case NEON_CMGT_scalar:
5215 cmp(vf, rd, rn, rm, gt);
5216 break;
5217 case NEON_CMHI_scalar:
5218 cmp(vf, rd, rn, rm, hi);
5219 break;
5220 case NEON_CMHS_scalar:
5221 cmp(vf, rd, rn, rm, hs);
5222 break;
5223 case NEON_CMTST_scalar:
5224 cmptst(vf, rd, rn, rm);
5225 break;
5226 case NEON_USHL_scalar:
5227 ushl(vf, rd, rn, rm);
5228 break;
5229 case NEON_SSHL_scalar:
5230 sshl(vf, rd, rn, rm);
5231 break;
5232 case NEON_SQDMULH_scalar:
5233 sqdmulh(vf, rd, rn, rm);
5234 break;
5235 case NEON_SQRDMULH_scalar:
5236 sqrdmulh(vf, rd, rn, rm);
5237 break;
5238 case NEON_UQADD_scalar:
5239 add(vf, rd, rn, rm).UnsignedSaturate(vf);
5240 break;
5241 case NEON_SQADD_scalar:
5242 add(vf, rd, rn, rm).SignedSaturate(vf);
5243 break;
5244 case NEON_UQSUB_scalar:
5245 sub(vf, rd, rn, rm).UnsignedSaturate(vf);
5246 break;
5247 case NEON_SQSUB_scalar:
5248 sub(vf, rd, rn, rm).SignedSaturate(vf);
5249 break;
5250 case NEON_UQSHL_scalar:
5251 ushl(vf, rd, rn, rm).UnsignedSaturate(vf);
5252 break;
5253 case NEON_SQSHL_scalar:
5254 sshl(vf, rd, rn, rm).SignedSaturate(vf);
5255 break;
5256 case NEON_URSHL_scalar:
5257 ushl(vf, rd, rn, rm).Round(vf);
5258 break;
5259 case NEON_SRSHL_scalar:
5260 sshl(vf, rd, rn, rm).Round(vf);
5261 break;
5262 case NEON_UQRSHL_scalar:
5263 ushl(vf, rd, rn, rm).Round(vf).UnsignedSaturate(vf);
5264 break;
5265 case NEON_SQRSHL_scalar:
5266 sshl(vf, rd, rn, rm).Round(vf).SignedSaturate(vf);
5267 break;
5268 default:
5269 UNIMPLEMENTED();
5270 }
5271 }
5272 }
5273
5274 void Simulator::VisitNEONScalarByIndexedElement(Instruction* instr) {
5275 NEONFormatDecoder nfd(instr, NEONFormatDecoder::LongScalarFormatMap());
5276 VectorFormat vf = nfd.GetVectorFormat();
5277 VectorFormat vf_r = nfd.GetVectorFormat(nfd.ScalarFormatMap());
5278
5279 SimVRegister& rd = vreg(instr->Rd());
5280 SimVRegister& rn = vreg(instr->Rn());
5281 ByElementOp Op = NULL;
5282
5283 int rm_reg = instr->Rm();
5284 int index = (instr->NEONH() << 1) | instr->NEONL();
5285 if (instr->NEONSize() == 1) {
5286 rm_reg &= 0xf;
5287 index = (index << 1) | instr->NEONM();
5288 }
5289
5290 switch (instr->Mask(NEONScalarByIndexedElementMask)) {
5291 case NEON_SQDMULL_byelement_scalar:
5292 Op = &Simulator::sqdmull;
5293 break;
5294 case NEON_SQDMLAL_byelement_scalar:
5295 Op = &Simulator::sqdmlal;
5296 break;
5297 case NEON_SQDMLSL_byelement_scalar:
5298 Op = &Simulator::sqdmlsl;
5299 break;
5300 case NEON_SQDMULH_byelement_scalar:
5301 Op = &Simulator::sqdmulh;
5302 vf = vf_r;
5303 break;
5304 case NEON_SQRDMULH_byelement_scalar:
5305 Op = &Simulator::sqrdmulh;
5306 vf = vf_r;
5307 break;
5308 default:
5309 vf = nfd.GetVectorFormat(nfd.FPScalarFormatMap());
5310 index = instr->NEONH();
5311 if ((instr->FPType() & 1) == 0) {
5312 index = (index << 1) | instr->NEONL();
5313 }
5314 switch (instr->Mask(NEONScalarByIndexedElementFPMask)) {
5315 case NEON_FMUL_byelement_scalar:
5316 Op = &Simulator::fmul;
5317 break;
5318 case NEON_FMLA_byelement_scalar:
5319 Op = &Simulator::fmla;
5320 break;
5321 case NEON_FMLS_byelement_scalar:
5322 Op = &Simulator::fmls;
5323 break;
5324 case NEON_FMULX_byelement_scalar:
5325 Op = &Simulator::fmulx;
5326 break;
5327 default:
5328 UNIMPLEMENTED();
5329 }
5330 }
5331
5332 (this->*Op)(vf, rd, rn, vreg(rm_reg), index);
5333 }
5334
5335 void Simulator::VisitNEONScalarCopy(Instruction* instr) {
5336 NEONFormatDecoder nfd(instr, NEONFormatDecoder::TriangularScalarFormatMap());
5337 VectorFormat vf = nfd.GetVectorFormat();
5338
5339 SimVRegister& rd = vreg(instr->Rd());
5340 SimVRegister& rn = vreg(instr->Rn());
5341
5342 if (instr->Mask(NEONScalarCopyMask) == NEON_DUP_ELEMENT_scalar) {
5343 int imm5 = instr->ImmNEON5();
5344 int lsb = LowestSetBitPosition(imm5);
5345 int rn_index = imm5 >> lsb;
5346 dup_element(vf, rd, rn, rn_index);
5347 } else {
5348 UNIMPLEMENTED();
5349 }
5350 }
5351
5352 void Simulator::VisitNEONScalarPairwise(Instruction* instr) {
5353 NEONFormatDecoder nfd(instr, NEONFormatDecoder::FPScalarFormatMap());
5354 VectorFormat vf = nfd.GetVectorFormat();
5355
5356 SimVRegister& rd = vreg(instr->Rd());
5357 SimVRegister& rn = vreg(instr->Rn());
5358 switch (instr->Mask(NEONScalarPairwiseMask)) {
5359 case NEON_ADDP_scalar:
5360 addp(vf, rd, rn);
5361 break;
5362 case NEON_FADDP_scalar:
5363 faddp(vf, rd, rn);
5364 break;
5365 case NEON_FMAXP_scalar:
5366 fmaxp(vf, rd, rn);
5367 break;
5368 case NEON_FMAXNMP_scalar:
5369 fmaxnmp(vf, rd, rn);
5370 break;
5371 case NEON_FMINP_scalar:
5372 fminp(vf, rd, rn);
5373 break;
5374 case NEON_FMINNMP_scalar:
5375 fminnmp(vf, rd, rn);
5376 break;
5377 default:
5378 UNIMPLEMENTED();
5379 }
5380 }
5381
5382 void Simulator::VisitNEONScalarShiftImmediate(Instruction* instr) {
5383 SimVRegister& rd = vreg(instr->Rd());
5384 SimVRegister& rn = vreg(instr->Rn());
5385 FPRounding fpcr_rounding = static_cast<FPRounding>(fpcr().RMode());
5386
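  // immh (bits 22-19) selects the scalar format:
  // 0001->B, 001x->H, 01xx->S, 1xxx->D, all others undefined.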
5387 static const NEONFormatMap map = {
5388 {22, 21, 20, 19},
5389 {NF_UNDEF, NF_B, NF_H, NF_H, NF_S, NF_S, NF_S, NF_S, NF_D, NF_D, NF_D,
5390 NF_D, NF_D, NF_D, NF_D, NF_D}};
5391 NEONFormatDecoder nfd(instr, &map);
5392 VectorFormat vf = nfd.GetVectorFormat();
5393
5394 int highestSetBit = HighestSetBitPosition(instr->ImmNEONImmh());
5395 int immhimmb = instr->ImmNEONImmhImmb();
5396 int right_shift = (16 << highestSetBit) - immhimmb;
5397 int left_shift = immhimmb - (8 << highestSetBit);
5398 switch (instr->Mask(NEONScalarShiftImmediateMask)) {
5399 case NEON_SHL_scalar:
5400 shl(vf, rd, rn, left_shift);
5401 break;
5402 case NEON_SLI_scalar:
5403 sli(vf, rd, rn, left_shift);
5404 break;
5405 case NEON_SQSHL_imm_scalar:
5406 sqshl(vf, rd, rn, left_shift);
5407 break;
5408 case NEON_UQSHL_imm_scalar:
5409 uqshl(vf, rd, rn, left_shift);
5410 break;
5411 case NEON_SQSHLU_scalar:
5412 sqshlu(vf, rd, rn, left_shift);
5413 break;
5414 case NEON_SRI_scalar:
5415 sri(vf, rd, rn, right_shift);
5416 break;
5417 case NEON_SSHR_scalar:
5418 sshr(vf, rd, rn, right_shift);
5419 break;
5420 case NEON_USHR_scalar:
5421 ushr(vf, rd, rn, right_shift);
5422 break;
5423 case NEON_SRSHR_scalar:
5424 sshr(vf, rd, rn, right_shift).Round(vf);
5425 break;
5426 case NEON_URSHR_scalar:
5427 ushr(vf, rd, rn, right_shift).Round(vf);
5428 break;
5429 case NEON_SSRA_scalar:
5430 ssra(vf, rd, rn, right_shift);
5431 break;
5432 case NEON_USRA_scalar:
5433 usra(vf, rd, rn, right_shift);
5434 break;
5435 case NEON_SRSRA_scalar:
5436 srsra(vf, rd, rn, right_shift);
5437 break;
5438 case NEON_URSRA_scalar:
5439 ursra(vf, rd, rn, right_shift);
5440 break;
5441 case NEON_UQSHRN_scalar:
5442 uqshrn(vf, rd, rn, right_shift);
5443 break;
5444 case NEON_UQRSHRN_scalar:
5445 uqrshrn(vf, rd, rn, right_shift);
5446 break;
5447 case NEON_SQSHRN_scalar:
5448 sqshrn(vf, rd, rn, right_shift);
5449 break;
5450 case NEON_SQRSHRN_scalar:
5451 sqrshrn(vf, rd, rn, right_shift);
5452 break;
5453 case NEON_SQSHRUN_scalar:
5454 sqshrun(vf, rd, rn, right_shift);
5455 break;
5456 case NEON_SQRSHRUN_scalar:
5457 sqrshrun(vf, rd, rn, right_shift);
5458 break;
5459 case NEON_FCVTZS_imm_scalar:
5460 fcvts(vf, rd, rn, FPZero, right_shift);
5461 break;
5462 case NEON_FCVTZU_imm_scalar:
5463 fcvtu(vf, rd, rn, FPZero, right_shift);
5464 break;
5465 case NEON_SCVTF_imm_scalar:
5466 scvtf(vf, rd, rn, right_shift, fpcr_rounding);
5467 break;
5468 case NEON_UCVTF_imm_scalar:
5469 ucvtf(vf, rd, rn, right_shift, fpcr_rounding);
5470 break;
5471 default:
5472 UNIMPLEMENTED();
5473 }
5474 }
5475
5476 void Simulator::VisitNEONShiftImmediate(Instruction* instr) {
5477 SimVRegister& rd = vreg(instr->Rd());
5478 SimVRegister& rn = vreg(instr->Rn());
5479 FPRounding fpcr_rounding = static_cast<FPRounding>(fpcr().RMode());
5480
5481 // 00010->8B, 00011->16B, 001x0->4H, 001x1->8H,
5482 // 01xx0->2S, 01xx1->4S, 1xxx1->2D, all others undefined.
5483 static const NEONFormatMap map = {
5484 {22, 21, 20, 19, 30},
5485 {NF_UNDEF, NF_UNDEF, NF_8B, NF_16B, NF_4H, NF_8H, NF_4H, NF_8H,
5486 NF_2S, NF_4S, NF_2S, NF_4S, NF_2S, NF_4S, NF_2S, NF_4S,
5487 NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, NF_UNDEF, NF_2D,
5488 NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, NF_UNDEF, NF_2D}};
5489 NEONFormatDecoder nfd(instr, &map);
5490 VectorFormat vf = nfd.GetVectorFormat();
5491
5492 // 0001->8H, 001x->4S, 01xx->2D, all others undefined.
5493 static const NEONFormatMap map_l = {
5494 {22, 21, 20, 19},
5495 {NF_UNDEF, NF_8H, NF_4S, NF_4S, NF_2D, NF_2D, NF_2D, NF_2D}};
5496 VectorFormat vf_l = nfd.GetVectorFormat(&map_l);
5497
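  // The shift amount is encoded in immh:immb relative to the lane size in
  // bits: right shifts use (2 * lane size) - immh:immb and left shifts use
  // immh:immb - lane size.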
5498 int highestSetBit = HighestSetBitPosition(instr->ImmNEONImmh());
5499 int immhimmb = instr->ImmNEONImmhImmb();
5500 int right_shift = (16 << highestSetBit) - immhimmb;
5501 int left_shift = immhimmb - (8 << highestSetBit);
5502
5503 switch (instr->Mask(NEONShiftImmediateMask)) {
5504 case NEON_SHL:
5505 shl(vf, rd, rn, left_shift);
5506 break;
5507 case NEON_SLI:
5508 sli(vf, rd, rn, left_shift);
5509 break;
5510 case NEON_SQSHLU:
5511 sqshlu(vf, rd, rn, left_shift);
5512 break;
5513 case NEON_SRI:
5514 sri(vf, rd, rn, right_shift);
5515 break;
5516 case NEON_SSHR:
5517 sshr(vf, rd, rn, right_shift);
5518 break;
5519 case NEON_USHR:
5520 ushr(vf, rd, rn, right_shift);
5521 break;
5522 case NEON_SRSHR:
5523 sshr(vf, rd, rn, right_shift).Round(vf);
5524 break;
5525 case NEON_URSHR:
5526 ushr(vf, rd, rn, right_shift).Round(vf);
5527 break;
5528 case NEON_SSRA:
5529 ssra(vf, rd, rn, right_shift);
5530 break;
5531 case NEON_USRA:
5532 usra(vf, rd, rn, right_shift);
5533 break;
5534 case NEON_SRSRA:
5535 srsra(vf, rd, rn, right_shift);
5536 break;
5537 case NEON_URSRA:
5538 ursra(vf, rd, rn, right_shift);
5539 break;
5540 case NEON_SQSHL_imm:
5541 sqshl(vf, rd, rn, left_shift);
5542 break;
5543 case NEON_UQSHL_imm:
5544 uqshl(vf, rd, rn, left_shift);
5545 break;
5546 case NEON_SCVTF_imm:
5547 scvtf(vf, rd, rn, right_shift, fpcr_rounding);
5548 break;
5549 case NEON_UCVTF_imm:
5550 ucvtf(vf, rd, rn, right_shift, fpcr_rounding);
5551 break;
5552 case NEON_FCVTZS_imm:
5553 fcvts(vf, rd, rn, FPZero, right_shift);
5554 break;
5555 case NEON_FCVTZU_imm:
5556 fcvtu(vf, rd, rn, FPZero, right_shift);
5557 break;
5558 case NEON_SSHLL:
5559 vf = vf_l;
5560 if (instr->Mask(NEON_Q)) {
5561 sshll2(vf, rd, rn, left_shift);
5562 } else {
5563 sshll(vf, rd, rn, left_shift);
5564 }
5565 break;
5566 case NEON_USHLL:
5567 vf = vf_l;
5568 if (instr->Mask(NEON_Q)) {
5569 ushll2(vf, rd, rn, left_shift);
5570 } else {
5571 ushll(vf, rd, rn, left_shift);
5572 }
5573 break;
5574 case NEON_SHRN:
5575 if (instr->Mask(NEON_Q)) {
5576 shrn2(vf, rd, rn, right_shift);
5577 } else {
5578 shrn(vf, rd, rn, right_shift);
5579 }
5580 break;
5581 case NEON_RSHRN:
5582 if (instr->Mask(NEON_Q)) {
5583 rshrn2(vf, rd, rn, right_shift);
5584 } else {
5585 rshrn(vf, rd, rn, right_shift);
5586 }
5587 break;
5588 case NEON_UQSHRN:
5589 if (instr->Mask(NEON_Q)) {
5590 uqshrn2(vf, rd, rn, right_shift);
5591 } else {
5592 uqshrn(vf, rd, rn, right_shift);
5593 }
5594 break;
5595 case NEON_UQRSHRN:
5596 if (instr->Mask(NEON_Q)) {
5597 uqrshrn2(vf, rd, rn, right_shift);
5598 } else {
5599 uqrshrn(vf, rd, rn, right_shift);
5600 }
5601 break;
5602 case NEON_SQSHRN:
5603 if (instr->Mask(NEON_Q)) {
5604 sqshrn2(vf, rd, rn, right_shift);
5605 } else {
5606 sqshrn(vf, rd, rn, right_shift);
5607 }
5608 break;
5609 case NEON_SQRSHRN:
5610 if (instr->Mask(NEON_Q)) {
5611 sqrshrn2(vf, rd, rn, right_shift);
5612 } else {
5613 sqrshrn(vf, rd, rn, right_shift);
5614 }
5615 break;
5616 case NEON_SQSHRUN:
5617 if (instr->Mask(NEON_Q)) {
5618 sqshrun2(vf, rd, rn, right_shift);
5619 } else {
5620 sqshrun(vf, rd, rn, right_shift);
5621 }
5622 break;
5623 case NEON_SQRSHRUN:
5624 if (instr->Mask(NEON_Q)) {
5625 sqrshrun2(vf, rd, rn, right_shift);
5626 } else {
5627 sqrshrun(vf, rd, rn, right_shift);
5628 }
5629 break;
5630 default:
5631 UNIMPLEMENTED();
5632 }
5633 }
5634
5635 void Simulator::VisitNEONTable(Instruction* instr) {
5636 NEONFormatDecoder nfd(instr, NEONFormatDecoder::LogicalFormatMap());
5637 VectorFormat vf = nfd.GetVectorFormat();
5638
5639 SimVRegister& rd = vreg(instr->Rd());
5640 SimVRegister& rn = vreg(instr->Rn());
5641 SimVRegister& rn2 = vreg((instr->Rn() + 1) % kNumberOfVRegisters);
5642 SimVRegister& rn3 = vreg((instr->Rn() + 2) % kNumberOfVRegisters);
5643 SimVRegister& rn4 = vreg((instr->Rn() + 3) % kNumberOfVRegisters);
5644 SimVRegister& rm = vreg(instr->Rm());
5645
5646 switch (instr->Mask(NEONTableMask)) {
5647 case NEON_TBL_1v:
5648 tbl(vf, rd, rn, rm);
5649 break;
5650 case NEON_TBL_2v:
5651 tbl(vf, rd, rn, rn2, rm);
5652 break;
5653 case NEON_TBL_3v:
5654 tbl(vf, rd, rn, rn2, rn3, rm);
5655 break;
5656 case NEON_TBL_4v:
5657 tbl(vf, rd, rn, rn2, rn3, rn4, rm);
5658 break;
5659 case NEON_TBX_1v:
5660 tbx(vf, rd, rn, rm);
5661 break;
5662 case NEON_TBX_2v:
5663 tbx(vf, rd, rn, rn2, rm);
5664 break;
5665 case NEON_TBX_3v:
5666 tbx(vf, rd, rn, rn2, rn3, rm);
5667 break;
5668 case NEON_TBX_4v:
5669 tbx(vf, rd, rn, rn2, rn3, rn4, rm);
5670 break;
5671 default:
5672 UNIMPLEMENTED();
5673 }
5674 }
5675
5676 void Simulator::VisitNEONPerm(Instruction* instr) {
5677 NEONFormatDecoder nfd(instr);
5678 VectorFormat vf = nfd.GetVectorFormat();
5679
5680 SimVRegister& rd = vreg(instr->Rd());
5681 SimVRegister& rn = vreg(instr->Rn());
5682 SimVRegister& rm = vreg(instr->Rm());
5683
5684 switch (instr->Mask(NEONPermMask)) {
5685 case NEON_TRN1:
5686 trn1(vf, rd, rn, rm);
5687 break;
5688 case NEON_TRN2:
5689 trn2(vf, rd, rn, rm);
5690 break;
5691 case NEON_UZP1:
5692 uzp1(vf, rd, rn, rm);
5693 break;
5694 case NEON_UZP2:
5695 uzp2(vf, rd, rn, rm);
5696 break;
5697 case NEON_ZIP1:
5698 zip1(vf, rd, rn, rm);
5699 break;
5700 case NEON_ZIP2:
5701 zip2(vf, rd, rn, rm);
5702 break;
5703 default:
5704 UNIMPLEMENTED();
5705 }
5706 }
5707 3884
5708 void Simulator::DoPrintf(Instruction* instr) { 3885 void Simulator::DoPrintf(Instruction* instr) {
5709 DCHECK((instr->Mask(ExceptionMask) == HLT) && 3886 DCHECK((instr->Mask(ExceptionMask) == HLT) &&
5710 (instr->ImmException() == kImmExceptionIsPrintf)); 3887 (instr->ImmException() == kImmExceptionIsPrintf));
5711 3888
5712 // Read the arguments encoded inline in the instruction stream. 3889 // Read the arguments encoded inline in the instruction stream.
5713 uint32_t arg_count; 3890 uint32_t arg_count;
5714 uint32_t arg_pattern_list; 3891 uint32_t arg_pattern_list;
5715 STATIC_ASSERT(sizeof(*instr) == 1); 3892 STATIC_ASSERT(sizeof(*instr) == 1);
5716 memcpy(&arg_count, 3893 memcpy(&arg_count,
(...skipping 293 matching lines...)
6010 processor->prev_ = nullptr; 4187 processor->prev_ = nullptr;
6011 processor->next_ = nullptr; 4188 processor->next_ = nullptr;
6012 } 4189 }
6013 4190
6014 #endif // USE_SIMULATOR 4191 #endif // USE_SIMULATOR
6015 4192
6016 } // namespace internal 4193 } // namespace internal
6017 } // namespace v8 4194 } // namespace v8
6018 4195
6019 #endif // V8_TARGET_ARCH_ARM64 4196 #endif // V8_TARGET_ARCH_ARM64