| OLD | NEW |
| 1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include <stdlib.h> | 5 #include <stdlib.h> |
| 6 #include <cmath> | 6 #include <cmath> |
| 7 #include <cstdarg> | 7 #include <cstdarg> |
| 8 #include "src/v8.h" | 8 #include "src/v8.h" |
| 9 | 9 |
| 10 #if V8_TARGET_ARCH_ARM64 | 10 #if V8_TARGET_ARCH_ARM64 |
| (...skipping 55 matching lines...) |
| 66 va_end(arguments); | 66 va_end(arguments); |
| 67 } | 67 } |
| 68 } | 68 } |
| 69 | 69 |
| 70 | 70 |
| 71 const Instruction* Simulator::kEndOfSimAddress = NULL; | 71 const Instruction* Simulator::kEndOfSimAddress = NULL; |
| 72 | 72 |
| 73 | 73 |
| 74 void SimSystemRegister::SetBits(int msb, int lsb, uint32_t bits) { | 74 void SimSystemRegister::SetBits(int msb, int lsb, uint32_t bits) { |
| 75 int width = msb - lsb + 1; | 75 int width = msb - lsb + 1; |
| 76 ASSERT(is_uintn(bits, width) || is_intn(bits, width)); | 76 DCHECK(is_uintn(bits, width) || is_intn(bits, width)); |
| 77 | 77 |
| 78 bits <<= lsb; | 78 bits <<= lsb; |
| 79 uint32_t mask = ((1 << width) - 1) << lsb; | 79 uint32_t mask = ((1 << width) - 1) << lsb; |
| 80 ASSERT((mask & write_ignore_mask_) == 0); | 80 DCHECK((mask & write_ignore_mask_) == 0); |
| 81 | 81 |
| 82 value_ = (value_ & ~mask) | (bits & mask); | 82 value_ = (value_ & ~mask) | (bits & mask); |
| 83 } | 83 } |
| 84 | 84 |
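A standalone sketch (not V8 code) of the masked read-modify-write that SetBits performs: clear the [msb:lsb] field, then OR in the new bits shifted into place, leaving every other bit of the register untouched. The field positions and values below are made up for illustration.

```cpp
#include <cassert>
#include <cstdint>
#include <cstdio>

// Write `bits` into the field [msb:lsb] of `reg`, preserving the other bits.
uint32_t SetField(uint32_t reg, int msb, int lsb, uint32_t bits) {
  int width = msb - lsb + 1;
  uint32_t mask = ((1u << width) - 1) << lsb;
  return (reg & ~mask) | ((bits << lsb) & mask);
}

int main() {
  // Write 0b1010 into bits [7:4] of 0xFFFFFFFF: only that nibble changes.
  uint32_t r = SetField(0xFFFFFFFFu, 7, 4, 0xA);
  assert(r == 0xFFFFFFAFu);
  printf("0x%08X\n", (unsigned)r);
  return 0;
}
```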
| 85 | 85 |
| 86 SimSystemRegister SimSystemRegister::DefaultValueFor(SystemRegister id) { | 86 SimSystemRegister SimSystemRegister::DefaultValueFor(SystemRegister id) { |
| 87 switch (id) { | 87 switch (id) { |
| 88 case NZCV: | 88 case NZCV: |
| 89 return SimSystemRegister(0x00000000, NZCVWriteIgnoreMask); | 89 return SimSystemRegister(0x00000000, NZCVWriteIgnoreMask); |
| 90 case FPCR: | 90 case FPCR: |
| 91 return SimSystemRegister(0x00000000, FPCRWriteIgnoreMask); | 91 return SimSystemRegister(0x00000000, FPCRWriteIgnoreMask); |
| 92 default: | 92 default: |
| 93 UNREACHABLE(); | 93 UNREACHABLE(); |
| 94 return SimSystemRegister(); | 94 return SimSystemRegister(); |
| 95 } | 95 } |
| 96 } | 96 } |
| 97 | 97 |
| 98 | 98 |
| 99 void Simulator::Initialize(Isolate* isolate) { | 99 void Simulator::Initialize(Isolate* isolate) { |
| 100 if (isolate->simulator_initialized()) return; | 100 if (isolate->simulator_initialized()) return; |
| 101 isolate->set_simulator_initialized(true); | 101 isolate->set_simulator_initialized(true); |
| 102 ExternalReference::set_redirector(isolate, &RedirectExternalReference); | 102 ExternalReference::set_redirector(isolate, &RedirectExternalReference); |
| 103 } | 103 } |
| 104 | 104 |
| 105 | 105 |
| 106 // Get the active Simulator for the current thread. | 106 // Get the active Simulator for the current thread. |
| 107 Simulator* Simulator::current(Isolate* isolate) { | 107 Simulator* Simulator::current(Isolate* isolate) { |
| 108 Isolate::PerIsolateThreadData* isolate_data = | 108 Isolate::PerIsolateThreadData* isolate_data = |
| 109 isolate->FindOrAllocatePerThreadDataForThisThread(); | 109 isolate->FindOrAllocatePerThreadDataForThisThread(); |
| 110 ASSERT(isolate_data != NULL); | 110 DCHECK(isolate_data != NULL); |
| 111 | 111 |
| 112 Simulator* sim = isolate_data->simulator(); | 112 Simulator* sim = isolate_data->simulator(); |
| 113 if (sim == NULL) { | 113 if (sim == NULL) { |
| 114 if (FLAG_trace_sim || FLAG_log_instruction_stats || FLAG_debug_sim) { | 114 if (FLAG_trace_sim || FLAG_log_instruction_stats || FLAG_debug_sim) { |
| 115 sim = new Simulator(new Decoder<DispatchingDecoderVisitor>(), isolate); | 115 sim = new Simulator(new Decoder<DispatchingDecoderVisitor>(), isolate); |
| 116 } else { | 116 } else { |
| 117 sim = new Decoder<Simulator>(); | 117 sim = new Decoder<Simulator>(); |
| 118 sim->isolate_ = isolate; | 118 sim->isolate_ = isolate; |
| 119 } | 119 } |
| 120 isolate_data->set_simulator(sim); | 120 isolate_data->set_simulator(sim); |
| 121 } | 121 } |
| 122 return sim; | 122 return sim; |
| 123 } | 123 } |
| 124 | 124 |
| 125 | 125 |
| 126 void Simulator::CallVoid(byte* entry, CallArgument* args) { | 126 void Simulator::CallVoid(byte* entry, CallArgument* args) { |
| 127 int index_x = 0; | 127 int index_x = 0; |
| 128 int index_d = 0; | 128 int index_d = 0; |
| 129 | 129 |
| 130 std::vector<int64_t> stack_args(0); | 130 std::vector<int64_t> stack_args(0); |
| 131 for (int i = 0; !args[i].IsEnd(); i++) { | 131 for (int i = 0; !args[i].IsEnd(); i++) { |
| 132 CallArgument arg = args[i]; | 132 CallArgument arg = args[i]; |
| 133 if (arg.IsX() && (index_x < 8)) { | 133 if (arg.IsX() && (index_x < 8)) { |
| 134 set_xreg(index_x++, arg.bits()); | 134 set_xreg(index_x++, arg.bits()); |
| 135 } else if (arg.IsD() && (index_d < 8)) { | 135 } else if (arg.IsD() && (index_d < 8)) { |
| 136 set_dreg_bits(index_d++, arg.bits()); | 136 set_dreg_bits(index_d++, arg.bits()); |
| 137 } else { | 137 } else { |
| 138 ASSERT(arg.IsD() || arg.IsX()); | 138 DCHECK(arg.IsD() || arg.IsX()); |
| 139 stack_args.push_back(arg.bits()); | 139 stack_args.push_back(arg.bits()); |
| 140 } | 140 } |
| 141 } | 141 } |
| 142 | 142 |
| 143 // Process stack arguments, and make sure the stack is suitably aligned. | 143 // Process stack arguments, and make sure the stack is suitably aligned. |
| 144 uintptr_t original_stack = sp(); | 144 uintptr_t original_stack = sp(); |
| 145 uintptr_t entry_stack = original_stack - | 145 uintptr_t entry_stack = original_stack - |
| 146 stack_args.size() * sizeof(stack_args[0]); | 146 stack_args.size() * sizeof(stack_args[0]); |
| 147 if (base::OS::ActivationFrameAlignment() != 0) { | 147 if (base::OS::ActivationFrameAlignment() != 0) { |
| 148 entry_stack &= -base::OS::ActivationFrameAlignment(); | 148 entry_stack &= -base::OS::ActivationFrameAlignment(); |
| 149 } | 149 } |
| 150 char * stack = reinterpret_cast<char*>(entry_stack); | 150 char * stack = reinterpret_cast<char*>(entry_stack); |
| 151 std::vector<int64_t>::const_iterator it; | 151 std::vector<int64_t>::const_iterator it; |
| 152 for (it = stack_args.begin(); it != stack_args.end(); it++) { | 152 for (it = stack_args.begin(); it != stack_args.end(); it++) { |
| 153 memcpy(stack, &(*it), sizeof(*it)); | 153 memcpy(stack, &(*it), sizeof(*it)); |
| 154 stack += sizeof(*it); | 154 stack += sizeof(*it); |
| 155 } | 155 } |
| 156 | 156 |
| 157 ASSERT(reinterpret_cast<uintptr_t>(stack) <= original_stack); | 157 DCHECK(reinterpret_cast<uintptr_t>(stack) <= original_stack); |
| 158 set_sp(entry_stack); | 158 set_sp(entry_stack); |
| 159 | 159 |
| 160 // Call the generated code. | 160 // Call the generated code. |
| 161 set_pc(entry); | 161 set_pc(entry); |
| 162 set_lr(kEndOfSimAddress); | 162 set_lr(kEndOfSimAddress); |
| 163 CheckPCSComplianceAndRun(); | 163 CheckPCSComplianceAndRun(); |
| 164 | 164 |
| 165 set_sp(original_stack); | 165 set_sp(original_stack); |
| 166 } | 166 } |
| 167 | 167 |
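CallVoid implements the usual AAPCS64 split: the first eight integer arguments go in x0-x7, the first eight double arguments in d0-d7, and anything left over is copied onto the (16-byte aligned) stack before the simulated call. A minimal standalone sketch of that split; `Arg` and `SplitArgs` are hypothetical stand-ins, not V8's CallArgument API.

```cpp
#include <cstdint>
#include <cstdio>
#include <vector>

// Hypothetical stand-in for a call argument: either an X (integer) or D (double) value.
struct Arg {
  enum Kind { kX, kD } kind;
  uint64_t bits;
};

// Mirror of CallVoid's policy: up to 8 X args and 8 D args go in registers,
// the remainder is collected for the stack in order.
void SplitArgs(const std::vector<Arg>& args) {
  int index_x = 0, index_d = 0;
  std::vector<uint64_t> stack_args;
  for (const Arg& arg : args) {
    if (arg.kind == Arg::kX && index_x < 8) {
      printf("x%d <- 0x%016llx\n", index_x++, (unsigned long long)arg.bits);
    } else if (arg.kind == Arg::kD && index_d < 8) {
      printf("d%d <- 0x%016llx\n", index_d++, (unsigned long long)arg.bits);
    } else {
      stack_args.push_back(arg.bits);  // Spilled arguments keep their order.
    }
  }
  printf("%zu argument(s) spilled to the stack\n", stack_args.size());
}

int main() {
  std::vector<Arg> args(10, Arg{Arg::kX, 42});  // 10 integer args: 8 in regs, 2 spilled.
  SplitArgs(args);
  return 0;
}
```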
| (...skipping 81 matching lines...) |
| 249 Run(); | 249 Run(); |
| 250 #ifdef DEBUG | 250 #ifdef DEBUG |
| 251 CHECK_EQ(original_stack, sp()); | 251 CHECK_EQ(original_stack, sp()); |
| 252 // Check that callee-saved registers have been preserved. | 252 // Check that callee-saved registers have been preserved. |
| 253 register_list = kCalleeSaved; | 253 register_list = kCalleeSaved; |
| 254 fpregister_list = kCalleeSavedFP; | 254 fpregister_list = kCalleeSavedFP; |
| 255 for (int i = 0; i < kNumberOfCalleeSavedRegisters; i++) { | 255 for (int i = 0; i < kNumberOfCalleeSavedRegisters; i++) { |
| 256 CHECK_EQ(saved_registers[i], xreg(register_list.PopLowestIndex().code())); | 256 CHECK_EQ(saved_registers[i], xreg(register_list.PopLowestIndex().code())); |
| 257 } | 257 } |
| 258 for (int i = 0; i < kNumberOfCalleeSavedFPRegisters; i++) { | 258 for (int i = 0; i < kNumberOfCalleeSavedFPRegisters; i++) { |
| 259 ASSERT(saved_fpregisters[i] == | 259 DCHECK(saved_fpregisters[i] == |
| 260 dreg_bits(fpregister_list.PopLowestIndex().code())); | 260 dreg_bits(fpregister_list.PopLowestIndex().code())); |
| 261 } | 261 } |
| 262 | 262 |
| 263 // Corrupt caller saved registers minus the return registers. | 263 // Corrupt caller saved registers minus the return registers. |
| 264 | 264 |
| 265 // In theory x0 to x7 can be used for return values, but V8 only uses x0, x1 | 265 // In theory x0 to x7 can be used for return values, but V8 only uses x0, x1 |
| 266 // for now. | 266 // for now. |
| 267 register_list = kCallerSaved; | 267 register_list = kCallerSaved; |
| 268 register_list.Remove(x0); | 268 register_list.Remove(x0); |
| 269 register_list.Remove(x1); | 269 register_list.Remove(x1); |
| (...skipping 12 matching lines...) |
| 282 #ifdef DEBUG | 282 #ifdef DEBUG |
| 283 // The least significant byte of the corruption value holds the corresponding | 283 // The least significant byte of the corruption value holds the corresponding |
| 284 // register's code. | 284 // register's code. |
| 285 void Simulator::CorruptRegisters(CPURegList* list, uint64_t value) { | 285 void Simulator::CorruptRegisters(CPURegList* list, uint64_t value) { |
| 286 if (list->type() == CPURegister::kRegister) { | 286 if (list->type() == CPURegister::kRegister) { |
| 287 while (!list->IsEmpty()) { | 287 while (!list->IsEmpty()) { |
| 288 unsigned code = list->PopLowestIndex().code(); | 288 unsigned code = list->PopLowestIndex().code(); |
| 289 set_xreg(code, value | code); | 289 set_xreg(code, value | code); |
| 290 } | 290 } |
| 291 } else { | 291 } else { |
| 292 ASSERT(list->type() == CPURegister::kFPRegister); | 292 DCHECK(list->type() == CPURegister::kFPRegister); |
| 293 while (!list->IsEmpty()) { | 293 while (!list->IsEmpty()) { |
| 294 unsigned code = list->PopLowestIndex().code(); | 294 unsigned code = list->PopLowestIndex().code(); |
| 295 set_dreg_bits(code, value | code); | 295 set_dreg_bits(code, value | code); |
| 296 } | 296 } |
| 297 } | 297 } |
| 298 } | 298 } |
| 299 | 299 |
| 300 | 300 |
| 301 void Simulator::CorruptAllCallerSavedCPURegisters() { | 301 void Simulator::CorruptAllCallerSavedCPURegisters() { |
| 302 // Corrupt alters its parameters, so copy them first. | 302 // Corrupt alters its parameters, so copy them first. |
| 303 CPURegList register_list = kCallerSaved; | 303 CPURegList register_list = kCallerSaved; |
| 304 CPURegList fpregister_list = kCallerSavedFP; | 304 CPURegList fpregister_list = kCallerSavedFP; |
| 305 | 305 |
| 306 CorruptRegisters(®ister_list, kCallerSavedRegisterCorruptionValue); | 306 CorruptRegisters(®ister_list, kCallerSavedRegisterCorruptionValue); |
| 307 CorruptRegisters(&fpregister_list, kCallerSavedFPRegisterCorruptionValue); | 307 CorruptRegisters(&fpregister_list, kCallerSavedFPRegisterCorruptionValue); |
| 308 } | 308 } |
| 309 #endif | 309 #endif |
| 310 | 310 |
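The corruption value ORs the register's own code into its low byte, so a stray read of a clobbered caller-saved register is easy to attribute when it shows up in a trace. A standalone sketch of that encoding, using a made-up base pattern rather than V8's kCallerSavedRegisterCorruptionValue:

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  // Hypothetical base pattern for illustration only.
  const uint64_t kCorruptionBase = 0xDEADBEEFDEADBE00ULL;
  // Each register gets the base ORed with its code: a value leaking out of x7
  // ends in 0x07, one leaking out of x13 ends in 0x0d, and so on.
  for (unsigned code = 0; code < 16; code++) {
    printf("x%-2u = 0x%016llx\n", code, (unsigned long long)(kCorruptionBase | code));
  }
  return 0;
}
```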
| 311 | 311 |
| 312 // Extending the stack by 2 * 64 bits is required for stack alignment purposes. | 312 // Extending the stack by 2 * 64 bits is required for stack alignment purposes. |
| 313 uintptr_t Simulator::PushAddress(uintptr_t address) { | 313 uintptr_t Simulator::PushAddress(uintptr_t address) { |
| 314 ASSERT(sizeof(uintptr_t) < 2 * kXRegSize); | 314 DCHECK(sizeof(uintptr_t) < 2 * kXRegSize); |
| 315 intptr_t new_sp = sp() - 2 * kXRegSize; | 315 intptr_t new_sp = sp() - 2 * kXRegSize; |
| 316 uintptr_t* alignment_slot = | 316 uintptr_t* alignment_slot = |
| 317 reinterpret_cast<uintptr_t*>(new_sp + kXRegSize); | 317 reinterpret_cast<uintptr_t*>(new_sp + kXRegSize); |
| 318 memcpy(alignment_slot, &kSlotsZapValue, kPointerSize); | 318 memcpy(alignment_slot, &kSlotsZapValue, kPointerSize); |
| 319 uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp); | 319 uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp); |
| 320 memcpy(stack_slot, &address, kPointerSize); | 320 memcpy(stack_slot, &address, kPointerSize); |
| 321 set_sp(new_sp); | 321 set_sp(new_sp); |
| 322 return new_sp; | 322 return new_sp; |
| 323 } | 323 } |
| 324 | 324 |
| 325 | 325 |
| 326 uintptr_t Simulator::PopAddress() { | 326 uintptr_t Simulator::PopAddress() { |
| 327 intptr_t current_sp = sp(); | 327 intptr_t current_sp = sp(); |
| 328 uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(current_sp); | 328 uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(current_sp); |
| 329 uintptr_t address = *stack_slot; | 329 uintptr_t address = *stack_slot; |
| 330 ASSERT(sizeof(uintptr_t) < 2 * kXRegSize); | 330 DCHECK(sizeof(uintptr_t) < 2 * kXRegSize); |
| 331 set_sp(current_sp + 2 * kXRegSize); | 331 set_sp(current_sp + 2 * kXRegSize); |
| 332 return address; | 332 return address; |
| 333 } | 333 } |
| 334 | 334 |
| 335 | 335 |
| 336 // Returns the limit of the stack area to enable checking for stack overflows. | 336 // Returns the limit of the stack area to enable checking for stack overflows. |
| 337 uintptr_t Simulator::StackLimit() const { | 337 uintptr_t Simulator::StackLimit() const { |
| 338 // Leave a safety margin of 1024 bytes to prevent overrunning the stack when | 338 // Leave a safety margin of 1024 bytes to prevent overrunning the stack when |
| 339 // pushing values. | 339 // pushing values. |
| 340 return reinterpret_cast<uintptr_t>(stack_limit_) + 1024; | 340 return reinterpret_cast<uintptr_t>(stack_limit_) + 1024; |
| (...skipping 133 matching lines...) |
| 474 T external_function() { return reinterpret_cast<T>(external_function_); } | 474 T external_function() { return reinterpret_cast<T>(external_function_); } |
| 475 | 475 |
| 476 ExternalReference::Type type() { return type_; } | 476 ExternalReference::Type type() { return type_; } |
| 477 | 477 |
| 478 static Redirection* Get(void* external_function, | 478 static Redirection* Get(void* external_function, |
| 479 ExternalReference::Type type) { | 479 ExternalReference::Type type) { |
| 480 Isolate* isolate = Isolate::Current(); | 480 Isolate* isolate = Isolate::Current(); |
| 481 Redirection* current = isolate->simulator_redirection(); | 481 Redirection* current = isolate->simulator_redirection(); |
| 482 for (; current != NULL; current = current->next_) { | 482 for (; current != NULL; current = current->next_) { |
| 483 if (current->external_function_ == external_function) { | 483 if (current->external_function_ == external_function) { |
| 484 ASSERT_EQ(current->type(), type); | 484 DCHECK_EQ(current->type(), type); |
| 485 return current; | 485 return current; |
| 486 } | 486 } |
| 487 } | 487 } |
| 488 return new Redirection(external_function, type); | 488 return new Redirection(external_function, type); |
| 489 } | 489 } |
| 490 | 490 |
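Redirection::Get is a lookup-or-create walk over an intrusive singly-linked list hanging off the isolate: reuse an existing entry for the same external function, otherwise allocate a fresh one. A minimal standalone sketch of that pattern; the types and the global list head here are stand-ins, not V8's.

```cpp
#include <cstdio>

struct Redirection;
static Redirection* g_head = nullptr;  // Stand-in for the per-isolate list head.

struct Redirection {
  void* external_function;
  int type;
  Redirection* next;

  Redirection(void* fn, int t) : external_function(fn), type(t), next(g_head) {
    g_head = this;  // New entries link themselves at the front of the list.
  }

  static Redirection* Get(void* fn, int type) {
    for (Redirection* cur = g_head; cur != nullptr; cur = cur->next) {
      if (cur->external_function == fn) return cur;  // Reuse the cached entry.
    }
    return new Redirection(fn, type);
  }
};

int main() {
  int target;
  Redirection* a = Redirection::Get(&target, 0);
  Redirection* b = Redirection::Get(&target, 0);
  printf("same entry reused: %s\n", (a == b) ? "yes" : "no");
  return 0;
}
```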
| 491 static Redirection* FromHltInstruction(Instruction* redirect_call) { | 491 static Redirection* FromHltInstruction(Instruction* redirect_call) { |
| 492 char* addr_of_hlt = reinterpret_cast<char*>(redirect_call); | 492 char* addr_of_hlt = reinterpret_cast<char*>(redirect_call); |
| 493 char* addr_of_redirection = | 493 char* addr_of_redirection = |
| 494 addr_of_hlt - OFFSET_OF(Redirection, redirect_call_); | 494 addr_of_hlt - OFFSET_OF(Redirection, redirect_call_); |
| (...skipping 263 matching lines...) |
| 758 "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31"}; | 758 "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31"}; |
| 759 | 759 |
| 760 const char* Simulator::vreg_names[] = { | 760 const char* Simulator::vreg_names[] = { |
| 761 "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", | 761 "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", |
| 762 "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", | 762 "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", |
| 763 "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", | 763 "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", |
| 764 "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"}; | 764 "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"}; |
| 765 | 765 |
| 766 | 766 |
| 767 const char* Simulator::WRegNameForCode(unsigned code, Reg31Mode mode) { | 767 const char* Simulator::WRegNameForCode(unsigned code, Reg31Mode mode) { |
| 768 ASSERT(code < kNumberOfRegisters); | 768 DCHECK(code < kNumberOfRegisters); |
| 769 // If the code represents the stack pointer, index the name after zr. | 769 // If the code represents the stack pointer, index the name after zr. |
| 770 if ((code == kZeroRegCode) && (mode == Reg31IsStackPointer)) { | 770 if ((code == kZeroRegCode) && (mode == Reg31IsStackPointer)) { |
| 771 code = kZeroRegCode + 1; | 771 code = kZeroRegCode + 1; |
| 772 } | 772 } |
| 773 return wreg_names[code]; | 773 return wreg_names[code]; |
| 774 } | 774 } |
| 775 | 775 |
| 776 | 776 |
| 777 const char* Simulator::XRegNameForCode(unsigned code, Reg31Mode mode) { | 777 const char* Simulator::XRegNameForCode(unsigned code, Reg31Mode mode) { |
| 778 ASSERT(code < kNumberOfRegisters); | 778 DCHECK(code < kNumberOfRegisters); |
| 779 // If the code represents the stack pointer, index the name after zr. | 779 // If the code represents the stack pointer, index the name after zr. |
| 780 if ((code == kZeroRegCode) && (mode == Reg31IsStackPointer)) { | 780 if ((code == kZeroRegCode) && (mode == Reg31IsStackPointer)) { |
| 781 code = kZeroRegCode + 1; | 781 code = kZeroRegCode + 1; |
| 782 } | 782 } |
| 783 return xreg_names[code]; | 783 return xreg_names[code]; |
| 784 } | 784 } |
| 785 | 785 |
| 786 | 786 |
| 787 const char* Simulator::SRegNameForCode(unsigned code) { | 787 const char* Simulator::SRegNameForCode(unsigned code) { |
| 788 ASSERT(code < kNumberOfFPRegisters); | 788 DCHECK(code < kNumberOfFPRegisters); |
| 789 return sreg_names[code]; | 789 return sreg_names[code]; |
| 790 } | 790 } |
| 791 | 791 |
| 792 | 792 |
| 793 const char* Simulator::DRegNameForCode(unsigned code) { | 793 const char* Simulator::DRegNameForCode(unsigned code) { |
| 794 ASSERT(code < kNumberOfFPRegisters); | 794 DCHECK(code < kNumberOfFPRegisters); |
| 795 return dreg_names[code]; | 795 return dreg_names[code]; |
| 796 } | 796 } |
| 797 | 797 |
| 798 | 798 |
| 799 const char* Simulator::VRegNameForCode(unsigned code) { | 799 const char* Simulator::VRegNameForCode(unsigned code) { |
| 800 ASSERT(code < kNumberOfFPRegisters); | 800 DCHECK(code < kNumberOfFPRegisters); |
| 801 return vreg_names[code]; | 801 return vreg_names[code]; |
| 802 } | 802 } |
| 803 | 803 |
| 804 | 804 |
| 805 int Simulator::CodeFromName(const char* name) { | 805 int Simulator::CodeFromName(const char* name) { |
| 806 for (unsigned i = 0; i < kNumberOfRegisters; i++) { | 806 for (unsigned i = 0; i < kNumberOfRegisters; i++) { |
| 807 if ((strcmp(xreg_names[i], name) == 0) || | 807 if ((strcmp(xreg_names[i], name) == 0) || |
| 808 (strcmp(wreg_names[i], name) == 0)) { | 808 (strcmp(wreg_names[i], name) == 0)) { |
| 809 return i; | 809 return i; |
| 810 } | 810 } |
| (...skipping 12 matching lines...) |
| 823 } | 823 } |
| 824 | 824 |
| 825 | 825 |
| 826 // Helpers --------------------------------------------------------------------- | 826 // Helpers --------------------------------------------------------------------- |
| 827 template <typename T> | 827 template <typename T> |
| 828 T Simulator::AddWithCarry(bool set_flags, | 828 T Simulator::AddWithCarry(bool set_flags, |
| 829 T src1, | 829 T src1, |
| 830 T src2, | 830 T src2, |
| 831 T carry_in) { | 831 T carry_in) { |
| 832 typedef typename make_unsigned<T>::type unsignedT; | 832 typedef typename make_unsigned<T>::type unsignedT; |
| 833 ASSERT((carry_in == 0) || (carry_in == 1)); | 833 DCHECK((carry_in == 0) || (carry_in == 1)); |
| 834 | 834 |
| 835 T signed_sum = src1 + src2 + carry_in; | 835 T signed_sum = src1 + src2 + carry_in; |
| 836 T result = signed_sum; | 836 T result = signed_sum; |
| 837 | 837 |
| 838 bool N, Z, C, V; | 838 bool N, Z, C, V; |
| 839 | 839 |
| 840 // Compute the C flag | 840 // Compute the C flag |
| 841 unsignedT u1 = static_cast<unsignedT>(src1); | 841 unsignedT u1 = static_cast<unsignedT>(src1); |
| 842 unsignedT u2 = static_cast<unsignedT>(src2); | 842 unsignedT u2 = static_cast<unsignedT>(src2); |
| 843 unsignedT urest = std::numeric_limits<unsignedT>::max() - u1; | 843 unsignedT urest = std::numeric_limits<unsignedT>::max() - u1; |
| (...skipping 214 matching lines...) |
| 1058 last_nzcv = nzcv(); | 1058 last_nzcv = nzcv(); |
| 1059 | 1059 |
| 1060 static SimSystemRegister last_fpcr; | 1060 static SimSystemRegister last_fpcr; |
| 1061 if (print_all || first_run || (last_fpcr.RawValue() != fpcr().RawValue())) { | 1061 if (print_all || first_run || (last_fpcr.RawValue() != fpcr().RawValue())) { |
| 1062 static const char * rmode[] = { | 1062 static const char * rmode[] = { |
| 1063 "0b00 (Round to Nearest)", | 1063 "0b00 (Round to Nearest)", |
| 1064 "0b01 (Round towards Plus Infinity)", | 1064 "0b01 (Round towards Plus Infinity)", |
| 1065 "0b10 (Round towards Minus Infinity)", | 1065 "0b10 (Round towards Minus Infinity)", |
| 1066 "0b11 (Round towards Zero)" | 1066 "0b11 (Round towards Zero)" |
| 1067 }; | 1067 }; |
| 1068 ASSERT(fpcr().RMode() <= (sizeof(rmode) / sizeof(rmode[0]))); | 1068 DCHECK(fpcr().RMode() <= (sizeof(rmode) / sizeof(rmode[0]))); |
| 1069 fprintf(stream_, "# %sFPCR: %sAHP:%d DN:%d FZ:%d RMode:%s%s\n", | 1069 fprintf(stream_, "# %sFPCR: %sAHP:%d DN:%d FZ:%d RMode:%s%s\n", |
| 1070 clr_flag_name, | 1070 clr_flag_name, |
| 1071 clr_flag_value, | 1071 clr_flag_value, |
| 1072 fpcr().AHP(), fpcr().DN(), fpcr().FZ(), rmode[fpcr().RMode()], | 1072 fpcr().AHP(), fpcr().DN(), fpcr().FZ(), rmode[fpcr().RMode()], |
| 1073 clr_normal); | 1073 clr_normal); |
| 1074 } | 1074 } |
| 1075 last_fpcr = fpcr(); | 1075 last_fpcr = fpcr(); |
| 1076 | 1076 |
| 1077 first_run = false; | 1077 first_run = false; |
| 1078 } | 1078 } |
| (...skipping 119 matching lines...) |
| 1198 case B: | 1198 case B: |
| 1199 set_pc(instr->ImmPCOffsetTarget()); | 1199 set_pc(instr->ImmPCOffsetTarget()); |
| 1200 break; | 1200 break; |
| 1201 default: | 1201 default: |
| 1202 UNREACHABLE(); | 1202 UNREACHABLE(); |
| 1203 } | 1203 } |
| 1204 } | 1204 } |
| 1205 | 1205 |
| 1206 | 1206 |
| 1207 void Simulator::VisitConditionalBranch(Instruction* instr) { | 1207 void Simulator::VisitConditionalBranch(Instruction* instr) { |
| 1208 ASSERT(instr->Mask(ConditionalBranchMask) == B_cond); | 1208 DCHECK(instr->Mask(ConditionalBranchMask) == B_cond); |
| 1209 if (ConditionPassed(static_cast<Condition>(instr->ConditionBranch()))) { | 1209 if (ConditionPassed(static_cast<Condition>(instr->ConditionBranch()))) { |
| 1210 set_pc(instr->ImmPCOffsetTarget()); | 1210 set_pc(instr->ImmPCOffsetTarget()); |
| 1211 } | 1211 } |
| 1212 } | 1212 } |
| 1213 | 1213 |
| 1214 | 1214 |
| 1215 void Simulator::VisitUnconditionalBranchToRegister(Instruction* instr) { | 1215 void Simulator::VisitUnconditionalBranchToRegister(Instruction* instr) { |
| 1216 Instruction* target = reg<Instruction*>(instr->Rn()); | 1216 Instruction* target = reg<Instruction*>(instr->Rn()); |
| 1217 switch (instr->Mask(UnconditionalBranchToRegisterMask)) { | 1217 switch (instr->Mask(UnconditionalBranchToRegisterMask)) { |
| 1218 case BLR: { | 1218 case BLR: { |
| (...skipping 192 matching lines...) |
| 1411 template<typename T> | 1411 template<typename T> |
| 1412 void Simulator::ConditionalCompareHelper(Instruction* instr, T op2) { | 1412 void Simulator::ConditionalCompareHelper(Instruction* instr, T op2) { |
| 1413 T op1 = reg<T>(instr->Rn()); | 1413 T op1 = reg<T>(instr->Rn()); |
| 1414 | 1414 |
| 1415 if (ConditionPassed(static_cast<Condition>(instr->Condition()))) { | 1415 if (ConditionPassed(static_cast<Condition>(instr->Condition()))) { |
| 1416 // If the condition passes, set the status flags to the result of comparing | 1416 // If the condition passes, set the status flags to the result of comparing |
| 1417 // the operands. | 1417 // the operands. |
| 1418 if (instr->Mask(ConditionalCompareMask) == CCMP) { | 1418 if (instr->Mask(ConditionalCompareMask) == CCMP) { |
| 1419 AddWithCarry<T>(true, op1, ~op2, 1); | 1419 AddWithCarry<T>(true, op1, ~op2, 1); |
| 1420 } else { | 1420 } else { |
| 1421 ASSERT(instr->Mask(ConditionalCompareMask) == CCMN); | 1421 DCHECK(instr->Mask(ConditionalCompareMask) == CCMN); |
| 1422 AddWithCarry<T>(true, op1, op2, 0); | 1422 AddWithCarry<T>(true, op1, op2, 0); |
| 1423 } | 1423 } |
| 1424 } else { | 1424 } else { |
| 1425 // If the condition fails, set the status flags to the nzcv immediate. | 1425 // If the condition fails, set the status flags to the nzcv immediate. |
| 1426 nzcv().SetFlags(instr->Nzcv()); | 1426 nzcv().SetFlags(instr->Nzcv()); |
| 1427 } | 1427 } |
| 1428 } | 1428 } |
| 1429 | 1429 |
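Both conditional compares reduce to the same add-with-carry primitive: when the condition passes, CCMP computes op1 + ~op2 + 1 (a subtraction) and CCMN computes op1 + op2, updating only the flags. A standalone sketch of the 32-bit flag computation, assuming the usual NZCV definitions:

```cpp
#include <cstdint>
#include <cstdio>

struct Flags { bool N, Z, C, V; };

// NZCV for a 32-bit add-with-carry, the primitive behind CCMP/CCMN:
// CCMP(op1, op2) is AddWithCarry(op1, ~op2, 1); CCMN(op1, op2) is AddWithCarry(op1, op2, 0).
Flags AddWithCarry32(uint32_t op1, uint32_t op2, uint32_t carry_in) {
  uint64_t unsigned_sum = (uint64_t)op1 + op2 + carry_in;
  int64_t signed_sum = (int64_t)(int32_t)op1 + (int32_t)op2 + carry_in;
  uint32_t result = (uint32_t)unsigned_sum;
  Flags f;
  f.N = (int32_t)result < 0;            // Sign of the result.
  f.Z = result == 0;                    // Result is zero.
  f.C = unsigned_sum != result;         // Carry out of bit 31.
  f.V = signed_sum != (int32_t)result;  // Signed overflow.
  return f;
}

int main() {
  // CCMP with equal operands: the subtraction is zero, so Z and C are set.
  Flags f = AddWithCarry32(5, ~UINT32_C(5), 1);
  printf("N=%d Z=%d C=%d V=%d\n", f.N, f.Z, f.C, f.V);
  return 0;
}
```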
| 1430 | 1430 |
| 1431 void Simulator::VisitLoadStoreUnsignedOffset(Instruction* instr) { | 1431 void Simulator::VisitLoadStoreUnsignedOffset(Instruction* instr) { |
| (...skipping 12 matching lines...) |
| 1444 } | 1444 } |
| 1445 | 1445 |
| 1446 | 1446 |
| 1447 void Simulator::VisitLoadStorePostIndex(Instruction* instr) { | 1447 void Simulator::VisitLoadStorePostIndex(Instruction* instr) { |
| 1448 LoadStoreHelper(instr, instr->ImmLS(), PostIndex); | 1448 LoadStoreHelper(instr, instr->ImmLS(), PostIndex); |
| 1449 } | 1449 } |
| 1450 | 1450 |
| 1451 | 1451 |
| 1452 void Simulator::VisitLoadStoreRegisterOffset(Instruction* instr) { | 1452 void Simulator::VisitLoadStoreRegisterOffset(Instruction* instr) { |
| 1453 Extend ext = static_cast<Extend>(instr->ExtendMode()); | 1453 Extend ext = static_cast<Extend>(instr->ExtendMode()); |
| 1454 ASSERT((ext == UXTW) || (ext == UXTX) || (ext == SXTW) || (ext == SXTX)); | 1454 DCHECK((ext == UXTW) || (ext == UXTX) || (ext == SXTW) || (ext == SXTX)); |
| 1455 unsigned shift_amount = instr->ImmShiftLS() * instr->SizeLS(); | 1455 unsigned shift_amount = instr->ImmShiftLS() * instr->SizeLS(); |
| 1456 | 1456 |
| 1457 int64_t offset = ExtendValue(xreg(instr->Rm()), ext, shift_amount); | 1457 int64_t offset = ExtendValue(xreg(instr->Rm()), ext, shift_amount); |
| 1458 LoadStoreHelper(instr, offset, Offset); | 1458 LoadStoreHelper(instr, offset, Offset); |
| 1459 } | 1459 } |
| 1460 | 1460 |
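The register-offset form builds its offset by sign- or zero-extending Rm and shifting it by the access size, so `ldr x0, [x1, w2, sxtw #3]` scales a signed 32-bit index by 8. A standalone sketch of that extend-and-shift step, with the extend mode reduced to a small enum:

```cpp
#include <cstdint>
#include <cstdio>

enum Extend { UXTW, SXTW, UXTX, SXTX };

// Offset for a register-offset load/store: extend the index register, then
// shift it left by the access-size amount (e.g. #3 for an 8-byte element).
int64_t ExtendValue(uint64_t reg, Extend ext, unsigned shift) {
  int64_t value = 0;
  switch (ext) {
    case UXTW: value = (uint32_t)reg; break;  // Zero-extend the low 32 bits.
    case SXTW: value = (int32_t)reg; break;   // Sign-extend the low 32 bits.
    case UXTX:
    case SXTX: value = (int64_t)reg; break;   // Use all 64 bits.
  }
  // Shift through uint64_t so a negative offset is well defined on any compiler.
  return (int64_t)((uint64_t)value << shift);
}

int main() {
  uint64_t w2 = 0xFFFFFFFEu;  // Holds -2 when viewed as a signed 32-bit index.
  printf("sxtw #3: %lld\n", (long long)ExtendValue(w2, SXTW, 3));  // -16
  printf("uxtw #3: %lld\n", (long long)ExtendValue(w2, UXTW, 3));  // 34359738352
  return 0;
}
```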
| 1461 | 1461 |
| 1462 void Simulator::LoadStoreHelper(Instruction* instr, | 1462 void Simulator::LoadStoreHelper(Instruction* instr, |
| 1463 int64_t offset, | 1463 int64_t offset, |
| 1464 AddrMode addrmode) { | 1464 AddrMode addrmode) { |
| (...skipping 114 matching lines...) |
| 1579 | 1579 |
| 1580 // For store the address post writeback is used to check access below the | 1580 // For store the address post writeback is used to check access below the |
| 1581 // stack. | 1581 // stack. |
| 1582 stack = reinterpret_cast<uint8_t*>(sp()); | 1582 stack = reinterpret_cast<uint8_t*>(sp()); |
| 1583 } | 1583 } |
| 1584 | 1584 |
| 1585 LoadStorePairOp op = | 1585 LoadStorePairOp op = |
| 1586 static_cast<LoadStorePairOp>(instr->Mask(LoadStorePairMask)); | 1586 static_cast<LoadStorePairOp>(instr->Mask(LoadStorePairMask)); |
| 1587 | 1587 |
| 1588 // 'rt' and 'rt2' can only be aliased for stores. | 1588 // 'rt' and 'rt2' can only be aliased for stores. |
| 1589 ASSERT(((op & LoadStorePairLBit) == 0) || (rt != rt2)); | 1589 DCHECK(((op & LoadStorePairLBit) == 0) || (rt != rt2)); |
| 1590 | 1590 |
| 1591 switch (op) { | 1591 switch (op) { |
| 1592 case LDP_w: { | 1592 case LDP_w: { |
| 1593 set_wreg(rt, MemoryRead32(address)); | 1593 set_wreg(rt, MemoryRead32(address)); |
| 1594 set_wreg(rt2, MemoryRead32(address + kWRegSize)); | 1594 set_wreg(rt2, MemoryRead32(address + kWRegSize)); |
| 1595 break; | 1595 break; |
| 1596 } | 1596 } |
| 1597 case LDP_s: { | 1597 case LDP_s: { |
| 1598 set_sreg(rt, MemoryReadFP32(address)); | 1598 set_sreg(rt, MemoryReadFP32(address)); |
| 1599 set_sreg(rt2, MemoryReadFP32(address + kSRegSize)); | 1599 set_sreg(rt2, MemoryReadFP32(address + kSRegSize)); |
| (...skipping 87 matching lines...) |
| 1687 } | 1687 } |
| 1688 | 1688 |
| 1689 return reinterpret_cast<uint8_t*>(address); | 1689 return reinterpret_cast<uint8_t*>(address); |
| 1690 } | 1690 } |
| 1691 | 1691 |
| 1692 | 1692 |
| 1693 void Simulator::LoadStoreWriteBack(unsigned addr_reg, | 1693 void Simulator::LoadStoreWriteBack(unsigned addr_reg, |
| 1694 int64_t offset, | 1694 int64_t offset, |
| 1695 AddrMode addrmode) { | 1695 AddrMode addrmode) { |
| 1696 if ((addrmode == PreIndex) || (addrmode == PostIndex)) { | 1696 if ((addrmode == PreIndex) || (addrmode == PostIndex)) { |
| 1697 ASSERT(offset != 0); | 1697 DCHECK(offset != 0); |
| 1698 uint64_t address = xreg(addr_reg, Reg31IsStackPointer); | 1698 uint64_t address = xreg(addr_reg, Reg31IsStackPointer); |
| 1699 set_reg(addr_reg, address + offset, Reg31IsStackPointer); | 1699 set_reg(addr_reg, address + offset, Reg31IsStackPointer); |
| 1700 } | 1700 } |
| 1701 } | 1701 } |
| 1702 | 1702 |
| 1703 | 1703 |
| 1704 void Simulator::CheckMemoryAccess(uint8_t* address, uint8_t* stack) { | 1704 void Simulator::CheckMemoryAccess(uint8_t* address, uint8_t* stack) { |
| 1705 if ((address >= stack_limit_) && (address < stack)) { | 1705 if ((address >= stack_limit_) && (address < stack)) { |
| 1706 fprintf(stream_, "ACCESS BELOW STACK POINTER:\n"); | 1706 fprintf(stream_, "ACCESS BELOW STACK POINTER:\n"); |
| 1707 fprintf(stream_, " sp is here: 0x%16p\n", stack); | 1707 fprintf(stream_, " sp is here: 0x%16p\n", stack); |
| 1708 fprintf(stream_, " access was here: 0x%16p\n", address); | 1708 fprintf(stream_, " access was here: 0x%16p\n", address); |
| 1709 fprintf(stream_, " stack limit is here: 0x%16p\n", stack_limit_); | 1709 fprintf(stream_, " stack limit is here: 0x%16p\n", stack_limit_); |
| 1710 fprintf(stream_, "\n"); | 1710 fprintf(stream_, "\n"); |
| 1711 FATAL("ACCESS BELOW STACK POINTER"); | 1711 FATAL("ACCESS BELOW STACK POINTER"); |
| 1712 } | 1712 } |
| 1713 } | 1713 } |
| 1714 | 1714 |
| 1715 | 1715 |
| 1716 uint64_t Simulator::MemoryRead(uint8_t* address, unsigned num_bytes) { | 1716 uint64_t Simulator::MemoryRead(uint8_t* address, unsigned num_bytes) { |
| 1717 ASSERT(address != NULL); | 1717 DCHECK(address != NULL); |
| 1718 ASSERT((num_bytes > 0) && (num_bytes <= sizeof(uint64_t))); | 1718 DCHECK((num_bytes > 0) && (num_bytes <= sizeof(uint64_t))); |
| 1719 uint64_t read = 0; | 1719 uint64_t read = 0; |
| 1720 memcpy(&read, address, num_bytes); | 1720 memcpy(&read, address, num_bytes); |
| 1721 return read; | 1721 return read; |
| 1722 } | 1722 } |
| 1723 | 1723 |
| 1724 | 1724 |
| 1725 uint8_t Simulator::MemoryRead8(uint8_t* address) { | 1725 uint8_t Simulator::MemoryRead8(uint8_t* address) { |
| 1726 return MemoryRead(address, sizeof(uint8_t)); | 1726 return MemoryRead(address, sizeof(uint8_t)); |
| 1727 } | 1727 } |
| 1728 | 1728 |
| (...skipping 19 matching lines...) |
| 1748 | 1748 |
| 1749 | 1749 |
| 1750 double Simulator::MemoryReadFP64(uint8_t* address) { | 1750 double Simulator::MemoryReadFP64(uint8_t* address) { |
| 1751 return rawbits_to_double(MemoryRead64(address)); | 1751 return rawbits_to_double(MemoryRead64(address)); |
| 1752 } | 1752 } |
| 1753 | 1753 |
| 1754 | 1754 |
| 1755 void Simulator::MemoryWrite(uint8_t* address, | 1755 void Simulator::MemoryWrite(uint8_t* address, |
| 1756 uint64_t value, | 1756 uint64_t value, |
| 1757 unsigned num_bytes) { | 1757 unsigned num_bytes) { |
| 1758 ASSERT(address != NULL); | 1758 DCHECK(address != NULL); |
| 1759 ASSERT((num_bytes > 0) && (num_bytes <= sizeof(uint64_t))); | 1759 DCHECK((num_bytes > 0) && (num_bytes <= sizeof(uint64_t))); |
| 1760 | 1760 |
| 1761 LogWrite(address, value, num_bytes); | 1761 LogWrite(address, value, num_bytes); |
| 1762 memcpy(address, &value, num_bytes); | 1762 memcpy(address, &value, num_bytes); |
| 1763 } | 1763 } |
| 1764 | 1764 |
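Every simulated load and store funnels through memcpy rather than a pointer cast, which keeps unaligned and type-punned accesses well defined on the host. A condensed standalone sketch of the MemoryRead/MemoryWrite pair:

```cpp
#include <cassert>
#include <cstdint>
#include <cstdio>
#include <cstring>

// memcpy-based access: safe for any alignment, no strict-aliasing issues.
uint64_t MemoryRead(const uint8_t* address, unsigned num_bytes) {
  assert(num_bytes > 0 && num_bytes <= sizeof(uint64_t));
  uint64_t read = 0;
  memcpy(&read, address, num_bytes);  // Little-endian host assumed, as in the simulator.
  return read;
}

void MemoryWrite(uint8_t* address, uint64_t value, unsigned num_bytes) {
  assert(num_bytes > 0 && num_bytes <= sizeof(uint64_t));
  memcpy(address, &value, num_bytes);
}

int main() {
  uint8_t buffer[16] = {0};
  MemoryWrite(buffer + 3, 0x11223344, sizeof(uint32_t));  // Deliberately unaligned.
  printf("0x%llx\n", (unsigned long long)MemoryRead(buffer + 3, sizeof(uint32_t)));
  return 0;
}
```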
| 1765 | 1765 |
| 1766 void Simulator::MemoryWrite32(uint8_t* address, uint32_t value) { | 1766 void Simulator::MemoryWrite32(uint8_t* address, uint32_t value) { |
| 1767 MemoryWrite(address, value, sizeof(uint32_t)); | 1767 MemoryWrite(address, value, sizeof(uint32_t)); |
| 1768 } | 1768 } |
| 1769 | 1769 |
| (...skipping 13 matching lines...) |
| 1783 } | 1783 } |
| 1784 | 1784 |
| 1785 | 1785 |
| 1786 void Simulator::VisitMoveWideImmediate(Instruction* instr) { | 1786 void Simulator::VisitMoveWideImmediate(Instruction* instr) { |
| 1787 MoveWideImmediateOp mov_op = | 1787 MoveWideImmediateOp mov_op = |
| 1788 static_cast<MoveWideImmediateOp>(instr->Mask(MoveWideImmediateMask)); | 1788 static_cast<MoveWideImmediateOp>(instr->Mask(MoveWideImmediateMask)); |
| 1789 int64_t new_xn_val = 0; | 1789 int64_t new_xn_val = 0; |
| 1790 | 1790 |
| 1791 bool is_64_bits = instr->SixtyFourBits() == 1; | 1791 bool is_64_bits = instr->SixtyFourBits() == 1; |
| 1792 // Shift is limited for W operations. | 1792 // Shift is limited for W operations. |
| 1793 ASSERT(is_64_bits || (instr->ShiftMoveWide() < 2)); | 1793 DCHECK(is_64_bits || (instr->ShiftMoveWide() < 2)); |
| 1794 | 1794 |
| 1795 // Get the shifted immediate. | 1795 // Get the shifted immediate. |
| 1796 int64_t shift = instr->ShiftMoveWide() * 16; | 1796 int64_t shift = instr->ShiftMoveWide() * 16; |
| 1797 int64_t shifted_imm16 = instr->ImmMoveWide() << shift; | 1797 int64_t shifted_imm16 = instr->ImmMoveWide() << shift; |
| 1798 | 1798 |
| 1799 // Compute the new value. | 1799 // Compute the new value. |
| 1800 switch (mov_op) { | 1800 switch (mov_op) { |
| 1801 case MOVN_w: | 1801 case MOVN_w: |
| 1802 case MOVN_x: { | 1802 case MOVN_x: { |
| 1803 new_xn_val = ~shifted_imm16; | 1803 new_xn_val = ~shifted_imm16; |
| (...skipping 69 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1873 case CLS_x: { | 1873 case CLS_x: { |
| 1874 set_xreg(dst, CountLeadingSignBits(xreg(src), kXRegSizeInBits)); | 1874 set_xreg(dst, CountLeadingSignBits(xreg(src), kXRegSizeInBits)); |
| 1875 break; | 1875 break; |
| 1876 } | 1876 } |
| 1877 default: UNIMPLEMENTED(); | 1877 default: UNIMPLEMENTED(); |
| 1878 } | 1878 } |
| 1879 } | 1879 } |
| 1880 | 1880 |
| 1881 | 1881 |
| 1882 uint64_t Simulator::ReverseBits(uint64_t value, unsigned num_bits) { | 1882 uint64_t Simulator::ReverseBits(uint64_t value, unsigned num_bits) { |
| 1883 ASSERT((num_bits == kWRegSizeInBits) || (num_bits == kXRegSizeInBits)); | 1883 DCHECK((num_bits == kWRegSizeInBits) || (num_bits == kXRegSizeInBits)); |
| 1884 uint64_t result = 0; | 1884 uint64_t result = 0; |
| 1885 for (unsigned i = 0; i < num_bits; i++) { | 1885 for (unsigned i = 0; i < num_bits; i++) { |
| 1886 result = (result << 1) | (value & 1); | 1886 result = (result << 1) | (value & 1); |
| 1887 value >>= 1; | 1887 value >>= 1; |
| 1888 } | 1888 } |
| 1889 return result; | 1889 return result; |
| 1890 } | 1890 } |
| 1891 | 1891 |
| 1892 | 1892 |
| 1893 uint64_t Simulator::ReverseBytes(uint64_t value, ReverseByteMode mode) { | 1893 uint64_t Simulator::ReverseBytes(uint64_t value, ReverseByteMode mode) { |
| 1894 // Split the 64-bit value into an 8-bit array, where b[0] is the least | 1894 // Split the 64-bit value into an 8-bit array, where b[0] is the least |
| 1895 // significant byte, and b[7] is the most significant. | 1895 // significant byte, and b[7] is the most significant. |
| 1896 uint8_t bytes[8]; | 1896 uint8_t bytes[8]; |
| 1897 uint64_t mask = 0xff00000000000000UL; | 1897 uint64_t mask = 0xff00000000000000UL; |
| 1898 for (int i = 7; i >= 0; i--) { | 1898 for (int i = 7; i >= 0; i--) { |
| 1899 bytes[i] = (value & mask) >> (i * 8); | 1899 bytes[i] = (value & mask) >> (i * 8); |
| 1900 mask >>= 8; | 1900 mask >>= 8; |
| 1901 } | 1901 } |
| 1902 | 1902 |
| 1903 // Permutation tables for REV instructions. | 1903 // Permutation tables for REV instructions. |
| 1904 // permute_table[Reverse16] is used by REV16_x, REV16_w | 1904 // permute_table[Reverse16] is used by REV16_x, REV16_w |
| 1905 // permute_table[Reverse32] is used by REV32_x, REV_w | 1905 // permute_table[Reverse32] is used by REV32_x, REV_w |
| 1906 // permute_table[Reverse64] is used by REV_x | 1906 // permute_table[Reverse64] is used by REV_x |
| 1907 ASSERT((Reverse16 == 0) && (Reverse32 == 1) && (Reverse64 == 2)); | 1907 DCHECK((Reverse16 == 0) && (Reverse32 == 1) && (Reverse64 == 2)); |
| 1908 static const uint8_t permute_table[3][8] = { {6, 7, 4, 5, 2, 3, 0, 1}, | 1908 static const uint8_t permute_table[3][8] = { {6, 7, 4, 5, 2, 3, 0, 1}, |
| 1909 {4, 5, 6, 7, 0, 1, 2, 3}, | 1909 {4, 5, 6, 7, 0, 1, 2, 3}, |
| 1910 {0, 1, 2, 3, 4, 5, 6, 7} }; | 1910 {0, 1, 2, 3, 4, 5, 6, 7} }; |
| 1911 uint64_t result = 0; | 1911 uint64_t result = 0; |
| 1912 for (int i = 0; i < 8; i++) { | 1912 for (int i = 0; i < 8; i++) { |
| 1913 result <<= 8; | 1913 result <<= 8; |
| 1914 result |= bytes[permute_table[mode][i]]; | 1914 result |= bytes[permute_table[mode][i]]; |
| 1915 } | 1915 } |
| 1916 return result; | 1916 return result; |
| 1917 } | 1917 } |
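ReverseBytes splits the value into bytes and reassembles them through one of three permutation tables, covering REV16, REV32/REV_w and REV_x with a single loop. The standalone sketch below gets the same results with lane-wise reversal instead of tables, and checks the three cases against hand-computed values:

```cpp
#include <cassert>
#include <cstdint>
#include <cstdio>

// Reverse the bytes within each lane of `lane_bytes` bytes:
// lane_bytes == 2 models REV16, 4 models REV32/REV_w, 8 models REV_x.
uint64_t ReverseBytes(uint64_t value, int lane_bytes) {
  uint64_t result = 0;
  for (int lane = 0; lane < 8; lane += lane_bytes) {
    for (int i = 0; i < lane_bytes; i++) {
      uint64_t byte = (value >> ((lane + i) * 8)) & 0xff;
      result |= byte << ((lane + lane_bytes - 1 - i) * 8);
    }
  }
  return result;
}

int main() {
  uint64_t v = 0x0123456789abcdefULL;
  assert(ReverseBytes(v, 2) == 0x23016745ab89efcdULL);  // REV16_x
  assert(ReverseBytes(v, 4) == 0x67452301efcdab89ULL);  // REV32_x
  assert(ReverseBytes(v, 8) == 0xefcdab8967452301ULL);  // REV_x
  printf("byte-reversal lanes check out\n");
  return 0;
}
```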
| (...skipping 102 matching lines...) |
| 2020 break; | 2020 break; |
| 2021 case MSUB_w: | 2021 case MSUB_w: |
| 2022 case MSUB_x: | 2022 case MSUB_x: |
| 2023 result = xreg(instr->Ra()) - (xreg(instr->Rn()) * xreg(instr->Rm())); | 2023 result = xreg(instr->Ra()) - (xreg(instr->Rn()) * xreg(instr->Rm())); |
| 2024 break; | 2024 break; |
| 2025 case SMADDL_x: result = xreg(instr->Ra()) + (rn_s32 * rm_s32); break; | 2025 case SMADDL_x: result = xreg(instr->Ra()) + (rn_s32 * rm_s32); break; |
| 2026 case SMSUBL_x: result = xreg(instr->Ra()) - (rn_s32 * rm_s32); break; | 2026 case SMSUBL_x: result = xreg(instr->Ra()) - (rn_s32 * rm_s32); break; |
| 2027 case UMADDL_x: result = xreg(instr->Ra()) + (rn_u32 * rm_u32); break; | 2027 case UMADDL_x: result = xreg(instr->Ra()) + (rn_u32 * rm_u32); break; |
| 2028 case UMSUBL_x: result = xreg(instr->Ra()) - (rn_u32 * rm_u32); break; | 2028 case UMSUBL_x: result = xreg(instr->Ra()) - (rn_u32 * rm_u32); break; |
| 2029 case SMULH_x: | 2029 case SMULH_x: |
| 2030 ASSERT(instr->Ra() == kZeroRegCode); | 2030 DCHECK(instr->Ra() == kZeroRegCode); |
| 2031 result = MultiplyHighSigned(xreg(instr->Rn()), xreg(instr->Rm())); | 2031 result = MultiplyHighSigned(xreg(instr->Rn()), xreg(instr->Rm())); |
| 2032 break; | 2032 break; |
| 2033 default: UNIMPLEMENTED(); | 2033 default: UNIMPLEMENTED(); |
| 2034 } | 2034 } |
| 2035 | 2035 |
| 2036 if (instr->SixtyFourBits()) { | 2036 if (instr->SixtyFourBits()) { |
| 2037 set_xreg(instr->Rd(), result); | 2037 set_xreg(instr->Rd(), result); |
| 2038 } else { | 2038 } else { |
| 2039 set_wreg(instr->Rd(), result); | 2039 set_wreg(instr->Rd(), result); |
| 2040 } | 2040 } |
| (...skipping 359 matching lines...) |
| 2400 // The input value is assumed to be a normalized value. That is, the input may | 2400 // The input value is assumed to be a normalized value. That is, the input may |
| 2401 // not be infinity or NaN. If the source value is subnormal, it must be | 2401 // not be infinity or NaN. If the source value is subnormal, it must be |
| 2402 // normalized before calling this function such that the highest set bit in the | 2402 // normalized before calling this function such that the highest set bit in the |
| 2403 // mantissa has the value 'pow(2, exponent)'. | 2403 // mantissa has the value 'pow(2, exponent)'. |
| 2404 // | 2404 // |
| 2405 // Callers should use FPRoundToFloat or FPRoundToDouble directly, rather than | 2405 // Callers should use FPRoundToFloat or FPRoundToDouble directly, rather than |
| 2406 // calling a templated FPRound. | 2406 // calling a templated FPRound. |
| 2407 template <class T, int ebits, int mbits> | 2407 template <class T, int ebits, int mbits> |
| 2408 static T FPRound(int64_t sign, int64_t exponent, uint64_t mantissa, | 2408 static T FPRound(int64_t sign, int64_t exponent, uint64_t mantissa, |
| 2409 FPRounding round_mode) { | 2409 FPRounding round_mode) { |
| 2410 ASSERT((sign == 0) || (sign == 1)); | 2410 DCHECK((sign == 0) || (sign == 1)); |
| 2411 | 2411 |
| 2412 // Only the FPTieEven rounding mode is implemented. | 2412 // Only the FPTieEven rounding mode is implemented. |
| 2413 ASSERT(round_mode == FPTieEven); | 2413 DCHECK(round_mode == FPTieEven); |
| 2414 USE(round_mode); | 2414 USE(round_mode); |
| 2415 | 2415 |
| 2416 // Rounding can promote subnormals to normals, and normals to infinities. For | 2416 // Rounding can promote subnormals to normals, and normals to infinities. For |
| 2417 // example, a double with exponent 127 (FLT_MAX_EXP) would appear to be | 2417 // example, a double with exponent 127 (FLT_MAX_EXP) would appear to be |
| 2418 // encodable as a float, but rounding based on the low-order mantissa bits | 2418 // encodable as a float, but rounding based on the low-order mantissa bits |
| 2419 // could make it overflow. With ties-to-even rounding, this value would become | 2419 // could make it overflow. With ties-to-even rounding, this value would become |
| 2420 // an infinity. | 2420 // an infinity. |
| 2421 | 2421 |
| 2422 // ---- Rounding Method ---- | 2422 // ---- Rounding Method ---- |
| 2423 // | 2423 // |
| (...skipping 296 matching lines...) |
| 2720 } | 2720 } |
| 2721 } | 2721 } |
| 2722 | 2722 |
| 2723 UNREACHABLE(); | 2723 UNREACHABLE(); |
| 2724 return static_cast<double>(value); | 2724 return static_cast<double>(value); |
| 2725 } | 2725 } |
| 2726 | 2726 |
| 2727 | 2727 |
| 2728 float Simulator::FPToFloat(double value, FPRounding round_mode) { | 2728 float Simulator::FPToFloat(double value, FPRounding round_mode) { |
| 2729 // Only the FPTieEven rounding mode is implemented. | 2729 // Only the FPTieEven rounding mode is implemented. |
| 2730 ASSERT(round_mode == FPTieEven); | 2730 DCHECK(round_mode == FPTieEven); |
| 2731 USE(round_mode); | 2731 USE(round_mode); |
| 2732 | 2732 |
| 2733 switch (std::fpclassify(value)) { | 2733 switch (std::fpclassify(value)) { |
| 2734 case FP_NAN: { | 2734 case FP_NAN: { |
| 2735 if (fpcr().DN()) return kFP32DefaultNaN; | 2735 if (fpcr().DN()) return kFP32DefaultNaN; |
| 2736 | 2736 |
| 2737 // Convert NaNs as the processor would: | 2737 // Convert NaNs as the processor would: |
| 2738 // - The sign is propagated. | 2738 // - The sign is propagated. |
| 2739 // - The payload (mantissa) is transferred as much as possible, except | 2739 // - The payload (mantissa) is transferred as much as possible, except |
| 2740 // that the top bit is forced to '1', making the result a quiet NaN. | 2740 // that the top bit is forced to '1', making the result a quiet NaN. |
| (...skipping 108 matching lines...) |
| 2849 set_dreg(fd, FPMulAdd(-dreg(fa), dreg(fn), dreg(fm))); | 2849 set_dreg(fd, FPMulAdd(-dreg(fa), dreg(fn), dreg(fm))); |
| 2850 break; | 2850 break; |
| 2851 default: UNIMPLEMENTED(); | 2851 default: UNIMPLEMENTED(); |
| 2852 } | 2852 } |
| 2853 } | 2853 } |
| 2854 | 2854 |
| 2855 | 2855 |
| 2856 template <typename T> | 2856 template <typename T> |
| 2857 T Simulator::FPAdd(T op1, T op2) { | 2857 T Simulator::FPAdd(T op1, T op2) { |
| 2858 // NaNs should be handled elsewhere. | 2858 // NaNs should be handled elsewhere. |
| 2859 ASSERT(!std::isnan(op1) && !std::isnan(op2)); | 2859 DCHECK(!std::isnan(op1) && !std::isnan(op2)); |
| 2860 | 2860 |
| 2861 if (std::isinf(op1) && std::isinf(op2) && (op1 != op2)) { | 2861 if (std::isinf(op1) && std::isinf(op2) && (op1 != op2)) { |
| 2862 // inf + -inf returns the default NaN. | 2862 // inf + -inf returns the default NaN. |
| 2863 return FPDefaultNaN<T>(); | 2863 return FPDefaultNaN<T>(); |
| 2864 } else { | 2864 } else { |
| 2865 // Other cases should be handled by standard arithmetic. | 2865 // Other cases should be handled by standard arithmetic. |
| 2866 return op1 + op2; | 2866 return op1 + op2; |
| 2867 } | 2867 } |
| 2868 } | 2868 } |
| 2869 | 2869 |
| 2870 | 2870 |
| 2871 template <typename T> | 2871 template <typename T> |
| 2872 T Simulator::FPDiv(T op1, T op2) { | 2872 T Simulator::FPDiv(T op1, T op2) { |
| 2873 // NaNs should be handled elsewhere. | 2873 // NaNs should be handled elsewhere. |
| 2874 ASSERT(!std::isnan(op1) && !std::isnan(op2)); | 2874 DCHECK(!std::isnan(op1) && !std::isnan(op2)); |
| 2875 | 2875 |
| 2876 if ((std::isinf(op1) && std::isinf(op2)) || ((op1 == 0.0) && (op2 == 0.0))) { | 2876 if ((std::isinf(op1) && std::isinf(op2)) || ((op1 == 0.0) && (op2 == 0.0))) { |
| 2877 // inf / inf and 0.0 / 0.0 return the default NaN. | 2877 // inf / inf and 0.0 / 0.0 return the default NaN. |
| 2878 return FPDefaultNaN<T>(); | 2878 return FPDefaultNaN<T>(); |
| 2879 } else { | 2879 } else { |
| 2880 // Other cases should be handled by standard arithmetic. | 2880 // Other cases should be handled by standard arithmetic. |
| 2881 return op1 / op2; | 2881 return op1 / op2; |
| 2882 } | 2882 } |
| 2883 } | 2883 } |
| 2884 | 2884 |
| 2885 | 2885 |
| 2886 template <typename T> | 2886 template <typename T> |
| 2887 T Simulator::FPMax(T a, T b) { | 2887 T Simulator::FPMax(T a, T b) { |
| 2888 // NaNs should be handled elsewhere. | 2888 // NaNs should be handled elsewhere. |
| 2889 ASSERT(!std::isnan(a) && !std::isnan(b)); | 2889 DCHECK(!std::isnan(a) && !std::isnan(b)); |
| 2890 | 2890 |
| 2891 if ((a == 0.0) && (b == 0.0) && | 2891 if ((a == 0.0) && (b == 0.0) && |
| 2892 (copysign(1.0, a) != copysign(1.0, b))) { | 2892 (copysign(1.0, a) != copysign(1.0, b))) { |
| 2893 // a and b are zero, and the sign differs: return +0.0. | 2893 // a and b are zero, and the sign differs: return +0.0. |
| 2894 return 0.0; | 2894 return 0.0; |
| 2895 } else { | 2895 } else { |
| 2896 return (a > b) ? a : b; | 2896 return (a > b) ? a : b; |
| 2897 } | 2897 } |
| 2898 } | 2898 } |
| 2899 | 2899 |
| 2900 | 2900 |
| 2901 template <typename T> | 2901 template <typename T> |
| 2902 T Simulator::FPMaxNM(T a, T b) { | 2902 T Simulator::FPMaxNM(T a, T b) { |
| 2903 if (IsQuietNaN(a) && !IsQuietNaN(b)) { | 2903 if (IsQuietNaN(a) && !IsQuietNaN(b)) { |
| 2904 a = kFP64NegativeInfinity; | 2904 a = kFP64NegativeInfinity; |
| 2905 } else if (!IsQuietNaN(a) && IsQuietNaN(b)) { | 2905 } else if (!IsQuietNaN(a) && IsQuietNaN(b)) { |
| 2906 b = kFP64NegativeInfinity; | 2906 b = kFP64NegativeInfinity; |
| 2907 } | 2907 } |
| 2908 | 2908 |
| 2909 T result = FPProcessNaNs(a, b); | 2909 T result = FPProcessNaNs(a, b); |
| 2910 return std::isnan(result) ? result : FPMax(a, b); | 2910 return std::isnan(result) ? result : FPMax(a, b); |
| 2911 } | 2911 } |
| 2912 | 2912 |
| 2913 template <typename T> | 2913 template <typename T> |
| 2914 T Simulator::FPMin(T a, T b) { | 2914 T Simulator::FPMin(T a, T b) { |
| 2915 // NaNs should be handled elsewhere. | 2915 // NaNs should be handled elsewhere. |
| 2916 ASSERT(!std::isnan(a) && !std::isnan(b)); | 2916 DCHECK(!std::isnan(a) && !std::isnan(b)); |
| 2917 | 2917 |
| 2918 if ((a == 0.0) && (b == 0.0) && | 2918 if ((a == 0.0) && (b == 0.0) && |
| 2919 (copysign(1.0, a) != copysign(1.0, b))) { | 2919 (copysign(1.0, a) != copysign(1.0, b))) { |
| 2920 // a and b are zero, and the sign differs: return -0.0. | 2920 // a and b are zero, and the sign differs: return -0.0. |
| 2921 return -0.0; | 2921 return -0.0; |
| 2922 } else { | 2922 } else { |
| 2923 return (a < b) ? a : b; | 2923 return (a < b) ? a : b; |
| 2924 } | 2924 } |
| 2925 } | 2925 } |
| 2926 | 2926 |
| 2927 | 2927 |
| 2928 template <typename T> | 2928 template <typename T> |
| 2929 T Simulator::FPMinNM(T a, T b) { | 2929 T Simulator::FPMinNM(T a, T b) { |
| 2930 if (IsQuietNaN(a) && !IsQuietNaN(b)) { | 2930 if (IsQuietNaN(a) && !IsQuietNaN(b)) { |
| 2931 a = kFP64PositiveInfinity; | 2931 a = kFP64PositiveInfinity; |
| 2932 } else if (!IsQuietNaN(a) && IsQuietNaN(b)) { | 2932 } else if (!IsQuietNaN(a) && IsQuietNaN(b)) { |
| 2933 b = kFP64PositiveInfinity; | 2933 b = kFP64PositiveInfinity; |
| 2934 } | 2934 } |
| 2935 | 2935 |
| 2936 T result = FPProcessNaNs(a, b); | 2936 T result = FPProcessNaNs(a, b); |
| 2937 return std::isnan(result) ? result : FPMin(a, b); | 2937 return std::isnan(result) ? result : FPMin(a, b); |
| 2938 } | 2938 } |
| 2939 | 2939 |
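FPMaxNM and FPMinNM implement the maxNM/minNM semantics: a lone quiet NaN is treated as missing (replaced by the appropriate infinity) so the numeric operand wins, while signalling NaNs still propagate through FPProcessNaNs. A simplified standalone sketch of the max case for doubles, using std::numeric_limits instead of V8's constants and ignoring the signalling/quiet distinction and signed-zero rules:

```cpp
#include <cmath>
#include <cstdio>
#include <limits>

// maxNM-style maximum: a single NaN operand loses to a number.
double MaxNM(double a, double b) {
  const double kNegInf = -std::numeric_limits<double>::infinity();
  if (std::isnan(a) && !std::isnan(b)) {
    a = kNegInf;  // Treat the lone NaN as "missing" so the numeric operand wins.
  } else if (std::isnan(b) && !std::isnan(a)) {
    b = kNegInf;
  }
  if (std::isnan(a) || std::isnan(b)) {
    return std::numeric_limits<double>::quiet_NaN();  // Both operands were NaN.
  }
  return (a > b) ? a : b;
}

int main() {
  double qnan = std::numeric_limits<double>::quiet_NaN();
  printf("MaxNM(NaN, 1.0) = %f\n", MaxNM(qnan, 1.0));  // 1.0, not NaN.
  printf("MaxNM(2.0, 3.0) = %f\n", MaxNM(2.0, 3.0));   // 3.0.
  return 0;
}
```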
| 2940 | 2940 |
| 2941 template <typename T> | 2941 template <typename T> |
| 2942 T Simulator::FPMul(T op1, T op2) { | 2942 T Simulator::FPMul(T op1, T op2) { |
| 2943 // NaNs should be handled elsewhere. | 2943 // NaNs should be handled elsewhere. |
| 2944 ASSERT(!std::isnan(op1) && !std::isnan(op2)); | 2944 DCHECK(!std::isnan(op1) && !std::isnan(op2)); |
| 2945 | 2945 |
| 2946 if ((std::isinf(op1) && (op2 == 0.0)) || (std::isinf(op2) && (op1 == 0.0))) { | 2946 if ((std::isinf(op1) && (op2 == 0.0)) || (std::isinf(op2) && (op1 == 0.0))) { |
| 2947 // inf * 0.0 returns the default NaN. | 2947 // inf * 0.0 returns the default NaN. |
| 2948 return FPDefaultNaN<T>(); | 2948 return FPDefaultNaN<T>(); |
| 2949 } else { | 2949 } else { |
| 2950 // Other cases should be handled by standard arithmetic. | 2950 // Other cases should be handled by standard arithmetic. |
| 2951 return op1 * op2; | 2951 return op1 * op2; |
| 2952 } | 2952 } |
| 2953 } | 2953 } |
| 2954 | 2954 |
| (...skipping 24 matching lines...) |
| 2979 return FPDefaultNaN<T>(); | 2979 return FPDefaultNaN<T>(); |
| 2980 } | 2980 } |
| 2981 | 2981 |
| 2982 // Work around broken fma implementations for exact zero results: The sign of | 2982 // Work around broken fma implementations for exact zero results: The sign of |
| 2983 // exact 0.0 results is positive unless both a and op1 * op2 are negative. | 2983 // exact 0.0 results is positive unless both a and op1 * op2 are negative. |
| 2984 if (((op1 == 0.0) || (op2 == 0.0)) && (a == 0.0)) { | 2984 if (((op1 == 0.0) || (op2 == 0.0)) && (a == 0.0)) { |
| 2985 return ((sign_a < 0) && (sign_prod < 0)) ? -0.0 : 0.0; | 2985 return ((sign_a < 0) && (sign_prod < 0)) ? -0.0 : 0.0; |
| 2986 } | 2986 } |
| 2987 | 2987 |
| 2988 result = FusedMultiplyAdd(op1, op2, a); | 2988 result = FusedMultiplyAdd(op1, op2, a); |
| 2989 ASSERT(!std::isnan(result)); | 2989 DCHECK(!std::isnan(result)); |
| 2990 | 2990 |
| 2991 // Work around broken fma implementations for rounded zero results: If a is | 2991 // Work around broken fma implementations for rounded zero results: If a is |
| 2992 // 0.0, the sign of the result is the sign of op1 * op2 before rounding. | 2992 // 0.0, the sign of the result is the sign of op1 * op2 before rounding. |
| 2993 if ((a == 0.0) && (result == 0.0)) { | 2993 if ((a == 0.0) && (result == 0.0)) { |
| 2994 return copysign(0.0, sign_prod); | 2994 return copysign(0.0, sign_prod); |
| 2995 } | 2995 } |
| 2996 | 2996 |
| 2997 return result; | 2997 return result; |
| 2998 } | 2998 } |
| 2999 | 2999 |
| 3000 | 3000 |
| 3001 template <typename T> | 3001 template <typename T> |
| 3002 T Simulator::FPSqrt(T op) { | 3002 T Simulator::FPSqrt(T op) { |
| 3003 if (std::isnan(op)) { | 3003 if (std::isnan(op)) { |
| 3004 return FPProcessNaN(op); | 3004 return FPProcessNaN(op); |
| 3005 } else if (op < 0.0) { | 3005 } else if (op < 0.0) { |
| 3006 return FPDefaultNaN<T>(); | 3006 return FPDefaultNaN<T>(); |
| 3007 } else { | 3007 } else { |
| 3008 return std::sqrt(op); | 3008 return std::sqrt(op); |
| 3009 } | 3009 } |
| 3010 } | 3010 } |
| 3011 | 3011 |
| 3012 | 3012 |
| 3013 template <typename T> | 3013 template <typename T> |
| 3014 T Simulator::FPSub(T op1, T op2) { | 3014 T Simulator::FPSub(T op1, T op2) { |
| 3015 // NaNs should be handled elsewhere. | 3015 // NaNs should be handled elsewhere. |
| 3016 ASSERT(!std::isnan(op1) && !std::isnan(op2)); | 3016 DCHECK(!std::isnan(op1) && !std::isnan(op2)); |
| 3017 | 3017 |
| 3018 if (std::isinf(op1) && std::isinf(op2) && (op1 == op2)) { | 3018 if (std::isinf(op1) && std::isinf(op2) && (op1 == op2)) { |
| 3019 // inf - inf returns the default NaN. | 3019 // inf - inf returns the default NaN. |
| 3020 return FPDefaultNaN<T>(); | 3020 return FPDefaultNaN<T>(); |
| 3021 } else { | 3021 } else { |
| 3022 // Other cases should be handled by standard arithmetic. | 3022 // Other cases should be handled by standard arithmetic. |
| 3023 return op1 - op2; | 3023 return op1 - op2; |
| 3024 } | 3024 } |
| 3025 } | 3025 } |
| 3026 | 3026 |
| 3027 | 3027 |
| 3028 template <typename T> | 3028 template <typename T> |
| 3029 T Simulator::FPProcessNaN(T op) { | 3029 T Simulator::FPProcessNaN(T op) { |
| 3030 ASSERT(std::isnan(op)); | 3030 DCHECK(std::isnan(op)); |
| 3031 return fpcr().DN() ? FPDefaultNaN<T>() : ToQuietNaN(op); | 3031 return fpcr().DN() ? FPDefaultNaN<T>() : ToQuietNaN(op); |
| 3032 } | 3032 } |
| 3033 | 3033 |
| 3034 | 3034 |
| 3035 template <typename T> | 3035 template <typename T> |
| 3036 T Simulator::FPProcessNaNs(T op1, T op2) { | 3036 T Simulator::FPProcessNaNs(T op1, T op2) { |
| 3037 if (IsSignallingNaN(op1)) { | 3037 if (IsSignallingNaN(op1)) { |
| 3038 return FPProcessNaN(op1); | 3038 return FPProcessNaN(op1); |
| 3039 } else if (IsSignallingNaN(op2)) { | 3039 } else if (IsSignallingNaN(op2)) { |
| 3040 return FPProcessNaN(op2); | 3040 return FPProcessNaN(op2); |
| 3041 } else if (std::isnan(op1)) { | 3041 } else if (std::isnan(op1)) { |
| 3042 ASSERT(IsQuietNaN(op1)); | 3042 DCHECK(IsQuietNaN(op1)); |
| 3043 return FPProcessNaN(op1); | 3043 return FPProcessNaN(op1); |
| 3044 } else if (std::isnan(op2)) { | 3044 } else if (std::isnan(op2)) { |
| 3045 ASSERT(IsQuietNaN(op2)); | 3045 DCHECK(IsQuietNaN(op2)); |
| 3046 return FPProcessNaN(op2); | 3046 return FPProcessNaN(op2); |
| 3047 } else { | 3047 } else { |
| 3048 return 0.0; | 3048 return 0.0; |
| 3049 } | 3049 } |
| 3050 } | 3050 } |
| 3051 | 3051 |
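FPProcessNaNs encodes the NaN-propagation priority: a signalling NaN on either operand beats a quiet one, operand order breaks ties, and a return value of 0.0 tells the caller no NaN was involved. A small sketch of that priority check for doubles; the bit-level IsSignallingNaN test is written out for illustration, and the quieting/FPCR.DN step that V8's FPProcessNaN adds is omitted here.

```cpp
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <limits>

// A double NaN is signalling when the top mantissa bit (the "quiet" bit) is clear.
bool IsSignallingNaN(double v) {
  uint64_t bits;
  memcpy(&bits, &v, sizeof(bits));
  return std::isnan(v) && ((bits & (UINT64_C(1) << 51)) == 0);
}

// Priority: signalling NaNs first (op1 before op2), then quiet NaNs, else 0.0.
double ProcessNaNs(double op1, double op2) {
  if (IsSignallingNaN(op1)) return op1;
  if (IsSignallingNaN(op2)) return op2;
  if (std::isnan(op1)) return op1;
  if (std::isnan(op2)) return op2;
  return 0.0;  // Callers treat 0.0 as "carry on with normal arithmetic".
}

int main() {
  double qnan = std::numeric_limits<double>::quiet_NaN();
  printf("ProcessNaNs(1.0, qNaN) is NaN: %d\n", (int)std::isnan(ProcessNaNs(1.0, qnan)));
  printf("ProcessNaNs(1.0, 2.0)  is NaN: %d\n", (int)std::isnan(ProcessNaNs(1.0, 2.0)));
  return 0;
}
```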
| 3052 | 3052 |
| 3053 template <typename T> | 3053 template <typename T> |
| 3054 T Simulator::FPProcessNaNs3(T op1, T op2, T op3) { | 3054 T Simulator::FPProcessNaNs3(T op1, T op2, T op3) { |
| 3055 if (IsSignallingNaN(op1)) { | 3055 if (IsSignallingNaN(op1)) { |
| 3056 return FPProcessNaN(op1); | 3056 return FPProcessNaN(op1); |
| 3057 } else if (IsSignallingNaN(op2)) { | 3057 } else if (IsSignallingNaN(op2)) { |
| 3058 return FPProcessNaN(op2); | 3058 return FPProcessNaN(op2); |
| 3059 } else if (IsSignallingNaN(op3)) { | 3059 } else if (IsSignallingNaN(op3)) { |
| 3060 return FPProcessNaN(op3); | 3060 return FPProcessNaN(op3); |
| 3061 } else if (std::isnan(op1)) { | 3061 } else if (std::isnan(op1)) { |
| 3062 ASSERT(IsQuietNaN(op1)); | 3062 DCHECK(IsQuietNaN(op1)); |
| 3063 return FPProcessNaN(op1); | 3063 return FPProcessNaN(op1); |
| 3064 } else if (std::isnan(op2)) { | 3064 } else if (std::isnan(op2)) { |
| 3065 ASSERT(IsQuietNaN(op2)); | 3065 DCHECK(IsQuietNaN(op2)); |
| 3066 return FPProcessNaN(op2); | 3066 return FPProcessNaN(op2); |
| 3067 } else if (std::isnan(op3)) { | 3067 } else if (std::isnan(op3)) { |
| 3068 ASSERT(IsQuietNaN(op3)); | 3068 DCHECK(IsQuietNaN(op3)); |
| 3069 return FPProcessNaN(op3); | 3069 return FPProcessNaN(op3); |
| 3070 } else { | 3070 } else { |
| 3071 return 0.0; | 3071 return 0.0; |
| 3072 } | 3072 } |
| 3073 } | 3073 } |
| 3074 | 3074 |
| 3075 | 3075 |
| 3076 bool Simulator::FPProcessNaNs(Instruction* instr) { | 3076 bool Simulator::FPProcessNaNs(Instruction* instr) { |
| 3077 unsigned fd = instr->Rd(); | 3077 unsigned fd = instr->Rd(); |
| 3078 unsigned fn = instr->Rn(); | 3078 unsigned fn = instr->Rn(); |
| (...skipping 35 matching lines...) |
| 3114 case MSR: { | 3114 case MSR: { |
| 3115 switch (instr->ImmSystemRegister()) { | 3115 switch (instr->ImmSystemRegister()) { |
| 3116 case NZCV: nzcv().SetRawValue(xreg(instr->Rt())); break; | 3116 case NZCV: nzcv().SetRawValue(xreg(instr->Rt())); break; |
| 3117 case FPCR: fpcr().SetRawValue(xreg(instr->Rt())); break; | 3117 case FPCR: fpcr().SetRawValue(xreg(instr->Rt())); break; |
| 3118 default: UNIMPLEMENTED(); | 3118 default: UNIMPLEMENTED(); |
| 3119 } | 3119 } |
| 3120 break; | 3120 break; |
| 3121 } | 3121 } |
| 3122 } | 3122 } |
| 3123 } else if (instr->Mask(SystemHintFMask) == SystemHintFixed) { | 3123 } else if (instr->Mask(SystemHintFMask) == SystemHintFixed) { |
| 3124 ASSERT(instr->Mask(SystemHintMask) == HINT); | 3124 DCHECK(instr->Mask(SystemHintMask) == HINT); |
| 3125 switch (instr->ImmHint()) { | 3125 switch (instr->ImmHint()) { |
| 3126 case NOP: break; | 3126 case NOP: break; |
| 3127 default: UNIMPLEMENTED(); | 3127 default: UNIMPLEMENTED(); |
| 3128 } | 3128 } |
| 3129 } else if (instr->Mask(MemBarrierFMask) == MemBarrierFixed) { | 3129 } else if (instr->Mask(MemBarrierFMask) == MemBarrierFixed) { |
| 3130 __sync_synchronize(); | 3130 __sync_synchronize(); |
| 3131 } else { | 3131 } else { |
| 3132 UNIMPLEMENTED(); | 3132 UNIMPLEMENTED(); |
| 3133 } | 3133 } |
| 3134 } | 3134 } |
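As context for the MSR handling above: the NZCV system register exposes only the four condition-flag bits, and the write-ignore mask installed by SimSystemRegister::DefaultValueFor is intended to discard writes to the remaining bits. The architectural bit positions are shown below; the constant names are illustrative, not V8's.

    // Architectural NZCV bit positions (constant names are illustrative).
    constexpr uint32_t kNFlagBit = 1u << 31;  // Negative
    constexpr uint32_t kZFlagBit = 1u << 30;  // Zero
    constexpr uint32_t kCFlagBit = 1u << 29;  // Carry
    constexpr uint32_t kVFlagBit = 1u << 28;  // Overflow (signed)
    // After "msr nzcv, x0", only these bits of x0 are expected to stick.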
| (...skipping 22 matching lines...) |
| 3157 reinterpret_cast<uint64_t*>(value)) == 1; | 3157 reinterpret_cast<uint64_t*>(value)) == 1; |
| 3158 } else { | 3158 } else { |
| 3159 return SScanF(desc, "%" SCNu64, | 3159 return SScanF(desc, "%" SCNu64, |
| 3160 reinterpret_cast<uint64_t*>(value)) == 1; | 3160 reinterpret_cast<uint64_t*>(value)) == 1; |
| 3161 } | 3161 } |
| 3162 } | 3162 } |
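The two SScanF branches above accept either a hexadecimal or a decimal literal when the debugger reads a value. A minimal standalone sketch of the same parse, assuming (as the elided condition suggests) that a leading "0x" selects the hexadecimal path:

    #include <cinttypes>
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Sketch: parse a 64-bit debugger operand, hex for a "0x" prefix and
    // decimal otherwise. Returns true on a successful conversion.
    static bool ParseUint64Sketch(const char* desc, uint64_t* value) {
      if (strncmp(desc, "0x", 2) == 0) {
        return sscanf(desc, "%" SCNx64, value) == 1;  // Hexadecimal.
      }
      return sscanf(desc, "%" SCNu64, value) == 1;    // Decimal.
    }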
| 3163 | 3163 |
| 3164 | 3164 |
| 3165 bool Simulator::PrintValue(const char* desc) { | 3165 bool Simulator::PrintValue(const char* desc) { |
| 3166 if (strcmp(desc, "csp") == 0) { | 3166 if (strcmp(desc, "csp") == 0) { |
| 3167 ASSERT(CodeFromName(desc) == static_cast<int>(kSPRegInternalCode)); | 3167 DCHECK(CodeFromName(desc) == static_cast<int>(kSPRegInternalCode)); |
| 3168 PrintF(stream_, "%s csp:%s 0x%016" PRIx64 "%s\n", | 3168 PrintF(stream_, "%s csp:%s 0x%016" PRIx64 "%s\n", |
| 3169 clr_reg_name, clr_reg_value, xreg(31, Reg31IsStackPointer), clr_normal); | 3169 clr_reg_name, clr_reg_value, xreg(31, Reg31IsStackPointer), clr_normal); |
| 3170 return true; | 3170 return true; |
| 3171 } else if (strcmp(desc, "wcsp") == 0) { | 3171 } else if (strcmp(desc, "wcsp") == 0) { |
| 3172 ASSERT(CodeFromName(desc) == static_cast<int>(kSPRegInternalCode)); | 3172 DCHECK(CodeFromName(desc) == static_cast<int>(kSPRegInternalCode)); |
| 3173 PrintF(stream_, "%s wcsp:%s 0x%08" PRIx32 "%s\n", | 3173 PrintF(stream_, "%s wcsp:%s 0x%08" PRIx32 "%s\n", |
| 3174 clr_reg_name, clr_reg_value, wreg(31, Reg31IsStackPointer), clr_normal); | 3174 clr_reg_name, clr_reg_value, wreg(31, Reg31IsStackPointer), clr_normal); |
| 3175 return true; | 3175 return true; |
| 3176 } | 3176 } |
| 3177 | 3177 |
| 3178 int i = CodeFromName(desc); | 3178 int i = CodeFromName(desc); |
| 3179 STATIC_ASSERT(kNumberOfRegisters == kNumberOfFPRegisters); | 3179 STATIC_ASSERT(kNumberOfRegisters == kNumberOfFPRegisters); |
| 3180 if (i < 0 || static_cast<unsigned>(i) >= kNumberOfFPRegisters) return false; | 3180 if (i < 0 || static_cast<unsigned>(i) >= kNumberOfFPRegisters) return false; |
| 3181 | 3181 |
| 3182 if (desc[0] == 'v') { | 3182 if (desc[0] == 'v') { |
| (...skipping 371 matching lines...) |
| 3554 if (parameters & LOG_FP_REGS) { PrintFPRegisters(); } | 3554 if (parameters & LOG_FP_REGS) { PrintFPRegisters(); } |
| 3555 break; | 3555 break; |
| 3556 case TRACE_DISABLE: | 3556 case TRACE_DISABLE: |
| 3557 set_log_parameters(log_parameters() & ~parameters); | 3557 set_log_parameters(log_parameters() & ~parameters); |
| 3558 break; | 3558 break; |
| 3559 case TRACE_OVERRIDE: | 3559 case TRACE_OVERRIDE: |
| 3560 set_log_parameters(parameters); | 3560 set_log_parameters(parameters); |
| 3561 break; | 3561 break; |
| 3562 default: | 3562 default: |
| 3563 // We don't support a one-shot LOG_DISASM. | 3563 // We don't support a one-shot LOG_DISASM. |
| 3564 ASSERT((parameters & LOG_DISASM) == 0); | 3564 DCHECK((parameters & LOG_DISASM) == 0); |
| 3565 // Don't print information that is already being traced. | 3565 // Don't print information that is already being traced. |
| 3566 parameters &= ~log_parameters(); | 3566 parameters &= ~log_parameters(); |
| 3567 // Print the requested information. | 3567 // Print the requested information. |
| 3568 if (parameters & LOG_SYS_REGS) PrintSystemRegisters(true); | 3568 if (parameters & LOG_SYS_REGS) PrintSystemRegisters(true); |
| 3569 if (parameters & LOG_REGS) PrintRegisters(true); | 3569 if (parameters & LOG_REGS) PrintRegisters(true); |
| 3570 if (parameters & LOG_FP_REGS) PrintFPRegisters(true); | 3570 if (parameters & LOG_FP_REGS) PrintFPRegisters(true); |
| 3571 } | 3571 } |
| 3572 | 3572 |
| 3573 // The stop parameters are inlined in the code. Skip them: | 3573 // The stop parameters are inlined in the code. Skip them: |
| 3574 // - Skip to the end of the message string. | 3574 // - Skip to the end of the message string. |
| 3575 size_t size = kDebugMessageOffset + strlen(message) + 1; | 3575 size_t size = kDebugMessageOffset + strlen(message) + 1; |
| 3576 pc_ = pc_->InstructionAtOffset(RoundUp(size, kInstructionSize)); | 3576 pc_ = pc_->InstructionAtOffset(RoundUp(size, kInstructionSize)); |
| 3577 // - Verify that the unreachable marker is present. | 3577 // - Verify that the unreachable marker is present. |
| 3578 ASSERT(pc_->Mask(ExceptionMask) == HLT); | 3578 DCHECK(pc_->Mask(ExceptionMask) == HLT); |
| 3579 ASSERT(pc_->ImmException() == kImmExceptionIsUnreachable); | 3579 DCHECK(pc_->ImmException() == kImmExceptionIsUnreachable); |
| 3580 // - Skip past the unreachable marker. | 3580 // - Skip past the unreachable marker. |
| 3581 set_pc(pc_->following()); | 3581 set_pc(pc_->following()); |
| 3582 | 3582 |
| 3583 // Check if the debugger should break. | 3583 // Check if the debugger should break. |
| 3584 if (parameters & BREAK) Debug(); | 3584 if (parameters & BREAK) Debug(); |
| 3585 | 3585 |
| 3586 } else if (instr->ImmException() == kImmExceptionIsRedirectedCall) { | 3586 } else if (instr->ImmException() == kImmExceptionIsRedirectedCall) { |
| 3587 DoRuntimeCall(instr); | 3587 DoRuntimeCall(instr); |
| 3588 } else if (instr->ImmException() == kImmExceptionIsPrintf) { | 3588 } else if (instr->ImmException() == kImmExceptionIsPrintf) { |
| 3589 DoPrintf(instr); | 3589 DoPrintf(instr); |
| 3590 | 3590 |
| 3591 } else if (instr->ImmException() == kImmExceptionIsUnreachable) { | 3591 } else if (instr->ImmException() == kImmExceptionIsUnreachable) { |
| 3592 fprintf(stream_, "Hit UNREACHABLE marker at PC=%p.\n", | 3592 fprintf(stream_, "Hit UNREACHABLE marker at PC=%p.\n", |
| 3593 reinterpret_cast<void*>(pc_)); | 3593 reinterpret_cast<void*>(pc_)); |
| 3594 abort(); | 3594 abort(); |
| 3595 | 3595 |
| 3596 } else { | 3596 } else { |
| 3597 base::OS::DebugBreak(); | 3597 base::OS::DebugBreak(); |
| 3598 } | 3598 } |
| 3599 break; | 3599 break; |
| 3600 } | 3600 } |
| 3601 | 3601 |
| 3602 default: | 3602 default: |
| 3603 UNIMPLEMENTED(); | 3603 UNIMPLEMENTED(); |
| 3604 } | 3604 } |
| 3605 } | 3605 } |
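The TRACE_* commands handled above are plain bit-mask updates on the simulator's log_parameters() word: enable adds bits, disable clears them, override replaces the whole set. A compact sketch of those semantics, with placeholder enum values rather than V8's:

    #include <cstdint>

    // Sketch of the trace-command semantics (values are illustrative only).
    enum LogParameters : uint32_t {
      LOG_DISASM   = 1u << 0,
      LOG_REGS     = 1u << 1,
      LOG_FP_REGS  = 1u << 2,
      LOG_SYS_REGS = 1u << 3,
    };

    struct TraceStateSketch {
      uint32_t log_parameters = 0;
      void Enable(uint32_t p)   { log_parameters |= p; }   // TRACE_ENABLE
      void Disable(uint32_t p)  { log_parameters &= ~p; }  // TRACE_DISABLE
      void Override(uint32_t p) { log_parameters = p; }    // TRACE_OVERRIDE
    };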
| 3606 | 3606 |
| 3607 | 3607 |
| 3608 void Simulator::DoPrintf(Instruction* instr) { | 3608 void Simulator::DoPrintf(Instruction* instr) { |
| 3609 ASSERT((instr->Mask(ExceptionMask) == HLT) && | 3609 DCHECK((instr->Mask(ExceptionMask) == HLT) && |
| 3610 (instr->ImmException() == kImmExceptionIsPrintf)); | 3610 (instr->ImmException() == kImmExceptionIsPrintf)); |
| 3611 | 3611 |
| 3612 // Read the arguments encoded inline in the instruction stream. | 3612 // Read the arguments encoded inline in the instruction stream. |
| 3613 uint32_t arg_count; | 3613 uint32_t arg_count; |
| 3614 uint32_t arg_pattern_list; | 3614 uint32_t arg_pattern_list; |
| 3615 STATIC_ASSERT(sizeof(*instr) == 1); | 3615 STATIC_ASSERT(sizeof(*instr) == 1); |
| 3616 memcpy(&arg_count, | 3616 memcpy(&arg_count, |
| 3617 instr + kPrintfArgCountOffset, | 3617 instr + kPrintfArgCountOffset, |
| 3618 sizeof(arg_count)); | 3618 sizeof(arg_count)); |
| 3619 memcpy(&arg_pattern_list, | 3619 memcpy(&arg_pattern_list, |
| 3620 instr + kPrintfArgPatternListOffset, | 3620 instr + kPrintfArgPatternListOffset, |
| 3621 sizeof(arg_pattern_list)); | 3621 sizeof(arg_pattern_list)); |
| 3622 | 3622 |
| 3623 ASSERT(arg_count <= kPrintfMaxArgCount); | 3623 DCHECK(arg_count <= kPrintfMaxArgCount); |
| 3624 ASSERT((arg_pattern_list >> (kPrintfArgPatternBits * arg_count)) == 0); | 3624 DCHECK((arg_pattern_list >> (kPrintfArgPatternBits * arg_count)) == 0); |
| 3625 | 3625 |
| 3626 // We need to call the host printf function with a set of arguments defined by | 3626 // We need to call the host printf function with a set of arguments defined by |
| 3627 // arg_pattern_list. Because we don't know the types and sizes of the | 3627 // arg_pattern_list. Because we don't know the types and sizes of the |
| 3628 // arguments, this is very difficult to do in a robust and portable way. To | 3628 // arguments, this is very difficult to do in a robust and portable way. To |
| 3629 // work around the problem, we pick apart the format string, and print one | 3629 // work around the problem, we pick apart the format string, and print one |
| 3630 // format placeholder at a time. | 3630 // format placeholder at a time. |
| 3631 | 3631 |
| 3632 // Allocate space for the format string. We take a copy, so we can modify it. | 3632 // Allocate space for the format string. We take a copy, so we can modify it. |
| 3633 // Leave enough space for one extra character per expected argument (plus the | 3633 // Leave enough space for one extra character per expected argument (plus the |
| 3634 // '\0' termination). | 3634 // '\0' termination). |
| 3635 const char * format_base = reg<const char *>(0); | 3635 const char * format_base = reg<const char *>(0); |
| 3636 ASSERT(format_base != NULL); | 3636 DCHECK(format_base != NULL); |
| 3637 size_t length = strlen(format_base) + 1; | 3637 size_t length = strlen(format_base) + 1; |
| 3638 char * const format = new char[length + arg_count]; | 3638 char * const format = new char[length + arg_count]; |
| 3639 | 3639 |
| 3640 // A list of chunks, each with exactly one format placeholder. | 3640 // A list of chunks, each with exactly one format placeholder. |
| 3641 const char * chunks[kPrintfMaxArgCount]; | 3641 const char * chunks[kPrintfMaxArgCount]; |
| 3642 | 3642 |
| 3643 // Copy the format string and search for format placeholders. | 3643 // Copy the format string and search for format placeholders. |
| 3644 uint32_t placeholder_count = 0; | 3644 uint32_t placeholder_count = 0; |
| 3645 char * format_scratch = format; | 3645 char * format_scratch = format; |
| 3646 for (size_t i = 0; i < length; i++) { | 3646 for (size_t i = 0; i < length; i++) { |
| (...skipping 14 matching lines...) |
| 3661 } | 3661 } |
| 3662 } else { | 3662 } else { |
| 3663 CHECK(placeholder_count < arg_count); | 3663 CHECK(placeholder_count < arg_count); |
| 3664 // Insert '\0' before placeholders, and store their locations. | 3664 // Insert '\0' before placeholders, and store their locations. |
| 3665 *format_scratch++ = '\0'; | 3665 *format_scratch++ = '\0'; |
| 3666 chunks[placeholder_count++] = format_scratch; | 3666 chunks[placeholder_count++] = format_scratch; |
| 3667 *format_scratch++ = format_base[i]; | 3667 *format_scratch++ = format_base[i]; |
| 3668 } | 3668 } |
| 3669 } | 3669 } |
| 3670 } | 3670 } |
| 3671 ASSERT(format_scratch <= (format + length + arg_count)); | 3671 DCHECK(format_scratch <= (format + length + arg_count)); |
| 3672 CHECK(placeholder_count == arg_count); | 3672 CHECK(placeholder_count == arg_count); |
| 3673 | 3673 |
| 3674 // Finally, call printf with each chunk, passing the appropriate register | 3674 // Finally, call printf with each chunk, passing the appropriate register |
| 3675 // argument. Normally, printf returns the number of bytes transmitted, so we | 3675 // argument. Normally, printf returns the number of bytes transmitted, so we |
| 3676 // can emulate a single printf call by adding the result from each chunk. If | 3676 // can emulate a single printf call by adding the result from each chunk. If |
| 3677 // any call returns a negative (error) value, though, just return that value. | 3677 // any call returns a negative (error) value, though, just return that value. |
| 3678 | 3678 |
| 3679 fprintf(stream_, "%s", clr_printf); | 3679 fprintf(stream_, "%s", clr_printf); |
| 3680 | 3680 |
| 3681 // Because '\0' is inserted before each placeholder, the first string in | 3681 // Because '\0' is inserted before each placeholder, the first string in |
| (...skipping 47 matching lines...) |
| 3729 | 3729 |
| 3730 delete[] format; | 3730 delete[] format; |
| 3731 } | 3731 } |
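The chunking strategy in DoPrintf() — cut the format string before each placeholder, then hand one chunk and one register argument at a time to the host printf — can be shown with a small standalone sketch. This ignores the width/precision and FP/string argument handling the real code performs, and the function name is invented for illustration.

    #include <cinttypes>
    #include <cstdint>
    #include <cstdio>
    #include <string>
    #include <vector>

    // Sketch: emulate one printf call by splitting 'fmt' into a leading
    // literal chunk plus one chunk per placeholder, then printing each
    // chunk with a single 64-bit argument. Returns the summed byte count,
    // or the first negative (error) value, as the code above does.
    static int PrintfByChunksSketch(const char* fmt,
                                    const std::vector<uint64_t>& args) {
      std::vector<std::string> chunks;
      std::string current;
      for (const char* p = fmt; *p != '\0'; ++p) {
        if (*p == '%') {
          if (p[1] == '%') {          // '%%' is a literal percent sign.
            current += "%%";
            ++p;
            continue;
          }
          chunks.push_back(current);  // Start a new chunk at the placeholder.
          current.clear();
        }
        current += *p;
      }
      chunks.push_back(current);

      if (chunks.size() != args.size() + 1) return -1;  // Count mismatch.

      int total = printf("%s", chunks[0].c_str());      // Leading literal text.
      if (total < 0) return total;
      for (size_t i = 0; i < args.size(); ++i) {
        // Placeholders are assumed to take a 64-bit unsigned argument,
        // e.g. "%" PRIx64; a mismatched conversion would be undefined.
        int written = printf(chunks[i + 1].c_str(), args[i]);
        if (written < 0) return written;                // Propagate errors.
        total += written;
      }
      return total;
    }

    // Example use:
    //   PrintfByChunksSketch("x0=0x%" PRIx64 " x1=0x%" PRIx64 "\n",
    //                        {0x1234, 0x5678});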
| 3732 | 3732 |
| 3733 | 3733 |
| 3734 #endif // USE_SIMULATOR | 3734 #endif // USE_SIMULATOR |
| 3735 | 3735 |
| 3736 } } // namespace v8::internal | 3736 } } // namespace v8::internal |
| 3737 | 3737 |
| 3738 #endif // V8_TARGET_ARCH_ARM64 | 3738 #endif // V8_TARGET_ARCH_ARM64 |