| OLD | NEW |
| 1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 47 matching lines...) |
| 58 DoubleToIStub stub(source_reg, destination_reg, offset, true); | 58 DoubleToIStub stub(source_reg, destination_reg, offset, true); |
| 59 byte* start = stub.GetCode(isolate)->instruction_start(); | 59 byte* start = stub.GetCode(isolate)->instruction_start(); |
| 60 | 60 |
| 61 __ push(rbx); | 61 __ push(rbx); |
| 62 __ push(rcx); | 62 __ push(rcx); |
| 63 __ push(rdx); | 63 __ push(rdx); |
| 64 __ push(rsi); | 64 __ push(rsi); |
| 65 __ push(rdi); | 65 __ push(rdi); |
| 66 | 66 |
| 67 if (!source_reg.is(rsp)) { | 67 if (!source_reg.is(rsp)) { |
| 68 __ lea(source_reg, MemOperand(rsp, -8 * kPointerSize - offset)); | 68 // The argument we pass to the stub is not a heap number, but instead |
| 69 // stack-allocated and offset-wise made to look like a heap number for |
| 70 // the stub. We create that "heap number" after pushing all allocatable |
| 71 // registers. |
| 72 int double_argument_slot = |
| 73 (Register::NumAllocatableRegisters() - 1) * kPointerSize + kDoubleSize; |
| 74 __ lea(source_reg, MemOperand(rsp, -double_argument_slot - offset)); |
| 69 } | 75 } |
| 70 | 76 |
| 71 int param_offset = 7 * kPointerSize; | |
| 72 // Save registers to make sure they don't get clobbered. | 77 // Save registers to make sure they don't get clobbered. |
| 73 int reg_num = 0; | 78 int reg_num = 0; |
| 74 for (;reg_num < Register::NumAllocatableRegisters(); ++reg_num) { | 79 for (;reg_num < Register::NumAllocatableRegisters(); ++reg_num) { |
| 75 Register reg = Register::from_code(reg_num); | 80 Register reg = Register::FromAllocationIndex(reg_num); |
| 76 if (!reg.is(rsp) && !reg.is(rbp) && !reg.is(destination_reg)) { | 81 if (!reg.is(rsp) && !reg.is(rbp) && !reg.is(destination_reg)) { |
| 77 __ push(reg); | 82 __ push(reg); |
| 78 param_offset += kPointerSize; | |
| 79 } | 83 } |
| 80 } | 84 } |
| 81 | 85 |
| 82 // Re-push the double argument | 86 // Put the double argument into the designated double argument slot. |
| 83 __ subq(rsp, Immediate(kDoubleSize)); | 87 __ subq(rsp, Immediate(kDoubleSize)); |
| 84 __ movsd(MemOperand(rsp, 0), xmm0); | 88 __ movsd(MemOperand(rsp, 0), xmm0); |
| 85 | 89 |
| 86 // Call through to the actual stub | 90 // Call through to the actual stub |
| 87 __ Call(start, RelocInfo::EXTERNAL_REFERENCE); | 91 __ Call(start, RelocInfo::EXTERNAL_REFERENCE); |
| 88 | 92 |
| 89 __ addq(rsp, Immediate(kDoubleSize)); | 93 __ addq(rsp, Immediate(kDoubleSize)); |
| 90 | 94 |
| 91 // Make sure no registers have been unexpectedly clobbered | 95 // Make sure no registers have been unexpectedly clobbered |
| 92 for (--reg_num; reg_num >= 0; --reg_num) { | 96 for (--reg_num; reg_num >= 0; --reg_num) { |
| 93 Register reg = Register::from_code(reg_num); | 97 Register reg = Register::FromAllocationIndex(reg_num); |
| 94 if (!reg.is(rsp) && !reg.is(rbp) && !reg.is(destination_reg)) { | 98 if (!reg.is(rsp) && !reg.is(rbp) && !reg.is(destination_reg)) { |
| 95 __ cmpq(reg, MemOperand(rsp, 0)); | 99 __ cmpq(reg, MemOperand(rsp, 0)); |
| 96 __ Assert(equal, kRegisterWasClobbered); | 100 __ Assert(equal, kRegisterWasClobbered); |
| 97 __ addq(rsp, Immediate(kPointerSize)); | 101 __ addq(rsp, Immediate(kPointerSize)); |
| 98 } | 102 } |
| 99 } | 103 } |
| 100 | 104 |
| 101 __ movq(rax, destination_reg); | 105 __ movq(rax, destination_reg); |
| 102 | 106 |
| 103 __ pop(rdi); | 107 __ pop(rdi); |
| (...skipping 36 matching lines...) |
| 140 | 144 |
| 141 for (size_t s = 0; s < sizeof(source_registers) / sizeof(Register); s++) { | 145 for (size_t s = 0; s < sizeof(source_registers) / sizeof(Register); s++) { |
| 142 for (size_t d = 0; d < sizeof(dest_registers) / sizeof(Register); d++) { | 146 for (size_t d = 0; d < sizeof(dest_registers) / sizeof(Register); d++) { |
| 143 RunAllTruncationTests( | 147 RunAllTruncationTests( |
| 144 MakeConvertDToIFuncTrampoline(isolate, | 148 MakeConvertDToIFuncTrampoline(isolate, |
| 145 source_registers[s], | 149 source_registers[s], |
| 146 dest_registers[d])); | 150 dest_registers[d])); |
| 147 } | 151 } |
| 148 } | 152 } |
| 149 } | 153 } |
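The new comment in the patch explains that the stub's "heap number" argument is really a stack slot positioned, via double_argument_slot, to coincide with the spot where the trampoline later spills xmm0. A minimal standalone sketch of that offset arithmetic, assuming the x64 values kPointerSize == 8 and kDoubleSize == 8 and an illustrative allocatable-register count of 11 (the real count comes from Register::NumAllocatableRegisters()):

#include <cassert>

int main() {
  const int kPointerSize = 8;      // assumption: x64 pointer size
  const int kDoubleSize = 8;       // assumption: size of a double
  const int kNumAllocatable = 11;  // illustrative count, not taken from the patch
  const int offset = 4;            // example HeapNumber value offset handed to the stub

  // Slot computed by the lea, relative to rsp at that point (after the
  // initial five pushes, before the allocatable registers are saved).
  const int double_argument_slot =
      (kNumAllocatable - 1) * kPointerSize + kDoubleSize;
  const int source_reg = -double_argument_slot - offset;

  // rsp after pushing every allocatable register except the destination
  // register and then reserving kDoubleSize bytes for the movsd of xmm0.
  const int rsp_after_pushes =
      -(kNumAllocatable - 1) * kPointerSize - kDoubleSize;

  // The stub reads its double from source_reg + offset; that must be the
  // slot the trampoline just wrote.
  assert(source_reg + offset == rsp_after_pushes);
  return 0;
}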