OLD | NEW |
1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 17 matching lines...) |
28 #include <stdlib.h> | 28 #include <stdlib.h> |
29 | 29 |
30 #include "v8.h" | 30 #include "v8.h" |
31 | 31 |
32 #include "cctest.h" | 32 #include "cctest.h" |
33 #include "code-stubs.h" | 33 #include "code-stubs.h" |
34 #include "test-code-stubs.h" | 34 #include "test-code-stubs.h" |
35 #include "factory.h" | 35 #include "factory.h" |
36 #include "macro-assembler.h" | 36 #include "macro-assembler.h" |
37 #include "platform.h" | 37 #include "platform.h" |
| 38 #include "simulator.h" |
38 | 39 |
39 using namespace v8::internal; | 40 using namespace v8::internal; |
40 | 41 |
41 | 42 #define __ masm. |
42 #define __ assm. | |
43 | 43 |
44 ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate, | 44 ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate, |
45 Register source_reg, | 45 Register source_reg, |
46 Register destination_reg) { | 46 Register destination_reg, |
| 47 bool inline_fastpath) { |
47 // Allocate an executable page of memory. | 48 // Allocate an executable page of memory. |
48 size_t actual_size; | 49 size_t actual_size; |
49 byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize, | 50 byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize, |
50 &actual_size, | 51 &actual_size, |
51 true)); | 52 true)); |
52 CHECK(buffer); | 53 CHECK(buffer); |
53 HandleScope handles(isolate); | 54 HandleScope handles(isolate); |
54 MacroAssembler assm(isolate, buffer, static_cast<int>(actual_size)); | 55 MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size)); |
55 assm.set_allow_stub_calls(false); | 56 masm.set_allow_stub_calls(false); |
56 int offset = | 57 DoubleToIStub stub(source_reg, destination_reg, 0, true, inline_fastpath); |
57 source_reg.is(rsp) ? 0 : (HeapNumber::kValueOffset - kSmiTagSize); | 58 |
58 DoubleToIStub stub(source_reg, destination_reg, offset, true); | |
59 byte* start = stub.GetCode(isolate)->instruction_start(); | 59 byte* start = stub.GetCode(isolate)->instruction_start(); |
| 60 Label done; |
60 | 61 |
61 __ push(rbx); | 62 // Save callee save registers. |
62 __ push(rcx); | 63 __ Push(r7, r6, r5, r4); |
63 __ push(rdx); | 64 __ Push(lr); |
64 __ push(rsi); | |
65 __ push(rdi); | |
66 | 65 |
67 if (!source_reg.is(rsp)) { | 66 // Push the double argument. |
68 __ lea(source_reg, MemOperand(rsp, -8 * kPointerSize - offset)); | 67 __ vmov(d0, r0, r1); |
| 68 __ sub(sp, sp, Operand(kDoubleSize)); |
| 69 __ vstr(d0, sp, 0); |
| 70 if (!source_reg.is(sp)) { |
| 71 __ mov(source_reg, sp); |
69 } | 72 } |
70 | 73 |
71 int param_offset = 7 * kPointerSize; | |
72 // Save registers to make sure they don't get clobbered. | 74 // Save registers to make sure they don't get clobbered. |
| 75 int source_reg_offset = kDoubleSize; |
73 int reg_num = 0; | 76 int reg_num = 0; |
74 for (;reg_num < Register::NumAllocatableRegisters(); ++reg_num) { | 77 for (;reg_num < Register::NumAllocatableRegisters(); ++reg_num) { |
75 Register reg = Register::from_code(reg_num); | 78 Register reg = Register::from_code(reg_num); |
76 if (!reg.is(rsp) && !reg.is(rbp) && !reg.is(destination_reg)) { | 79 if (!reg.is(destination_reg)) { |
77 __ push(reg); | 80 __ push(reg); |
78 param_offset += kPointerSize; | 81 source_reg_offset += kPointerSize; |
79 } | 82 } |
80 } | 83 } |
81 | 84 |
82 // Re-push the double argument | 85 // Re-push the double argument. |
83 __ subq(rsp, Immediate(kDoubleSize)); | 86 __ sub(sp, sp, Operand(kDoubleSize)); |
84 __ movsd(MemOperand(rsp, 0), xmm0); | 87 __ vstr(d0, sp, 0); |
85 | 88 |
86 // Call through to the actual stub | 89 // Call through to the actual stub |
| 90 if (inline_fastpath) { |
| 91 __ vldr(d0, MemOperand(source_reg)); |
| 92 __ TryInlineTruncateDoubleToI(destination_reg, d0, &done); |
| 93 if (destination_reg.is(source_reg) && !source_reg.is(sp)) { |
| 94 // Restore clobbered source_reg. |
| 95 __ add(source_reg, sp, Operand(source_reg_offset)); |
| 96 } |
| 97 } |
87 __ Call(start, RelocInfo::EXTERNAL_REFERENCE); | 98 __ Call(start, RelocInfo::EXTERNAL_REFERENCE); |
| 99 __ bind(&done); |
88 | 100 |
89 __ addq(rsp, Immediate(kDoubleSize)); | 101 __ add(sp, sp, Operand(kDoubleSize)); |
90 | 102 |
91 // Make sure no registers have been unexpectedly clobbered | 103 // Make sure no registers have been unexpectedly clobbered |
92 for (--reg_num; reg_num >= 0; --reg_num) { | 104 for (--reg_num; reg_num >= 0; --reg_num) { |
93 Register reg = Register::from_code(reg_num); | 105 Register reg = Register::from_code(reg_num); |
94 if (!reg.is(rsp) && !reg.is(rbp) && !reg.is(destination_reg)) { | 106 if (!reg.is(destination_reg)) { |
95 __ cmpq(reg, MemOperand(rsp, 0)); | 107 __ ldr(ip, MemOperand(sp, 0)); |
96 __ Assert(equal, kRegisterWasClobbered); | 108 __ cmp(reg, ip); |
97 __ addq(rsp, Immediate(kPointerSize)); | 109 __ Assert(eq, kRegisterWasClobbered); |
| 110 __ add(sp, sp, Operand(kPointerSize)); |
98 } | 111 } |
99 } | 112 } |
100 | 113 |
101 __ movq(rax, destination_reg); | 114 __ add(sp, sp, Operand(kDoubleSize)); |
102 | 115 |
103 __ pop(rdi); | 116 if (!destination_reg.is(r0)) |
104 __ pop(rsi); | 117 __ mov(r0, destination_reg); |
105 __ pop(rdx); | |
106 __ pop(rcx); | |
107 __ pop(rbx); | |
108 | 118 |
109 __ ret(0); | 119 // Restore callee save registers. |
| 120 __ Pop(lr); |
| 121 __ Pop(r7, r6, r5, r4); |
| 122 |
| 123 __ Ret(0); |
110 | 124 |
111 CodeDesc desc; | 125 CodeDesc desc; |
112 assm.GetCode(&desc); | 126 masm.GetCode(&desc); |
113 return reinterpret_cast<ConvertDToIFunc>( | 127 return (reinterpret_cast<ConvertDToIFunc>( |
114 reinterpret_cast<intptr_t>(buffer)); | 128 reinterpret_cast<intptr_t>(buffer))); |
115 } | 129 } |
116 | 130 |
117 #undef __ | 131 #undef __ |
118 | 132 |
119 | 133 |
120 static Isolate* GetIsolateFrom(LocalContext* context) { | 134 static Isolate* GetIsolateFrom(LocalContext* context) { |
121 return reinterpret_cast<Isolate*>((*context)->GetIsolate()); | 135 return reinterpret_cast<Isolate*>((*context)->GetIsolate()); |
122 } | 136 } |
123 | 137 |
124 | 138 |
| 139 int32_t RunGeneratedCodeCallWrapper(ConvertDToIFunc func, |
| 140 double from) { |
| 141 #ifdef USE_SIMULATOR |
| 142 return reinterpret_cast<int32_t>(CALL_GENERATED_CODE(func, from, 0, 0, 0, 0)); |
| 143 #else |
| 144 return (*func)(from); |
| 145 #endif |
| 146 } |
| 147 |
| 148 |
125 TEST(ConvertDToI) { | 149 TEST(ConvertDToI) { |
126 CcTest::InitializeVM(); | 150 CcTest::InitializeVM(); |
127 LocalContext context; | 151 LocalContext context; |
128 Isolate* isolate = GetIsolateFrom(&context); | 152 Isolate* isolate = GetIsolateFrom(&context); |
129 HandleScope scope(isolate); | 153 HandleScope scope(isolate); |
130 | 154 |
131 #if DEBUG | 155 #if DEBUG |
132 // Verify that the tests actually work with the C version. In the release | 156 // Verify that the tests actually work with the C version. In the release |
133 // code, the compiler optimizes it away because it's all constant, but does it | 157 // code, the compiler optimizes it away because it's all constant, but does it |
134 // wrong, triggering an assert on gcc. | 158 // wrong, triggering an assert on gcc. |
135 RunAllTruncationTests(&ConvertDToICVersion); | 159 RunAllTruncationTests(&ConvertDToICVersion); |
136 #endif | 160 #endif |
137 | 161 |
138 Register source_registers[] = {rsp, rax, rbx, rcx, rdx, rsi, rdi, r8, r9}; | 162 Register source_registers[] = {sp, r0, r1, r2, r3, r4, r5, r6, r7}; |
139 Register dest_registers[] = {rax, rbx, rcx, rdx, rsi, rdi, r8, r9}; | 163 Register dest_registers[] = {r0, r1, r2, r3, r4, r5, r6, r7}; |
140 | 164 |
141 for (size_t s = 0; s < sizeof(source_registers) / sizeof(Register); s++) { | 165 for (size_t s = 0; s < sizeof(source_registers) / sizeof(Register); s++) { |
142 for (size_t d = 0; d < sizeof(dest_registers) / sizeof(Register); d++) { | 166 for (size_t d = 0; d < sizeof(dest_registers) / sizeof(Register); d++) { |
143 RunAllTruncationTests( | 167 RunAllTruncationTests( |
| 168 RunGeneratedCodeCallWrapper, |
144 MakeConvertDToIFuncTrampoline(isolate, | 169 MakeConvertDToIFuncTrampoline(isolate, |
145 source_registers[s], | 170 source_registers[s], |
146 dest_registers[d])); | 171 dest_registers[d], |
| 172 false)); |
| 173 RunAllTruncationTests( |
| 174 RunGeneratedCodeCallWrapper, |
| 175 MakeConvertDToIFuncTrampoline(isolate, |
| 176 source_registers[s], |
| 177 dest_registers[d], |
| 178 true)); |
147 } | 179 } |
148 } | 180 } |
149 } | 181 } |