
Unified Diff: src/x87/codegen-x87.cc

Issue 293743005: Introduce x87 port (Closed)
Base URL: git://github.com/v8/v8.git@master
Patch Set: rebase (created 6 years, 7 months ago)
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "v8.h"

-#if V8_TARGET_ARCH_IA32
+#if V8_TARGET_ARCH_X87

 #include "codegen.h"
 #include "heap.h"
 #include "macro-assembler.h"

 namespace v8 {
 namespace internal {


 // -------------------------------------------------------------------------
(...skipping 10 matching lines...)
   masm->LeaveFrame(StackFrame::INTERNAL);
   ASSERT(masm->has_frame());
   masm->set_has_frame(false);
 }


 #define __ masm.

 UnaryMathFunction CreateExpFunction() {
-  if (!FLAG_fast_math) return &std::exp;
-  size_t actual_size;
-  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
-  if (buffer == NULL) return &std::exp;
-  ExternalReference::InitializeMathExpData();
-
-  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
-  // esp[1 * kPointerSize]: raw double input
-  // esp[0 * kPointerSize]: return address
-  {
-    XMMRegister input = xmm1;
-    XMMRegister result = xmm2;
-    __ movsd(input, Operand(esp, 1 * kPointerSize));
-    __ push(eax);
-    __ push(ebx);
-
-    MathExpGenerator::EmitMathExp(&masm, input, result, xmm0, eax, ebx);
-
-    __ pop(ebx);
-    __ pop(eax);
-    __ movsd(Operand(esp, 1 * kPointerSize), result);
-    __ fld_d(Operand(esp, 1 * kPointerSize));
-    __ Ret();
-  }
-
-  CodeDesc desc;
-  masm.GetCode(&desc);
-  ASSERT(!RelocInfo::RequiresRelocation(desc));
-
-  CPU::FlushICache(buffer, actual_size);
-  OS::ProtectCode(buffer, actual_size);
-  return FUNCTION_CAST<UnaryMathFunction>(buffer);
+  // No SSE2 support
+  return &std::exp;
 }


 UnaryMathFunction CreateSqrtFunction() {
-  size_t actual_size;
-  // Allocate buffer in executable space.
-  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
-                                                 &actual_size,
-                                                 true));
-  if (buffer == NULL) return &std::sqrt;
-  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
-  // esp[1 * kPointerSize]: raw double input
-  // esp[0 * kPointerSize]: return address
-  // Move double input into registers.
-  {
-    __ movsd(xmm0, Operand(esp, 1 * kPointerSize));
-    __ sqrtsd(xmm0, xmm0);
-    __ movsd(Operand(esp, 1 * kPointerSize), xmm0);
-    // Load result into floating point register as return value.
-    __ fld_d(Operand(esp, 1 * kPointerSize));
-    __ Ret();
-  }
-
-  CodeDesc desc;
-  masm.GetCode(&desc);
-  ASSERT(!RelocInfo::RequiresRelocation(desc));
-
-  CPU::FlushICache(buffer, actual_size);
-  OS::ProtectCode(buffer, actual_size);
-  return FUNCTION_CAST<UnaryMathFunction>(buffer);
+  // No SSE2 support
+  return &std::sqrt;
 }

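Both stubs above collapse to the same fallback: with no SSE2 on the target, the port stops JIT-compiling math stubs into an executable buffer and simply hands back the C library routine through the existing UnaryMathFunction pointer type. A minimal C++ sketch of that pattern (the Sketch suffix marks names that are ours, not V8's):

    #include <cmath>

    // Same shape as V8's UnaryMathFunction typedef.
    typedef double (*UnaryMathFunction)(double x);

    UnaryMathFunction CreateExpFunctionSketch() {
      // No custom code is generated; on the ia32 cdecl convention std::exp
      // already returns its result in st0, which is what stub callers expect.
      return &std::exp;
    }

    // Usage: double y = CreateExpFunctionSketch()(1.0);  // y == e (~2.71828)
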
 // Helper functions for CreateMemMoveFunction.
 #undef __
 #define __ ACCESS_MASM(masm)

 enum Direction { FORWARD, BACKWARD };
 enum Alignment { MOVE_ALIGNED, MOVE_UNALIGNED };

-// Expects registers:
-// esi - source, aligned if alignment == ALIGNED
-// edi - destination, always aligned
-// ecx - count (copy size in bytes)
-// edx - loop count (number of 64 byte chunks)
-void MemMoveEmitMainLoop(MacroAssembler* masm,
-                         Label* move_last_15,
-                         Direction direction,
-                         Alignment alignment) {
-  Register src = esi;
-  Register dst = edi;
-  Register count = ecx;
-  Register loop_count = edx;
-  Label loop, move_last_31, move_last_63;
-  __ cmp(loop_count, 0);
-  __ j(equal, &move_last_63);
-  __ bind(&loop);
-  // Main loop. Copy in 64 byte chunks.
-  if (direction == BACKWARD) __ sub(src, Immediate(0x40));
-  __ movdq(alignment == MOVE_ALIGNED, xmm0, Operand(src, 0x00));
-  __ movdq(alignment == MOVE_ALIGNED, xmm1, Operand(src, 0x10));
-  __ movdq(alignment == MOVE_ALIGNED, xmm2, Operand(src, 0x20));
-  __ movdq(alignment == MOVE_ALIGNED, xmm3, Operand(src, 0x30));
-  if (direction == FORWARD) __ add(src, Immediate(0x40));
-  if (direction == BACKWARD) __ sub(dst, Immediate(0x40));
-  __ movdqa(Operand(dst, 0x00), xmm0);
-  __ movdqa(Operand(dst, 0x10), xmm1);
-  __ movdqa(Operand(dst, 0x20), xmm2);
-  __ movdqa(Operand(dst, 0x30), xmm3);
-  if (direction == FORWARD) __ add(dst, Immediate(0x40));
-  __ dec(loop_count);
-  __ j(not_zero, &loop);
-  // At most 63 bytes left to copy.
-  __ bind(&move_last_63);
-  __ test(count, Immediate(0x20));
-  __ j(zero, &move_last_31);
-  if (direction == BACKWARD) __ sub(src, Immediate(0x20));
-  __ movdq(alignment == MOVE_ALIGNED, xmm0, Operand(src, 0x00));
-  __ movdq(alignment == MOVE_ALIGNED, xmm1, Operand(src, 0x10));
-  if (direction == FORWARD) __ add(src, Immediate(0x20));
-  if (direction == BACKWARD) __ sub(dst, Immediate(0x20));
-  __ movdqa(Operand(dst, 0x00), xmm0);
-  __ movdqa(Operand(dst, 0x10), xmm1);
-  if (direction == FORWARD) __ add(dst, Immediate(0x20));
-  // At most 31 bytes left to copy.
-  __ bind(&move_last_31);
-  __ test(count, Immediate(0x10));
-  __ j(zero, move_last_15);
-  if (direction == BACKWARD) __ sub(src, Immediate(0x10));
-  __ movdq(alignment == MOVE_ALIGNED, xmm0, Operand(src, 0));
-  if (direction == FORWARD) __ add(src, Immediate(0x10));
-  if (direction == BACKWARD) __ sub(dst, Immediate(0x10));
-  __ movdqa(Operand(dst, 0), xmm0);
-  if (direction == FORWARD) __ add(dst, Immediate(0x10));
-}
-

 void MemMoveEmitPopAndReturn(MacroAssembler* masm) {
   __ pop(esi);
   __ pop(edi);
   __ ret(0);
 }


 #undef __
 #define __ masm.
(...skipping 27 matching lines...)
   // Stack layout:
   // esp[12]: Third argument, size.
   // esp[8]: Second argument, source pointer.
   // esp[4]: First argument, destination pointer.
   // esp[0]: return address

   const int kDestinationOffset = 1 * kPointerSize;
   const int kSourceOffset = 2 * kPointerSize;
   const int kSizeOffset = 3 * kPointerSize;

-  // When copying up to this many bytes, use special "small" handlers.
-  const size_t kSmallCopySize = 8;
-  // When copying up to this many bytes, use special "medium" handlers.
-  const size_t kMediumCopySize = 63;
-  // When non-overlapping region of src and dst is less than this,
-  // use a more careful implementation (slightly slower).
-  const size_t kMinMoveDistance = 16;
-  // Note that these values are dictated by the implementation below,
-  // do not just change them and hope things will work!
-
   int stack_offset = 0;  // Update if we change the stack height.

   Label backward, backward_much_overlap;
   Label forward_much_overlap, small_size, medium_size, pop_and_return;
   __ push(edi);
   __ push(esi);
   stack_offset += 2 * kPointerSize;
   Register dst = edi;
   Register src = esi;
   Register count = ecx;
-  Register loop_count = edx;
   __ mov(dst, Operand(esp, stack_offset + kDestinationOffset));
   __ mov(src, Operand(esp, stack_offset + kSourceOffset));
   __ mov(count, Operand(esp, stack_offset + kSizeOffset));

   __ cmp(dst, src);
   __ j(equal, &pop_and_return);

-  __ prefetch(Operand(src, 0), 1);
-  __ cmp(count, kSmallCopySize);
-  __ j(below_equal, &small_size);
-  __ cmp(count, kMediumCopySize);
-  __ j(below_equal, &medium_size);
+  // No SSE2.
+  Label forward;
+  __ cmp(count, 0);
+  __ j(equal, &pop_and_return);
   __ cmp(dst, src);
   __ j(above, &backward);
-
+  __ jmp(&forward);
   {
-    // |dst| is a lower address than |src|. Copy front-to-back.
-    Label unaligned_source, move_last_15, skip_last_move;
-    __ mov(eax, src);
-    __ sub(eax, dst);
-    __ cmp(eax, kMinMoveDistance);
-    __ j(below, &forward_much_overlap);
-    // Copy first 16 bytes.
-    __ movdqu(xmm0, Operand(src, 0));
-    __ movdqu(Operand(dst, 0), xmm0);
-    // Determine distance to alignment: 16 - (dst & 0xF).
-    __ mov(edx, dst);
-    __ and_(edx, 0xF);
-    __ neg(edx);
-    __ add(edx, Immediate(16));
-    __ add(dst, edx);
-    __ add(src, edx);
-    __ sub(count, edx);
-    // dst is now aligned. Main copy loop.
-    __ mov(loop_count, count);
-    __ shr(loop_count, 6);
-    // Check if src is also aligned.
-    __ test(src, Immediate(0xF));
-    __ j(not_zero, &unaligned_source);
-    // Copy loop for aligned source and destination.
-    MemMoveEmitMainLoop(&masm, &move_last_15, FORWARD, MOVE_ALIGNED);
-    // At most 15 bytes to copy. Copy 16 bytes at end of string.
-    __ bind(&move_last_15);
-    __ and_(count, 0xF);
-    __ j(zero, &skip_last_move, Label::kNear);
-    __ movdqu(xmm0, Operand(src, count, times_1, -0x10));
-    __ movdqu(Operand(dst, count, times_1, -0x10), xmm0);
-    __ bind(&skip_last_move);
-    MemMoveEmitPopAndReturn(&masm);
-
-    // Copy loop for unaligned source and aligned destination.
-    __ bind(&unaligned_source);
-    MemMoveEmitMainLoop(&masm, &move_last_15, FORWARD, MOVE_UNALIGNED);
-    __ jmp(&move_last_15);
-
-    // Less than kMinMoveDistance offset between dst and src.
-    Label loop_until_aligned, last_15_much_overlap;
-    __ bind(&loop_until_aligned);
+    // Simple forward copier.
+    Label forward_loop_1byte, forward_loop_4byte;
+    __ bind(&forward_loop_4byte);
+    __ mov(eax, Operand(src, 0));
+    __ sub(count, Immediate(4));
+    __ add(src, Immediate(4));
+    __ mov(Operand(dst, 0), eax);
+    __ add(dst, Immediate(4));
+    __ bind(&forward);  // Entry point.
+    __ cmp(count, 3);
+    __ j(above, &forward_loop_4byte);
+    __ bind(&forward_loop_1byte);
+    __ cmp(count, 0);
+    __ j(below_equal, &pop_and_return);
     __ mov_b(eax, Operand(src, 0));
+    __ dec(count);
     __ inc(src);
     __ mov_b(Operand(dst, 0), eax);
     __ inc(dst);
-    __ dec(count);
-    __ bind(&forward_much_overlap);  // Entry point into this block.
-    __ test(dst, Immediate(0xF));
-    __ j(not_zero, &loop_until_aligned);
-    // dst is now aligned, src can't be. Main copy loop.
-    __ mov(loop_count, count);
-    __ shr(loop_count, 6);
-    MemMoveEmitMainLoop(&masm, &last_15_much_overlap,
-                        FORWARD, MOVE_UNALIGNED);
-    __ bind(&last_15_much_overlap);
-    __ and_(count, 0xF);
-    __ j(zero, &pop_and_return);
-    __ cmp(count, kSmallCopySize);
-    __ j(below_equal, &small_size);
-    __ jmp(&medium_size);
-  }
-
-  {
-    // |dst| is a higher address than |src|. Copy backwards.
-    Label unaligned_source, move_first_15, skip_last_move;
-    __ bind(&backward);
-    // |dst| and |src| always point to the end of what's left to copy.
-    __ add(dst, count);
-    __ add(src, count);
-    __ mov(eax, dst);
-    __ sub(eax, src);
-    __ cmp(eax, kMinMoveDistance);
-    __ j(below, &backward_much_overlap);
-    // Copy last 16 bytes.
-    __ movdqu(xmm0, Operand(src, -0x10));
-    __ movdqu(Operand(dst, -0x10), xmm0);
-    // Find distance to alignment: dst & 0xF
-    __ mov(edx, dst);
-    __ and_(edx, 0xF);
-    __ sub(dst, edx);
-    __ sub(src, edx);
-    __ sub(count, edx);
-    // dst is now aligned. Main copy loop.
-    __ mov(loop_count, count);
-    __ shr(loop_count, 6);
-    // Check if src is also aligned.
-    __ test(src, Immediate(0xF));
-    __ j(not_zero, &unaligned_source);
-    // Copy loop for aligned source and destination.
-    MemMoveEmitMainLoop(&masm, &move_first_15, BACKWARD, MOVE_ALIGNED);
-    // At most 15 bytes to copy. Copy 16 bytes at beginning of string.
-    __ bind(&move_first_15);
-    __ and_(count, 0xF);
-    __ j(zero, &skip_last_move, Label::kNear);
-    __ sub(src, count);
-    __ sub(dst, count);
-    __ movdqu(xmm0, Operand(src, 0));
-    __ movdqu(Operand(dst, 0), xmm0);
-    __ bind(&skip_last_move);
-    MemMoveEmitPopAndReturn(&masm);
-
-    // Copy loop for unaligned source and aligned destination.
-    __ bind(&unaligned_source);
-    MemMoveEmitMainLoop(&masm, &move_first_15, BACKWARD, MOVE_UNALIGNED);
-    __ jmp(&move_first_15);
-
-    // Less than kMinMoveDistance offset between dst and src.
-    Label loop_until_aligned, first_15_much_overlap;
-    __ bind(&loop_until_aligned);
-    __ dec(src);
-    __ dec(dst);
-    __ mov_b(eax, Operand(src, 0));
-    __ mov_b(Operand(dst, 0), eax);
-    __ dec(count);
-    __ bind(&backward_much_overlap);  // Entry point into this block.
-    __ test(dst, Immediate(0xF));
-    __ j(not_zero, &loop_until_aligned);
-    // dst is now aligned, src can't be. Main copy loop.
-    __ mov(loop_count, count);
-    __ shr(loop_count, 6);
-    MemMoveEmitMainLoop(&masm, &first_15_much_overlap,
-                        BACKWARD, MOVE_UNALIGNED);
-    __ bind(&first_15_much_overlap);
-    __ and_(count, 0xF);
-    __ j(zero, &pop_and_return);
-    // Small/medium handlers expect dst/src to point to the beginning.
-    __ sub(dst, count);
-    __ sub(src, count);
-    __ cmp(count, kSmallCopySize);
-    __ j(below_equal, &small_size);
-    __ jmp(&medium_size);
+    __ jmp(&forward_loop_1byte);
   }
   {
-    // Special handlers for 9 <= copy_size < 64. No assumptions about
-    // alignment or move distance, so all reads must be unaligned and
-    // must happen before any writes.
-    Label medium_handlers, f9_16, f17_32, f33_48, f49_63;
+    // Simple backward copier.
+    Label backward_loop_1byte, backward_loop_4byte, entry_shortcut;
+    __ bind(&backward);
+    __ add(src, count);
+    __ add(dst, count);
+    __ cmp(count, 3);
+    __ j(below_equal, &entry_shortcut);

-    __ bind(&f9_16);
-    __ movsd(xmm0, Operand(src, 0));
-    __ movsd(xmm1, Operand(src, count, times_1, -8));
-    __ movsd(Operand(dst, 0), xmm0);
-    __ movsd(Operand(dst, count, times_1, -8), xmm1);
-    MemMoveEmitPopAndReturn(&masm);
-
-    __ bind(&f17_32);
-    __ movdqu(xmm0, Operand(src, 0));
-    __ movdqu(xmm1, Operand(src, count, times_1, -0x10));
-    __ movdqu(Operand(dst, 0x00), xmm0);
-    __ movdqu(Operand(dst, count, times_1, -0x10), xmm1);
-    MemMoveEmitPopAndReturn(&masm);
-
-    __ bind(&f33_48);
-    __ movdqu(xmm0, Operand(src, 0x00));
-    __ movdqu(xmm1, Operand(src, 0x10));
-    __ movdqu(xmm2, Operand(src, count, times_1, -0x10));
-    __ movdqu(Operand(dst, 0x00), xmm0);
-    __ movdqu(Operand(dst, 0x10), xmm1);
-    __ movdqu(Operand(dst, count, times_1, -0x10), xmm2);
-    MemMoveEmitPopAndReturn(&masm);
-
-    __ bind(&f49_63);
-    __ movdqu(xmm0, Operand(src, 0x00));
-    __ movdqu(xmm1, Operand(src, 0x10));
-    __ movdqu(xmm2, Operand(src, 0x20));
-    __ movdqu(xmm3, Operand(src, count, times_1, -0x10));
-    __ movdqu(Operand(dst, 0x00), xmm0);
-    __ movdqu(Operand(dst, 0x10), xmm1);
-    __ movdqu(Operand(dst, 0x20), xmm2);
-    __ movdqu(Operand(dst, count, times_1, -0x10), xmm3);
-    MemMoveEmitPopAndReturn(&masm);
-
-    __ bind(&medium_handlers);
-    __ dd(conv.address(&f9_16));
-    __ dd(conv.address(&f17_32));
-    __ dd(conv.address(&f33_48));
-    __ dd(conv.address(&f49_63));
-
-    __ bind(&medium_size);  // Entry point into this block.
-    __ mov(eax, count);
-    __ dec(eax);
-    __ shr(eax, 4);
-    if (FLAG_debug_code) {
-      Label ok;
-      __ cmp(eax, 3);
-      __ j(below_equal, &ok);
-      __ int3();
-      __ bind(&ok);
-    }
-    __ mov(eax, Operand(eax, times_4, conv.address(&medium_handlers)));
-    __ jmp(eax);
-  }
-  {
-    // Specialized copiers for copy_size <= 8 bytes.
-    Label small_handlers, f0, f1, f2, f3, f4, f5_8;
-    __ bind(&f0);
-    MemMoveEmitPopAndReturn(&masm);
-
-    __ bind(&f1);
+    __ bind(&backward_loop_4byte);
+    __ sub(src, Immediate(4));
+    __ sub(count, Immediate(4));
+    __ mov(eax, Operand(src, 0));
+    __ sub(dst, Immediate(4));
+    __ mov(Operand(dst, 0), eax);
+    __ cmp(count, 3);
+    __ j(above, &backward_loop_4byte);
+    __ bind(&backward_loop_1byte);
+    __ cmp(count, 0);
+    __ j(below_equal, &pop_and_return);
+    __ bind(&entry_shortcut);
+    __ dec(src);
+    __ dec(count);
     __ mov_b(eax, Operand(src, 0));
+    __ dec(dst);
     __ mov_b(Operand(dst, 0), eax);
-    MemMoveEmitPopAndReturn(&masm);
-
-    __ bind(&f2);
-    __ mov_w(eax, Operand(src, 0));
-    __ mov_w(Operand(dst, 0), eax);
-    MemMoveEmitPopAndReturn(&masm);
-
-    __ bind(&f3);
-    __ mov_w(eax, Operand(src, 0));
-    __ mov_b(edx, Operand(src, 2));
-    __ mov_w(Operand(dst, 0), eax);
-    __ mov_b(Operand(dst, 2), edx);
-    MemMoveEmitPopAndReturn(&masm);
-
-    __ bind(&f4);
-    __ mov(eax, Operand(src, 0));
-    __ mov(Operand(dst, 0), eax);
-    MemMoveEmitPopAndReturn(&masm);
-
-    __ bind(&f5_8);
-    __ mov(eax, Operand(src, 0));
-    __ mov(edx, Operand(src, count, times_1, -4));
-    __ mov(Operand(dst, 0), eax);
-    __ mov(Operand(dst, count, times_1, -4), edx);
-    MemMoveEmitPopAndReturn(&masm);
-
-    __ bind(&small_handlers);
-    __ dd(conv.address(&f0));
-    __ dd(conv.address(&f1));
-    __ dd(conv.address(&f2));
-    __ dd(conv.address(&f3));
-    __ dd(conv.address(&f4));
-    __ dd(conv.address(&f5_8));
-    __ dd(conv.address(&f5_8));
-    __ dd(conv.address(&f5_8));
-    __ dd(conv.address(&f5_8));
-
-    __ bind(&small_size);  // Entry point into this block.
-    if (FLAG_debug_code) {
-      Label ok;
-      __ cmp(count, 8);
-      __ j(below_equal, &ok);
-      __ int3();
-      __ bind(&ok);
-    }
-    __ mov(eax, Operand(count, times_4, conv.address(&small_handlers)));
-    __ jmp(eax);
+    __ jmp(&backward_loop_1byte);
   }

   __ bind(&pop_and_return);
   MemMoveEmitPopAndReturn(&masm);

   CodeDesc desc;
   masm.GetCode(&desc);
   ASSERT(!RelocInfo::RequiresRelocation(desc));
   CPU::FlushICache(buffer, actual_size);
   OS::ProtectCode(buffer, actual_size);
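The replacement stub drops all of the old small/medium/aligned fast paths: it picks a copy direction that is safe for the overlap, moves four bytes at a time while more than three remain, and finishes byte by byte. A C++ sketch of the same algorithm (ours, for illustration only; the real work happens in the emitted machine code above):

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    void MemMoveSketch(void* dest, const void* source, size_t size) {
      uint8_t* dst = static_cast<uint8_t*>(dest);
      const uint8_t* src = static_cast<const uint8_t*>(source);
      if (dst == src) return;
      if (dst < src) {
        // Forward copy is overlap-safe when dst precedes src.
        while (size > 3) {  // mirrors cmp(count, 3); j(above, &forward_loop_4byte)
          memcpy(dst, src, 4);  // one 32-bit load/store pair through eax
          dst += 4; src += 4; size -= 4;
        }
        while (size > 0) { *dst++ = *src++; size--; }
      } else {
        // Backward copy, starting from the end, when dst follows src.
        dst += size; src += size;
        while (size > 3) {
          dst -= 4; src -= 4; size -= 4;
          memcpy(dst, src, 4);
        }
        while (size > 0) { *--dst = *--src; size--; }
      }
    }

The division of labor matches the stub: the 4-byte loops do the bulk of the move, and the 1-byte loops mop up the 0-3 byte tail (or the whole copy when size <= 3, the stub's entry_shortcut case).
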
(...skipping 25 matching lines...)
     ASSERT(allocation_memento_found != NULL);
     __ JumpIfJSArrayHasAllocationMemento(edx, edi, allocation_memento_found);
   }

   // Set transitioned map.
   __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
   __ RecordWriteField(edx,
                       HeapObject::kMapOffset,
                       ebx,
                       edi,
-                      kDontSaveFPRegs,
                       EMIT_REMEMBERED_SET,
                       OMIT_SMI_CHECK);
 }


 void ElementsTransitionGenerator::GenerateSmiToDouble(
     MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
   // ----------- S t a t e -------------
   //  -- eax    : value
   //  -- ebx    : target map
(...skipping 33 matching lines...)
          Immediate(masm->isolate()->factory()->fixed_double_array_map()));
   __ mov(FieldOperand(eax, FixedDoubleArray::kLengthOffset), edi);
   __ mov(esi, FieldOperand(edx, JSObject::kElementsOffset));
   // Replace receiver's backing store with newly created FixedDoubleArray.
   __ mov(FieldOperand(edx, JSObject::kElementsOffset), eax);
   __ mov(ebx, eax);
   __ RecordWriteField(edx,
                       JSObject::kElementsOffset,
                       ebx,
                       edi,
-                      kDontSaveFPRegs,
                       EMIT_REMEMBERED_SET,
                       OMIT_SMI_CHECK);

   __ mov(edi, FieldOperand(esi, FixedArray::kLengthOffset));

   // Prepare for conversion loop.
   ExternalReference canonical_the_hole_nan_reference =
       ExternalReference::address_of_the_hole_nan();
-  XMMRegister the_hole_nan = xmm1;
-  __ movsd(the_hole_nan,
-           Operand::StaticVariable(canonical_the_hole_nan_reference));
   __ jmp(&entry);

   // Call into runtime if GC is required.
   __ bind(&gc_required);
   // Restore registers before jumping into runtime.
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
   __ pop(ebx);
   __ pop(eax);
   __ jmp(fail);

   // Convert and copy elements
   // esi: source FixedArray
   __ bind(&loop);
   __ mov(ebx, FieldOperand(esi, edi, times_2, FixedArray::kHeaderSize));
   // ebx: current element from source
   // edi: index of current element
   __ JumpIfNotSmi(ebx, &convert_hole);

   // Normal smi, convert it to double and store.
   __ SmiUntag(ebx);
-  __ Cvtsi2sd(xmm0, ebx);
-  __ movsd(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
-           xmm0);
+  __ push(ebx);
+  __ fild_s(Operand(esp, 0));
+  __ pop(ebx);
+  __ fstp_d(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize));
   __ jmp(&entry);

   // Found hole, store hole_nan_as_double instead.
   __ bind(&convert_hole);

   if (FLAG_debug_code) {
     __ cmp(ebx, masm->isolate()->factory()->the_hole_value());
     __ Assert(equal, kObjectFoundInSmiOnlyArray);
   }

-  __ movsd(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
-           the_hole_nan);
+  __ fld_d(Operand::StaticVariable(canonical_the_hole_nan_reference));
+  __ fstp_d(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize));

   __ bind(&entry);
   __ sub(edi, Immediate(Smi::FromInt(1)));
   __ j(not_sign, &loop);

   __ pop(ebx);
   __ pop(eax);

   // Restore esi.
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));

   __ bind(&only_change_map);
   // eax: value
   // ebx: target map
   // Set transitioned map.
   __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
   __ RecordWriteField(edx,
                       HeapObject::kMapOffset,
                       ebx,
                       edi,
-                      kDontSaveFPRegs,
                       OMIT_REMEMBERED_SET,
                       OMIT_SMI_CHECK);
 }
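Without Cvtsi2sd, the smi-to-double conversion now goes through the x87 FPU: the untagged integer is spilled to the stack, fild_s loads and widens it, and fstp_d pops it straight into the FixedDoubleArray slot; the hole is written by round-tripping the canonical hole NaN through fld_d/fstp_d. A hedged C++ sketch of what one loop iteration computes (the NaN bit pattern below is a placeholder, NOT V8's; the stub reads the real value via ExternalReference::address_of_the_hole_nan()):

    #include <stdint.h>
    #include <string.h>

    // Placeholder quiet-NaN bits, standing in for the canonical hole NaN.
    static const uint64_t kHoleNanBitsSketch = 0x7FF8000000000001ULL;

    void ConvertElementSketch(int32_t untagged_smi, bool is_hole, double* slot) {
      if (is_hole) {
        // fld_d [the_hole_nan]; fstp_d [slot] -- copy the canonical NaN.
        memcpy(slot, &kHoleNanBitsSketch, sizeof(*slot));
      } else {
        // push ebx; fild_s [esp]; pop ebx; fstp_d [slot] -- int32 -> double.
        *slot = static_cast<double>(untagged_smi);
      }
    }

The stack round trip per element looks wasteful, but fild only accepts a memory operand, so spilling is the only way to get an integer onto the FPU stack without SSE2.
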


 void ElementsTransitionGenerator::GenerateDoubleToObject(
     MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
   // ----------- S t a t e -------------
   //  -- eax    : value
   //  -- ebx    : target map
(...skipping 35 matching lines...)

   // ebx: target map
   // edx: receiver
   // Set transitioned map.
   __ bind(&only_change_map);
   __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
   __ RecordWriteField(edx,
                       HeapObject::kMapOffset,
                       ebx,
                       edi,
-                      kDontSaveFPRegs,
                       OMIT_REMEMBERED_SET,
                       OMIT_SMI_CHECK);
   __ jmp(&success);

   // Call into runtime if GC is required.
   __ bind(&gc_required);
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
   __ pop(ebx);
   __ pop(edx);
   __ pop(eax);
   __ jmp(fail);

   // Box doubles into heap numbers.
   // edi: source FixedDoubleArray
   // eax: destination FixedArray
   __ bind(&loop);
   // ebx: index of current element (smi-tagged)
   uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
   __ cmp(FieldOperand(edi, ebx, times_4, offset), Immediate(kHoleNanUpper32));
   __ j(equal, &convert_hole);

   // Non-hole double, copy value into a heap number.
   __ AllocateHeapNumber(edx, esi, no_reg, &gc_required);
   // edx: new heap number
-  __ movsd(xmm0,
-           FieldOperand(edi, ebx, times_4, FixedDoubleArray::kHeaderSize));
-  __ movsd(FieldOperand(edx, HeapNumber::kValueOffset), xmm0);
+  __ mov(esi, FieldOperand(edi, ebx, times_4, FixedDoubleArray::kHeaderSize));
+  __ mov(FieldOperand(edx, HeapNumber::kValueOffset), esi);
+  __ mov(esi, FieldOperand(edi, ebx, times_4, offset));
+  __ mov(FieldOperand(edx, HeapNumber::kValueOffset + kPointerSize), esi);
   __ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize), edx);
   __ mov(esi, ebx);
   __ RecordWriteArray(eax,
                       edx,
                       esi,
-                      kDontSaveFPRegs,
                       EMIT_REMEMBERED_SET,
                       OMIT_SMI_CHECK);
   __ jmp(&entry, Label::kNear);

   // Replace the-hole NaN with the-hole pointer.
   __ bind(&convert_hole);
   __ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize),
          masm->isolate()->factory()->the_hole_value());

   __ bind(&entry);
   __ sub(ebx, Immediate(Smi::FromInt(1)));
   __ j(not_sign, &loop);

   __ pop(ebx);
   __ pop(edx);
   // ebx: target map
   // edx: receiver
   // Set transitioned map.
   __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
   __ RecordWriteField(edx,
                       HeapObject::kMapOffset,
                       ebx,
                       edi,
-                      kDontSaveFPRegs,
                       OMIT_REMEMBERED_SET,
                       OMIT_SMI_CHECK);
   // Replace receiver's backing store with newly created and filled FixedArray.
   __ mov(FieldOperand(edx, JSObject::kElementsOffset), eax);
   __ RecordWriteField(edx,
                       JSObject::kElementsOffset,
                       eax,
                       edi,
-                      kDontSaveFPRegs,
                       EMIT_REMEMBERED_SET,
                       OMIT_SMI_CHECK);

   // Restore registers.
   __ pop(eax);
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));

   __ bind(&success);
 }
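In the reverse direction, the SSE2 movsd pair that copied the raw 64-bit value into the fresh HeapNumber is replaced by two 32-bit integer moves through esi, one per half of the double (the high half lives at kValueOffset + kPointerSize, matching the offset used for the hole check). A sketch of that copy (ours; the stub of course operates on tagged heap objects, not raw pointers):

    #include <stdint.h>
    #include <string.h>

    void CopyDoubleAsWordsSketch(const double* from, double* to) {
      uint32_t word;
      // mov esi, [from]; mov [to], esi      -- low 32 bits
      memcpy(&word, reinterpret_cast<const uint8_t*>(from), sizeof(word));
      memcpy(reinterpret_cast<uint8_t*>(to), &word, sizeof(word));
      // mov esi, [from+4]; mov [to+4], esi  -- high 32 bits
      memcpy(&word, reinterpret_cast<const uint8_t*>(from) + 4, sizeof(word));
      memcpy(reinterpret_cast<uint8_t*>(to) + 4, &word, sizeof(word));
    }

Integer moves keep the copy bit-exact; routing the value through fld_d/fstp_d instead would not be, since loading a signaling NaN onto the x87 stack quietens it.
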

(...skipping 95 matching lines...)
   // Load the byte into the result register.
   __ bind(&ascii);
   __ movzx_b(result, FieldOperand(string,
                                   index,
                                   times_1,
                                   SeqOneByteString::kHeaderSize));
   __ bind(&done);
 }


-static Operand ExpConstant(int index) {
-  return Operand::StaticVariable(ExternalReference::math_exp_constants(index));
-}
-
-
-void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
-                                   XMMRegister input,
-                                   XMMRegister result,
-                                   XMMRegister double_scratch,
-                                   Register temp1,
-                                   Register temp2) {
-  ASSERT(!input.is(double_scratch));
-  ASSERT(!input.is(result));
-  ASSERT(!result.is(double_scratch));
-  ASSERT(!temp1.is(temp2));
-  ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);
-
-  Label done;
-
-  __ movsd(double_scratch, ExpConstant(0));
-  __ xorpd(result, result);
-  __ ucomisd(double_scratch, input);
-  __ j(above_equal, &done);
-  __ ucomisd(input, ExpConstant(1));
-  __ movsd(result, ExpConstant(2));
-  __ j(above_equal, &done);
-  __ movsd(double_scratch, ExpConstant(3));
-  __ movsd(result, ExpConstant(4));
-  __ mulsd(double_scratch, input);
-  __ addsd(double_scratch, result);
-  __ movd(temp2, double_scratch);
-  __ subsd(double_scratch, result);
-  __ movsd(result, ExpConstant(6));
-  __ mulsd(double_scratch, ExpConstant(5));
-  __ subsd(double_scratch, input);
-  __ subsd(result, double_scratch);
-  __ movsd(input, double_scratch);
-  __ mulsd(input, double_scratch);
-  __ mulsd(result, input);
-  __ mov(temp1, temp2);
-  __ mulsd(result, ExpConstant(7));
-  __ subsd(result, double_scratch);
-  __ add(temp1, Immediate(0x1ff800));
-  __ addsd(result, ExpConstant(8));
-  __ and_(temp2, Immediate(0x7ff));
-  __ shr(temp1, 11);
-  __ shl(temp1, 20);
-  __ movd(input, temp1);
-  __ pshufd(input, input, static_cast<uint8_t>(0xe1));  // Order: 11 10 00 01
-  __ movsd(double_scratch, Operand::StaticArray(
-      temp2, times_8, ExternalReference::math_exp_log_table()));
-  __ orps(input, double_scratch);
-  __ mulsd(result, input);
-  __ bind(&done);
-}
-
 #undef __
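The MathExpGenerator::EmitMathExp deleted above was the SSE2 fast path behind CreateExpFunction: scale x by log2(e), split off an integer part that becomes the result's exponent bits plus an 11-bit index into a 2048-entry table (ExternalReference::math_exp_log_table(), note the 0x7ff mask and the shr 11/shl 20 pair), and correct the remainder with a short polynomial. A rough scalar sketch of the same range-reduction idea, with a plain Taylor tail instead of V8's tuned constants (all numbers here are illustrative, not V8's ExpConstant values):

    #include <cmath>

    double ExpSketch(double x) {
      const double kLog2E = 1.4426950408889634;  // log2(e)
      const double kLn2 = 0.6931471805599453;    // ln(2)
      // exp(x) = 2^k * exp(r), with the remainder reduced to |r| <= ln(2)/2.
      int k = static_cast<int>(x * kLog2E + (x >= 0 ? 0.5 : -0.5));
      double r = x - k * kLn2;
      // A 5th-order Taylor series is accurate to ~1e-6 on this range.
      double er = 1 + r * (1 + r / 2 * (1 + r / 3 * (1 + r / 4 * (1 + r / 5))));
      return std::ldexp(er, k);  // scale by 2^k
    }

The real stub got its speed from folding the 2^k scaling and the table lookup directly into the double's bit pattern; none of that machinery is available without SSE2, hence CreateExpFunction now just returns &std::exp.
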


 CodeAgingHelper::CodeAgingHelper() {
   ASSERT(young_sequence_.length() == kNoCodeAgeSequenceLength);
   CodePatcher patcher(young_sequence_.start(), young_sequence_.length());
   patcher.masm()->push(ebp);
   patcher.masm()->mov(ebp, esp);
   patcher.masm()->push(esi);
   patcher.masm()->push(edi);
(...skipping 40 matching lines...)
   } else {
     Code* stub = GetCodeAgeStub(isolate, age, parity);
     CodePatcher patcher(sequence, young_length);
     patcher.masm()->call(stub->instruction_start(), RelocInfo::NONE32);
   }
 }


 } }  // namespace v8::internal

-#endif  // V8_TARGET_ARCH_IA32
+#endif  // V8_TARGET_ARCH_X87