Chromium Code Reviews

Side by Side Diff: src/x64/code-stubs-x64.cc

Issue 3195022: Move code stubs from codegen*.* files to code-stub*.* files. (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: '' Created 10 years, 3 months ago
Property Changes:
Added: svn:eol-style
+ LF
1 // Copyright 2010 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28 #include "v8.h"
29
30 #if defined(V8_TARGET_ARCH_X64)
31
32 #include "bootstrapper.h"
33 #include "code-stubs-x64.h"
34 #include "codegen-inl.h"
35 #include "regexp-macro-assembler.h"
36
37 namespace v8 {
38 namespace internal {
39
40 #define __ ACCESS_MASM(masm)
41 void FastNewClosureStub::Generate(MacroAssembler* masm) {
42 // Create a new closure from the given function info in new
43 // space. Set the context to the current context in rsi.
44 Label gc;
45 __ AllocateInNewSpace(JSFunction::kSize, rax, rbx, rcx, &gc, TAG_OBJECT);
46
47 // Get the function info from the stack.
48 __ movq(rdx, Operand(rsp, 1 * kPointerSize));
49
50 // Compute the function map in the current global context and set that
51 // as the map of the allocated object.
52 __ movq(rcx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
53 __ movq(rcx, FieldOperand(rcx, GlobalObject::kGlobalContextOffset));
54 __ movq(rcx, Operand(rcx, Context::SlotOffset(Context::FUNCTION_MAP_INDEX)));
55 __ movq(FieldOperand(rax, JSObject::kMapOffset), rcx);
56
57 // Initialize the rest of the function. We don't have to update the
58 // write barrier because the allocated object is in new space.
59 __ LoadRoot(rbx, Heap::kEmptyFixedArrayRootIndex);
60 __ LoadRoot(rcx, Heap::kTheHoleValueRootIndex);
61 __ movq(FieldOperand(rax, JSObject::kPropertiesOffset), rbx);
62 __ movq(FieldOperand(rax, JSObject::kElementsOffset), rbx);
63 __ movq(FieldOperand(rax, JSFunction::kPrototypeOrInitialMapOffset), rcx);
64 __ movq(FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset), rdx);
65 __ movq(FieldOperand(rax, JSFunction::kContextOffset), rsi);
66 __ movq(FieldOperand(rax, JSFunction::kLiteralsOffset), rbx);
67
68 // Initialize the code pointer in the function to be the one
69 // found in the shared function info object.
70 __ movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
71 __ lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
72 __ movq(FieldOperand(rax, JSFunction::kCodeEntryOffset), rdx);
73
74
75 // Return and remove the on-stack parameter.
76 __ ret(1 * kPointerSize);
77
78 // Create a new closure through the slower runtime call.
79 __ bind(&gc);
80 __ pop(rcx); // Temporarily remove return address.
81 __ pop(rdx);
82 __ push(rsi);
83 __ push(rdx);
84 __ push(rcx); // Restore return address.
85 __ TailCallRuntime(Runtime::kNewClosure, 2, 1);
86 }
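
The stub above fills in a fixed sequence of JSFunction fields. As a reading aid, here is a hypothetical C++ mirror of that layout, in the order the stores happen; the struct and field names are illustrative, not V8's actual class definition.

```cpp
#include <cstdint>

// Hypothetical mirror of the object FastNewClosureStub initializes.
struct JSFunctionLayout {
  void* map;                       // function map from the global context
  void* properties;                // empty_fixed_array root
  void* elements;                  // empty_fixed_array root
  void* prototype_or_initial_map;  // the_hole root
  void* shared_function_info;      // taken from the stack argument
  void* context;                   // current context (rsi)
  void* literals;                  // empty_fixed_array root
  uintptr_t code_entry;            // shared's code object + Code::kHeaderSize
};
```
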
87
88
89 void FastNewContextStub::Generate(MacroAssembler* masm) {
90 // Try to allocate the context in new space.
91 Label gc;
92 int length = slots_ + Context::MIN_CONTEXT_SLOTS;
93 __ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize,
94 rax, rbx, rcx, &gc, TAG_OBJECT);
95
96 // Get the function from the stack.
97 __ movq(rcx, Operand(rsp, 1 * kPointerSize));
98
99 // Set up the object header.
100 __ LoadRoot(kScratchRegister, Heap::kContextMapRootIndex);
101 __ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister);
102 __ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length));
103
104 // Set up the fixed slots.
105 __ xor_(rbx, rbx); // Set to NULL.
106 __ movq(Operand(rax, Context::SlotOffset(Context::CLOSURE_INDEX)), rcx);
107 __ movq(Operand(rax, Context::SlotOffset(Context::FCONTEXT_INDEX)), rax);
108 __ movq(Operand(rax, Context::SlotOffset(Context::PREVIOUS_INDEX)), rbx);
109 __ movq(Operand(rax, Context::SlotOffset(Context::EXTENSION_INDEX)), rbx);
110
111 // Copy the global object from the surrounding context.
112 __ movq(rbx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
113 __ movq(Operand(rax, Context::SlotOffset(Context::GLOBAL_INDEX)), rbx);
114
115 // Initialize the rest of the slots to undefined.
116 __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
117 for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
118 __ movq(Operand(rax, Context::SlotOffset(i)), rbx);
119 }
120
121 // Return and remove the on-stack parameter.
122 __ movq(rsi, rax);
123 __ ret(1 * kPointerSize);
124
125 // Need to collect. Call into runtime system.
126 __ bind(&gc);
127 __ TailCallRuntime(Runtime::kNewContext, 1, 1);
128 }
129
130
131 void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
132 // Stack layout on entry:
133 //
134 // [rsp + kPointerSize]: constant elements.
135 // [rsp + (2 * kPointerSize)]: literal index.
136 // [rsp + (3 * kPointerSize)]: literals array.
137
138 // All sizes here are multiples of kPointerSize.
139 int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
140 int size = JSArray::kSize + elements_size;
141
142 // Load boilerplate object into rcx and check if we need to create a
143 // boilerplate.
144 Label slow_case;
145 __ movq(rcx, Operand(rsp, 3 * kPointerSize));
146 __ movq(rax, Operand(rsp, 2 * kPointerSize));
147 SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
148 __ movq(rcx,
149 FieldOperand(rcx, index.reg, index.scale, FixedArray::kHeaderSize));
150 __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
151 __ j(equal, &slow_case);
152
153 if (FLAG_debug_code) {
154 const char* message;
155 Heap::RootListIndex expected_map_index;
156 if (mode_ == CLONE_ELEMENTS) {
157 message = "Expected (writable) fixed array";
158 expected_map_index = Heap::kFixedArrayMapRootIndex;
159 } else {
160 ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS);
161 message = "Expected copy-on-write fixed array";
162 expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
163 }
164 __ push(rcx);
165 __ movq(rcx, FieldOperand(rcx, JSArray::kElementsOffset));
166 __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
167 expected_map_index);
168 __ Assert(equal, message);
169 __ pop(rcx);
170 }
171
172 // Allocate both the JS array and the elements array in one big
173 // allocation. This avoids multiple limit checks.
174 __ AllocateInNewSpace(size, rax, rbx, rdx, &slow_case, TAG_OBJECT);
175
176 // Copy the JS array part.
177 for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
178 if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
179 __ movq(rbx, FieldOperand(rcx, i));
180 __ movq(FieldOperand(rax, i), rbx);
181 }
182 }
183
184 if (length_ > 0) {
185 // Get hold of the elements array of the boilerplate and setup the
186 // elements pointer in the resulting object.
187 __ movq(rcx, FieldOperand(rcx, JSArray::kElementsOffset));
188 __ lea(rdx, Operand(rax, JSArray::kSize));
189 __ movq(FieldOperand(rax, JSArray::kElementsOffset), rdx);
190
191 // Copy the elements array.
192 for (int i = 0; i < elements_size; i += kPointerSize) {
193 __ movq(rbx, FieldOperand(rcx, i));
194 __ movq(FieldOperand(rdx, i), rbx);
195 }
196 }
197
198 // Return and remove the on-stack parameters.
199 __ ret(3 * kPointerSize);
200
201 __ bind(&slow_case);
202 __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
203 }
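
The single-allocation trick above hinges on a simple size computation. A minimal sketch, assuming FixedArray::SizeFor(n) is the header size plus n pointer-sized slots; the constants here are illustrative stand-ins for V8's.

```cpp
constexpr int kPointerSize = 8;                          // x64
constexpr int kJSArraySize = 4 * kPointerSize;           // illustrative value
constexpr int kFixedArrayHeaderSize = 2 * kPointerSize;  // map + length

constexpr int AllocationSize(int length) {
  // Zero-length literals omit the elements array entirely; otherwise the
  // JS array and its elements are carved from one new-space chunk, so a
  // single allocation-limit check covers both objects.
  int elements_size =
      (length > 0) ? kFixedArrayHeaderSize + length * kPointerSize : 0;
  return kJSArraySize + elements_size;
}
```
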
204
205
206 void ToBooleanStub::Generate(MacroAssembler* masm) {
207 Label false_result, true_result, not_string;
208 __ movq(rax, Operand(rsp, 1 * kPointerSize));
209
210 // 'null' => false.
211 __ CompareRoot(rax, Heap::kNullValueRootIndex);
212 __ j(equal, &false_result);
213
214 // Get the map and type of the heap object.
215 // We don't use CmpObjectType because we manipulate the type field.
216 __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
217 __ movzxbq(rcx, FieldOperand(rdx, Map::kInstanceTypeOffset));
218
219 // Undetectable => false.
220 __ movzxbq(rbx, FieldOperand(rdx, Map::kBitFieldOffset));
221 __ and_(rbx, Immediate(1 << Map::kIsUndetectable));
222 __ j(not_zero, &false_result);
223
224 // JavaScript object => true.
225 __ cmpq(rcx, Immediate(FIRST_JS_OBJECT_TYPE));
226 __ j(above_equal, &true_result);
227
228 // String value => false iff empty.
229 __ cmpq(rcx, Immediate(FIRST_NONSTRING_TYPE));
230 __ j(above_equal, &not_string);
231 __ movq(rdx, FieldOperand(rax, String::kLengthOffset));
232 __ SmiTest(rdx);
233 __ j(zero, &false_result);
234 __ jmp(&true_result);
235
236 __ bind(&not_string);
237 __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
238 __ j(not_equal, &true_result);
239 // HeapNumber => false iff +0, -0, or NaN.
240 // These three cases set the zero flag when compared to zero using ucomisd.
241 __ xorpd(xmm0, xmm0);
242 __ ucomisd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset));
243 __ j(zero, &false_result);
244 // Fall through to |true_result|.
245
246 // Return 1/0 for true/false in rax.
247 __ bind(&true_result);
248 __ movq(rax, Immediate(1));
249 __ ret(1 * kPointerSize);
250 __ bind(&false_result);
251 __ xor_(rax, rax);
252 __ ret(1 * kPointerSize);
253 }
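
The stub encodes the ECMAScript ToBoolean decision tree over heap values. A sketch of the same logic in plain C++, using a hypothetical tagged-value type; smis do not appear because the stub assumes they are handled before it is reached.

```cpp
#include <string>

// Hypothetical tagged value, for illustration only.
struct Value {
  enum Kind { kNull, kUndetectable, kObject, kString, kNumber } kind;
  std::string str;   // payload when kind == kString
  double num = 0.0;  // payload when kind == kNumber
};

// The decision tree the stub implements, returning the 1/0 placed in rax.
bool ToBoolean(const Value& v) {
  switch (v.kind) {
    case Value::kNull:         return false;           // 'null' => false
    case Value::kUndetectable: return false;           // undetectable => false
    case Value::kObject:       return true;            // JS object => true
    case Value::kString:       return !v.str.empty();  // "" => false
    case Value::kNumber:  // +0, -0 and NaN all set ZF under ucomisd vs. 0
      return v.num == v.num && v.num != 0.0;
  }
  return true;
}
```
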
254
255
256 const char* GenericBinaryOpStub::GetName() {
257 if (name_ != NULL) return name_;
258 const int kMaxNameLength = 100;
259 name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
260 if (name_ == NULL) return "OOM";
261 const char* op_name = Token::Name(op_);
262 const char* overwrite_name;
263 switch (mode_) {
264 case NO_OVERWRITE: overwrite_name = "Alloc"; break;
265 case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
266 case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
267 default: overwrite_name = "UnknownOverwrite"; break;
268 }
269
270 OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
271 "GenericBinaryOpStub_%s_%s%s_%s%s_%s_%s",
272 op_name,
273 overwrite_name,
274 (flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "",
275 args_in_registers_ ? "RegArgs" : "StackArgs",
276 args_reversed_ ? "_R" : "",
277 static_operands_type_.ToString(),
278 BinaryOpIC::GetName(runtime_operands_type_));
279 return name_;
280 }
281
282
283 void GenericBinaryOpStub::GenerateCall(
284 MacroAssembler* masm,
285 Register left,
286 Register right) {
287 if (!ArgsInRegistersSupported()) {
288 // Pass arguments on the stack.
289 __ push(left);
290 __ push(right);
291 } else {
292 // The calling convention with registers is left in rdx and right in rax.
293 Register left_arg = rdx;
294 Register right_arg = rax;
295 if (!(left.is(left_arg) && right.is(right_arg))) {
296 if (left.is(right_arg) && right.is(left_arg)) {
297 if (IsOperationCommutative()) {
298 SetArgsReversed();
299 } else {
300 __ xchg(left, right);
301 }
302 } else if (left.is(left_arg)) {
303 __ movq(right_arg, right);
304 } else if (right.is(right_arg)) {
305 __ movq(left_arg, left);
306 } else if (left.is(right_arg)) {
307 if (IsOperationCommutative()) {
308 __ movq(left_arg, right);
309 SetArgsReversed();
310 } else {
311 // Order of moves important to avoid destroying left argument.
312 __ movq(left_arg, left);
313 __ movq(right_arg, right);
314 }
315 } else if (right.is(left_arg)) {
316 if (IsOperationCommutative()) {
317 __ movq(right_arg, left);
318 SetArgsReversed();
319 } else {
320 // Order of moves important to avoid destroying right argument.
321 __ movq(right_arg, right);
322 __ movq(left_arg, left);
323 }
324 } else {
325 // Order of moves is not important.
326 __ movq(left_arg, left);
327 __ movq(right_arg, right);
328 }
329 }
330
331 // Update flags to indicate that arguments are in registers.
332 SetArgsInRegisters();
333 __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
334 }
335
336 // Call the stub.
337 __ CallStub(this);
338 }
339
340
341 void GenericBinaryOpStub::GenerateCall(
342 MacroAssembler* masm,
343 Register left,
344 Smi* right) {
345 if (!ArgsInRegistersSupported()) {
346 // Pass arguments on the stack.
347 __ push(left);
348 __ Push(right);
349 } else {
350 // The calling convention with registers is left in rdx and right in rax.
351 Register left_arg = rdx;
352 Register right_arg = rax;
353 if (left.is(left_arg)) {
354 __ Move(right_arg, right);
355 } else if (left.is(right_arg) && IsOperationCommutative()) {
356 __ Move(left_arg, right);
357 SetArgsReversed();
358 } else {
359 // For non-commutative operations, left and right_arg might be
360 // the same register. Therefore, the order of the moves is
361 // important here in order to not overwrite left before moving
362 // it to left_arg.
363 __ movq(left_arg, left);
364 __ Move(right_arg, right);
365 }
366
367 // Update flags to indicate that arguments are in registers.
368 SetArgsInRegisters();
369 __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
370 }
371
372 // Call the stub.
373 __ CallStub(this);
374 }
375
376
377 void GenericBinaryOpStub::GenerateCall(
378 MacroAssembler* masm,
379 Smi* left,
380 Register right) {
381 if (!ArgsInRegistersSupported()) {
382 // Pass arguments on the stack.
383 __ Push(left);
384 __ push(right);
385 } else {
386 // The calling convention with registers is left in rdx and right in rax.
387 Register left_arg = rdx;
388 Register right_arg = rax;
389 if (right.is(right_arg)) {
390 __ Move(left_arg, left);
391 } else if (right.is(left_arg) && IsOperationCommutative()) {
392 __ Move(right_arg, left);
393 SetArgsReversed();
394 } else {
395 // For non-commutative operations, right and left_arg might be
396 // the same register. Therefore, the order of the moves is
397 // important here in order to not overwrite right before moving
398 // it to right_arg.
399 __ movq(right_arg, right);
400 __ Move(left_arg, left);
401 }
402 // Update flags to indicate that arguments are in registers.
403 SetArgsInRegisters();
404 __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
405 }
406
407 // Call the stub.
408 __ CallStub(this);
409 }
410
411
412 class FloatingPointHelper : public AllStatic {
413 public:
414 // Load the operands from rdx and rax into xmm0 and xmm1, as doubles.
415 // If the operands are not both numbers, jump to not_numbers.
416 // Leaves rdx and rax unchanged. SmiOperands assumes both are smis.
417 // NumberOperands assumes both are smis or heap numbers.
418 static void LoadSSE2SmiOperands(MacroAssembler* masm);
419 static void LoadSSE2NumberOperands(MacroAssembler* masm);
420 static void LoadSSE2UnknownOperands(MacroAssembler* masm,
421 Label* not_numbers);
422
423 // Takes the operands in rdx and rax and loads them as integers in rax
424 // and rcx.
425 static void LoadAsIntegers(MacroAssembler* masm,
426 Label* operand_conversion_failure,
427 Register heap_number_map);
428 // As above, but we know the operands to be numbers. In that case,
429 // conversion can't fail.
430 static void LoadNumbersAsIntegers(MacroAssembler* masm);
431 };
432
433
434 void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
435 // 1. Move arguments into rdx, rax except for DIV and MOD, which need the
436 // dividend in rax and rdx free for the division. Use rax, rbx for those.
437 Comment load_comment(masm, "-- Load arguments");
438 Register left = rdx;
439 Register right = rax;
440 if (op_ == Token::DIV || op_ == Token::MOD) {
441 left = rax;
442 right = rbx;
443 if (HasArgsInRegisters()) {
444 __ movq(rbx, rax);
445 __ movq(rax, rdx);
446 }
447 }
448 if (!HasArgsInRegisters()) {
449 __ movq(right, Operand(rsp, 1 * kPointerSize));
450 __ movq(left, Operand(rsp, 2 * kPointerSize));
451 }
452
453 Label not_smis;
454 // 2. Smi check both operands.
455 if (static_operands_type_.IsSmi()) {
456 // Skip smi check if we know that both arguments are smis.
457 if (FLAG_debug_code) {
458 __ AbortIfNotSmi(left);
459 __ AbortIfNotSmi(right);
460 }
461 if (op_ == Token::BIT_OR) {
462 // Handle OR here, since we do extra smi-checking in the or code below.
463 __ SmiOr(right, right, left);
464 GenerateReturn(masm);
465 return;
466 }
467 } else {
468 if (op_ != Token::BIT_OR) {
469 // Skip the check for OR as it is better combined with the
470 // actual operation.
471 Comment smi_check_comment(masm, "-- Smi check arguments");
472 __ JumpIfNotBothSmi(left, right, &not_smis);
473 }
474 }
475
476 // 3. Operands are both smis (except for OR), perform the operation leaving
477 // the result in rax and check the result if necessary.
478 Comment perform_smi(masm, "-- Perform smi operation");
479 Label use_fp_on_smis;
480 switch (op_) {
481 case Token::ADD: {
482 ASSERT(right.is(rax));
483 __ SmiAdd(right, right, left, &use_fp_on_smis); // ADD is commutative.
484 break;
485 }
486
487 case Token::SUB: {
488 __ SmiSub(left, left, right, &use_fp_on_smis);
489 __ movq(rax, left);
490 break;
491 }
492
493 case Token::MUL:
494 ASSERT(right.is(rax));
495 __ SmiMul(right, right, left, &use_fp_on_smis); // MUL is commutative.
496 break;
497
498 case Token::DIV:
499 ASSERT(left.is(rax));
500 __ SmiDiv(left, left, right, &use_fp_on_smis);
501 break;
502
503 case Token::MOD:
504 ASSERT(left.is(rax));
505 __ SmiMod(left, left, right, slow);
506 break;
507
508 case Token::BIT_OR:
509 ASSERT(right.is(rax));
510 __ movq(rcx, right); // Save the right operand.
511 __ SmiOr(right, right, left); // BIT_OR is commutative.
512 __ testb(right, Immediate(kSmiTagMask));
513 __ j(not_zero, &not_smis);
514 break;
515
516 case Token::BIT_AND:
517 ASSERT(right.is(rax));
518 __ SmiAnd(right, right, left); // BIT_AND is commutative.
519 break;
520
521 case Token::BIT_XOR:
522 ASSERT(right.is(rax));
523 __ SmiXor(right, right, left); // BIT_XOR is commutative.
524 break;
525
526 case Token::SHL:
527 case Token::SHR:
528 case Token::SAR:
529 switch (op_) {
530 case Token::SAR:
531 __ SmiShiftArithmeticRight(left, left, right);
532 break;
533 case Token::SHR:
534 __ SmiShiftLogicalRight(left, left, right, slow);
535 break;
536 case Token::SHL:
537 __ SmiShiftLeft(left, left, right);
538 break;
539 default:
540 UNREACHABLE();
541 }
542 __ movq(rax, left);
543 break;
544
545 default:
546 UNREACHABLE();
547 break;
548 }
549
550 // 4. Emit return of result in rax.
551 GenerateReturn(masm);
552
553 // 5. For some operations emit inline code to perform floating point
554 // operations on known smis (e.g., if the result of the operation
555 // overflowed the smi range).
556 switch (op_) {
557 case Token::ADD:
558 case Token::SUB:
559 case Token::MUL:
560 case Token::DIV: {
561 ASSERT(use_fp_on_smis.is_linked());
562 __ bind(&use_fp_on_smis);
563 if (op_ == Token::DIV) {
564 __ movq(rdx, rax);
565 __ movq(rax, rbx);
566 }
567 // left is rdx, right is rax.
568 __ AllocateHeapNumber(rbx, rcx, slow);
569 FloatingPointHelper::LoadSSE2SmiOperands(masm);
570 switch (op_) {
571 case Token::ADD: __ addsd(xmm0, xmm1); break;
572 case Token::SUB: __ subsd(xmm0, xmm1); break;
573 case Token::MUL: __ mulsd(xmm0, xmm1); break;
574 case Token::DIV: __ divsd(xmm0, xmm1); break;
575 default: UNREACHABLE();
576 }
577 __ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm0);
578 __ movq(rax, rbx);
579 GenerateReturn(masm);
580 }
581 default:
582 break;
583 }
584
585 // 6. Non-smi operands, fall out to the non-smi code with the operands in
586 // rdx and rax.
587 Comment done_comment(masm, "-- Enter non-smi code");
588 __ bind(&not_smis);
589
590 switch (op_) {
591 case Token::DIV:
592 case Token::MOD:
593 // Operands are in rax, rbx at this point.
594 __ movq(rdx, rax);
595 __ movq(rax, rbx);
596 break;
597
598 case Token::BIT_OR:
599 // Right operand is saved in rcx and rax was destroyed by the smi
600 // operation.
601 __ movq(rax, rcx);
602 break;
603
604 default:
605 break;
606 }
607 }
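
Step 5 above bails out of smi arithmetic only when the result leaves the smi range. A minimal sketch of that pattern for ADD, using the GCC/Clang overflow builtin in place of the flags check the generated code performs; the function name is hypothetical.

```cpp
#include <cstdint>

// Try the smi-range operation; on overflow, compute the exact result in
// double precision so the caller can box it in a heap number, mirroring
// the use_fp_on_smis path (LoadSSE2SmiOperands + addsd).
bool SmiAddOrFallback(int32_t left, int32_t right, int32_t* smi_result,
                      double* boxed_result) {
  int32_t sum;
  if (!__builtin_add_overflow(left, right, &sum)) {
    *smi_result = sum;  // fast path: still fits the smi range
    return true;
  }
  *boxed_result = static_cast<double>(left) + static_cast<double>(right);
  return false;         // caller allocates a heap number for this
}
```
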
608
609
610 void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
611 Label call_runtime;
612
613 if (ShouldGenerateSmiCode()) {
614 GenerateSmiCode(masm, &call_runtime);
615 } else if (op_ != Token::MOD) {
616 if (!HasArgsInRegisters()) {
617 GenerateLoadArguments(masm);
618 }
619 }
620 // Floating point case.
621 if (ShouldGenerateFPCode()) {
622 switch (op_) {
623 case Token::ADD:
624 case Token::SUB:
625 case Token::MUL:
626 case Token::DIV: {
627 if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
628 HasSmiCodeInStub()) {
629 // Execution reaches this point when the first non-smi argument occurs
630 // (and only if smi code is generated). This is the right moment to
631 // patch to HEAP_NUMBERS state. The transition is attempted only for
632 // the four basic operations. The stub stays in the DEFAULT state
633 // forever for all other operations (also if smi code is skipped).
634 GenerateTypeTransition(masm);
635 break;
636 }
637
638 Label not_floats;
639 // rax: y
640 // rdx: x
641 if (static_operands_type_.IsNumber()) {
642 if (FLAG_debug_code) {
643 // Assert at runtime that inputs are only numbers.
644 __ AbortIfNotNumber(rdx);
645 __ AbortIfNotNumber(rax);
646 }
647 FloatingPointHelper::LoadSSE2NumberOperands(masm);
648 } else {
649 FloatingPointHelper::LoadSSE2UnknownOperands(masm, &call_runtime);
650 }
651
652 switch (op_) {
653 case Token::ADD: __ addsd(xmm0, xmm1); break;
654 case Token::SUB: __ subsd(xmm0, xmm1); break;
655 case Token::MUL: __ mulsd(xmm0, xmm1); break;
656 case Token::DIV: __ divsd(xmm0, xmm1); break;
657 default: UNREACHABLE();
658 }
659 // Allocate a heap number, if needed.
660 Label skip_allocation;
661 OverwriteMode mode = mode_;
662 if (HasArgsReversed()) {
663 if (mode == OVERWRITE_RIGHT) {
664 mode = OVERWRITE_LEFT;
665 } else if (mode == OVERWRITE_LEFT) {
666 mode = OVERWRITE_RIGHT;
667 }
668 }
669 switch (mode) {
670 case OVERWRITE_LEFT:
671 __ JumpIfNotSmi(rdx, &skip_allocation);
672 __ AllocateHeapNumber(rbx, rcx, &call_runtime);
673 __ movq(rdx, rbx);
674 __ bind(&skip_allocation);
675 __ movq(rax, rdx);
676 break;
677 case OVERWRITE_RIGHT:
678 // If the argument in rax is already an object, we skip the
679 // allocation of a heap number.
680 __ JumpIfNotSmi(rax, &skip_allocation);
681 // Fall through!
682 case NO_OVERWRITE:
683 // Allocate a heap number for the result. Keep rax and rdx intact
684 // for the possible runtime call.
685 __ AllocateHeapNumber(rbx, rcx, &call_runtime);
686 __ movq(rax, rbx);
687 __ bind(&skip_allocation);
688 break;
689 default: UNREACHABLE();
690 }
691 __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
692 GenerateReturn(masm);
693 __ bind(&not_floats);
694 if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
695 !HasSmiCodeInStub()) {
696 // Execution reaches this point when the first non-number argument
697 // occurs (and only if smi code is skipped from the stub, otherwise
698 // the patching has already been done earlier in this case branch).
699 // A perfect moment to try patching to STRINGS for ADD operation.
700 if (op_ == Token::ADD) {
701 GenerateTypeTransition(masm);
702 }
703 }
704 break;
705 }
706 case Token::MOD: {
707 // For MOD we go directly to runtime in the non-smi case.
708 break;
709 }
710 case Token::BIT_OR:
711 case Token::BIT_AND:
712 case Token::BIT_XOR:
713 case Token::SAR:
714 case Token::SHL:
715 case Token::SHR: {
716 Label skip_allocation, non_smi_shr_result;
717 Register heap_number_map = r9;
718 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
719 if (static_operands_type_.IsNumber()) {
720 if (FLAG_debug_code) {
721 // Assert at runtime that inputs are only numbers.
722 __ AbortIfNotNumber(rdx);
723 __ AbortIfNotNumber(rax);
724 }
725 FloatingPointHelper::LoadNumbersAsIntegers(masm);
726 } else {
727 FloatingPointHelper::LoadAsIntegers(masm,
728 &call_runtime,
729 heap_number_map);
730 }
731 switch (op_) {
732 case Token::BIT_OR: __ orl(rax, rcx); break;
733 case Token::BIT_AND: __ andl(rax, rcx); break;
734 case Token::BIT_XOR: __ xorl(rax, rcx); break;
735 case Token::SAR: __ sarl_cl(rax); break;
736 case Token::SHL: __ shll_cl(rax); break;
737 case Token::SHR: {
738 __ shrl_cl(rax);
739 // Check if result is negative. This can only happen for a shift
740 // by zero.
741 __ testl(rax, rax);
742 __ j(negative, &non_smi_shr_result);
743 break;
744 }
745 default: UNREACHABLE();
746 }
747
748 STATIC_ASSERT(kSmiValueSize == 32);
749 // Tag smi result and return.
750 __ Integer32ToSmi(rax, rax);
751 GenerateReturn(masm);
752
753 // All bit-ops except SHR return a signed int32 that can be
754 // returned immediately as a smi.
755 // We might need to allocate a HeapNumber if we shift a negative
756 // number right by zero (i.e., convert to UInt32).
757 if (op_ == Token::SHR) {
758 ASSERT(non_smi_shr_result.is_linked());
759 __ bind(&non_smi_shr_result);
760 // Allocate a heap number if needed.
761 __ movl(rbx, rax); // rbx holds result value (uint32 value as int64).
762 switch (mode_) {
763 case OVERWRITE_LEFT:
764 case OVERWRITE_RIGHT:
765 // If the operand was an object, we skip the
766 // allocation of a heap number.
767 __ movq(rax, Operand(rsp, mode_ == OVERWRITE_RIGHT ?
768 1 * kPointerSize : 2 * kPointerSize));
769 __ JumpIfNotSmi(rax, &skip_allocation);
770 // Fall through!
771 case NO_OVERWRITE:
772 // Allocate heap number in new space.
773 // Not using AllocateHeapNumber macro in order to reuse
774 // already loaded heap_number_map.
775 __ AllocateInNewSpace(HeapNumber::kSize,
776 rax,
777 rcx,
778 no_reg,
779 &call_runtime,
780 TAG_OBJECT);
781 // Set the map.
782 if (FLAG_debug_code) {
783 __ AbortIfNotRootValue(heap_number_map,
784 Heap::kHeapNumberMapRootIndex,
785 "HeapNumberMap register clobbered.");
786 }
787 __ movq(FieldOperand(rax, HeapObject::kMapOffset),
788 heap_number_map);
789 __ bind(&skip_allocation);
790 break;
791 default: UNREACHABLE();
792 }
793 // Store the result in the HeapNumber and return.
794 __ cvtqsi2sd(xmm0, rbx);
795 __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
796 GenerateReturn(masm);
797 }
798
799 break;
800 }
801 default: UNREACHABLE(); break;
802 }
803 }
804
805 // If all else fails, use the runtime system to get the correct
806 // result. If the arguments were passed in registers, place them on
807 // the stack in the correct order below the return address.
808 __ bind(&call_runtime);
809
810 if (HasArgsInRegisters()) {
811 GenerateRegisterArgsPush(masm);
812 }
813
814 switch (op_) {
815 case Token::ADD: {
816 // Registers containing left and right operands respectively.
817 Register lhs, rhs;
818
819 if (HasArgsReversed()) {
820 lhs = rax;
821 rhs = rdx;
822 } else {
823 lhs = rdx;
824 rhs = rax;
825 }
826
827 // Test for string arguments before calling runtime.
828 Label not_strings, both_strings, not_string1, string1, string1_smi2;
829
830 // If this stub has already generated FP-specific code then the arguments
831 // are already in rdx and rax.
832 if (!ShouldGenerateFPCode() && !HasArgsInRegisters()) {
833 GenerateLoadArguments(masm);
834 }
835
836 Condition is_smi;
837 is_smi = masm->CheckSmi(lhs);
838 __ j(is_smi, &not_string1);
839 __ CmpObjectType(lhs, FIRST_NONSTRING_TYPE, r8);
840 __ j(above_equal, &not_string1);
841
842 // First argument is a string, test second.
843 is_smi = masm->CheckSmi(rhs);
844 __ j(is_smi, &string1_smi2);
845 __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, r9);
846 __ j(above_equal, &string1);
847
848 // First and second argument are strings.
849 StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
850 __ TailCallStub(&string_add_stub);
851
852 __ bind(&string1_smi2);
853 // First argument is a string, second is a smi. Try to look up the number
854 // string for the smi in the number string cache.
855 NumberToStringStub::GenerateLookupNumberStringCache(
856 masm, rhs, rbx, rcx, r8, true, &string1);
857
858 // Replace second argument on stack and tailcall string add stub to make
859 // the result.
860 __ movq(Operand(rsp, 1 * kPointerSize), rbx);
861 __ TailCallStub(&string_add_stub);
862
863 // Only first argument is a string.
864 __ bind(&string1);
865 __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION);
866
867 // First argument was not a string, test second.
868 __ bind(&not_string1);
869 is_smi = masm->CheckSmi(rhs);
870 __ j(is_smi, &not_strings);
871 __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, rhs);
872 __ j(above_equal, &not_strings);
873
874 // Only second argument is a string.
875 __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION);
876
877 __ bind(&not_strings);
878 // Neither argument is a string.
879 __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
880 break;
881 }
882 case Token::SUB:
883 __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
884 break;
885 case Token::MUL:
886 __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
887 break;
888 case Token::DIV:
889 __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
890 break;
891 case Token::MOD:
892 __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
893 break;
894 case Token::BIT_OR:
895 __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
896 break;
897 case Token::BIT_AND:
898 __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
899 break;
900 case Token::BIT_XOR:
901 __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
902 break;
903 case Token::SAR:
904 __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
905 break;
906 case Token::SHL:
907 __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
908 break;
909 case Token::SHR:
910 __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
911 break;
912 default:
913 UNREACHABLE();
914 }
915 }
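
The bit-op cases above rely on the STATIC_ASSERT(kSmiValueSize == 32): on x64, V8 at this point keeps a smi's 32-bit payload in the upper half of the 64-bit word, so tagging and untagging are plain shifts. A sketch of what Integer32ToSmi / SmiToInteger32 amount to:

```cpp
#include <cstdint>

// Tag: move the 32-bit value into the high half (low half stays zero).
inline int64_t Integer32ToSmi(int32_t value) {
  return static_cast<int64_t>(value) << 32;
}

// Untag: arithmetic shift back down (sign-preserving on two's complement).
inline int32_t SmiToInteger32(int64_t smi) {
  return static_cast<int32_t>(smi >> 32);
}
```
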
916
917
918 void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) {
919 ASSERT(!HasArgsInRegisters());
920 __ movq(rax, Operand(rsp, 1 * kPointerSize));
921 __ movq(rdx, Operand(rsp, 2 * kPointerSize));
922 }
923
924
925 void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) {
926 // If arguments are not passed in registers remove them from the stack before
927 // returning.
928 if (!HasArgsInRegisters()) {
929 __ ret(2 * kPointerSize); // Remove both operands
930 } else {
931 __ ret(0);
932 }
933 }
934
935
936 void GenericBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
937 ASSERT(HasArgsInRegisters());
938 __ pop(rcx);
939 if (HasArgsReversed()) {
940 __ push(rax);
941 __ push(rdx);
942 } else {
943 __ push(rdx);
944 __ push(rax);
945 }
946 __ push(rcx);
947 }
948
949
950 void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
951 Label get_result;
952
953 // Ensure the operands are on the stack.
954 if (HasArgsInRegisters()) {
955 GenerateRegisterArgsPush(masm);
956 }
957
958 // Left and right arguments are already on stack.
959 __ pop(rcx); // Save the return address.
960
961 // Push this stub's key.
962 __ Push(Smi::FromInt(MinorKey()));
963
964 // Although the operation and the type info are encoded into the key,
965 // the encoding is opaque, so push them too.
966 __ Push(Smi::FromInt(op_));
967
968 __ Push(Smi::FromInt(runtime_operands_type_));
969
970 __ push(rcx); // The return address.
971
972 // Perform patching to an appropriate fast case and return the result.
973 __ TailCallExternalReference(
974 ExternalReference(IC_Utility(IC::kBinaryOp_Patch)),
975 5,
976 1);
977 }
978
979
980 Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
981 GenericBinaryOpStub stub(key, type_info);
982 return stub.GetCode();
983 }
984
985
986 void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
987 // Input on stack:
988 // rsp[8]: argument (should be number).
989 // rsp[0]: return address.
990 Label runtime_call;
991 Label runtime_call_clear_stack;
992 Label input_not_smi;
993 Label loaded;
994 // Test that rax is a number.
995 __ movq(rax, Operand(rsp, kPointerSize));
996 __ JumpIfNotSmi(rax, &input_not_smi);
997 // Input is a smi. Untag and load it onto the FPU stack.
998 // Then load the bits of the double into rbx.
999 __ SmiToInteger32(rax, rax);
1000 __ subq(rsp, Immediate(kPointerSize));
1001 __ cvtlsi2sd(xmm1, rax);
1002 __ movsd(Operand(rsp, 0), xmm1);
1003 __ movq(rbx, xmm1);
1004 __ movq(rdx, xmm1);
1005 __ fld_d(Operand(rsp, 0));
1006 __ addq(rsp, Immediate(kPointerSize));
1007 __ jmp(&loaded);
1008
1009 __ bind(&input_not_smi);
1010 // Check if input is a HeapNumber.
1011 __ Move(rbx, Factory::heap_number_map());
1012 __ cmpq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
1013 __ j(not_equal, &runtime_call);
1014 // Input is a HeapNumber. Push it on the FPU stack and load its
1015 // bits into rbx.
1016 __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
1017 __ movq(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
1018 __ movq(rdx, rbx);
1019 __ bind(&loaded);
1020 // ST[0] == double value
1021 // rbx = bits of double value.
1022 // rdx = also bits of double value.
1023 // Compute hash (h is 32 bits, bits are 64 and the shifts are arithmetic):
1024 // h = h0 = bits ^ (bits >> 32);
1025 // h ^= h >> 16;
1026 // h ^= h >> 8;
1027 // h = h & (cacheSize - 1);
1028 // or h = (h0 ^ (h0 >> 8) ^ (h0 >> 16) ^ (h0 >> 24)) & (cacheSize - 1)
1029 __ sar(rdx, Immediate(32));
1030 __ xorl(rdx, rbx);
1031 __ movl(rcx, rdx);
1032 __ movl(rax, rdx);
1033 __ movl(rdi, rdx);
1034 __ sarl(rdx, Immediate(8));
1035 __ sarl(rcx, Immediate(16));
1036 __ sarl(rax, Immediate(24));
1037 __ xorl(rcx, rdx);
1038 __ xorl(rax, rdi);
1039 __ xorl(rcx, rax);
1040 ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize));
1041 __ andl(rcx, Immediate(TranscendentalCache::kCacheSize - 1));
1042
1043 // ST[0] == double value.
1044 // rbx = bits of double value.
1045 // rcx = TranscendentalCache::hash(double value).
1046 __ movq(rax, ExternalReference::transcendental_cache_array_address());
1047 // rax points to cache array.
1048 __ movq(rax, Operand(rax, type_ * sizeof(TranscendentalCache::caches_[0])));
1049 // rax points to the cache for the type type_.
1050 // If NULL, the cache hasn't been initialized yet, so go through runtime.
1051 __ testq(rax, rax);
1052 __ j(zero, &runtime_call_clear_stack);
1053 #ifdef DEBUG
1054 // Check that the layout of cache elements match expectations.
1055 { // NOLINT - doesn't like a single brace on a line.
1056 TranscendentalCache::Element test_elem[2];
1057 char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
1058 char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
1059 char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
1060 char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
1061 char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
1062 // Two uint_32's and a pointer per element.
1063 CHECK_EQ(16, static_cast<int>(elem2_start - elem_start));
1064 CHECK_EQ(0, static_cast<int>(elem_in0 - elem_start));
1065 CHECK_EQ(kIntSize, static_cast<int>(elem_in1 - elem_start));
1066 CHECK_EQ(2 * kIntSize, static_cast<int>(elem_out - elem_start));
1067 }
1068 #endif
1069 // Find the address of the rcx'th entry in the cache, i.e., &rax[rcx*16].
1070 __ addl(rcx, rcx);
1071 __ lea(rcx, Operand(rax, rcx, times_8, 0));
1072 // Check if cache matches: Double value is stored in uint32_t[2] array.
1073 Label cache_miss;
1074 __ cmpq(rbx, Operand(rcx, 0));
1075 __ j(not_equal, &cache_miss);
1076 // Cache hit!
1077 __ movq(rax, Operand(rcx, 2 * kIntSize));
1078 __ fstp(0); // Clear FPU stack.
1079 __ ret(kPointerSize);
1080
1081 __ bind(&cache_miss);
1082 // Update cache with new value.
1083 Label nan_result;
1084 GenerateOperation(masm, &nan_result);
1085 __ AllocateHeapNumber(rax, rdi, &runtime_call_clear_stack);
1086 __ movq(Operand(rcx, 0), rbx);
1087 __ movq(Operand(rcx, 2 * kIntSize), rax);
1088 __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
1089 __ ret(kPointerSize);
1090
1091 __ bind(&runtime_call_clear_stack);
1092 __ fstp(0);
1093 __ bind(&runtime_call);
1094 __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1);
1095
1096 __ bind(&nan_result);
1097 __ fstp(0); // Remove argument from FPU stack.
1098 __ LoadRoot(rax, Heap::kNanValueRootIndex);
1099 __ movq(Operand(rcx, 0), rbx);
1100 __ movq(Operand(rcx, 2 * kIntSize), rax);
1101 __ ret(kPointerSize);
1102 }
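
The hash described in the comment block above can be written compactly on the raw IEEE-754 bits; this sketch is equivalent to the shift/xor sequence the stub emits (the second form given in the comment), with kCacheSize passed in as a parameter.

```cpp
#include <cstdint>
#include <cstring>

// Cache index for a double input; cache_size must be a power of two.
uint32_t TranscendentalHash(double input, uint32_t cache_size) {
  uint64_t bits;
  std::memcpy(&bits, &input, sizeof(bits));          // rbx in the stub
  uint32_t h = static_cast<uint32_t>(bits ^ (bits >> 32));  // h0
  h ^= h >> 16;   // folds in h0 >> 16
  h ^= h >> 8;    // folds in h0 >> 8 and h0 >> 24
  return h & (cache_size - 1);
}
```
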
1103
1104
1105 Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
1106 switch (type_) {
1107 // Add more cases when necessary.
1108 case TranscendentalCache::SIN: return Runtime::kMath_sin;
1109 case TranscendentalCache::COS: return Runtime::kMath_cos;
1110 default:
1111 UNIMPLEMENTED();
1112 return Runtime::kAbort;
1113 }
1114 }
1115
1116
1117 void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm,
1118 Label* on_nan_result) {
1119 // Registers:
1120 // rbx: Bits of input double. Must be preserved.
1121 // rcx: Pointer to cache entry. Must be preserved.
1122 // st(0): Input double
1123 Label done;
1124 ASSERT(type_ == TranscendentalCache::SIN ||
1125 type_ == TranscendentalCache::COS);
1126 // More transcendental types can be added later.
1127
1128 // Both fsin and fcos require arguments in the range +/-2^63 and
1129 // return NaN for infinities and NaN. They can share all code except
1130 // the actual fsin/fcos operation.
1131 Label in_range;
1132 // If the argument is outside the range -2^63..2^63, fsin/fcos don't
1133 // work, so we must reduce it to the appropriate range.
1134 __ movq(rdi, rbx);
1135 // Move exponent and sign bits to low bits.
1136 __ shr(rdi, Immediate(HeapNumber::kMantissaBits));
1137 // Remove sign bit.
1138 __ andl(rdi, Immediate((1 << HeapNumber::kExponentBits) - 1));
1139 int supported_exponent_limit = (63 + HeapNumber::kExponentBias);
1140 __ cmpl(rdi, Immediate(supported_exponent_limit));
1141 __ j(below, &in_range);
1142 // Check for infinity and NaN. Both return NaN for sin.
1143 __ cmpl(rdi, Immediate(0x7ff));
1144 __ j(equal, on_nan_result);
1145
1146 // Use fprem1 to restrict the argument to the range +/-2*PI.
1147 __ fldpi();
1148 __ fadd(0);
1149 __ fld(1);
1150 // FPU Stack: input, 2*pi, input.
1151 {
1152 Label no_exceptions;
1153 __ fwait();
1154 __ fnstsw_ax();
1155 // Clear if Illegal Operand or Zero Division exceptions are set.
1156 __ testl(rax, Immediate(5)); // #IO and #ZD flags of FPU status word.
1157 __ j(zero, &no_exceptions);
1158 __ fnclex();
1159 __ bind(&no_exceptions);
1160 }
1161
1162 // Compute st(0) % st(1)
1163 {
1164 Label partial_remainder_loop;
1165 __ bind(&partial_remainder_loop);
1166 __ fprem1();
1167 __ fwait();
1168 __ fnstsw_ax();
1169 __ testl(rax, Immediate(0x400)); // Check C2 bit of FPU status word.
1170 // If C2 is set, computation only has partial result. Loop to
1171 // continue computation.
1172 __ j(not_zero, &partial_remainder_loop);
1173 }
1174 // FPU Stack: input, 2*pi, input % 2*pi
1175 __ fstp(2);
1176 // FPU Stack: input % 2*pi, 2*pi,
1177 __ fstp(0);
1178 // FPU Stack: input % 2*pi
1179 __ bind(&in_range);
1180 switch (type_) {
1181 case TranscendentalCache::SIN:
1182 __ fsin();
1183 break;
1184 case TranscendentalCache::COS:
1185 __ fcos();
1186 break;
1187 default:
1188 UNREACHABLE();
1189 }
1190 __ bind(&done);
1191 }
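
The exponent test at the top of GenerateOperation classifies the argument before any x87 work. A sketch of the same classification on the bit pattern, using the IEEE-754 double layout (52 mantissa bits, 11 exponent bits, bias 1023); the enum and function names are hypothetical.

```cpp
#include <cstdint>
#include <cstring>

enum class SinCosClass { kInRange, kNeedsRangeReduction, kNaNResult };

// fsin/fcos only accept |x| < 2^63; infinities and NaNs yield NaN directly.
SinCosClass ClassifyFsinArg(double x) {
  const int kExponentBias = 1023;
  uint64_t bits;
  std::memcpy(&bits, &x, sizeof(bits));
  uint32_t exponent = (bits >> 52) & 0x7ff;  // drop sign, keep exponent
  if (exponent == 0x7ff) return SinCosClass::kNaNResult;        // inf or NaN
  if (exponent < 63 + kExponentBias) return SinCosClass::kInRange;
  return SinCosClass::kNeedsRangeReduction;  // the fprem1 loop above
}
```
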
1192
1193
1194 // Get the integer part of a heap number.
1195 // Overwrites the contents of rdi, rbx and rcx. Result cannot be rdi or rbx.
1196 void IntegerConvert(MacroAssembler* masm,
1197 Register result,
1198 Register source) {
1199 // Result may be rcx. If result and source are the same register, source will
1200 // be overwritten.
1201 ASSERT(!result.is(rdi) && !result.is(rbx));
1202 // TODO(lrn): When type info reaches here, if value is a 32-bit integer, use
1203 // cvttsd2si (32-bit version) directly.
1204 Register double_exponent = rbx;
1205 Register double_value = rdi;
1206 Label done, exponent_63_plus;
1207 // Get double and extract exponent.
1208 __ movq(double_value, FieldOperand(source, HeapNumber::kValueOffset));
1209 // Clear result preemptively, in case we need to return zero.
1210 __ xorl(result, result);
1211 __ movq(xmm0, double_value); // Save copy in xmm0 in case we need it there.
1212 // Double to remove the sign bit, shift the exponent down to the low bits,
1213 // and subtract the bias to get the unshifted, unbiased exponent.
1214 __ lea(double_exponent, Operand(double_value, double_value, times_1, 0));
1215 __ shr(double_exponent, Immediate(64 - HeapNumber::kExponentBits));
1216 __ subl(double_exponent, Immediate(HeapNumber::kExponentBias));
1217 // Check whether the exponent is too big for a 63 bit unsigned integer.
1218 __ cmpl(double_exponent, Immediate(63));
1219 __ j(above_equal, &exponent_63_plus);
1220 // Handle exponent range 0..62.
1221 __ cvttsd2siq(result, xmm0);
1222 __ jmp(&done);
1223
1224 __ bind(&exponent_63_plus);
1225 // Exponent negative or 63+.
1226 __ cmpl(double_exponent, Immediate(83));
1227 // If the exponent is negative or above 83, the number has no significant
1228 // bits in the range 0..2^31, so the result is zero (result already holds 0).
1229 __ j(above, &done);
1230
1231 // Exponent in range 63..83.
1232 // Mantissa * 2^exponent contains bits in the range 2^0..2^31, namely
1233 // the least significant exponent-52 bits.
1234
1235 // Negate low bits of mantissa if value is negative.
1236 __ addq(double_value, double_value); // Move sign bit to carry.
1237 __ sbbl(result, result); // And convert carry to -1 in result register.
1238 // If the value is negative, do (double_value-1)^-1, otherwise (double_value-0)^0.
1239 __ addl(double_value, result);
1240 // Do xor in opposite directions depending on where we want the result
1241 // (depending on whether result is rcx or not).
1242
1243 if (result.is(rcx)) {
1244 __ xorl(double_value, result);
1245 // Left shift mantissa by (exponent - mantissabits - 1) to save the
1246 // bits that have positional values below 2^32 (the extra -1 comes from the
1247 // doubling done above to move the sign bit into the carry flag).
1248 __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
1249 __ shll_cl(double_value);
1250 __ movl(result, double_value);
1251 } else {
1252 // As the then-branch, but move double-value to result before shifting.
1253 __ xorl(result, double_value);
1254 __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
1255 __ shll_cl(result);
1256 }
1257
1258 __ bind(&done);
1259 }
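
A C-level sketch of what IntegerConvert computes: the low 32 bits of the integer part of a double, taken straight from the bit pattern in the ECMAScript ToInt32 style. Exponents 0..62 use plain truncation (cvttsd2siq above); 63..83 shift the mantissa so the wanted bits land in the low word.

```cpp
#include <cstdint>
#include <cstring>

int32_t IntegerConvert(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  int exponent = static_cast<int>((bits >> 52) & 0x7ff) - 1023;
  if (exponent < 0) return 0;    // |value| < 1: no integer bits at all
  if (exponent <= 62) {          // truncation cannot overflow an int64 here
    return static_cast<int32_t>(static_cast<int64_t>(value));
  }
  if (exponent > 83) return 0;   // lowest mantissa bit already >= 2^32
  // Restore the implicit leading 1, then shift so positional values
  // 2^0..2^31 land in the low 32 bits.
  uint64_t mantissa = (bits & ((uint64_t{1} << 52) - 1)) | (uint64_t{1} << 52);
  uint32_t low = static_cast<uint32_t>(mantissa << (exponent - 52));
  // Two's-complement negate the low bits for negative inputs, as the
  // add/sbb/xor trick above does.
  return static_cast<int32_t>((bits >> 63) ? 0u - low : low);
}
```
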
1260
1261
1262 // Input: rdx, rax are the left and right objects of a bit op.
1263 // Output: rax, rcx are left and right integers for a bit op.
1264 void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm) {
1265 // Check float operands.
1266 Label done;
1267 Label rax_is_smi;
1268 Label rax_is_object;
1269 Label rdx_is_object;
1270
1271 __ JumpIfNotSmi(rdx, &rdx_is_object);
1272 __ SmiToInteger32(rdx, rdx);
1273 __ JumpIfSmi(rax, &rax_is_smi);
1274
1275 __ bind(&rax_is_object);
1276 IntegerConvert(masm, rcx, rax); // Uses rdi, rcx and rbx.
1277 __ jmp(&done);
1278
1279 __ bind(&rdx_is_object);
1280 IntegerConvert(masm, rdx, rdx); // Uses rdi, rcx and rbx.
1281 __ JumpIfNotSmi(rax, &rax_is_object);
1282 __ bind(&rax_is_smi);
1283 __ SmiToInteger32(rcx, rax);
1284
1285 __ bind(&done);
1286 __ movl(rax, rdx);
1287 }
1288
1289
1290 // Input: rdx, rax are the left and right objects of a bit op.
1291 // Output: rax, rcx are left and right integers for a bit op.
1292 void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
1293 Label* conversion_failure,
1294 Register heap_number_map) {
1295 // Check float operands.
1296 Label arg1_is_object, check_undefined_arg1;
1297 Label arg2_is_object, check_undefined_arg2;
1298 Label load_arg2, done;
1299
1300 __ JumpIfNotSmi(rdx, &arg1_is_object);
1301 __ SmiToInteger32(rdx, rdx);
1302 __ jmp(&load_arg2);
1303
1304 // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
1305 __ bind(&check_undefined_arg1);
1306 __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
1307 __ j(not_equal, conversion_failure);
1308 __ movl(rdx, Immediate(0));
1309 __ jmp(&load_arg2);
1310
1311 __ bind(&arg1_is_object);
1312 __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), heap_number_map);
1313 __ j(not_equal, &check_undefined_arg1);
1314 // Get the untagged integer version of the rdx heap number in rdx.
1315 IntegerConvert(masm, rdx, rdx);
1316
1317 // Here rdx has the untagged integer, rax has a Smi or a heap number.
1318 __ bind(&load_arg2);
1319 // Test if arg2 is a Smi.
1320 __ JumpIfNotSmi(rax, &arg2_is_object);
1321 __ SmiToInteger32(rax, rax);
1322 __ movl(rcx, rax);
1323 __ jmp(&done);
1324
1325 // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
1326 __ bind(&check_undefined_arg2);
1327 __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
1328 __ j(not_equal, conversion_failure);
1329 __ movl(rcx, Immediate(0));
1330 __ jmp(&done);
1331
1332 __ bind(&arg2_is_object);
1333 __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), heap_number_map);
1334 __ j(not_equal, &check_undefined_arg2);
1335 // Get the untagged integer version of the rax heap number in rcx.
1336 IntegerConvert(masm, rcx, rax);
1337 __ bind(&done);
1338 __ movl(rax, rdx);
1339 }
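
Summarizing the per-operand conversion LoadAsIntegers performs: smis untag, heap numbers truncate via IntegerConvert (sketched above), undefined becomes zero per ECMA-262 section 9.5, and anything else takes the conversion_failure path. The tagged-operand type here is hypothetical.

```cpp
#include <cstdint>

int32_t IntegerConvert(double value);  // bit-level truncation sketched above

// Hypothetical tagged operand, for illustration only.
struct BitOpOperand {
  enum Kind { kSmi, kHeapNumber, kUndefined, kOther } kind;
  int32_t smi = 0;
  double number = 0.0;
};

bool ToBitOpOperand(const BitOpOperand& v, int32_t* out) {
  switch (v.kind) {
    case BitOpOperand::kSmi:        *out = v.smi; return true;
    case BitOpOperand::kHeapNumber: *out = IntegerConvert(v.number); return true;
    case BitOpOperand::kUndefined:  *out = 0; return true;  // ECMA-262, 9.5
    default:                        return false;  // conversion_failure path
  }
}
```
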
1340
1341
1342 void FloatingPointHelper::LoadSSE2SmiOperands(MacroAssembler* masm) {
1343 __ SmiToInteger32(kScratchRegister, rdx);
1344 __ cvtlsi2sd(xmm0, kScratchRegister);
1345 __ SmiToInteger32(kScratchRegister, rax);
1346 __ cvtlsi2sd(xmm1, kScratchRegister);
1347 }
1348
1349
1350 void FloatingPointHelper::LoadSSE2NumberOperands(MacroAssembler* masm) {
1351 Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, done;
1352 // Load operand in rdx into xmm0.
1353 __ JumpIfSmi(rdx, &load_smi_rdx);
1354 __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
1355 // Load operand in rax into xmm1.
1356 __ JumpIfSmi(rax, &load_smi_rax);
1357 __ bind(&load_nonsmi_rax);
1358 __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
1359 __ jmp(&done);
1360
1361 __ bind(&load_smi_rdx);
1362 __ SmiToInteger32(kScratchRegister, rdx);
1363 __ cvtlsi2sd(xmm0, kScratchRegister);
1364 __ JumpIfNotSmi(rax, &load_nonsmi_rax);
1365
1366 __ bind(&load_smi_rax);
1367 __ SmiToInteger32(kScratchRegister, rax);
1368 __ cvtlsi2sd(xmm1, kScratchRegister);
1369
1370 __ bind(&done);
1371 }
1372
1373
1374 void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm,
1375 Label* not_numbers) {
1376 Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, load_float_rax, done;
1377 // Load operand in rdx into xmm0, or branch to not_numbers.
1378 __ LoadRoot(rcx, Heap::kHeapNumberMapRootIndex);
1379 __ JumpIfSmi(rdx, &load_smi_rdx);
1380 __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), rcx);
1381 __ j(not_equal, not_numbers); // Argument in rdx is not a number.
1382 __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
1383 // Load operand in rax into xmm1, or branch to not_numbers.
1384 __ JumpIfSmi(rax, &load_smi_rax);
1385
1386 __ bind(&load_nonsmi_rax);
1387 __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), rcx);
1388 __ j(not_equal, not_numbers);
1389 __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
1390 __ jmp(&done);
1391
1392 __ bind(&load_smi_rdx);
1393 __ SmiToInteger32(kScratchRegister, rdx);
1394 __ cvtlsi2sd(xmm0, kScratchRegister);
1395 __ JumpIfNotSmi(rax, &load_nonsmi_rax);
1396
1397 __ bind(&load_smi_rax);
1398 __ SmiToInteger32(kScratchRegister, rax);
1399 __ cvtlsi2sd(xmm1, kScratchRegister);
1400 __ bind(&done);
1401 }
1402
1403
1404 void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
1405 Label slow, done;
1406
1407 if (op_ == Token::SUB) {
1408 // Check whether the value is a smi.
1409 Label try_float;
1410 __ JumpIfNotSmi(rax, &try_float);
1411
1412 if (negative_zero_ == kIgnoreNegativeZero) {
1413 __ SmiCompare(rax, Smi::FromInt(0));
1414 __ j(equal, &done);
1415 }
1416
1417 // Enter runtime system if the value of the smi is zero
1418 // to make sure that we switch between 0 and -0.
1419 // Also enter it if the value of the smi is Smi::kMinValue.
1420 __ SmiNeg(rax, rax, &done);
1421
1422 // Either zero or Smi::kMinValue, neither of which becomes a smi when
1423 // negated.
1424 if (negative_zero_ == kStrictNegativeZero) {
1425 __ SmiCompare(rax, Smi::FromInt(0));
1426 __ j(not_equal, &slow);
1427 __ Move(rax, Factory::minus_zero_value());
1428 __ jmp(&done);
1429 } else {
1430 __ jmp(&slow);
1431 }
1432
1433 // Try floating point case.
1434 __ bind(&try_float);
1435 __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
1436 __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
1437 __ j(not_equal, &slow);
1438 // Operand is a float, negate its value by flipping sign bit.
1439 __ movq(rdx, FieldOperand(rax, HeapNumber::kValueOffset));
1440 __ movq(kScratchRegister, Immediate(0x01));
1441 __ shl(kScratchRegister, Immediate(63));
1442 __ xor_(rdx, kScratchRegister); // Flip sign.
1443 // rdx is value to store.
1444 if (overwrite_ == UNARY_OVERWRITE) {
1445 __ movq(FieldOperand(rax, HeapNumber::kValueOffset), rdx);
1446 } else {
1447 __ AllocateHeapNumber(rcx, rbx, &slow);
1448 // rcx: allocated 'empty' number
1449 __ movq(FieldOperand(rcx, HeapNumber::kValueOffset), rdx);
1450 __ movq(rax, rcx);
1451 }
1452 } else if (op_ == Token::BIT_NOT) {
1453 // Check if the operand is a heap number.
1454 __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
1455 __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
1456 __ j(not_equal, &slow);
1457
1458 // Convert the heap number in rax to an untagged integer in rcx.
1459 IntegerConvert(masm, rax, rax);
1460
1461 // Do the bitwise operation and smi tag the result.
1462 __ notl(rax);
1463 __ Integer32ToSmi(rax, rax);
1464 }
1465
1466 // Return from the stub.
1467 __ bind(&done);
1468 __ StubReturn(1);
1469
1470 // Handle the slow case by jumping to the JavaScript builtin.
1471 __ bind(&slow);
1472 __ pop(rcx); // pop return address
1473 __ push(rax);
1474 __ push(rcx); // push return address
1475 switch (op_) {
1476 case Token::SUB:
1477 __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
1478 break;
1479 case Token::BIT_NOT:
1480 __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
1481 break;
1482 default:
1483 UNREACHABLE();
1484 }
1485 }
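
The float path of the SUB case negates without arithmetic: IEEE-754 negation is just a sign-bit flip, which is why the stub XORs the boxed bits with 1 << 63. A minimal sketch:

```cpp
#include <cstdint>
#include <cstring>

double NegateDouble(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  bits ^= uint64_t{1} << 63;  // flip sign; works for 0, inf and NaN too
  std::memcpy(&value, &bits, sizeof(bits));
  return value;
}
```
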
1486
1487
1488 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
1489 // The key is in rdx and the parameter count is in rax.
1490
1491 // The displacement is used for skipping the frame pointer on the
1492 // stack. It is the offset of the last parameter (if any) relative
1493 // to the frame pointer.
1494 static const int kDisplacement = 1 * kPointerSize;
1495
1496 // Check that the key is a smi.
1497 Label slow;
1498 __ JumpIfNotSmi(rdx, &slow);
1499
1500 // Check if the calling frame is an arguments adaptor frame.
1501 Label adaptor;
1502 __ movq(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
1503 __ SmiCompare(Operand(rbx, StandardFrameConstants::kContextOffset),
1504 Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
1505 __ j(equal, &adaptor);
1506
1507 // Check index against formal parameters count limit passed in
1508 // through register rax. Use unsigned comparison to get negative
1509 // check for free.
1510 __ cmpq(rdx, rax);
1511 __ j(above_equal, &slow);
1512
1513 // Read the argument from the stack and return it.
1514 SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
1515 __ lea(rbx, Operand(rbp, index.reg, index.scale, 0));
1516 index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
1517 __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
1518 __ Ret();
1519
1520 // Arguments adaptor case: Check index against actual arguments
1521 // limit found in the arguments adaptor frame. Use unsigned
1522 // comparison to get negative check for free.
1523 __ bind(&adaptor);
1524 __ movq(rcx, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
1525 __ cmpq(rdx, rcx);
1526 __ j(above_equal, &slow);
1527
1528 // Read the argument from the stack and return it.
1529 index = masm->SmiToIndex(rax, rcx, kPointerSizeLog2);
1530 __ lea(rbx, Operand(rbx, index.reg, index.scale, 0));
1531 index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
1532 __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
1533 __ Ret();
1534
1535 // Slow-case: Handle non-smi or out-of-bounds access to arguments
1536 // by calling the runtime system.
1537 __ bind(&slow);
1538 __ pop(rbx); // Return address.
1539 __ push(rdx);
1540 __ push(rbx);
1541 __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
1542 }
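
The "negative check for free" trick used twice above: comparing a possibly-negative index against the length as unsigned values makes any negative index look enormous, so one unsigned branch covers both the out-of-range and the negative case.

```cpp
#include <cstdint>

// One comparison instead of two: index < 0 wraps to a huge unsigned value.
bool InBounds(int64_t index, int64_t length) {
  return static_cast<uint64_t>(index) < static_cast<uint64_t>(length);
}
```
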
1543
1544
1545 void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
1546 // rsp[0] : return address
1547 // rsp[8] : number of parameters
1548 // rsp[16] : receiver displacement
1549 // rsp[24] : function
1550
1551 // The displacement is used for skipping the return address and the
1552 // frame pointer on the stack. It is the offset of the last
1553 // parameter (if any) relative to the frame pointer.
1554 static const int kDisplacement = 2 * kPointerSize;
1555
1556 // Check if the calling frame is an arguments adaptor frame.
1557 Label adaptor_frame, try_allocate, runtime;
1558 __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
1559 __ SmiCompare(Operand(rdx, StandardFrameConstants::kContextOffset),
1560 Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
1561 __ j(equal, &adaptor_frame);
1562
1563 // Get the length from the frame.
1564 __ SmiToInteger32(rcx, Operand(rsp, 1 * kPointerSize));
1565 __ jmp(&try_allocate);
1566
1567 // Patch the arguments.length and the parameters pointer.
1568 __ bind(&adaptor_frame);
1569 __ SmiToInteger32(rcx,
1570 Operand(rdx,
1571 ArgumentsAdaptorFrameConstants::kLengthOffset));
1572 // Space on stack must already hold a smi.
1573 __ Integer32ToSmiField(Operand(rsp, 1 * kPointerSize), rcx);
1574 // Do not clobber the length index for the indexing operation, since
1575 // it is used to compute the size for allocation later.
1576 __ lea(rdx, Operand(rdx, rcx, times_pointer_size, kDisplacement));
1577 __ movq(Operand(rsp, 2 * kPointerSize), rdx);
1578
1579 // Try the new space allocation. Start out by computing the size of
1580 // the arguments object and the elements array.
1581 Label add_arguments_object;
1582 __ bind(&try_allocate);
1583 __ testl(rcx, rcx);
1584 __ j(zero, &add_arguments_object);
1585 __ leal(rcx, Operand(rcx, times_pointer_size, FixedArray::kHeaderSize));
1586 __ bind(&add_arguments_object);
1587 __ addl(rcx, Immediate(Heap::kArgumentsObjectSize));
1588
1589 // Do the allocation of both objects in one go.
1590 __ AllocateInNewSpace(rcx, rax, rdx, rbx, &runtime, TAG_OBJECT);
1591
1592 // Get the arguments boilerplate from the current (global) context.
1593 int offset = Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
1594 __ movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
1595 __ movq(rdi, FieldOperand(rdi, GlobalObject::kGlobalContextOffset));
1596 __ movq(rdi, Operand(rdi, offset));
1597
1598 // Copy the JS object part.
1599 STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
1600 __ movq(kScratchRegister, FieldOperand(rdi, 0 * kPointerSize));
1601 __ movq(rdx, FieldOperand(rdi, 1 * kPointerSize));
1602 __ movq(rbx, FieldOperand(rdi, 2 * kPointerSize));
1603 __ movq(FieldOperand(rax, 0 * kPointerSize), kScratchRegister);
1604 __ movq(FieldOperand(rax, 1 * kPointerSize), rdx);
1605 __ movq(FieldOperand(rax, 2 * kPointerSize), rbx);
1606
1607 // Set up the callee in-object property.
1608 ASSERT(Heap::arguments_callee_index == 0);
1609 __ movq(kScratchRegister, Operand(rsp, 3 * kPointerSize));
1610 __ movq(FieldOperand(rax, JSObject::kHeaderSize), kScratchRegister);
1611
1612 // Get the length (smi tagged) and set that as an in-object property too.
1613 ASSERT(Heap::arguments_length_index == 1);
1614 __ movq(rcx, Operand(rsp, 1 * kPointerSize));
1615 __ movq(FieldOperand(rax, JSObject::kHeaderSize + kPointerSize), rcx);
1616
1617 // If there are no actual arguments, we're done.
1618 Label done;
1619 __ SmiTest(rcx);
1620 __ j(zero, &done);
1621
1622 // Get the parameters pointer from the stack.
1623 __ movq(rdx, Operand(rsp, 2 * kPointerSize));
1624
1625 // Set up the elements pointer in the allocated arguments object and
1626 // initialize the header in the elements fixed array.
1627 __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSize));
1628 __ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi);
1629 __ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex);
1630 __ movq(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
1631 __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
1632 __ SmiToInteger32(rcx, rcx); // Untag length for the loop below.
1633
1634 // Copy the fixed array slots.
1635 Label loop;
1636 __ bind(&loop);
1637 __ movq(kScratchRegister, Operand(rdx, -1 * kPointerSize)); // Skip receiver.
1638 __ movq(FieldOperand(rdi, FixedArray::kHeaderSize), kScratchRegister);
1639 __ addq(rdi, Immediate(kPointerSize));
1640 __ subq(rdx, Immediate(kPointerSize));
1641 __ decl(rcx);
1642 __ j(not_zero, &loop);
1643
1644 // Return and remove the on-stack parameters.
1645 __ bind(&done);
1646 __ ret(3 * kPointerSize);
1647
1648 // Do the runtime call to allocate the arguments object.
1649 __ bind(&runtime);
1650 __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
1651 }
1652
1653
1654 void RegExpExecStub::Generate(MacroAssembler* masm) {
1655 // Jump straight to the runtime system if native RegExp support was not
1656 // selected at compile time, or if the regexp entry in generated code has
1657 // been turned off by the runtime flag.
1658 #ifdef V8_INTERPRETED_REGEXP
1659 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
1660 #else // V8_INTERPRETED_REGEXP
1661 if (!FLAG_regexp_entry_native) {
1662 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
1663 return;
1664 }
1665
1666 // Stack frame on entry.
1667 // rsp[0]: return address
1668 // rsp[8]: last_match_info (expected JSArray)
1669 // rsp[16]: previous index
1670 // rsp[24]: subject string
1671 // rsp[32]: JSRegExp object
1672
1673 static const int kLastMatchInfoOffset = 1 * kPointerSize;
1674 static const int kPreviousIndexOffset = 2 * kPointerSize;
1675 static const int kSubjectOffset = 3 * kPointerSize;
1676 static const int kJSRegExpOffset = 4 * kPointerSize;
1677
1678 Label runtime;
1679
1680 // Ensure that a RegExp stack is allocated.
1681 ExternalReference address_of_regexp_stack_memory_address =
1682 ExternalReference::address_of_regexp_stack_memory_address();
1683 ExternalReference address_of_regexp_stack_memory_size =
1684 ExternalReference::address_of_regexp_stack_memory_size();
1685 __ movq(kScratchRegister, address_of_regexp_stack_memory_size);
1686 __ movq(kScratchRegister, Operand(kScratchRegister, 0));
1687 __ testq(kScratchRegister, kScratchRegister);
1688 __ j(zero, &runtime);
1689
1691 // Check that the first argument is a JSRegExp object.
1692 __ movq(rax, Operand(rsp, kJSRegExpOffset));
1693 __ JumpIfSmi(rax, &runtime);
1694 __ CmpObjectType(rax, JS_REGEXP_TYPE, kScratchRegister);
1695 __ j(not_equal, &runtime);
1696 // Check that the RegExp has been compiled (data contains a fixed array).
1697 __ movq(rcx, FieldOperand(rax, JSRegExp::kDataOffset));
1698 if (FLAG_debug_code) {
1699 Condition is_smi = masm->CheckSmi(rcx);
1700 __ Check(NegateCondition(is_smi),
1701 "Unexpected type for RegExp data, FixedArray expected");
1702 __ CmpObjectType(rcx, FIXED_ARRAY_TYPE, kScratchRegister);
1703 __ Check(equal, "Unexpected type for RegExp data, FixedArray expected");
1704 }
1705
1706 // rcx: RegExp data (FixedArray)
1707 // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
1708 __ SmiToInteger32(rbx, FieldOperand(rcx, JSRegExp::kDataTagOffset));
1709 __ cmpl(rbx, Immediate(JSRegExp::IRREGEXP));
1710 __ j(not_equal, &runtime);
1711
1712 // rcx: RegExp data (FixedArray)
1713 // Check that the number of captures fits in the static offsets vector buffer.
1714 __ SmiToInteger32(rdx,
1715 FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
1716 // Calculate number of capture registers (number_of_captures + 1) * 2.
1717 __ leal(rdx, Operand(rdx, rdx, times_1, 2));
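 // For example, a pattern with 2 capturing groups needs (2 + 1) * 2 = 6
 // offset registers: a (start, end) pair for the whole match plus one pair
 // per group. The lea above computes exactly rdx = rdx * 2 + 2.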
1718 // Check that the static offsets vector buffer is large enough.
1719 __ cmpl(rdx, Immediate(OffsetsVector::kStaticOffsetsVectorSize));
1720 __ j(above, &runtime);
1721
1722 // rcx: RegExp data (FixedArray)
1723 // rdx: Number of capture registers
1724 // Check that the second argument is a string.
1725 __ movq(rax, Operand(rsp, kSubjectOffset));
1726 __ JumpIfSmi(rax, &runtime);
1727 Condition is_string = masm->IsObjectStringType(rax, rbx, rbx);
1728 __ j(NegateCondition(is_string), &runtime);
1729
1730 // rax: Subject string.
1731 // rcx: RegExp data (FixedArray).
1732 // rdx: Number of capture registers.
1733 // Check that the third argument is a positive smi less than the string
1734 // length. A negative value will be greater (unsigned comparison).
1735 __ movq(rbx, Operand(rsp, kPreviousIndexOffset));
1736 __ JumpIfNotSmi(rbx, &runtime);
1737 __ SmiCompare(rbx, FieldOperand(rax, String::kLengthOffset));
1738 __ j(above_equal, &runtime);
1739
1740 // rcx: RegExp data (FixedArray)
1741 // rdx: Number of capture registers
1742 // Check that the fourth object is a JSArray object.
1743 __ movq(rax, Operand(rsp, kLastMatchInfoOffset));
1744 __ JumpIfSmi(rax, &runtime);
1745 __ CmpObjectType(rax, JS_ARRAY_TYPE, kScratchRegister);
1746 __ j(not_equal, &runtime);
1747 // Check that the JSArray is in fast case.
1748 __ movq(rbx, FieldOperand(rax, JSArray::kElementsOffset));
1749 __ movq(rax, FieldOperand(rbx, HeapObject::kMapOffset));
1750 __ Cmp(rax, Factory::fixed_array_map());
1751 __ j(not_equal, &runtime);
1752 // Check that the last match info has space for the capture registers and the
1753 // additional information. Ensure no overflow in add.
1754 STATIC_ASSERT(FixedArray::kMaxLength < kMaxInt - FixedArray::kLengthOffset);
1755 __ SmiToInteger32(rax, FieldOperand(rbx, FixedArray::kLengthOffset));
1756 __ addl(rdx, Immediate(RegExpImpl::kLastMatchOverhead));
1757 __ cmpl(rdx, rax);
1758 __ j(greater, &runtime);
1759
1760 // rcx: RegExp data (FixedArray)
1761 // Check the representation and encoding of the subject string.
1762 Label seq_ascii_string, seq_two_byte_string, check_code;
1763 __ movq(rax, Operand(rsp, kSubjectOffset));
1764 __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
1765 __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
1766 // First check for flat two byte string.
1767 __ andb(rbx, Immediate(
1768 kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask));
1769 STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
1770 __ j(zero, &seq_two_byte_string);
1771 // Any other flat string must be a flat ascii string.
1772 __ testb(rbx, Immediate(kIsNotStringMask | kStringRepresentationMask));
1773 __ j(zero, &seq_ascii_string);
1774
1775 // Check for flat cons string.
1776 // A flat cons string is a cons string where the second part is the empty
1777 // string. In that case the subject string is just the first part of the cons
1778 // string. Also in this case the first part of the cons string is known to be
1779 // a sequential string or an external string.
1780 STATIC_ASSERT(kExternalStringTag != 0);
1781 STATIC_ASSERT((kConsStringTag & kExternalStringTag) == 0);
1782 __ testb(rbx, Immediate(kIsNotStringMask | kExternalStringTag));
1783 __ j(not_zero, &runtime);
1784 // String is a cons string.
1785 __ movq(rdx, FieldOperand(rax, ConsString::kSecondOffset));
1786 __ Cmp(rdx, Factory::empty_string());
1787 __ j(not_equal, &runtime);
1788 __ movq(rax, FieldOperand(rax, ConsString::kFirstOffset));
1789 __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
1790 // String is a cons string with empty second part.
1791 // rax: first part of cons string.
1792 // rbx: map of first part of cons string.
1793 // Is first part a flat two byte string?
1794 __ testb(FieldOperand(rbx, Map::kInstanceTypeOffset),
1795 Immediate(kStringRepresentationMask | kStringEncodingMask));
1796 STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0);
1797 __ j(zero, &seq_two_byte_string);
1798 // Any other flat string must be ascii.
1799 __ testb(FieldOperand(rbx, Map::kInstanceTypeOffset),
1800 Immediate(kStringRepresentationMask));
1801 __ j(not_zero, &runtime);
1802
1803 __ bind(&seq_ascii_string);
1804 // rax: subject string (sequential ascii)
1805 // rcx: RegExp data (FixedArray)
1806 __ movq(r11, FieldOperand(rcx, JSRegExp::kDataAsciiCodeOffset));
1807 __ Set(rdi, 1); // Type is ascii.
1808 __ jmp(&check_code);
1809
1810 __ bind(&seq_two_byte_string);
1811 // rax: subject string (flat two-byte)
1812 // rcx: RegExp data (FixedArray)
1813 __ movq(r11, FieldOperand(rcx, JSRegExp::kDataUC16CodeOffset));
1814 __ Set(rdi, 0); // Type is two byte.
1815
1816 __ bind(&check_code);
1817 // Check that the irregexp code has been generated for the actual string
1818 // encoding. If it has, the field contains a code object; otherwise it
1819 // contains the hole.
1820 __ CmpObjectType(r11, CODE_TYPE, kScratchRegister);
1821 __ j(not_equal, &runtime);
1822
1823 // rax: subject string
1824 // rdi: encoding of subject string (1 if ascii, 0 if two_byte);
1825 // r11: code
1826 // Load the used arguments before starting to push arguments for the call
1827 // to the native RegExp code, to avoid dealing with a changing stack height.
1828 __ SmiToInteger64(rbx, Operand(rsp, kPreviousIndexOffset));
1829
1830 // rax: subject string
1831 // rbx: previous index
1832 // rdi: encoding of subject string (1 if ascii 0 if two_byte);
1833 // r11: code
1834 // All checks done. Now push arguments for native regexp code.
1835 __ IncrementCounter(&Counters::regexp_entry_native, 1);
1836
1837 // rsi is caller save on Windows and used to pass parameter on Linux.
1838 __ push(rsi);
1839
1840 static const int kRegExpExecuteArguments = 7;
1841 __ PrepareCallCFunction(kRegExpExecuteArguments);
1842 int argument_slots_on_stack =
1843 masm->ArgumentStackSlotsForCFunctionCall(kRegExpExecuteArguments);
1844
1845 // Argument 7: Indicate that this is a direct call from JavaScript.
1846 __ movq(Operand(rsp, (argument_slots_on_stack - 1) * kPointerSize),
1847 Immediate(1));
1848
1849 // Argument 6: Start (high end) of backtracking stack memory area.
1850 __ movq(kScratchRegister, address_of_regexp_stack_memory_address);
1851 __ movq(r9, Operand(kScratchRegister, 0));
1852 __ movq(kScratchRegister, address_of_regexp_stack_memory_size);
1853 __ addq(r9, Operand(kScratchRegister, 0));
1854 // Argument 6 passed in r9 on Linux and on the stack on Windows.
1855 #ifdef _WIN64
1856 __ movq(Operand(rsp, (argument_slots_on_stack - 2) * kPointerSize), r9);
1857 #endif
1858
1859 // Argument 5: static offsets vector buffer.
1860 __ movq(r8, ExternalReference::address_of_static_offsets_vector());
1861 // Argument 5 passed in r8 on Linux and on the stack on Windows.
1862 #ifdef _WIN64
1863 __ movq(Operand(rsp, (argument_slots_on_stack - 3) * kPointerSize), r8);
1864 #endif
1865
1866 // First four arguments are passed in registers on both Linux and Windows.
1867 #ifdef _WIN64
1868 Register arg4 = r9;
1869 Register arg3 = r8;
1870 Register arg2 = rdx;
1871 Register arg1 = rcx;
1872 #else
1873 Register arg4 = rcx;
1874 Register arg3 = rdx;
1875 Register arg2 = rsi;
1876 Register arg1 = rdi;
1877 #endif
1878
1879 // Keep track of aliasing between the argX registers defined above and the registers used.
1880 // rax: subject string
1881 // rbx: previous index
1882 // rdi: encoding of subject string (1 if ascii 0 if two_byte);
1883 // r11: code
1884
1885 // Argument 4: End of string data
1886 // Argument 3: Start of string data
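 // In an ascii string each character occupies one byte, so the start and
 // end addresses below are computed with a times_1 scale; in a two-byte
 // string each character occupies two bytes, hence times_2. rbx (the
 // previous index) is scaled the same way so the search starts at the
 // right character.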
1887 Label setup_two_byte, setup_rest;
1888 __ testb(rdi, rdi);
1889 __ j(zero, &setup_two_byte);
1890 __ SmiToInteger32(rdi, FieldOperand(rax, String::kLengthOffset));
1891 __ lea(arg4, FieldOperand(rax, rdi, times_1, SeqAsciiString::kHeaderSize));
1892 __ lea(arg3, FieldOperand(rax, rbx, times_1, SeqAsciiString::kHeaderSize));
1893 __ jmp(&setup_rest);
1894 __ bind(&setup_two_byte);
1895 __ SmiToInteger32(rdi, FieldOperand(rax, String::kLengthOffset));
1896 __ lea(arg4, FieldOperand(rax, rdi, times_2, SeqTwoByteString::kHeaderSize));
1897 __ lea(arg3, FieldOperand(rax, rbx, times_2, SeqTwoByteString::kHeaderSize));
1898
1899 __ bind(&setup_rest);
1900 // Argument 2: Previous index.
1901 __ movq(arg2, rbx);
1902
1903 // Argument 1: Subject string.
1904 __ movq(arg1, rax);
1905
1906 // Locate the code entry and call it.
1907 __ addq(r11, Immediate(Code::kHeaderSize - kHeapObjectTag));
1908 __ CallCFunction(r11, kRegExpExecuteArguments);
1909
1910 // rsi is caller save, as it is used to pass parameter.
1911 __ pop(rsi);
1912
1913 // Check the result.
1914 Label success;
1915 __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::SUCCESS));
1916 __ j(equal, &success);
1917 Label failure;
1918 __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::FAILURE));
1919 __ j(equal, &failure);
1920 __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::EXCEPTION));
1921 // If the result is not EXCEPTION, it can only be RETRY; handle that in the runtime system.
1922 __ j(not_equal, &runtime);
1923 // The result must now be EXCEPTION. If there is no pending exception yet, a
1924 // stack overflow (on the backtrack stack) was detected in the RegExp code
1925 // but the exception has not been created yet. Handle it in the runtime system.
1926 // TODO(592): Rerunning the RegExp to get the stack overflow exception.
1927 ExternalReference pending_exception_address(Top::k_pending_exception_address);
1928 __ movq(kScratchRegister, pending_exception_address);
1929 __ Cmp(Operand(kScratchRegister, 0), Factory::the_hole_value());
1930 __ j(equal, &runtime);
1931 __ bind(&failure);
1932 // For failure and exception return null.
1933 __ Move(rax, Factory::null_value());
1934 __ ret(4 * kPointerSize);
1935
1936 // Load RegExp data.
1937 __ bind(&success);
1938 __ movq(rax, Operand(rsp, kJSRegExpOffset));
1939 __ movq(rcx, FieldOperand(rax, JSRegExp::kDataOffset));
1940 __ SmiToInteger32(rax,
1941 FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
1942 // Calculate number of capture registers (number_of_captures + 1) * 2.
1943 __ leal(rdx, Operand(rax, rax, times_1, 2));
1944
1945 // rdx: Number of capture registers
1946 // Load last_match_info which is still known to be a fast case JSArray.
1947 __ movq(rax, Operand(rsp, kLastMatchInfoOffset));
1948 __ movq(rbx, FieldOperand(rax, JSArray::kElementsOffset));
1949
1950 // rbx: last_match_info backing store (FixedArray)
1951 // rdx: number of capture registers
1952 // Store the capture count.
1953 __ Integer32ToSmi(kScratchRegister, rdx);
1954 __ movq(FieldOperand(rbx, RegExpImpl::kLastCaptureCountOffset),
1955 kScratchRegister);
1956 // Store last subject and last input.
1957 __ movq(rax, Operand(rsp, kSubjectOffset));
1958 __ movq(FieldOperand(rbx, RegExpImpl::kLastSubjectOffset), rax);
1959 __ movq(rcx, rbx);
1960 __ RecordWrite(rcx, RegExpImpl::kLastSubjectOffset, rax, rdi);
1961 __ movq(rax, Operand(rsp, kSubjectOffset));
1962 __ movq(FieldOperand(rbx, RegExpImpl::kLastInputOffset), rax);
1963 __ movq(rcx, rbx);
1964 __ RecordWrite(rcx, RegExpImpl::kLastInputOffset, rax, rdi);
1965
1966 // Get the static offsets vector filled by the native regexp code.
1967 __ movq(rcx, ExternalReference::address_of_static_offsets_vector());
1968
1969 // rbx: last_match_info backing store (FixedArray)
1970 // rcx: offsets vector
1971 // rdx: number of capture registers
1972 Label next_capture, done;
1973 // The capture register counter starts from the number of capture registers
1974 // and counts down until it becomes negative.
1975 __ bind(&next_capture);
1976 __ subq(rdx, Immediate(1));
1977 __ j(negative, &done);
1978 // Read the value from the static offsets vector buffer and make it a smi.
1979 __ movl(rdi, Operand(rcx, rdx, times_int_size, 0));
1980 __ Integer32ToSmi(rdi, rdi, &runtime);
1981 // Store the smi value in the last match info.
1982 __ movq(FieldOperand(rbx,
1983 rdx,
1984 times_pointer_size,
1985 RegExpImpl::kFirstCaptureOffset),
1986 rdi);
1987 __ jmp(&next_capture);
1988 __ bind(&done);
1989
1990 // Return last match info.
1991 __ movq(rax, Operand(rsp, kLastMatchInfoOffset));
1992 __ ret(4 * kPointerSize);
1993
1994 // Do the runtime call to execute the regexp.
1995 __ bind(&runtime);
1996 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
1997 #endif // V8_INTERPRETED_REGEXP
1998 }
1999
2000
2001 void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
2002 Register object,
2003 Register result,
2004 Register scratch1,
2005 Register scratch2,
2006 bool object_is_smi,
2007 Label* not_found) {
2008 // Use of registers. Register result is used as a temporary.
2009 Register number_string_cache = result;
2010 Register mask = scratch1;
2011 Register scratch = scratch2;
2012
2013 // Load the number string cache.
2014 __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
2015
2016 // Make the hash mask from the length of the number string cache. It
2017 // contains two elements (number and string) for each cache entry.
2018 __ SmiToInteger32(
2019 mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
2020 __ shrl(mask, Immediate(1));
2021 __ subq(mask, Immediate(1)); // Make mask.
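 // For example, a cache FixedArray of length 128 holds 64 (number, string)
 // pairs, so the mask becomes 128 / 2 - 1 = 63; and-ing a hash with it
 // reduces the hash modulo the entry count, which works because the cache
 // size is a power of two.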
2022
2023 // Calculate the entry in the number string cache. The hash value in the
2024 // number string cache for smis is just the smi value, and the hash for
2025 // doubles is the xor of the upper and lower words. See
2026 // Heap::GetNumberStringCache.
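 // A minimal C++ sketch of the hash computed below for doubles
 // (illustrative only; the names are not from this file):
 //   uint64_t bits = bit_cast<uint64_t>(double_value);
 //   uint32_t hash = static_cast<uint32_t>(bits) ^
 //                   static_cast<uint32_t>(bits >> 32);
 // The movl/xor_ pair below implements this with two 32-bit field loads.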
2027 Label is_smi;
2028 Label load_result_from_cache;
2029 if (!object_is_smi) {
2030 __ JumpIfSmi(object, &is_smi);
2031 __ CheckMap(object, Factory::heap_number_map(), not_found, true);
2032
2033 STATIC_ASSERT(8 == kDoubleSize);
2034 __ movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
2035 __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset));
2036 GenerateConvertHashCodeToIndex(masm, scratch, mask);
2037
2038 Register index = scratch;
2039 Register probe = mask;
2040 __ movq(probe,
2041 FieldOperand(number_string_cache,
2042 index,
2043 times_1,
2044 FixedArray::kHeaderSize));
2045 __ JumpIfSmi(probe, not_found);
2046 ASSERT(CpuFeatures::IsSupported(SSE2));
2047 CpuFeatures::Scope fscope(SSE2);
2048 __ movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
2049 __ movsd(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
2050 __ ucomisd(xmm0, xmm1);
2051 __ j(parity_even, not_found); // Bail out if NaN is involved.
2052 __ j(not_equal, not_found); // The cache did not contain this value.
2053 __ jmp(&load_result_from_cache);
2054 }
2055
2056 __ bind(&is_smi);
2057 __ SmiToInteger32(scratch, object);
2058 GenerateConvertHashCodeToIndex(masm, scratch, mask);
2059
2060 Register index = scratch;
2061 // Check if the entry is the smi we are looking for.
2062 __ cmpq(object,
2063 FieldOperand(number_string_cache,
2064 index,
2065 times_1,
2066 FixedArray::kHeaderSize));
2067 __ j(not_equal, not_found);
2068
2069 // Get the result from the cache.
2070 __ bind(&load_result_from_cache);
2071 __ movq(result,
2072 FieldOperand(number_string_cache,
2073 index,
2074 times_1,
2075 FixedArray::kHeaderSize + kPointerSize));
2076 __ IncrementCounter(&Counters::number_to_string_native, 1);
2077 }
2078
2079
2080 void NumberToStringStub::GenerateConvertHashCodeToIndex(MacroAssembler* masm,
2081 Register hash,
2082 Register mask) {
2083 __ and_(hash, mask);
2084 // Each entry in the string cache consists of two pointer-sized fields,
2085 // but the times_twice_pointer_size (multiply by 16) scale factor is not
2086 // supported by the addressing modes on the x64 platform, so the entry
2087 // index has to be premultiplied before the lookup.
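 // Concretely: entry i starts at byte offset i * 2 * kPointerSize, i.e.
 // i * 16 on x64, but addressing modes can only scale an index by 1, 2, 4
 // or 8. Shifting by kPointerSizeLog2 + 1 (= 4, a multiply by 16) folds
 // the scaling into the index itself, so the lookups above use times_1.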
2088 __ shl(hash, Immediate(kPointerSizeLog2 + 1));
2089 }
2090
2091
2092 void NumberToStringStub::Generate(MacroAssembler* masm) {
2093 Label runtime;
2094
2095 __ movq(rbx, Operand(rsp, kPointerSize));
2096
2097 // Generate code to lookup number in the number string cache.
2098 GenerateLookupNumberStringCache(masm, rbx, rax, r8, r9, false, &runtime);
2099 __ ret(1 * kPointerSize);
2100
2101 __ bind(&runtime);
2102 // Handle number to string in the runtime system if not found in the cache.
2103 __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
2104 }
2105
2106
2107 static int NegativeComparisonResult(Condition cc) {
2108 ASSERT(cc != equal);
2109 ASSERT((cc == less) || (cc == less_equal)
2110 || (cc == greater) || (cc == greater_equal));
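 // For cc == less ("a < b"), for example, this returns GREATER: when an
 // operand is undefined or NaN the comparison must come out false, and a
 // GREATER result fails every less/less_equal test (symmetrically, the
 // greater conditions get LESS).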
2111 return (cc == greater || cc == greater_equal) ? LESS : GREATER;
2112 }
2113
2114
2115 void CompareStub::Generate(MacroAssembler* masm) {
2116 ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
2117
2118 Label check_unequal_objects, done;
2119 // The compare stub returns a positive, negative, or zero 64-bit integer
2120 // value in rax, corresponding to result of comparing the two inputs.
2121 // NOTICE! This code is only reached after a smi-fast-case check, so
2122 // it is certain that at least one operand isn't a smi.
2123
2124 // Two identical objects are equal unless they are both NaN or undefined.
2125 {
2126 Label not_identical;
2127 __ cmpq(rax, rdx);
2128 __ j(not_equal, &not_identical);
2129
2130 if (cc_ != equal) {
2131 // Check for undefined. undefined OP undefined is false even though
2132 // undefined == undefined.
2133 Label check_for_nan;
2134 __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
2135 __ j(not_equal, &check_for_nan);
2136 __ Set(rax, NegativeComparisonResult(cc_));
2137 __ ret(0);
2138 __ bind(&check_for_nan);
2139 }
2140
2141 // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
2142 // so we do the second best thing - test it ourselves.
2143 // Note: if cc_ != equal, never_nan_nan_ is not used.
2144 // We cannot set rax to EQUAL until just before return because
2145 // rax must be unchanged on jump to not_identical.
2146
2147 if (never_nan_nan_ && (cc_ == equal)) {
2148 __ Set(rax, EQUAL);
2149 __ ret(0);
2150 } else {
2151 Label heap_number;
2152 // If it's not a heap number, then return equal for (in)equality operator.
2153 __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
2154 Factory::heap_number_map());
2155 __ j(equal, &heap_number);
2156 if (cc_ != equal) {
2157 // Call runtime on identical JSObjects. Otherwise return equal.
2158 __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
2159 __ j(above_equal, &not_identical);
2160 }
2161 __ Set(rax, EQUAL);
2162 __ ret(0);
2163
2164 __ bind(&heap_number);
2165 // It is a heap number, so return equal if it's not NaN.
2166 // For NaN, return 1 for every condition except greater and
2167 // greater-equal. Return -1 for them, so the comparison yields
2168 // false for all conditions except not-equal.
2169 __ Set(rax, EQUAL);
2170 __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
2171 __ ucomisd(xmm0, xmm0);
2172 __ setcc(parity_even, rax);
2173 // rax is 0 for equal non-NaN heap numbers, 1 for NaNs.
2174 if (cc_ == greater_equal || cc_ == greater) {
2175 __ neg(rax);
2176 }
2177 __ ret(0);
2178 }
2179
2180 __ bind(&not_identical);
2181 }
2182
2183 if (cc_ == equal) { // Both strict and non-strict.
2184 Label slow; // Fallthrough label.
2185
2186 // If we're doing a strict equality comparison, we don't have to do
2187 // type conversion, so we generate code to do fast comparison for objects
2188 // and oddballs. Non-smi numbers and strings still go through the usual
2189 // slow-case code.
2190 if (strict_) {
2191 // If either is a Smi (we know that not both are), then they can only
2192 // be equal if the other is a HeapNumber. If so, use the slow case.
2193 {
2194 Label not_smis;
2195 __ SelectNonSmi(rbx, rax, rdx, &not_smis);
2196
2197 // Check if the non-smi operand is a heap number.
2198 __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
2199 Factory::heap_number_map());
2200 // If heap number, handle it in the slow case.
2201 __ j(equal, &slow);
2202 // Return non-equal. ebx (the lower half of rbx) is not zero.
2203 __ movq(rax, rbx);
2204 __ ret(0);
2205
2206 __ bind(&not_smis);
2207 }
2208
2209 // If either operand is a JSObject or an oddball value, then they are not
2210 // equal since their pointers are different.
2211 // There is no test for undetectability in strict equality.
2212
2213 // If the first object is a JS object, we have done pointer comparison.
2214 STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
2215 Label first_non_object;
2216 __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
2217 __ j(below, &first_non_object);
2218 // Return non-zero (eax, the lower half of rax, is not zero).
2219 Label return_not_equal;
2220 STATIC_ASSERT(kHeapObjectTag != 0);
2221 __ bind(&return_not_equal);
2222 __ ret(0);
2223
2224 __ bind(&first_non_object);
2225 // Check for oddballs: true, false, null, undefined.
2226 __ CmpInstanceType(rcx, ODDBALL_TYPE);
2227 __ j(equal, &return_not_equal);
2228
2229 __ CmpObjectType(rdx, FIRST_JS_OBJECT_TYPE, rcx);
2230 __ j(above_equal, &return_not_equal);
2231
2232 // Check for oddballs: true, false, null, undefined.
2233 __ CmpInstanceType(rcx, ODDBALL_TYPE);
2234 __ j(equal, &return_not_equal);
2235
2236 // Fall through to the general case.
2237 }
2238 __ bind(&slow);
2239 }
2240
2241 // Generate the number comparison code.
2242 if (include_number_compare_) {
2243 Label non_number_comparison;
2244 Label unordered;
2245 FloatingPointHelper::LoadSSE2UnknownOperands(masm, &non_number_comparison);
2246 __ xorl(rax, rax);
2247 __ xorl(rcx, rcx);
2248 __ ucomisd(xmm0, xmm1);
2249
2250 // Don't base result on EFLAGS when a NaN is involved.
2251 __ j(parity_even, &unordered);
2252 // Return a result of -1, 0, or 1, based on EFLAGS.
2253 __ setcc(above, rax);
2254 __ setcc(below, rcx);
2255 __ subq(rax, rcx);
2256 __ ret(0);
2257
2258 // If one of the numbers was NaN, then the result is always false.
2259 // The cc is never not-equal.
2260 __ bind(&unordered);
2261 ASSERT(cc_ != not_equal);
2262 if (cc_ == less || cc_ == less_equal) {
2263 __ Set(rax, 1);
2264 } else {
2265 __ Set(rax, -1);
2266 }
2267 __ ret(0);
2268
2269 // The number comparison code did not provide a valid result.
2270 __ bind(&non_number_comparison);
2271 }
2272
2273 // Fast negative check for symbol-to-symbol equality.
2274 Label check_for_strings;
2275 if (cc_ == equal) {
2276 BranchIfNonSymbol(masm, &check_for_strings, rax, kScratchRegister);
2277 BranchIfNonSymbol(masm, &check_for_strings, rdx, kScratchRegister);
2278
2279 // We've already checked for object identity, so if both operands
2280 // are symbols they aren't equal. Register eax (not rax) already holds a
2281 // non-zero value, which indicates not equal, so just return.
2282 __ ret(0);
2283 }
2284
2285 __ bind(&check_for_strings);
2286
2287 __ JumpIfNotBothSequentialAsciiStrings(
2288 rdx, rax, rcx, rbx, &check_unequal_objects);
2289
2290 // Inline comparison of ascii strings.
2291 StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
2292 rdx,
2293 rax,
2294 rcx,
2295 rbx,
2296 rdi,
2297 r8);
2298
2299 #ifdef DEBUG
2300 __ Abort("Unexpected fall-through from string comparison");
2301 #endif
2302
2303 __ bind(&check_unequal_objects);
2304 if (cc_ == equal && !strict_) {
2305 // Not strict equality. Objects are unequal if
2306 // they are both JSObjects and not undetectable,
2307 // and their pointers are different.
2308 Label not_both_objects, return_unequal;
2309 // At most one is a smi, so we can test for smi by adding the two.
2310 // A smi plus a heap object has the low bit set, a heap object plus
2311 // a heap object has the low bit clear.
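 // In tag terms: a smi ends in bit 0 and a heap object pointer ends in
 // bit 1 (kHeapObjectTag), so object + object yields a sum with the low
 // bit clear while smi + object yields one with the low bit set, which is
 // what the testb below detects.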
2312 STATIC_ASSERT(kSmiTag == 0);
2313 STATIC_ASSERT(kSmiTagMask == 1);
2314 __ lea(rcx, Operand(rax, rdx, times_1, 0));
2315 __ testb(rcx, Immediate(kSmiTagMask));
2316 __ j(not_zero, &not_both_objects);
2317 __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rbx);
2318 __ j(below, &not_both_objects);
2319 __ CmpObjectType(rdx, FIRST_JS_OBJECT_TYPE, rcx);
2320 __ j(below, &not_both_objects);
2321 __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
2322 Immediate(1 << Map::kIsUndetectable));
2323 __ j(zero, &return_unequal);
2324 __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
2325 Immediate(1 << Map::kIsUndetectable));
2326 __ j(zero, &return_unequal);
2327 // The objects are both undetectable, so they both compare as the value
2328 // undefined, and are equal.
2329 __ Set(rax, EQUAL);
2330 __ bind(&return_unequal);
2331 // Return non-equal by returning the non-zero object pointer in eax,
2332 // or return equal if we fell through to here.
2333 __ ret(0);
2334 __ bind(&not_both_objects);
2335 }
2336
2337 // Push arguments below the return address to prepare jump to builtin.
2338 __ pop(rcx);
2339 __ push(rdx);
2340 __ push(rax);
2341
2342 // Figure out which native to call and set up the arguments.
2343 Builtins::JavaScript builtin;
2344 if (cc_ == equal) {
2345 builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
2346 } else {
2347 builtin = Builtins::COMPARE;
2348 __ Push(Smi::FromInt(NegativeComparisonResult(cc_)));
2349 }
2350
2351 // Restore return address on the stack.
2352 __ push(rcx);
2353
2354 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
2355 // tagged as a small integer.
2356 __ InvokeBuiltin(builtin, JUMP_FUNCTION);
2357 }
2358
2359
2360 void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
2361 Label* label,
2362 Register object,
2363 Register scratch) {
2364 __ JumpIfSmi(object, label);
2365 __ movq(scratch, FieldOperand(object, HeapObject::kMapOffset));
2366 __ movzxbq(scratch,
2367 FieldOperand(scratch, Map::kInstanceTypeOffset));
2368 // Ensure that no non-strings have the symbol bit set.
2369 STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
2370 STATIC_ASSERT(kSymbolTag != 0);
2371 __ testb(scratch, Immediate(kIsSymbolMask));
2372 __ j(zero, label);
2373 }
2374
2375
2376 void StackCheckStub::Generate(MacroAssembler* masm) {
2377 // Because builtins always remove the receiver from the stack, we
2378 // have to fake one to avoid underflowing the stack. The receiver
2379 // must be inserted below the return address on the stack so we
2380 // temporarily store that in a register.
2381 __ pop(rax);
2382 __ Push(Smi::FromInt(0));
2383 __ push(rax);
2384
2385 // Do tail-call to runtime routine.
2386 __ TailCallRuntime(Runtime::kStackGuard, 1, 1);
2387 }
2388
2389
2390 void CallFunctionStub::Generate(MacroAssembler* masm) {
2391 Label slow;
2392
2393 // If the receiver might be a value (string, number or boolean) check for this
2394 // and box it if it is.
2395 if (ReceiverMightBeValue()) {
2396 // Get the receiver from the stack.
2397 // +1 ~ return address
2398 Label receiver_is_value, receiver_is_js_object;
2399 __ movq(rax, Operand(rsp, (argc_ + 1) * kPointerSize));
2400
2401 // Check if receiver is a smi (which is a number value).
2402 __ JumpIfSmi(rax, &receiver_is_value);
2403
2404 // Check if the receiver is a valid JS object.
2405 __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rdi);
2406 __ j(above_equal, &receiver_is_js_object);
2407
2408 // Call the runtime to box the value.
2409 __ bind(&receiver_is_value);
2410 __ EnterInternalFrame();
2411 __ push(rax);
2412 __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
2413 __ LeaveInternalFrame();
2414 __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rax);
2415
2416 __ bind(&receiver_is_js_object);
2417 }
2418
2419 // Get the function to call from the stack.
2420 // +2 ~ receiver, return address
2421 __ movq(rdi, Operand(rsp, (argc_ + 2) * kPointerSize));
2422
2423 // Check that the function really is a JavaScript function.
2424 __ JumpIfSmi(rdi, &slow);
2425 // Go to the slow case if we do not have a function.
2426 __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
2427 __ j(not_equal, &slow);
2428
2429 // Fast-case: Just invoke the function.
2430 ParameterCount actual(argc_);
2431 __ InvokeFunction(rdi, actual, JUMP_FUNCTION);
2432
2433 // Slow-case: Non-function called.
2434 __ bind(&slow);
2435 // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
2436 // of the original receiver from the call site).
2437 __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rdi);
2438 __ Set(rax, argc_);
2439 __ Set(rbx, 0);
2440 __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
2441 Handle<Code> adaptor(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
2442 __ Jump(adaptor, RelocInfo::CODE_TARGET);
2443 }
2444
2445
2446 void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
2447 // The stack is expected to contain the next handler, frame pointer, state
2448 // and return address, in that order (checked by the asserts below).
2449 STATIC_ASSERT(StackHandlerConstants::kFPOffset + kPointerSize ==
2450 StackHandlerConstants::kStateOffset);
2451 STATIC_ASSERT(StackHandlerConstants::kStateOffset + kPointerSize ==
2452 StackHandlerConstants::kPCOffset);
2453
2454 ExternalReference handler_address(Top::k_handler_address);
2455 __ movq(kScratchRegister, handler_address);
2456 __ movq(rsp, Operand(kScratchRegister, 0));
2457 // get next in chain
2458 __ pop(rcx);
2459 __ movq(Operand(kScratchRegister, 0), rcx);
2460 __ pop(rbp); // pop frame pointer
2461 __ pop(rdx); // remove state
2462
2463 // Before returning we restore the context from the frame pointer if not NULL.
2464 // The frame pointer is NULL in the exception handler of a JS entry frame.
2465 __ xor_(rsi, rsi); // tentatively set context pointer to NULL
2466 Label skip;
2467 __ cmpq(rbp, Immediate(0));
2468 __ j(equal, &skip);
2469 __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
2470 __ bind(&skip);
2471 __ ret(0);
2472 }
2473
2474
2475 void ApiGetterEntryStub::Generate(MacroAssembler* masm) {
2476 Label empty_result;
2477 Label prologue;
2478 Label promote_scheduled_exception;
2479 __ EnterApiExitFrame(ExitFrame::MODE_NORMAL, kStackSpace, 0);
2480 ASSERT_EQ(kArgc, 4);
2481 #ifdef _WIN64
2482 // All the parameters should be set up by the caller.
2483 #else
2484 // Set 1st parameter register with property name.
2485 __ movq(rsi, rdx);
2486 // Second parameter register rdi should be set with pointer to AccessorInfo
2487 // by a caller.
2488 #endif
2489 // Call the api function!
2490 __ movq(rax,
2491 reinterpret_cast<int64_t>(fun()->address()),
2492 RelocInfo::RUNTIME_ENTRY);
2493 __ call(rax);
2494 // Check if the function scheduled an exception.
2495 ExternalReference scheduled_exception_address =
2496 ExternalReference::scheduled_exception_address();
2497 __ movq(rsi, scheduled_exception_address);
2498 __ Cmp(Operand(rsi, 0), Factory::the_hole_value());
2499 __ j(not_equal, &promote_scheduled_exception);
2500 #ifdef _WIN64
2501 // rax keeps a pointer to v8::Handle, unpack it.
2502 __ movq(rax, Operand(rax, 0));
2503 #endif
2504 // Check if the result handle holds 0.
2505 __ testq(rax, rax);
2506 __ j(zero, &empty_result);
2507 // It was non-zero. Dereference to get the result value.
2508 __ movq(rax, Operand(rax, 0));
2509 __ bind(&prologue);
2510 __ LeaveExitFrame(ExitFrame::MODE_NORMAL);
2511 __ ret(0);
2512 __ bind(&promote_scheduled_exception);
2513 __ TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
2514 __ bind(&empty_result);
2515 // It was zero; the result is undefined.
2516 __ Move(rax, Factory::undefined_value());
2517 __ jmp(&prologue);
2518 }
2519
2520
2521 void CEntryStub::GenerateCore(MacroAssembler* masm,
2522 Label* throw_normal_exception,
2523 Label* throw_termination_exception,
2524 Label* throw_out_of_memory_exception,
2525 bool do_gc,
2526 bool always_allocate_scope,
2527 int /* alignment_skew */) {
2528 // rax: result parameter for PerformGC, if any.
2529 // rbx: pointer to C function (C callee-saved).
2530 // rbp: frame pointer (restored after C call).
2531 // rsp: stack pointer (restored after C call).
2532 // r14: number of arguments including receiver (C callee-saved).
2533 // r12: pointer to the first argument (C callee-saved).
2534 // This pointer is reused in LeaveExitFrame(), so it is stored in a
2535 // callee-saved register.
2536
2537 // Simple results are returned in rax (both AMD64 and Win64 calling conventions).
2538 // Complex results must be written to the address passed as the first argument.
2539 // AMD64 calling convention: a struct of two pointers is returned in rax+rdx.
2540
2541 // Check stack alignment.
2542 if (FLAG_debug_code) {
2543 __ CheckStackAlignment();
2544 }
2545
2546 if (do_gc) {
2547 // Pass the failure code returned from the last attempt as the first
2548 // argument to PerformGC. No need to use PrepareCallCFunction/CallCFunction
2549 // here as the stack is known to be aligned. This function takes one
2550 // argument, which is passed in a register.
2551 #ifdef _WIN64
2552 __ movq(rcx, rax);
2553 #else // _WIN64
2554 __ movq(rdi, rax);
2555 #endif
2556 __ movq(kScratchRegister,
2557 FUNCTION_ADDR(Runtime::PerformGC),
2558 RelocInfo::RUNTIME_ENTRY);
2559 __ call(kScratchRegister);
2560 }
2561
2562 ExternalReference scope_depth =
2563 ExternalReference::heap_always_allocate_scope_depth();
2564 if (always_allocate_scope) {
2565 __ movq(kScratchRegister, scope_depth);
2566 __ incl(Operand(kScratchRegister, 0));
2567 }
2568
2569 // Call C function.
2570 #ifdef _WIN64
2571 // Windows 64-bit ABI passes arguments in rcx, rdx, r8, r9
2572 // Store Arguments object on stack, below the 4 WIN64 ABI parameter slots.
2573 __ movq(Operand(rsp, 4 * kPointerSize), r14); // argc.
2574 __ movq(Operand(rsp, 5 * kPointerSize), r12); // argv.
2575 if (result_size_ < 2) {
2576 // Pass a pointer to the Arguments object as the first argument.
2577 // Return result in single register (rax).
2578 __ lea(rcx, Operand(rsp, 4 * kPointerSize));
2579 } else {
2580 ASSERT_EQ(2, result_size_);
2581 // Pass a pointer to the result location as the first argument.
2582 __ lea(rcx, Operand(rsp, 6 * kPointerSize));
2583 // Pass a pointer to the Arguments object as the second argument.
2584 __ lea(rdx, Operand(rsp, 4 * kPointerSize));
2585 }
2586
2587 #else // _WIN64
2588 // GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9.
2589 __ movq(rdi, r14); // argc.
2590 __ movq(rsi, r12); // argv.
2591 #endif
2592 __ call(rbx);
2593 // Result is in rax - do not destroy this register!
2594
2595 if (always_allocate_scope) {
2596 __ movq(kScratchRegister, scope_depth);
2597 __ decl(Operand(kScratchRegister, 0));
2598 }
2599
2600 // Check for failure result.
2601 Label failure_returned;
2602 STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
2603 #ifdef _WIN64
2604 // If return value is on the stack, pop it to registers.
2605 if (result_size_ > 1) {
2606 ASSERT_EQ(2, result_size_);
2607 // Read result values stored on stack. Result is stored
2608 // above the four argument mirror slots and the two
2609 // Arguments object slots.
2610 __ movq(rax, Operand(rsp, 6 * kPointerSize));
2611 __ movq(rdx, Operand(rsp, 7 * kPointerSize));
2612 }
2613 #endif
2614 __ lea(rcx, Operand(rax, 1));
2615 // Lower 2 bits of rcx are 0 iff rax has failure tag.
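 // (Failure objects carry kFailureTag, 0b11, in their low two bits; the
 // STATIC_ASSERT above guarantees that adding 1 clears exactly those bits,
 // so the testl below sees zero only for failure results.)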
2616 __ testl(rcx, Immediate(kFailureTagMask));
2617 __ j(zero, &failure_returned);
2618
2619 // Exit the JavaScript to C++ exit frame.
2620 __ LeaveExitFrame(mode_, result_size_);
2621 __ ret(0);
2622
2623 // Handling of failure.
2624 __ bind(&failure_returned);
2625
2626 Label retry;
2627 // If the returned exception is RETRY_AFTER_GC, continue at the retry label.
2628 STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
2629 __ testl(rax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
2630 __ j(zero, &retry);
2631
2632 // Special handling of out of memory exceptions.
2633 __ movq(kScratchRegister, Failure::OutOfMemoryException(), RelocInfo::NONE);
2634 __ cmpq(rax, kScratchRegister);
2635 __ j(equal, throw_out_of_memory_exception);
2636
2637 // Retrieve the pending exception and clear the variable.
2638 ExternalReference pending_exception_address(Top::k_pending_exception_address);
2639 __ movq(kScratchRegister, pending_exception_address);
2640 __ movq(rax, Operand(kScratchRegister, 0));
2641 __ movq(rdx, ExternalReference::the_hole_value_location());
2642 __ movq(rdx, Operand(rdx, 0));
2643 __ movq(Operand(kScratchRegister, 0), rdx);
2644
2645 // Special handling of termination exceptions, which are uncatchable
2646 // by JavaScript code.
2647 __ CompareRoot(rax, Heap::kTerminationExceptionRootIndex);
2648 __ j(equal, throw_termination_exception);
2649
2650 // Handle normal exception.
2651 __ jmp(throw_normal_exception);
2652
2653 // Retry.
2654 __ bind(&retry);
2655 }
2656
2657
2658 void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
2659 UncatchableExceptionType type) {
2660 // Fetch top stack handler.
2661 ExternalReference handler_address(Top::k_handler_address);
2662 __ movq(kScratchRegister, handler_address);
2663 __ movq(rsp, Operand(kScratchRegister, 0));
2664
2665 // Unwind the handlers until the ENTRY handler is found.
2666 Label loop, done;
2667 __ bind(&loop);
2668 // Load the type of the current stack handler.
2669 const int kStateOffset = StackHandlerConstants::kStateOffset;
2670 __ cmpq(Operand(rsp, kStateOffset), Immediate(StackHandler::ENTRY));
2671 __ j(equal, &done);
2672 // Fetch the next handler in the list.
2673 const int kNextOffset = StackHandlerConstants::kNextOffset;
2674 __ movq(rsp, Operand(rsp, kNextOffset));
2675 __ jmp(&loop);
2676 __ bind(&done);
2677
2678 // Set the top handler address to the next handler past the current ENTRY handler.
2679 __ movq(kScratchRegister, handler_address);
2680 __ pop(Operand(kScratchRegister, 0));
2681
2682 if (type == OUT_OF_MEMORY) {
2683 // Set external caught exception to false.
2684 ExternalReference external_caught(Top::k_external_caught_exception_address);
2685 __ movq(rax, Immediate(false));
2686 __ store_rax(external_caught);
2687
2688 // Set pending exception and rax to out of memory exception.
2689 ExternalReference pending_exception(Top::k_pending_exception_address);
2690 __ movq(rax, Failure::OutOfMemoryException(), RelocInfo::NONE);
2691 __ store_rax(pending_exception);
2692 }
2693
2694 // Clear the context pointer.
2695 __ xor_(rsi, rsi);
2696
2697 // Restore registers from handler.
2698 STATIC_ASSERT(StackHandlerConstants::kNextOffset + kPointerSize ==
2699 StackHandlerConstants::kFPOffset);
2700 __ pop(rbp); // FP
2701 STATIC_ASSERT(StackHandlerConstants::kFPOffset + kPointerSize ==
2702 StackHandlerConstants::kStateOffset);
2703 __ pop(rdx); // State
2704
2705 STATIC_ASSERT(StackHandlerConstants::kStateOffset + kPointerSize ==
2706 StackHandlerConstants::kPCOffset);
2707 __ ret(0);
2708 }
2709
2710
2711 void CEntryStub::Generate(MacroAssembler* masm) {
2712 // rax: number of arguments including receiver
2713 // rbx: pointer to C function (C callee-saved)
2714 // rbp: frame pointer of calling JS frame (restored after C call)
2715 // rsp: stack pointer (restored after C call)
2716 // rsi: current context (restored)
2717
2718 // NOTE: Invocations of builtins may return failure objects
2719 // instead of a proper result. The builtin entry handles
2720 // this by performing a garbage collection and retrying the
2721 // builtin once.
2722
2723 // Enter the exit frame that transitions from JavaScript to C++.
2724 __ EnterExitFrame(mode_, result_size_);
2725
2726 // rax: Holds the context at this point, but should not be used.
2727 // On entry to code generated by GenerateCore, it must hold
2728 // a failure result if the collect_garbage argument to GenerateCore
2729 // is true. This failure result can be the result of code
2730 // generated by a previous call to GenerateCore. The value
2731 // of rax is then passed to Runtime::PerformGC.
2732 // rbx: pointer to builtin function (C callee-saved).
2733 // rbp: frame pointer of exit frame (restored after C call).
2734 // rsp: stack pointer (restored after C call).
2735 // r14: number of arguments including receiver (C callee-saved).
2736 // r12: argv pointer (C callee-saved).
2737
2738 Label throw_normal_exception;
2739 Label throw_termination_exception;
2740 Label throw_out_of_memory_exception;
2741
2742 // Call into the runtime system.
2743 GenerateCore(masm,
2744 &throw_normal_exception,
2745 &throw_termination_exception,
2746 &throw_out_of_memory_exception,
2747 false,
2748 false);
2749
2750 // Do space-specific GC and retry runtime call.
2751 GenerateCore(masm,
2752 &throw_normal_exception,
2753 &throw_termination_exception,
2754 &throw_out_of_memory_exception,
2755 true,
2756 false);
2757
2758 // Do full GC and retry runtime call one final time.
2759 Failure* failure = Failure::InternalError();
2760 __ movq(rax, failure, RelocInfo::NONE);
2761 GenerateCore(masm,
2762 &throw_normal_exception,
2763 &throw_termination_exception,
2764 &throw_out_of_memory_exception,
2765 true,
2766 true);
2767
2768 __ bind(&throw_out_of_memory_exception);
2769 GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
2770
2771 __ bind(&throw_termination_exception);
2772 GenerateThrowUncatchable(masm, TERMINATION);
2773
2774 __ bind(&throw_normal_exception);
2775 GenerateThrowTOS(masm);
2776 }
2777
2778
2779 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
2780 Label invoke, exit;
2781 #ifdef ENABLE_LOGGING_AND_PROFILING
2782 Label not_outermost_js, not_outermost_js_2;
2783 #endif
2784
2785 // Set up the frame.
2786 __ push(rbp);
2787 __ movq(rbp, rsp);
2788
2789 // Push the stack frame type marker twice.
2790 int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
2791 // Scratch register is neither callee-save, nor an argument register on any
2792 // platform. It's free to use at this point.
2793 // Cannot use smi-register for loading yet.
2794 __ movq(kScratchRegister,
2795 reinterpret_cast<uint64_t>(Smi::FromInt(marker)),
2796 RelocInfo::NONE);
2797 __ push(kScratchRegister); // context slot
2798 __ push(kScratchRegister); // function slot
2799 // Save callee-saved registers (X64/Win64 calling conventions).
2800 __ push(r12);
2801 __ push(r13);
2802 __ push(r14);
2803 __ push(r15);
2804 #ifdef _WIN64
2805 __ push(rdi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
2806 __ push(rsi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
2807 #endif
2808 __ push(rbx);
2809 // TODO(X64): On Win64, if we ever use XMM6-XMM15, the low 64 bits are
2810 // callee-save as well.
2811
2812 // Save copies of the top frame descriptor on the stack.
2813 ExternalReference c_entry_fp(Top::k_c_entry_fp_address);
2814 __ load_rax(c_entry_fp);
2815 __ push(rax);
2816
2817 // Set up the roots and smi constant registers.
2818 // Needs to be done before any further smi loads.
2819 ExternalReference roots_address = ExternalReference::roots_address();
2820 __ movq(kRootRegister, roots_address);
2821 __ InitializeSmiConstantRegister();
2822
2823 #ifdef ENABLE_LOGGING_AND_PROFILING
2824 // If this is the outermost JS call, set js_entry_sp value.
2825 ExternalReference js_entry_sp(Top::k_js_entry_sp_address);
2826 __ load_rax(js_entry_sp);
2827 __ testq(rax, rax);
2828 __ j(not_zero, &not_outermost_js);
2829 __ movq(rax, rbp);
2830 __ store_rax(js_entry_sp);
2831 __ bind(&not_outermost_js);
2832 #endif
2833
2834 // Call a faked try-block that does the invoke.
2835 __ call(&invoke);
2836
2837 // Caught exception: Store result (exception) in the pending
2838 // exception field in the JSEnv and return a failure sentinel.
2839 ExternalReference pending_exception(Top::k_pending_exception_address);
2840 __ store_rax(pending_exception);
2841 __ movq(rax, Failure::Exception(), RelocInfo::NONE);
2842 __ jmp(&exit);
2843
2844 // Invoke: Link this frame into the handler chain.
2845 __ bind(&invoke);
2846 __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
2847
2848 // Clear any pending exceptions.
2849 __ load_rax(ExternalReference::the_hole_value_location());
2850 __ store_rax(pending_exception);
2851
2852 // Fake a receiver (NULL).
2853 __ push(Immediate(0)); // receiver
2854
2855 // Invoke the function by calling through JS entry trampoline
2856 // builtin and pop the faked function when we return. We load the address
2857 // from an external reference instead of inlining the call target address
2858 // directly in the code, because the builtin stubs may not have been
2859 // generated yet at the time this code is generated.
2860 if (is_construct) {
2861 ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline);
2862 __ load_rax(construct_entry);
2863 } else {
2864 ExternalReference entry(Builtins::JSEntryTrampoline);
2865 __ load_rax(entry);
2866 }
2867 __ lea(kScratchRegister, FieldOperand(rax, Code::kHeaderSize));
2868 __ call(kScratchRegister);
2869
2870 // Unlink this frame from the handler chain.
2871 __ movq(kScratchRegister, ExternalReference(Top::k_handler_address));
2872 __ pop(Operand(kScratchRegister, 0));
2873 // Pop next_sp.
2874 __ addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
2875
2876 #ifdef ENABLE_LOGGING_AND_PROFILING
2877 // If the current rbp value is the same as the js_entry_sp value, it means
2878 // that the current frame is the outermost JS entry frame.
2879 __ movq(kScratchRegister, js_entry_sp);
2880 __ cmpq(rbp, Operand(kScratchRegister, 0));
2881 __ j(not_equal, &not_outermost_js_2);
2882 __ movq(Operand(kScratchRegister, 0), Immediate(0));
2883 __ bind(&not_outermost_js_2);
2884 #endif
2885
2886 // Restore the top frame descriptor from the stack.
2887 __ bind(&exit);
2888 __ movq(kScratchRegister, ExternalReference(Top::k_c_entry_fp_address));
2889 __ pop(Operand(kScratchRegister, 0));
2890
2891 // Restore callee-saved registers (X64 conventions).
2892 __ pop(rbx);
2893 #ifdef _WIN64
2894 // Callee-save in the Win64 ABI, arguments/volatile in the AMD64 ABI.
2895 __ pop(rsi);
2896 __ pop(rdi);
2897 #endif
2898 __ pop(r15);
2899 __ pop(r14);
2900 __ pop(r13);
2901 __ pop(r12);
2902 __ addq(rsp, Immediate(2 * kPointerSize)); // remove markers
2903
2904 // Restore frame pointer and return.
2905 __ pop(rbp);
2906 __ ret(0);
2907 }
2908
2909
2910 void InstanceofStub::Generate(MacroAssembler* masm) {
2911 // Implements "value instanceof function" operator.
2912 // Expected input state:
2913 // rsp[0] : return address
2914 // rsp[8] : function pointer
2915 // rsp[16] : value
2916 // Returns a bitwise zero to indicate that the value
2917 // is an instance of the function and anything else to
2918 // indicate that the value is not an instance.
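 // A caller can therefore branch on the result directly, e.g. with
 // "testq rax, rax" followed by j(zero, &is_instance). (Illustrative;
 // the actual call sites are outside this file.)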
2919
2920 // Get the object - go slow case if it's a smi.
2921 Label slow;
2922 __ movq(rax, Operand(rsp, 2 * kPointerSize));
2923 __ JumpIfSmi(rax, &slow);
2924
2925 // Check that the left hand is a JS object. Leave its map in rax.
2926 __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rax);
2927 __ j(below, &slow);
2928 __ CmpInstanceType(rax, LAST_JS_OBJECT_TYPE);
2929 __ j(above, &slow);
2930
2931 // Get the prototype of the function.
2932 __ movq(rdx, Operand(rsp, 1 * kPointerSize));
2933 // rdx is function, rax is map.
2934
2935 // Look up the function and the map in the instanceof cache.
2936 Label miss;
2937 __ CompareRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
2938 __ j(not_equal, &miss);
2939 __ CompareRoot(rax, Heap::kInstanceofCacheMapRootIndex);
2940 __ j(not_equal, &miss);
2941 __ LoadRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
2942 __ ret(2 * kPointerSize);
2943
2944 __ bind(&miss);
2945 __ TryGetFunctionPrototype(rdx, rbx, &slow);
2946
2947 // Check that the function prototype is a JS object.
2948 __ JumpIfSmi(rbx, &slow);
2949 __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, kScratchRegister);
2950 __ j(below, &slow);
2951 __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE);
2952 __ j(above, &slow);
2953
2954 // Register mapping:
2955 // rax is object map.
2956 // rdx is function.
2957 // rbx is function prototype.
2958 __ StoreRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
2959 __ StoreRoot(rax, Heap::kInstanceofCacheMapRootIndex);
2960
2961 __ movq(rcx, FieldOperand(rax, Map::kPrototypeOffset));
2962
2963 // Loop through the prototype chain looking for the function prototype.
2964 Label loop, is_instance, is_not_instance;
2965 __ LoadRoot(kScratchRegister, Heap::kNullValueRootIndex);
2966 __ bind(&loop);
2967 __ cmpq(rcx, rbx);
2968 __ j(equal, &is_instance);
2969 __ cmpq(rcx, kScratchRegister);
2970 // The code at is_not_instance assumes that kScratchRegister contains a
2971 // non-zero GCable value (the null object in this case).
2972 __ j(equal, &is_not_instance);
2973 __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
2974 __ movq(rcx, FieldOperand(rcx, Map::kPrototypeOffset));
2975 __ jmp(&loop);
2976
2977 __ bind(&is_instance);
2978 __ xorl(rax, rax);
2979 // Store bitwise zero in the cache. This is a Smi in GC terms.
2980 STATIC_ASSERT(kSmiTag == 0);
2981 __ StoreRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
2982 __ ret(2 * kPointerSize);
2983
2984 __ bind(&is_not_instance);
2985 // We have to store a non-zero value in the cache.
2986 __ StoreRoot(kScratchRegister, Heap::kInstanceofCacheAnswerRootIndex);
2987 __ ret(2 * kPointerSize);
2988
2989 // Slow-case: Go through the JavaScript implementation.
2990 __ bind(&slow);
2991 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
2992 }
2993
2994
2995 int CompareStub::MinorKey() {
2996 // Encode the three parameters in a unique 16 bit value. To avoid duplicate
2997 // stubs, the never-NaN-NaN condition is only taken into account if the
2998 // condition is equal.
2999 ASSERT(static_cast<unsigned>(cc_) < (1 << 12));
3000 ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
3001 return ConditionField::encode(static_cast<unsigned>(cc_))
3002 | RegisterField::encode(false) // lhs_ and rhs_ are not used
3003 | StrictField::encode(strict_)
3004 | NeverNanNanField::encode(cc_ == equal ? never_nan_nan_ : false)
3005 | IncludeNumberCompareField::encode(include_number_compare_);
3006 }
3007
3008
3009 // Unfortunately you have to run without snapshots to see most of these
3010 // names in the profile since most compare stubs end up in the snapshot.
3011 const char* CompareStub::GetName() {
3012 ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
3013
3014 if (name_ != NULL) return name_;
3015 const int kMaxNameLength = 100;
3016 name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
3017 if (name_ == NULL) return "OOM";
3018
3019 const char* cc_name;
3020 switch (cc_) {
3021 case less: cc_name = "LT"; break;
3022 case greater: cc_name = "GT"; break;
3023 case less_equal: cc_name = "LE"; break;
3024 case greater_equal: cc_name = "GE"; break;
3025 case equal: cc_name = "EQ"; break;
3026 case not_equal: cc_name = "NE"; break;
3027 default: cc_name = "UnknownCondition"; break;
3028 }
3029
3030 const char* strict_name = "";
3031 if (strict_ && (cc_ == equal || cc_ == not_equal)) {
3032 strict_name = "_STRICT";
3033 }
3034
3035 const char* never_nan_nan_name = "";
3036 if (never_nan_nan_ && (cc_ == equal || cc_ == not_equal)) {
3037 never_nan_nan_name = "_NO_NAN";
3038 }
3039
3040 const char* include_number_compare_name = "";
3041 if (!include_number_compare_) {
3042 include_number_compare_name = "_NO_NUMBER";
3043 }
3044
3045 OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
3046 "CompareStub_%s%s%s%s",
3047 cc_name,
3048 strict_name,
3049 never_nan_nan_name,
3050 include_number_compare_name);
3051 return name_;
3052 }
3053
3054
3055 // -------------------------------------------------------------------------
3056 // StringCharCodeAtGenerator
3057
3058 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
3059 Label flat_string;
3060 Label ascii_string;
3061 Label got_char_code;
3062
3063 // If the receiver is a smi trigger the non-string case.
3064 __ JumpIfSmi(object_, receiver_not_string_);
3065
3066 // Fetch the instance type of the receiver into result register.
3067 __ movq(result_, FieldOperand(object_, HeapObject::kMapOffset));
3068 __ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
3069 // If the receiver is not a string, trigger the non-string case.
3070 __ testb(result_, Immediate(kIsNotStringMask));
3071 __ j(not_zero, receiver_not_string_);
3072
3073 // If the index is non-smi, trigger the non-smi case.
3074 __ JumpIfNotSmi(index_, &index_not_smi_);
3075
3076 // Put smi-tagged index into scratch register.
3077 __ movq(scratch_, index_);
3078 __ bind(&got_smi_index_);
3079
3080 // Check for index out of range.
3081 __ SmiCompare(scratch_, FieldOperand(object_, String::kLengthOffset));
3082 __ j(above_equal, index_out_of_range_);
3083
3084 // We need special handling for non-flat strings.
3085 STATIC_ASSERT(kSeqStringTag == 0);
3086 __ testb(result_, Immediate(kStringRepresentationMask));
3087 __ j(zero, &flat_string);
3088
3089 // Handle non-flat strings.
3090 __ testb(result_, Immediate(kIsConsStringMask));
3091 __ j(zero, &call_runtime_);
3092
3093 // ConsString.
3094 // Check whether the right hand side is the empty string (i.e. if
3095 // this is really a flat string in a cons string). If that is not
3096 // the case we would rather go to the runtime system now to flatten
3097 // the string.
3098 __ CompareRoot(FieldOperand(object_, ConsString::kSecondOffset),
3099 Heap::kEmptyStringRootIndex);
3100 __ j(not_equal, &call_runtime_);
3101 // Get the first of the two strings and load its instance type.
3102 __ movq(object_, FieldOperand(object_, ConsString::kFirstOffset));
3103 __ movq(result_, FieldOperand(object_, HeapObject::kMapOffset));
3104 __ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
3105 // If the first cons component is also non-flat, then go to runtime.
3106 STATIC_ASSERT(kSeqStringTag == 0);
3107 __ testb(result_, Immediate(kStringRepresentationMask));
3108 __ j(not_zero, &call_runtime_);
3109
3110 // Check for 1-byte or 2-byte string.
3111 __ bind(&flat_string);
3112 STATIC_ASSERT(kAsciiStringTag != 0);
3113 __ testb(result_, Immediate(kStringEncodingMask));
3114 __ j(not_zero, &ascii_string);
3115
3116 // 2-byte string.
3117 // Load the 2-byte character code into the result register.
3118 __ SmiToInteger32(scratch_, scratch_);
3119 __ movzxwl(result_, FieldOperand(object_,
3120 scratch_, times_2,
3121 SeqTwoByteString::kHeaderSize));
3122 __ jmp(&got_char_code);
3123
3124 // ASCII string.
3125 // Load the byte into the result register.
3126 __ bind(&ascii_string);
3127 __ SmiToInteger32(scratch_, scratch_);
3128 __ movzxbl(result_, FieldOperand(object_,
3129 scratch_, times_1,
3130 SeqAsciiString::kHeaderSize));
3131 __ bind(&got_char_code);
3132 __ Integer32ToSmi(result_, result_);
3133 __ bind(&exit_);
3134 }
3135
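// Illustrative C++ sketch (not part of the stub sources) of the fast path
// above: unwrap a cons string whose second half is empty, bail out for any
// other non-flat shape, then load a one byte or two byte character. The
// Str type and its fields are hypothetical stand-ins for the heap layout.
#include <cstdint>
#include <optional>
#include <string>

struct Str {
  std::u16string flat;               // contents when the node is sequential
  const Str* cons_first = nullptr;   // non-null when this is a cons string
  const Str* cons_second = nullptr;  // empty second half => flattenable
};

static std::optional<uint16_t> CharCodeAtFast(const Str* s, size_t index) {
  if (s->cons_first != nullptr) {
    if (!s->cons_second->flat.empty()) return std::nullopt;  // call_runtime_
    s = s->cons_first;                                   // use first component
    if (s->cons_first != nullptr) return std::nullopt;   // still non-flat
  }
  if (index >= s->flat.size()) return std::nullopt;      // index_out_of_range_
  return s->flat[index];  // movzxbl (ascii) / movzxwl (two byte) above
}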
3136
3137 void StringCharCodeAtGenerator::GenerateSlow(
3138 MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
3139 __ Abort("Unexpected fallthrough to CharCodeAt slow case");
3140
3141 // Index is not a smi.
3142 __ bind(&index_not_smi_);
3143 // If index is a heap number, try converting it to an integer.
3144 __ CheckMap(index_, Factory::heap_number_map(), index_not_number_, true);
3145 call_helper.BeforeCall(masm);
3146 __ push(object_);
3147 __ push(index_);
3148 __ push(index_); // Consumed by runtime conversion function.
3149 if (index_flags_ == STRING_INDEX_IS_NUMBER) {
3150 __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
3151 } else {
3152 ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
3153 // NumberToSmi discards numbers that are not exact integers.
3154 __ CallRuntime(Runtime::kNumberToSmi, 1);
3155 }
3156 if (!scratch_.is(rax)) {
3157 // Save the conversion result before the pop instructions below
3158 // have a chance to overwrite it.
3159 __ movq(scratch_, rax);
3160 }
3161 __ pop(index_);
3162 __ pop(object_);
3163 // Reload the instance type.
3164 __ movq(result_, FieldOperand(object_, HeapObject::kMapOffset));
3165 __ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
3166 call_helper.AfterCall(masm);
3167 // If index is still not a smi, it must be out of range.
3168 __ JumpIfNotSmi(scratch_, index_out_of_range_);
3169 // Otherwise, return to the fast path.
3170 __ jmp(&got_smi_index_);
3171
3172 // Call runtime. We get here when the receiver is a string and the
3173 // index is a number, but the code for getting the actual character
3174 // is too complex (e.g., when the string needs to be flattened).
3175 __ bind(&call_runtime_);
3176 call_helper.BeforeCall(masm);
3177 __ push(object_);
3178 __ push(index_);
3179 __ CallRuntime(Runtime::kStringCharCodeAt, 2);
3180 if (!result_.is(rax)) {
3181 __ movq(result_, rax);
3182 }
3183 call_helper.AfterCall(masm);
3184 __ jmp(&exit_);
3185
3186 __ Abort("Unexpected fallthrough from CharCodeAt slow case");
3187 }
3188
3189
3190 // -------------------------------------------------------------------------
3191 // StringCharFromCodeGenerator
3192
3193 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
3194 // Fast case of Heap::LookupSingleCharacterStringFromCode.
3195 __ JumpIfNotSmi(code_, &slow_case_);
3196 __ SmiCompare(code_, Smi::FromInt(String::kMaxAsciiCharCode));
3197 __ j(above, &slow_case_);
3198
3199 __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
3200 SmiIndex index = masm->SmiToIndex(kScratchRegister, code_, kPointerSizeLog2);
3201 __ movq(result_, FieldOperand(result_, index.reg, index.scale,
3202 FixedArray::kHeaderSize));
3203 __ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
3204 __ j(equal, &slow_case_);
3205 __ bind(&exit_);
3206 }
3207
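// Illustrative sketch (not part of the stub sources) of the fast path
// above: only smi codes up to kMaxAsciiCharCode hit the single character
// string cache; larger codes and cache misses (the undefined check) go to
// the slow case. The cache and the size constant here are hypothetical
// stand-ins.
#include <string>

static const int kMaxAsciiCharCodeSketch = 0x7f;  // assumed cache size
static std::string single_char_cache[kMaxAsciiCharCodeSketch + 1];

static bool CharFromCodeFast(int code, std::string* result) {
  if (code < 0 || code > kMaxAsciiCharCodeSketch) return false;  // slow_case_
  if (single_char_cache[code].empty()) return false;  // undefined => slow
  *result = single_char_cache[code];
  return true;
}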
3208
3209 void StringCharFromCodeGenerator::GenerateSlow(
3210 MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
3211 __ Abort("Unexpected fallthrough to CharFromCode slow case");
3212
3213 __ bind(&slow_case_);
3214 call_helper.BeforeCall(masm);
3215 __ push(code_);
3216 __ CallRuntime(Runtime::kCharFromCode, 1);
3217 if (!result_.is(rax)) {
3218 __ movq(result_, rax);
3219 }
3220 call_helper.AfterCall(masm);
3221 __ jmp(&exit_);
3222
3223 __ Abort("Unexpected fallthrough from CharFromCode slow case");
3224 }
3225
3226
3227 // -------------------------------------------------------------------------
3228 // StringCharAtGenerator
3229
3230 void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
3231 char_code_at_generator_.GenerateFast(masm);
3232 char_from_code_generator_.GenerateFast(masm);
3233 }
3234
3235
3236 void StringCharAtGenerator::GenerateSlow(
3237 MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
3238 char_code_at_generator_.GenerateSlow(masm, call_helper);
3239 char_from_code_generator_.GenerateSlow(masm, call_helper);
3240 }
3241
3242
3243 void StringAddStub::Generate(MacroAssembler* masm) {
3244 Label string_add_runtime;
3245
3246 // Load the two arguments.
3247 __ movq(rax, Operand(rsp, 2 * kPointerSize)); // First argument.
3248 __ movq(rdx, Operand(rsp, 1 * kPointerSize)); // Second argument.
3249
3250 // Make sure that both arguments are strings if not known in advance.
3251 if (string_check_) {
3252 Condition is_smi;
3253 is_smi = masm->CheckSmi(rax);
3254 __ j(is_smi, &string_add_runtime);
3255 __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, r8);
3256 __ j(above_equal, &string_add_runtime);
3257
3258 // First argument is a string; test the second.
3259 is_smi = masm->CheckSmi(rdx);
3260 __ j(is_smi, &string_add_runtime);
3261 __ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, r9);
3262 __ j(above_equal, &string_add_runtime);
3263 }
3264
3265 // Both arguments are strings.
3266 // rax: first string
3267 // rdx: second string
3268 // Check if either of the strings is empty. In that case return the other.
3269 Label second_not_zero_length, both_not_zero_length;
3270 __ movq(rcx, FieldOperand(rdx, String::kLengthOffset));
3271 __ SmiTest(rcx);
3272 __ j(not_zero, &second_not_zero_length);
3273 // Second string is empty, result is first string which is already in rax.
3274 __ IncrementCounter(&Counters::string_add_native, 1);
3275 __ ret(2 * kPointerSize);
3276 __ bind(&second_not_zero_length);
3277 __ movq(rbx, FieldOperand(rax, String::kLengthOffset));
3278 __ SmiTest(rbx);
3279 __ j(not_zero, &both_not_zero_length);
3280 // First string is empty, result is second string which is in rdx.
3281 __ movq(rax, rdx);
3282 __ IncrementCounter(&Counters::string_add_native, 1);
3283 __ ret(2 * kPointerSize);
3284
3285 // Both strings are non-empty.
3286 // rax: first string
3287 // rbx: length of first string
3288 // rcx: length of second string
3289 // rdx: second string
3290 // r8: map of first string if string check was performed above
3291 // r9: map of second string if string check was performed above
3292 Label string_add_flat_result, longer_than_two;
3293 __ bind(&both_not_zero_length);
3294
3295 // If the arguments were known to be strings, the maps were not loaded
3296 // into r8 and r9 by the code above.
3297 if (!string_check_) {
3298 __ movq(r8, FieldOperand(rax, HeapObject::kMapOffset));
3299 __ movq(r9, FieldOperand(rdx, HeapObject::kMapOffset));
3300 }
3301 // Get the instance types of the two strings as they will be needed soon.
3302 __ movzxbl(r8, FieldOperand(r8, Map::kInstanceTypeOffset));
3303 __ movzxbl(r9, FieldOperand(r9, Map::kInstanceTypeOffset));
3304
3305 // Look at the length of the result of adding the two strings.
3306 STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue / 2);
3307 __ SmiAdd(rbx, rbx, rcx, NULL);
3308 // Handle adding two one character strings specially: the two character
3309 // result may already be present in the symbol table.
3310 __ SmiCompare(rbx, Smi::FromInt(2));
3311 __ j(not_equal, &longer_than_two);
3312
3313 // Check that both strings are non-external ascii strings.
3314 __ JumpIfBothInstanceTypesAreNotSequentialAscii(r8, r9, rbx, rcx,
3315 &string_add_runtime);
3316
3317 // Get the two characters forming the new string.
3318 __ movzxbq(rbx, FieldOperand(rax, SeqAsciiString::kHeaderSize));
3319 __ movzxbq(rcx, FieldOperand(rdx, SeqAsciiString::kHeaderSize));
3320
3321 // Try to look up the two character string in the symbol table. If it is
3322 // not found, just allocate a new one.
3323 Label make_two_character_string, make_flat_ascii_string;
3324 StringHelper::GenerateTwoCharacterSymbolTableProbe(
3325 masm, rbx, rcx, r14, r11, rdi, r12, &make_two_character_string);
3326 __ IncrementCounter(&Counters::string_add_native, 1);
3327 __ ret(2 * kPointerSize);
3328
3329 __ bind(&make_two_character_string);
3330 __ Set(rbx, 2);
3331 __ jmp(&make_flat_ascii_string);
3332
3333 __ bind(&longer_than_two);
3334 // Check if resulting string will be flat.
3335 __ SmiCompare(rbx, Smi::FromInt(String::kMinNonFlatLength));
3336 __ j(below, &string_add_flat_result);
3337 // Handle exceptionally long strings in the runtime system.
3338 STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
3339 __ SmiCompare(rbx, Smi::FromInt(String::kMaxLength));
3340 __ j(above, &string_add_runtime);
3341
3342 // If result is not supposed to be flat, allocate a cons string object. If
3343 // both strings are ascii the result is an ascii cons string.
3344 // rax: first string
3345 // rbx: length of resulting flat string
3346 // rdx: second string
3347 // r8: instance type of first string
3348 // r9: instance type of second string
3349 Label non_ascii, allocated, ascii_data;
3350 __ movl(rcx, r8);
3351 __ and_(rcx, r9);
3352 STATIC_ASSERT(kStringEncodingMask == kAsciiStringTag);
3353 __ testl(rcx, Immediate(kAsciiStringTag));
3354 __ j(zero, &non_ascii);
3355 __ bind(&ascii_data);
3356 // Allocate an ascii cons string.
3357 __ AllocateAsciiConsString(rcx, rdi, no_reg, &string_add_runtime);
3358 __ bind(&allocated);
3359 // Fill the fields of the cons string.
3360 __ movq(FieldOperand(rcx, ConsString::kLengthOffset), rbx);
3361 __ movq(FieldOperand(rcx, ConsString::kHashFieldOffset),
3362 Immediate(String::kEmptyHashField));
3363 __ movq(FieldOperand(rcx, ConsString::kFirstOffset), rax);
3364 __ movq(FieldOperand(rcx, ConsString::kSecondOffset), rdx);
3365 __ movq(rax, rcx);
3366 __ IncrementCounter(&Counters::string_add_native, 1);
3367 __ ret(2 * kPointerSize);
3368 __ bind(&non_ascii);
3369 // At least one of the strings is two-byte. Check whether it happens
3370 // to contain only ascii characters.
3371 // rcx: first instance type AND second instance type.
3372 // r8: first instance type.
3373 // r9: second instance type.
3374 __ testb(rcx, Immediate(kAsciiDataHintMask));
3375 __ j(not_zero, &ascii_data);
3376 __ xor_(r8, r9);
3377 STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
3378 __ andb(r8, Immediate(kAsciiStringTag | kAsciiDataHintTag));
3379 __ cmpb(r8, Immediate(kAsciiStringTag | kAsciiDataHintTag));
3380 __ j(equal, &ascii_data);
3381 // Allocate a two byte cons string.
3382 __ AllocateConsString(rcx, rdi, no_reg, &string_add_runtime);
3383 __ jmp(&allocated);
3384
3385 // Handle creating a flat result. First check that both strings are not
3386 // external strings.
3387 // rax: first string
3388 // rbx: length of resulting flat string as smi
3389 // rdx: second string
3390 // r8: instance type of first string
3391 // r9: instance type of second string
3392 __ bind(&string_add_flat_result);
3393 __ SmiToInteger32(rbx, rbx);
3394 __ movl(rcx, r8);
3395 __ and_(rcx, Immediate(kStringRepresentationMask));
3396 __ cmpl(rcx, Immediate(kExternalStringTag));
3397 __ j(equal, &string_add_runtime);
3398 __ movl(rcx, r9);
3399 __ and_(rcx, Immediate(kStringRepresentationMask));
3400 __ cmpl(rcx, Immediate(kExternalStringTag));
3401 __ j(equal, &string_add_runtime);
3402 // Now check if both strings are ascii strings.
3403 // rax: first string
3404 // rbx: length of resulting flat string
3405 // rdx: second string
3406 // r8: instance type of first string
3407 // r9: instance type of second string
3408 Label non_ascii_string_add_flat_result;
3409 STATIC_ASSERT(kStringEncodingMask == kAsciiStringTag);
3410 __ testl(r8, Immediate(kAsciiStringTag));
3411 __ j(zero, &non_ascii_string_add_flat_result);
3412 __ testl(r9, Immediate(kAsciiStringTag));
3413 __ j(zero, &string_add_runtime);
3414
3415 __ bind(&make_flat_ascii_string);
3416 // Both strings are ascii strings. As they are short they are both flat.
3417 __ AllocateAsciiString(rcx, rbx, rdi, r14, r11, &string_add_runtime);
3418 // rcx: result string
3419 __ movq(rbx, rcx);
3420 // Locate first character of result.
3421 __ addq(rcx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
3422 // Locate first character of first argument
3423 __ SmiToInteger32(rdi, FieldOperand(rax, String::kLengthOffset));
3424 __ addq(rax, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
3425 // rax: first char of first argument
3426 // rbx: result string
3427 // rcx: first character of result
3428 // rdx: second string
3429 // rdi: length of first argument
3430 StringHelper::GenerateCopyCharacters(masm, rcx, rax, rdi, true);
3431 // Locate first character of second argument.
3432 __ SmiToInteger32(rdi, FieldOperand(rdx, String::kLengthOffset));
3433 __ addq(rdx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
3434 // rbx: result string
3435 // rcx: next character of result
3436 // rdx: first char of second argument
3437 // rdi: length of second argument
3438 StringHelper::GenerateCopyCharacters(masm, rcx, rdx, rdi, true);
3439 __ movq(rax, rbx);
3440 __ IncrementCounter(&Counters::string_add_native, 1);
3441 __ ret(2 * kPointerSize);
3442
3443 // Handle creating a flat two byte result.
3444 // rax: first string - known to be two byte
3445 // rbx: length of resulting flat string
3446 // rdx: second string
3447 // r8: instance type of first string
3448 // r9: instance type of second string
3449 __ bind(&non_ascii_string_add_flat_result);
3450 __ and_(r9, Immediate(kAsciiStringTag));
3451 __ j(not_zero, &string_add_runtime);
3452 // Both strings are two byte strings. As they are short they are both
3453 // flat.
3454 __ AllocateTwoByteString(rcx, rbx, rdi, r14, r11, &string_add_runtime);
3455 // rcx: result string
3456 __ movq(rbx, rcx);
3457 // Locate first character of result.
3458 __ addq(rcx, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
3459 // Locate first character of first argument.
3460 __ SmiToInteger32(rdi, FieldOperand(rax, String::kLengthOffset));
3461 __ addq(rax, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
3462 // rax: first char of first argument
3463 // rbx: result string
3464 // rcx: first character of result
3465 // rdx: second argument
3466 // rdi: length of first argument
3467 StringHelper::GenerateCopyCharacters(masm, rcx, rax, rdi, false);
3468 // Locate first character of second argument.
3469 __ SmiToInteger32(rdi, FieldOperand(rdx, String::kLengthOffset));
3470 __ addq(rdx, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
3471 // rbx: result string
3472 // rcx: next character of result
3473 // rdx: first char of second argument
3474 // rdi: length of second argument
3475 StringHelper::GenerateCopyCharacters(masm, rcx, rdx, rdi, false);
3476 __ movq(rax, rbx);
3477 __ IncrementCounter(&Counters::string_add_native, 1);
3478 __ ret(2 * kPointerSize);
3479
3480 // Just jump to runtime to add the two strings.
3481 __ bind(&string_add_runtime);
3482 __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
3483 }
3484
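// Illustrative sketch (not part of the stub sources) of the dispatch in
// StringAddStub::Generate: empty operands return the other string, a
// length 2 result probes the symbol table, short results are flattened
// eagerly, and longer results become cons strings. The threshold constant
// is an assumption standing in for String::kMinNonFlatLength.
#include <cstddef>
#include <string>

static const size_t kMinNonFlatLengthSketch = 13;  // assumed threshold

enum AddPath { kReturnFirst, kReturnSecond, kTwoCharSymbol, kFlat, kCons };

static AddPath ClassifyStringAdd(const std::string& a, const std::string& b) {
  if (b.empty()) return kReturnFirst;        // second string empty
  if (a.empty()) return kReturnSecond;       // first string empty
  size_t length = a.size() + b.size();
  if (length == 2) return kTwoCharSymbol;    // symbol table probe
  if (length < kMinNonFlatLengthSketch) return kFlat;
  return kCons;                              // allocate a cons string
}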
3485
3486 void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
3487 Register dest,
3488 Register src,
3489 Register count,
3490 bool ascii) {
3491 Label loop;
3492 __ bind(&loop);
3493 // This loop just copies one character at a time, as it is only used for very
3494 // short strings.
3495 if (ascii) {
3496 __ movb(kScratchRegister, Operand(src, 0));
3497 __ movb(Operand(dest, 0), kScratchRegister);
3498 __ incq(src);
3499 __ incq(dest);
3500 } else {
3501 __ movzxwl(kScratchRegister, Operand(src, 0));
3502 __ movw(Operand(dest, 0), kScratchRegister);
3503 __ addq(src, Immediate(2));
3504 __ addq(dest, Immediate(2));
3505 }
3506 __ decl(count);
3507 __ j(not_zero, &loop);
3508 }
3509
3510
3511 void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
3512 Register dest,
3513 Register src,
3514 Register count,
3515 bool ascii) {
3516 // Copy characters using rep movs of quadwords (eight bytes at a time).
3517 // Copy the remaining characters one byte at a time after running
3518 // rep movs.
3519 // Count is positive int32, dest and src are character pointers.
3520 ASSERT(dest.is(rdi)); // rep movs destination
3521 ASSERT(src.is(rsi)); // rep movs source
3522 ASSERT(count.is(rcx)); // rep movs count
3523
3524 // Nothing to do for zero characters.
3525 Label done;
3526 __ testl(count, count);
3527 __ j(zero, &done);
3528
3529 // Make count the number of bytes to copy.
3530 if (!ascii) {
3531 STATIC_ASSERT(2 == sizeof(uc16));
3532 __ addl(count, count);
3533 }
3534
3535 // Don't enter the rep movs if there are fewer than 8 bytes to copy.
3536 Label last_bytes;
3537 __ testl(count, Immediate(~7));
3538 __ j(zero, &last_bytes);
3539
3540 // Copy from rsi to rdi using the rep movs instruction.
3541 __ movl(kScratchRegister, count);
3542 __ shr(count, Immediate(3)); // Number of quadwords to copy.
3543 __ repmovsq();
3544
3545 // Find number of bytes left.
3546 __ movl(count, kScratchRegister);
3547 __ and_(count, Immediate(7));
3548
3549 // Check if there are more bytes to copy.
3550 __ bind(&last_bytes);
3551 __ testl(count, count);
3552 __ j(zero, &done);
3553
3554 // Copy remaining characters.
3555 Label loop;
3556 __ bind(&loop);
3557 __ movb(kScratchRegister, Operand(src, 0));
3558 __ movb(Operand(dest, 0), kScratchRegister);
3559 __ incq(src);
3560 __ incq(dest);
3561 __ decl(count);
3562 __ j(not_zero, &loop);
3563
3564 __ bind(&done);
3565 }
3566
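// Illustrative C++ equivalent (not part of the stub sources) of the copy
// strategy above: move eight byte quadwords first (the repmovsq), then
// finish the sub-quadword tail one byte at a time.
#include <cstddef>
#include <cstring>

static void CopyBytesChunked(char* dest, const char* src, size_t count) {
  size_t quads = count >> 3;                    // shr count, 3
  std::memcpy(dest, src, quads * 8);            // rep movsq
  for (size_t i = quads * 8; i < count; i++) {  // the last_bytes loop
    dest[i] = src[i];
  }
}
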
3567 void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
3568 Register c1,
3569 Register c2,
3570 Register scratch1,
3571 Register scratch2,
3572 Register scratch3,
3573 Register scratch4,
3574 Label* not_found) {
3575 // Register scratch3 is the general scratch register in this function.
3576 Register scratch = scratch3;
3577
3578 // Make sure the characters are not both digits, as such strings have a
3579 // different hash algorithm. Don't try to look these up in the symbol table.
3580 Label not_array_index;
3581 __ leal(scratch, Operand(c1, -'0'));
3582 __ cmpl(scratch, Immediate(static_cast<int>('9' - '0')));
3583 __ j(above, &not_array_index);
3584 __ leal(scratch, Operand(c2, -'0'));
3585 __ cmpl(scratch, Immediate(static_cast<int>('9' - '0')));
3586 __ j(below_equal, not_found);
3587
3588 __ bind(&not_array_index);
3589 // Calculate the two character string hash.
3590 Register hash = scratch1;
3591 GenerateHashInit(masm, hash, c1, scratch);
3592 GenerateHashAddCharacter(masm, hash, c2, scratch);
3593 GenerateHashGetHash(masm, hash, scratch);
3594
3595 // Collect the two characters in a register.
3596 Register chars = c1;
3597 __ shl(c2, Immediate(kBitsPerByte));
3598 __ orl(chars, c2);
3599
3600 // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
3601 // hash: hash of two character string.
3602
3603 // Load the symbol table.
3604 Register symbol_table = c2;
3605 __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex);
3606
3607 // Calculate capacity mask from the symbol table capacity.
3608 Register mask = scratch2;
3609 __ SmiToInteger32(mask,
3610 FieldOperand(symbol_table, SymbolTable::kCapacityOffset));
3611 __ decl(mask);
3612
3613 Register undefined = scratch4;
3614 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
3615
3616 // Registers
3617 // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
3618 // hash: hash of two character string (32-bit int)
3619 // symbol_table: symbol table
3620 // mask: capacity mask (32-bit int)
3621 // undefined: undefined value
3622 // scratch: -
3623
3624 // Perform a number of probes in the symbol table.
3625 static const int kProbes = 4;
3626 Label found_in_symbol_table;
3627 Label next_probe[kProbes];
3628 for (int i = 0; i < kProbes; i++) {
3629 // Calculate entry in symbol table.
3630 __ movl(scratch, hash);
3631 if (i > 0) {
3632 __ addl(scratch, Immediate(SymbolTable::GetProbeOffset(i)));
3633 }
3634 __ andl(scratch, mask);
3635
3636 // Load the entry from the symbol table.
3637 Register candidate = scratch; // Scratch register contains candidate.
3638 STATIC_ASSERT(SymbolTable::kEntrySize == 1);
3639 __ movq(candidate,
3640 FieldOperand(symbol_table,
3641 scratch,
3642 times_pointer_size,
3643 SymbolTable::kElementsStartOffset));
3644
3645 // If entry is undefined no string with this hash can be found.
3646 __ cmpq(candidate, undefined);
3647 __ j(equal, not_found);
3648
3649 // If length is not 2 the string is not a candidate.
3650 __ SmiCompare(FieldOperand(candidate, String::kLengthOffset),
3651 Smi::FromInt(2));
3652 __ j(not_equal, &next_probe[i]);
3653
3654 // We use kScratchRegister as a temporary register on the assumption
3655 // that JumpIfInstanceTypeIsNotSequentialAscii does not use it implicitly.
3656 Register temp = kScratchRegister;
3657
3658 // Check that the candidate is a non-external ascii string.
3659 __ movq(temp, FieldOperand(candidate, HeapObject::kMapOffset));
3660 __ movzxbl(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
3661 __ JumpIfInstanceTypeIsNotSequentialAscii(
3662 temp, temp, &next_probe[i]);
3663
3664 // Check if the two characters match.
3665 __ movl(temp, FieldOperand(candidate, SeqAsciiString::kHeaderSize));
3666 __ andl(temp, Immediate(0x0000ffff));
3667 __ cmpl(chars, temp);
3668 __ j(equal, &found_in_symbol_table);
3669 __ bind(&next_probe[i]);
3670 }
3671
3672 // No matching 2 character string found by probing.
3673 __ jmp(not_found);
3674
3675 // Scratch register contains result when we fall through to here.
3676 Register result = scratch;
3677 __ bind(&found_in_symbol_table);
3678 if (!result.is(rax)) {
3679 __ movq(rax, result);
3680 }
3681 }
3682
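// Illustrative sketch (not part of the stub sources) of the probe loop
// above: at most kProbes open addressing lookups keyed by the two
// character hash. The quadratic probe step is an assumption standing in
// for SymbolTable::GetProbeOffset; an empty slot models undefined.
#include <cstdint>
#include <string>
#include <vector>

static uint32_t ProbeOffsetSketch(uint32_t n) { return (n + n * n) >> 1; }

static const std::string* ProbeTwoCharSymbol(
    const std::vector<std::string>& table, uint32_t hash, uint32_t mask,
    const std::string& two_chars) {
  const int kProbes = 4;
  for (int i = 0; i < kProbes; i++) {
    uint32_t entry = (hash + ProbeOffsetSketch(i)) & mask;
    const std::string& candidate = table[entry];
    if (candidate.empty()) return nullptr;  // undefined => not_found
    if (candidate.size() == 2 && candidate == two_chars) return &candidate;
  }
  return nullptr;  // all probes missed => not_found
}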
3683
3684 void StringHelper::GenerateHashInit(MacroAssembler* masm,
3685 Register hash,
3686 Register character,
3687 Register scratch) {
3688 // hash = character + (character << 10);
3689 __ movl(hash, character);
3690 __ shll(hash, Immediate(10));
3691 __ addl(hash, character);
3692 // hash ^= hash >> 6;
3693 __ movl(scratch, hash);
3694 __ sarl(scratch, Immediate(6));
3695 __ xorl(hash, scratch);
3696 }
3697
3698
3699 void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
3700 Register hash,
3701 Register character,
3702 Register scratch) {
3703 // hash += character;
3704 __ addl(hash, character);
3705 // hash += hash << 10;
3706 __ movl(scratch, hash);
3707 __ shll(scratch, Immediate(10));
3708 __ addl(hash, scratch);
3709 // hash ^= hash >> 6;
3710 __ movl(scratch, hash);
3711 __ sarl(scratch, Immediate(6));
3712 __ xorl(hash, scratch);
3713 }
3714
3715
3716 void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
3717 Register hash,
3718 Register scratch) {
3719 // hash += hash << 3;
3720 __ leal(hash, Operand(hash, hash, times_8, 0));
3721 // hash ^= hash >> 11;
3722 __ movl(scratch, hash);
3723 __ sarl(scratch, Immediate(11));
3724 __ xorl(hash, scratch);
3725 // hash += hash << 15;
3726 __ movl(scratch, hash);
3727 __ shll(scratch, Immediate(15));
3728 __ addl(hash, scratch);
3729
3730 // if (hash == 0) hash = 27;
3731 Label hash_not_zero;
3732 __ j(not_zero, &hash_not_zero);
3733 __ movl(hash, Immediate(27));
3734 __ bind(&hash_not_zero);
3735 }
3736
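// Illustrative C++ rendering (not part of the stub sources) of the three
// hash stages above. Shifts here are logical shifts on uint32_t; the stub
// uses sarl for the xor stages, so this is a sketch rather than a
// bit-exact port.
#include <cstdint>

static uint32_t HashInit(uint32_t character) {
  uint32_t hash = character + (character << 10);
  hash ^= hash >> 6;
  return hash;
}

static uint32_t HashAddCharacter(uint32_t hash, uint32_t character) {
  hash += character;
  hash += hash << 10;
  hash ^= hash >> 6;
  return hash;
}

static uint32_t HashGetHash(uint32_t hash) {
  hash += hash << 3;
  hash ^= hash >> 11;
  hash += hash << 15;
  if (hash == 0) hash = 27;  // keep zero free as a sentinel
  return hash;
}
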
3737 void SubStringStub::Generate(MacroAssembler* masm) {
3738 Label runtime;
3739
3740 // Stack frame on entry.
3741 // rsp[0]: return address
3742 // rsp[8]: to
3743 // rsp[16]: from
3744 // rsp[24]: string
3745
3746 const int kToOffset = 1 * kPointerSize;
3747 const int kFromOffset = kToOffset + kPointerSize;
3748 const int kStringOffset = kFromOffset + kPointerSize;
3749 const int kArgumentsSize = (kStringOffset + kPointerSize) - kToOffset;
3750
3751 // Make sure first argument is a string.
3752 __ movq(rax, Operand(rsp, kStringOffset));
3753 STATIC_ASSERT(kSmiTag == 0);
3754 __ testl(rax, Immediate(kSmiTagMask));
3755 __ j(zero, &runtime);
3756 Condition is_string = masm->IsObjectStringType(rax, rbx, rbx);
3757 __ j(NegateCondition(is_string), &runtime);
3758
3759 // rax: string
3760 // rbx: instance type
3761 // Calculate length of sub string using the smi values.
3762 Label result_longer_than_two;
3763 __ movq(rcx, Operand(rsp, kToOffset));
3764 __ movq(rdx, Operand(rsp, kFromOffset));
3765 __ JumpIfNotBothPositiveSmi(rcx, rdx, &runtime);
3766
3767 __ SmiSub(rcx, rcx, rdx, NULL); // Overflow doesn't happen.
3768 __ cmpq(FieldOperand(rax, String::kLengthOffset), rcx);
3769 Label return_rax;
3770 __ j(equal, &return_rax);
3771 // Special handling of sub-strings of length 1 and 2. One character strings
3772 // are handled in the runtime system (looked up in the single character
3773 // cache). Two character strings are looked up in the symbol table.
3774 __ SmiToInteger32(rcx, rcx);
3775 __ cmpl(rcx, Immediate(2));
3776 __ j(greater, &result_longer_than_two);
3777 __ j(less, &runtime);
3778
3779 // Sub string of length 2 requested.
3780 // rax: string
3781 // rbx: instance type
3782 // rcx: sub string length (value is 2)
3783 // rdx: from index (smi)
3784 __ JumpIfInstanceTypeIsNotSequentialAscii(rbx, rbx, &runtime);
3785
3786 // Get the two characters forming the sub string.
3787 __ SmiToInteger32(rdx, rdx); // From index is no longer smi.
3788 __ movzxbq(rbx, FieldOperand(rax, rdx, times_1, SeqAsciiString::kHeaderSize));
3789 __ movzxbq(rcx,
3790 FieldOperand(rax, rdx, times_1, SeqAsciiString::kHeaderSize + 1));
3791
3792 // Try to look up the two character string in the symbol table.
3793 Label make_two_character_string;
3794 StringHelper::GenerateTwoCharacterSymbolTableProbe(
3795 masm, rbx, rcx, rax, rdx, rdi, r14, &make_two_character_string);
3796 __ ret(3 * kPointerSize);
3797
3798 __ bind(&make_two_character_string);
3799 // Set up registers for allocating the two character string.
3800 __ movq(rax, Operand(rsp, kStringOffset));
3801 __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
3802 __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
3803 __ Set(rcx, 2);
3804
3805 __ bind(&result_longer_than_two);
3806
3807 // rax: string
3808 // rbx: instance type
3809 // rcx: result string length
3810 // Check for flat ascii string
3811 Label non_ascii_flat;
3812 __ JumpIfInstanceTypeIsNotSequentialAscii(rbx, rbx, &non_ascii_flat);
3813
3814 // Allocate the result.
3815 __ AllocateAsciiString(rax, rcx, rbx, rdx, rdi, &runtime);
3816
3817 // rax: result string
3818 // rcx: result string length
3819 __ movq(rdx, rsi); // rsi used by following code.
3820 // Locate first character of result.
3821 __ lea(rdi, FieldOperand(rax, SeqAsciiString::kHeaderSize));
3822 // Load string argument and locate character of sub string start.
3823 __ movq(rsi, Operand(rsp, kStringOffset));
3824 __ movq(rbx, Operand(rsp, kFromOffset));
3825 {
3826 SmiIndex smi_as_index = masm->SmiToIndex(rbx, rbx, times_1);
3827 __ lea(rsi, Operand(rsi, smi_as_index.reg, smi_as_index.scale,
3828 SeqAsciiString::kHeaderSize - kHeapObjectTag));
3829 }
3830
3831 // rax: result string
3832 // rcx: result length
3833 // rdx: original value of rsi
3834 // rdi: first character of result
3835 // rsi: character of sub string start
3836 StringHelper::GenerateCopyCharactersREP(masm, rdi, rsi, rcx, true);
3837 __ movq(rsi, rdx); // Restore rsi.
3838 __ IncrementCounter(&Counters::sub_string_native, 1);
3839 __ ret(kArgumentsSize);
3840
3841 __ bind(&non_ascii_flat);
3842 // rax: string
3843 // rbx: instance type & kStringRepresentationMask | kStringEncodingMask
3844 // rcx: result string length
3845 // Check for sequential two byte string
3846 __ cmpb(rbx, Immediate(kSeqStringTag | kTwoByteStringTag));
3847 __ j(not_equal, &runtime);
3848
3849 // Allocate the result.
3850 __ AllocateTwoByteString(rax, rcx, rbx, rdx, rdi, &runtime);
3851
3852 // rax: result string
3853 // rcx: result string length
3854 __ movq(rdx, rsi); // rsi used by following code.
3855 // Locate first character of result.
3856 __ lea(rdi, FieldOperand(rax, SeqTwoByteString::kHeaderSize));
3857 // Load string argument and locate character of sub string start.
3858 __ movq(rsi, Operand(rsp, kStringOffset));
3859 __ movq(rbx, Operand(rsp, kFromOffset));
3860 {
3861 SmiIndex smi_as_index = masm->SmiToIndex(rbx, rbx, times_2);
3862 __ lea(rsi, Operand(rsi, smi_as_index.reg, smi_as_index.scale,
3863 SeqTwoByteString::kHeaderSize - kHeapObjectTag));
3864 }
3865
3866 // rax: result string
3867 // rcx: result length
3868 // rdx: original value of rsi
3869 // rdi: first character of result
3870 // rsi: character of sub string start
3871 StringHelper::GenerateCopyCharactersREP(masm, rdi, rsi, rcx, false);
3872 __ movq(rsi, rdx); // Restore rsi.
3873
3874 __ bind(&return_rax);
3875 __ IncrementCounter(&Counters::sub_string_native, 1);
3876 __ ret(kArgumentsSize);
3877
3878 // Just jump to runtime to create the sub string.
3879 __ bind(&runtime);
3880 __ TailCallRuntime(Runtime::kSubString, 3, 1);
3881 }
3882
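// Illustrative sketch (not part of the stub sources) of the dispatch in
// SubStringStub::Generate: the full range returns the input unchanged,
// lengths 0 and 1 go to the runtime, length 2 probes the symbol table,
// and longer ranges copy into a freshly allocated sequential string. The
// range guard on the first line is a sketch-only sanity check.
#include <cstddef>
#include <string>

enum SubPath { kSubRuntime, kSubReturnInput, kSubTwoCharProbe, kSubCopyFlat };

static SubPath ClassifySubString(const std::string& s, size_t from, size_t to) {
  if (to < from || to > s.size()) return kSubRuntime;  // sketch-only guard
  size_t length = to - from;
  if (length == s.size()) return kSubReturnInput;  // whole string requested
  if (length < 2) return kSubRuntime;   // single chars use the runtime cache
  if (length == 2) return kSubTwoCharProbe;
  return kSubCopyFlat;
}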
3883
3884 void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
3885 Register left,
3886 Register right,
3887 Register scratch1,
3888 Register scratch2,
3889 Register scratch3,
3890 Register scratch4) {
3891 // Ensure that you can always subtract a string length from a non-negative
3892 // number (e.g. another length).
3893 STATIC_ASSERT(String::kMaxLength < 0x7fffffff);
3894
3895 // Find minimum length and length difference.
3896 __ movq(scratch1, FieldOperand(left, String::kLengthOffset));
3897 __ movq(scratch4, scratch1);
3898 __ SmiSub(scratch4,
3899 scratch4,
3900 FieldOperand(right, String::kLengthOffset),
3901 NULL);
3902 // Register scratch4 now holds left.length - right.length.
3903 const Register length_difference = scratch4;
3904 Label left_shorter;
3905 __ j(less, &left_shorter);
3906 // The right string isn't longer than the left one.
3907 // Get the right string's length by subtracting the (non-negative) difference
3908 // from the left string's length.
3909 __ SmiSub(scratch1, scratch1, length_difference, NULL);
3910 __ bind(&left_shorter);
3911 // Register scratch1 now holds Min(left.length, right.length).
3912 const Register min_length = scratch1;
3913
3914 Label compare_lengths;
3915 // If min-length is zero, go directly to comparing lengths.
3916 __ SmiTest(min_length);
3917 __ j(zero, &compare_lengths);
3918
3919 __ SmiToInteger32(min_length, min_length);
3920
3921 // Registers scratch2 and scratch3 are free.
3922 Label result_not_equal;
3923 Label loop;
3924 {
3925 // Check characters 0 .. min_length - 1 in a loop.
3926 // Use scratch3 as loop index, min_length as limit and scratch2
3927 // for computation.
3928 const Register index = scratch3;
3929 __ movl(index, Immediate(0)); // Index into strings.
3930 __ bind(&loop);
3931 // Compare characters.
3932 // TODO(lrn): Could we load more than one character at a time?
3933 __ movb(scratch2, FieldOperand(left,
3934 index,
3935 times_1,
3936 SeqAsciiString::kHeaderSize));
3937 // Increment index and use -1 modifier on next load to give
3938 // the previous load extra time to complete.
3939 __ addl(index, Immediate(1));
3940 __ cmpb(scratch2, FieldOperand(right,
3941 index,
3942 times_1,
3943 SeqAsciiString::kHeaderSize - 1));
3944 __ j(not_equal, &result_not_equal);
3945 __ cmpl(index, min_length);
3946 __ j(not_equal, &loop);
3947 }
3948 // Completed loop without finding different characters.
3949 // Compare lengths (precomputed).
3950 __ bind(&compare_lengths);
3951 __ SmiTest(length_difference);
3952 __ j(not_zero, &result_not_equal);
3953
3954 // Result is EQUAL.
3955 __ Move(rax, Smi::FromInt(EQUAL));
3956 __ ret(0);
3957
3958 Label result_greater;
3959 __ bind(&result_not_equal);
3960 // Unequal comparison of left to right, either character or length.
3961 __ j(greater, &result_greater);
3962
3963 // Result is LESS.
3964 __ Move(rax, Smi::FromInt(LESS));
3965 __ ret(0);
3966
3967 // Result is GREATER.
3968 __ bind(&result_greater);
3969 __ Move(rax, Smi::FromInt(GREATER));
3970 __ ret(0);
3971 }
3972
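// Illustrative C++ equivalent (not part of the stub sources) of the
// comparison above: compare the first min(len_left, len_right) bytes,
// then fall back to comparing lengths. memcmp on ascii bytes agrees with
// the byte loop in the generated code.
#include <cstring>
#include <string>

static int CompareFlatAscii(const std::string& left, const std::string& right) {
  size_t min_length = left.size() < right.size() ? left.size() : right.size();
  int diff = std::memcmp(left.data(), right.data(), min_length);
  if (diff != 0) return diff < 0 ? -1 : 1;     // LESS / GREATER
  if (left.size() == right.size()) return 0;   // EQUAL
  return left.size() < right.size() ? -1 : 1;  // shorter string is LESS
}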
3973
3974 void StringCompareStub::Generate(MacroAssembler* masm) {
3975 Label runtime;
3976
3977 // Stack frame on entry.
3978 // rsp[0]: return address
3979 // rsp[8]: right string
3980 // rsp[16]: left string
3981
3982 __ movq(rdx, Operand(rsp, 2 * kPointerSize)); // left
3983 __ movq(rax, Operand(rsp, 1 * kPointerSize)); // right
3984
3985 // Check for identity.
3986 Label not_same;
3987 __ cmpq(rdx, rax);
3988 __ j(not_equal, &not_same);
3989 __ Move(rax, Smi::FromInt(EQUAL));
3990 __ IncrementCounter(&Counters::string_compare_native, 1);
3991 __ ret(2 * kPointerSize);
3992
3993 __ bind(&not_same);
3994
3995 // Check that both are sequential ASCII strings.
3996 __ JumpIfNotBothSequentialAsciiStrings(rdx, rax, rcx, rbx, &runtime);
3997
3998 // Inline comparison of ascii strings.
3999 __ IncrementCounter(&Counters::string_compare_native, 1);
4000 // Drop arguments from the stack
4001 __ pop(rcx);
4002 __ addq(rsp, Immediate(2 * kPointerSize));
4003 __ push(rcx);
4004 GenerateCompareFlatAsciiStrings(masm, rdx, rax, rcx, rbx, rdi, r8);
4005
4006 // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
4007 // tagged as a small integer.
4008 __ bind(&runtime);
4009 __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
4010 }
4011
4012 #undef __
4013
4014 } } // namespace v8::internal
4015
4016 #endif // V8_TARGET_ARCH_X64