Chromium Code Reviews

Side by Side Diff: src/x64/macro-assembler-x64.h

Issue 3381005: X64: Templating Smi-macros to use both Label and NearLabel. (Closed)
Patch Set: Created 10 years, 3 months ago
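This patch turns the Smi test-and-jump macros into templates over the label type, so a single definition can branch either to a regular Label (32-bit jump displacement) or to a NearLabel (8-bit displacement, for targets known to be close). The sketch below is illustrative only and not part of the patch; it assumes the usual V8 x64 call-site pattern of this era, where the assembler's j(), jmp() and bind() accept both label types, and the helper function and register choices are hypothetical.

// Illustrative call-site sketch (hypothetical helper, not from the patch).
void Generate(MacroAssembler* masm, Register value) {
  NearLabel is_smi;                       // target is a few instructions away
  masm->JumpIfSmi(value, &is_smi);        // instantiates JumpIfSmi<NearLabel>
  masm->int3();                           // placeholder non-smi path
  masm->bind(&is_smi);

  Label slow_case;                        // target may be arbitrarily far away
  masm->JumpIfNotSmi(value, &slow_case);  // instantiates JumpIfNotSmi<Label>
  // ... fast path ...
  masm->bind(&slow_case);
}
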
1 // Copyright 2009 the V8 project authors. All rights reserved. 1 // Copyright 2009 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 73 matching lines...)
84 // For page containing |object| mark region covering |addr| dirty. 84 // For page containing |object| mark region covering |addr| dirty.
85 // RecordWriteHelper only works if the object is not in new 85 // RecordWriteHelper only works if the object is not in new
86 // space. 86 // space.
87 void RecordWriteHelper(Register object, 87 void RecordWriteHelper(Register object,
88 Register addr, 88 Register addr,
89 Register scratch); 89 Register scratch);
90 90
91 // Check if object is in new space. The condition cc can be equal or 91 // Check if object is in new space. The condition cc can be equal or
92 // not_equal. If it is equal a jump will be done if the object is in new 92 // not_equal. If it is equal a jump will be done if the object is in new
93 // space. The register scratch can be object itself, but it will be clobbered. 93 // space. The register scratch can be object itself, but it will be clobbered.
94 template <typename LabelType>
94 void InNewSpace(Register object, 95 void InNewSpace(Register object,
95 Register scratch, 96 Register scratch,
96 Condition cc, 97 Condition cc,
97 Label* branch); 98 LabelType* branch);
98 99
99 // For page containing |object| mark region covering [object+offset] 100 // For page containing |object| mark region covering [object+offset]
100 // dirty. |object| is the object being stored into, |value| is the 101 // dirty. |object| is the object being stored into, |value| is the
101 // object being stored. If |offset| is zero, then the |scratch| 102 // object being stored. If |offset| is zero, then the |scratch|
102 // register contains the array index into the elements array 103 // register contains the array index into the elements array
103 // represented as a Smi. All registers are clobbered by the 104 // represented as a Smi. All registers are clobbered by the
104 // operation. RecordWrite filters out smis so it does not update the 105 // operation. RecordWrite filters out smis so it does not update the
105 // write barrier if the value is a smi. 106 // write barrier if the value is a smi.
106 void RecordWrite(Register object, 107 void RecordWrite(Register object,
107 int offset, 108 int offset,
(...skipping 106 matching lines...)
214 // Conversions between tagged smi values and non-tagged integer values. 215 // Conversions between tagged smi values and non-tagged integer values.
215 216
216 // Tag an integer value. The result must be known to be a valid smi value. 217 // Tag an integer value. The result must be known to be a valid smi value.
217 // Only uses the low 32 bits of the src register. Sets the N and Z flags 218 // Only uses the low 32 bits of the src register. Sets the N and Z flags
218 // based on the value of the resulting integer. 219 // based on the value of the resulting integer.
219 void Integer32ToSmi(Register dst, Register src); 220 void Integer32ToSmi(Register dst, Register src);
220 221
221 // Tag an integer value if possible, or jump if the integer value cannot 222 // Tag an integer value if possible, or jump if the integer value cannot
222 // be represented as a smi. Only uses the low 32 bits of the src register. 223 // be represented as a smi. Only uses the low 32 bits of the src register.
223 // NOTICE: Destroys the dst register even if unsuccessful! 224 // NOTICE: Destroys the dst register even if unsuccessful!
224 void Integer32ToSmi(Register dst, Register src, Label* on_overflow); 225 template <typename LabelType>
226 void Integer32ToSmi(Register dst, Register src, LabelType* on_overflow);
225 227
226 // Stores an integer32 value into a memory field that already holds a smi. 228 // Stores an integer32 value into a memory field that already holds a smi.
227 void Integer32ToSmiField(const Operand& dst, Register src); 229 void Integer32ToSmiField(const Operand& dst, Register src);
228 230
229 // Adds constant to src and tags the result as a smi. 231 // Adds constant to src and tags the result as a smi.
230 // Result must be a valid smi. 232 // Result must be a valid smi.
231 void Integer64PlusConstantToSmi(Register dst, Register src, int constant); 233 void Integer64PlusConstantToSmi(Register dst, Register src, int constant);
232 234
233 // Convert smi to 32-bit integer. I.e., not sign extended into 235 // Convert smi to 32-bit integer. I.e., not sign extended into
234 // high 32 bits of destination. 236 // high 32 bits of destination.
(...skipping 58 matching lines...)
293 Condition CheckInteger32ValidSmiValue(Register src); 295 Condition CheckInteger32ValidSmiValue(Register src);
294 296
295 // Checks whether a 32-bit unsigned integer value is valid for 297 // Checks whether a 32-bit unsigned integer value is valid for
296 // conversion to a smi. 298 // conversion to a smi.
297 Condition CheckUInteger32ValidSmiValue(Register src); 299 Condition CheckUInteger32ValidSmiValue(Register src);
298 300
299 // Test-and-jump functions. Typically combines a check function 301 // Test-and-jump functions. Typically combines a check function
300 // above with a conditional jump. 302 // above with a conditional jump.
301 303
302 // Jump if the value cannot be represented by a smi. 304 // Jump if the value cannot be represented by a smi.
303 void JumpIfNotValidSmiValue(Register src, Label* on_invalid); 305 template <typename LabelType>
306 void JumpIfNotValidSmiValue(Register src, LabelType* on_invalid);
304 307
305 // Jump if the unsigned integer value cannot be represented by a smi. 308 // Jump if the unsigned integer value cannot be represented by a smi.
306 void JumpIfUIntNotValidSmiValue(Register src, Label* on_invalid); 309 template <typename LabelType>
310 void JumpIfUIntNotValidSmiValue(Register src, LabelType* on_invalid);
307 311
308 // Jump to label if the value is a tagged smi. 312 // Jump to label if the value is a tagged smi.
309 void JumpIfSmi(Register src, Label* on_smi); 313 template <typename LabelType>
314 void JumpIfSmi(Register src, LabelType* on_smi);
310 315
311 // Jump to label if the value is not a tagged smi. 316 // Jump to label if the value is not a tagged smi.
312 void JumpIfNotSmi(Register src, Label* on_not_smi); 317 template <typename LabelType>
318 void JumpIfNotSmi(Register src, LabelType* on_not_smi);
313 319
314 // Jump to label if the value is not a positive tagged smi. 320 // Jump to label if the value is not a positive tagged smi.
315 void JumpIfNotPositiveSmi(Register src, Label* on_not_smi); 321 template <typename LabelType>
322 void JumpIfNotPositiveSmi(Register src, LabelType* on_not_smi);
316 323
317 // Jump to label if the value, which must be a tagged smi, has value equal 324 // Jump to label if the value, which must be a tagged smi, has value equal
318 // to the constant. 325 // to the constant.
319 void JumpIfSmiEqualsConstant(Register src, Smi* constant, Label* on_equals); 326 template <typename LabelType>
327 void JumpIfSmiEqualsConstant(Register src,
328 Smi* constant,
329 LabelType* on_equals);
320 330
321 // Jump if either or both registers are not smi values. 331 // Jump if either or both registers are not smi values.
322 void JumpIfNotBothSmi(Register src1, Register src2, Label* on_not_both_smi); 332 template <typename LabelType>
333 void JumpIfNotBothSmi(Register src1,
334 Register src2,
335 LabelType* on_not_both_smi);
323 336
324 // Jump if either or both registers are not positive smi values. 337 // Jump if either or both registers are not positive smi values.
338 template <typename LabelType>
325 void JumpIfNotBothPositiveSmi(Register src1, Register src2, 339 void JumpIfNotBothPositiveSmi(Register src1, Register src2,
326 Label* on_not_both_smi); 340 LabelType* on_not_both_smi);
327 341
328 // Operations on tagged smi values. 342 // Operations on tagged smi values.
329 343
330 // Smis represent a subset of integers. The subset is always equivalent to 344 // Smis represent a subset of integers. The subset is always equivalent to
331 // a two's complement interpretation of a fixed number of bits. 345 // a two's complement interpretation of a fixed number of bits.
332 346
333 // Optimistically adds an integer constant to a supposed smi. 347 // Optimistically adds an integer constant to a supposed smi.
334 // If the src is not a smi, or the result is not a smi, jump to 348 // If the src is not a smi, or the result is not a smi, jump to
335 // the label. 349 // the label.
350 template <typename LabelType>
336 void SmiTryAddConstant(Register dst, 351 void SmiTryAddConstant(Register dst,
337 Register src, 352 Register src,
338 Smi* constant, 353 Smi* constant,
339 Label* on_not_smi_result); 354 LabelType* on_not_smi_result);
340 355
341 // Add an integer constant to a tagged smi, giving a tagged smi as result. 356 // Add an integer constant to a tagged smi, giving a tagged smi as result.
342 // No overflow testing on the result is done. 357 // No overflow testing on the result is done.
343 void SmiAddConstant(Register dst, Register src, Smi* constant); 358 void SmiAddConstant(Register dst, Register src, Smi* constant);
344 359
345 // Add an integer constant to a tagged smi, giving a tagged smi as result. 360 // Add an integer constant to a tagged smi, giving a tagged smi as result.
346 // No overflow testing on the result is done. 361 // No overflow testing on the result is done.
347 void SmiAddConstant(const Operand& dst, Smi* constant); 362 void SmiAddConstant(const Operand& dst, Smi* constant);
348 363
349 // Add an integer constant to a tagged smi, giving a tagged smi as result, 364 // Add an integer constant to a tagged smi, giving a tagged smi as result,
350 // or jumping to a label if the result cannot be represented by a smi. 365 // or jumping to a label if the result cannot be represented by a smi.
366 template <typename LabelType>
351 void SmiAddConstant(Register dst, 367 void SmiAddConstant(Register dst,
352 Register src, 368 Register src,
353 Smi* constant, 369 Smi* constant,
354 Label* on_not_smi_result); 370 LabelType* on_not_smi_result);
355 371
356 // Subtract an integer constant from a tagged smi, giving a tagged smi as 372 // Subtract an integer constant from a tagged smi, giving a tagged smi as
357 // result. No testing on the result is done. Sets the N and Z flags 373 // result. No testing on the result is done. Sets the N and Z flags
358 // based on the value of the resulting integer. 374 // based on the value of the resulting integer.
359 void SmiSubConstant(Register dst, Register src, Smi* constant); 375 void SmiSubConstant(Register dst, Register src, Smi* constant);
360 376
361 // Subtract an integer constant from a tagged smi, giving a tagged smi as 377 // Subtract an integer constant from a tagged smi, giving a tagged smi as
362 // result, or jumping to a label if the result cannot be represented by a smi. 378 // result, or jumping to a label if the result cannot be represented by a smi.
379 template <typename LabelType>
363 void SmiSubConstant(Register dst, 380 void SmiSubConstant(Register dst,
364 Register src, 381 Register src,
365 Smi* constant, 382 Smi* constant,
366 Label* on_not_smi_result); 383 LabelType* on_not_smi_result);
367 384
368 // Negating a smi can give a negative zero or too large positive value. 385 // Negating a smi can give a negative zero or too large positive value.
369 // NOTICE: This operation jumps on success, not failure! 386 // NOTICE: This operation jumps on success, not failure!
387 template <typename LabelType>
370 void SmiNeg(Register dst, 388 void SmiNeg(Register dst,
371 Register src, 389 Register src,
372 Label* on_smi_result); 390 LabelType* on_smi_result);
373 391
374 // Adds smi values and returns the result as a smi. 392 // Adds smi values and returns the result as a smi.
375 // If dst is src1, then src1 will be destroyed, even if 393 // If dst is src1, then src1 will be destroyed, even if
376 // the operation is unsuccessful. 394 // the operation is unsuccessful.
395 template <typename LabelType>
377 void SmiAdd(Register dst, 396 void SmiAdd(Register dst,
378 Register src1, 397 Register src1,
379 Register src2, 398 Register src2,
380 Label* on_not_smi_result); 399 LabelType* on_not_smi_result);
400
401 void SmiAdd(Register dst,
402 Register src1,
403 Register src2);
381 404
382 // Subtracts smi values and returns the result as a smi. 405 // Subtracts smi values and returns the result as a smi.
383 // If dst is src1, then src1 will be destroyed, even if 406 // If dst is src1, then src1 will be destroyed, even if
384 // the operation is unsuccessful. 407 // the operation is unsuccessful.
408 template <typename LabelType>
385 void SmiSub(Register dst, 409 void SmiSub(Register dst,
386 Register src1, 410 Register src1,
387 Register src2, 411 Register src2,
388 Label* on_not_smi_result); 412 LabelType* on_not_smi_result);
389 413
390 void SmiSub(Register dst, 414 void SmiSub(Register dst,
391 Register src1, 415 Register src1,
416 Register src2);
417
418 template <typename LabelType>
419 void SmiSub(Register dst,
420 Register src1,
392 const Operand& src2, 421 const Operand& src2,
393 Label* on_not_smi_result); 422 LabelType* on_not_smi_result);
423
424 void SmiSub(Register dst,
425 Register src1,
426 const Operand& src2);
394 427
395 // Multiplies smi values and returns the result as a smi, 428 // Multiplies smi values and returns the result as a smi,
396 // if possible. 429 // if possible.
397 // If dst is src1, then src1 will be destroyed, even if 430 // If dst is src1, then src1 will be destroyed, even if
398 // the operation is unsuccessful. 431 // the operation is unsuccessful.
432 template <typename LabelType>
399 void SmiMul(Register dst, 433 void SmiMul(Register dst,
400 Register src1, 434 Register src1,
401 Register src2, 435 Register src2,
402 Label* on_not_smi_result); 436 LabelType* on_not_smi_result);
403 437
404 // Divides one smi by another and returns the quotient. 438 // Divides one smi by another and returns the quotient.
405 // Clobbers rax and rdx registers. 439 // Clobbers rax and rdx registers.
440 template <typename LabelType>
406 void SmiDiv(Register dst, 441 void SmiDiv(Register dst,
407 Register src1, 442 Register src1,
408 Register src2, 443 Register src2,
409 Label* on_not_smi_result); 444 LabelType* on_not_smi_result);
410 445
411 // Divides one smi by another and returns the remainder. 446 // Divides one smi by another and returns the remainder.
412 // Clobbers rax and rdx registers. 447 // Clobbers rax and rdx registers.
448 template <typename LabelType>
413 void SmiMod(Register dst, 449 void SmiMod(Register dst,
414 Register src1, 450 Register src1,
415 Register src2, 451 Register src2,
416 Label* on_not_smi_result); 452 LabelType* on_not_smi_result);
417 453
418 // Bitwise operations. 454 // Bitwise operations.
419 void SmiNot(Register dst, Register src); 455 void SmiNot(Register dst, Register src);
420 void SmiAnd(Register dst, Register src1, Register src2); 456 void SmiAnd(Register dst, Register src1, Register src2);
421 void SmiOr(Register dst, Register src1, Register src2); 457 void SmiOr(Register dst, Register src1, Register src2);
422 void SmiXor(Register dst, Register src1, Register src2); 458 void SmiXor(Register dst, Register src1, Register src2);
423 void SmiAndConstant(Register dst, Register src1, Smi* constant); 459 void SmiAndConstant(Register dst, Register src1, Smi* constant);
424 void SmiOrConstant(Register dst, Register src1, Smi* constant); 460 void SmiOrConstant(Register dst, Register src1, Smi* constant);
425 void SmiXorConstant(Register dst, Register src1, Smi* constant); 461 void SmiXorConstant(Register dst, Register src1, Smi* constant);
426 462
427 void SmiShiftLeftConstant(Register dst, 463 void SmiShiftLeftConstant(Register dst,
428 Register src, 464 Register src,
429 int shift_value); 465 int shift_value);
466 template <typename LabelType>
430 void SmiShiftLogicalRightConstant(Register dst, 467 void SmiShiftLogicalRightConstant(Register dst,
431 Register src, 468 Register src,
432 int shift_value, 469 int shift_value,
433 Label* on_not_smi_result); 470 LabelType* on_not_smi_result);
434 void SmiShiftArithmeticRightConstant(Register dst, 471 void SmiShiftArithmeticRightConstant(Register dst,
435 Register src, 472 Register src,
436 int shift_value); 473 int shift_value);
437 474
438 // Shifts a smi value to the left, and returns the result if that is a smi. 475 // Shifts a smi value to the left, and returns the result if that is a smi.
439 // Uses and clobbers rcx, so dst may not be rcx. 476 // Uses and clobbers rcx, so dst may not be rcx.
440 void SmiShiftLeft(Register dst, 477 void SmiShiftLeft(Register dst,
441 Register src1, 478 Register src1,
442 Register src2); 479 Register src2);
443 // Shifts a smi value to the right, shifting in zero bits at the top, and 480 // Shifts a smi value to the right, shifting in zero bits at the top, and
444 // returns the unsigned interpretation of the result if that is a smi. 481 // returns the unsigned interpretation of the result if that is a smi.
445 // Uses and clobbers rcx, so dst may not be rcx. 482 // Uses and clobbers rcx, so dst may not be rcx.
483 template <typename LabelType>
446 void SmiShiftLogicalRight(Register dst, 484 void SmiShiftLogicalRight(Register dst,
447 Register src1, 485 Register src1,
448 Register src2, 486 Register src2,
449 Label* on_not_smi_result); 487 LabelType* on_not_smi_result);
450 // Shifts a smi value to the right, sign extending the top, and 488 // Shifts a smi value to the right, sign extending the top, and
451 // returns the signed interpretation of the result. That will always 489 // returns the signed interpretation of the result. That will always
452 // be a valid smi value, since it's numerically smaller than the 490 // be a valid smi value, since it's numerically smaller than the
453 // original. 491 // original.
454 // Uses and clobbers rcx, so dst may not be rcx. 492 // Uses and clobbers rcx, so dst may not be rcx.
455 void SmiShiftArithmeticRight(Register dst, 493 void SmiShiftArithmeticRight(Register dst,
456 Register src1, 494 Register src1,
457 Register src2); 495 Register src2);
458 496
459 // Specialized operations 497 // Specialized operations
460 498
461 // Select the non-smi register of two registers where exactly one is a 499 // Select the non-smi register of two registers where exactly one is a
462 // smi. If neither are smis, jump to the failure label. 500 // smi. If neither are smis, jump to the failure label.
501 template <typename LabelType>
463 void SelectNonSmi(Register dst, 502 void SelectNonSmi(Register dst,
464 Register src1, 503 Register src1,
465 Register src2, 504 Register src2,
466 Label* on_not_smis); 505 LabelType* on_not_smis);
467 506
468 // Converts, if necessary, a smi to a combination of number and 507 // Converts, if necessary, a smi to a combination of number and
469 // multiplier to be used as a scaled index. 508 // multiplier to be used as a scaled index.
470 // The src register contains a *positive* smi value. The shift is the 509 // The src register contains a *positive* smi value. The shift is the
471 // power of two to multiply the index value by (e.g. 510 // power of two to multiply the index value by (e.g.
472 // to index by smi-value * kPointerSize, pass the smi and kPointerSizeLog2). 511 // to index by smi-value * kPointerSize, pass the smi and kPointerSizeLog2).
473 // The returned index register may be either src or dst, depending 512 // The returned index register may be either src or dst, depending
474 // on what is most efficient. If src and dst are different registers, 513 // on what is most efficient. If src and dst are different registers,
475 // src is always unchanged. 514 // src is always unchanged.
476 SmiIndex SmiToIndex(Register dst, Register src, int shift); 515 SmiIndex SmiToIndex(Register dst, Register src, int shift);
477 516
478 // Converts a positive smi to a negative index. 517 // Converts a positive smi to a negative index.
479 SmiIndex SmiToNegativeIndex(Register dst, Register src, int shift); 518 SmiIndex SmiToNegativeIndex(Register dst, Register src, int shift);
480 519
481 // Basic Smi operations. 520 // Basic Smi operations.
482 void Move(Register dst, Smi* source) { 521 void Move(Register dst, Smi* source) {
483 LoadSmiConstant(dst, source); 522 LoadSmiConstant(dst, source);
484 } 523 }
485 524
486 void Move(const Operand& dst, Smi* source) { 525 void Move(const Operand& dst, Smi* source) {
487 Register constant = GetSmiConstant(source); 526 Register constant = GetSmiConstant(source);
488 movq(dst, constant); 527 movq(dst, constant);
489 } 528 }
490 529
491 void Push(Smi* smi); 530 void Push(Smi* smi);
492 void Test(const Operand& dst, Smi* source); 531 void Test(const Operand& dst, Smi* source);
493 532
494 // --------------------------------------------------------------------------- 533 // ---------------------------------------------------------------------------
495 // String macros. 534 // String macros.
535 template <typename LabelType>
496 void JumpIfNotBothSequentialAsciiStrings(Register first_object, 536 void JumpIfNotBothSequentialAsciiStrings(Register first_object,
497 Register second_object, 537 Register second_object,
498 Register scratch1, 538 Register scratch1,
499 Register scratch2, 539 Register scratch2,
500 Label* on_not_both_flat_ascii); 540 LabelType* on_not_both_flat_ascii);
501 541
502 // Check whether the instance type represents a flat ascii string. Jump to the 542 // Check whether the instance type represents a flat ascii string. Jump to the
503 // label if not. If the instance type can be scratched, specify same register 543 // label if not. If the instance type can be scratched, specify same register
504 // for both instance type and scratch. 544 // for both instance type and scratch.
505 void JumpIfInstanceTypeIsNotSequentialAscii(Register instance_type, 545 template <typename LabelType>
506 Register scratch, 546 void JumpIfInstanceTypeIsNotSequentialAscii(
507 Label *on_not_flat_ascii_string); 547 Register instance_type,
548 Register scratch,
549 LabelType *on_not_flat_ascii_string);
508 550
551 template <typename LabelType>
509 void JumpIfBothInstanceTypesAreNotSequentialAscii( 552 void JumpIfBothInstanceTypesAreNotSequentialAscii(
510 Register first_object_instance_type, 553 Register first_object_instance_type,
511 Register second_object_instance_type, 554 Register second_object_instance_type,
512 Register scratch1, 555 Register scratch1,
513 Register scratch2, 556 Register scratch2,
514 Label* on_fail); 557 LabelType* on_fail);
515 558
516 // --------------------------------------------------------------------------- 559 // ---------------------------------------------------------------------------
517 // Macro instructions. 560 // Macro instructions.
518 561
519 // Load a register with a long value as efficiently as possible. 562 // Load a register with a long value as efficiently as possible.
520 void Set(Register dst, int64_t x); 563 void Set(Register dst, int64_t x);
521 void Set(const Operand& dst, int64_t x); 564 void Set(const Operand& dst, int64_t x);
522 565
523 // Handle support 566 // Handle support
524 void Move(Register dst, Handle<Object> source); 567 void Move(Register dst, Handle<Object> source);
(...skipping 333 matching lines...)
858 // modified. It may be the "smi 1 constant" register. 901 // modified. It may be the "smi 1 constant" register.
859 Register GetSmiConstant(Smi* value); 902 Register GetSmiConstant(Smi* value);
860 903
861 // Moves the smi value to the destination register. 904 // Moves the smi value to the destination register.
862 void LoadSmiConstant(Register dst, Smi* value); 905 void LoadSmiConstant(Register dst, Smi* value);
863 906
864 // This handle will be patched with the code object on installation. 907 // This handle will be patched with the code object on installation.
865 Handle<Object> code_object_; 908 Handle<Object> code_object_;
866 909
867 // Helper functions for generating invokes. 910 // Helper functions for generating invokes.
911 template <typename LabelType>
868 void InvokePrologue(const ParameterCount& expected, 912 void InvokePrologue(const ParameterCount& expected,
869 const ParameterCount& actual, 913 const ParameterCount& actual,
870 Handle<Code> code_constant, 914 Handle<Code> code_constant,
871 Register code_register, 915 Register code_register,
872 Label* done, 916 LabelType* done,
873 InvokeFlag flag); 917 InvokeFlag flag);
874 918
875 // Activation support. 919 // Activation support.
876 void EnterFrame(StackFrame::Type type); 920 void EnterFrame(StackFrame::Type type);
877 void LeaveFrame(StackFrame::Type type); 921 void LeaveFrame(StackFrame::Type type);
878 922
879 void EnterExitFramePrologue(bool save_rax); 923 void EnterExitFramePrologue(bool save_rax);
880 void EnterExitFrameEpilogue(int result_size, int argc); 924 void EnterExitFrameEpilogue(int result_size, int argc);
881 925
882 // Allocation support helpers. 926 // Allocation support helpers.
(...skipping 71 matching lines...)
954 masm->call(x64_coverage_function, RelocInfo::RUNTIME_ENTRY); \ 998 masm->call(x64_coverage_function, RelocInfo::RUNTIME_ENTRY); \
955 masm->pop(rax); \ 999 masm->pop(rax); \
956 masm->popad(); \ 1000 masm->popad(); \
957 masm->popfd(); \ 1001 masm->popfd(); \
958 } \ 1002 } \
959 masm-> 1003 masm->
960 #else 1004 #else
961 #define ACCESS_MASM(masm) masm-> 1005 #define ACCESS_MASM(masm) masm->
962 #endif 1006 #endif
963 1007
1008 // -----------------------------------------------------------------------------
1009 // Template implementations.
1010
1011 static int kSmiShift = kSmiTagSize + kSmiShiftSize;
1012
1013 template <typename LabelType>
1014 void MacroAssembler::Integer32ToSmi(Register dst,
1015 Register src,
1016 LabelType* on_overflow) {
1017 ASSERT_EQ(0, kSmiTag);
Rico 2010/09/16 06:32:15 on_overflow label not used
Lasse Reichstein 2010/09/16 07:03:07 Nor is the function. Function removed.
1018 // 32-bit integer always fits in a long smi.
1019 if (!dst.is(src)) {
1020 movl(dst, src);
1021 }
1022 shl(dst, Immediate(kSmiShift));
1023 }
1024
1025
1026 template <typename LabelType>
1027 void MacroAssembler::SmiNeg(Register dst,
1028 Register src,
1029 LabelType* on_smi_result) {
1030 if (dst.is(src)) {
1031 ASSERT(!dst.is(kScratchRegister));
1032 movq(kScratchRegister, src);
1033 neg(dst); // Low 32 bits are retained as zero by negation.
1034 // Test if result is zero or Smi::kMinValue.
1035 cmpq(dst, kScratchRegister);
1036 j(not_equal, on_smi_result);
1037 movq(src, kScratchRegister);
1038 } else {
1039 movq(dst, src);
1040 neg(dst);
1041 cmpq(dst, src);
1042 // If the result is zero or Smi::kMinValue, negation failed to create a smi.
1043 j(not_equal, on_smi_result);
1044 }
1045 }
1046
1047
1048 template <typename LabelType>
1049 void MacroAssembler::SmiAdd(Register dst,
1050 Register src1,
1051 Register src2,
1052 LabelType* on_not_smi_result) {
1053 ASSERT_NOT_NULL(on_not_smi_result);
1054 ASSERT(!dst.is(src2));
1055 if (dst.is(src1)) {
1056 movq(kScratchRegister, src1);
1057 addq(kScratchRegister, src2);
1058 j(overflow, on_not_smi_result);
1059 movq(dst, kScratchRegister);
1060 } else {
1061 movq(dst, src1);
1062 addq(dst, src2);
1063 j(overflow, on_not_smi_result);
1064 }
1065 }
1066
1067
1068 template <typename LabelType>
1069 void MacroAssembler::SmiSub(Register dst,
1070 Register src1,
1071 Register src2,
1072 LabelType* on_not_smi_result) {
1073 ASSERT_NOT_NULL(on_not_smi_result);
1074 ASSERT(!dst.is(src2));
1075 if (dst.is(src1)) {
1076 cmpq(dst, src2);
1077 j(overflow, on_not_smi_result);
1078 subq(dst, src2);
1079 } else {
1080 movq(dst, src1);
1081 subq(dst, src2);
1082 j(overflow, on_not_smi_result);
1083 }
1084 }
1085
1086
1087 template <typename LabelType>
1088 void MacroAssembler::SmiSub(Register dst,
1089 Register src1,
1090 const Operand& src2,
1091 LabelType* on_not_smi_result) {
1092 ASSERT_NOT_NULL(on_not_smi_result);
1093 if (dst.is(src1)) {
1094 movq(kScratchRegister, src2);
1095 cmpq(src1, kScratchRegister);
1096 j(overflow, on_not_smi_result);
1097 subq(src1, kScratchRegister);
1098 } else {
1099 movq(dst, src1);
1100 subq(dst, src2);
1101 j(overflow, on_not_smi_result);
1102 }
1103 }
1104
1105
1106 template <typename LabelType>
1107 void MacroAssembler::SmiMul(Register dst,
1108 Register src1,
1109 Register src2,
1110 LabelType* on_not_smi_result) {
1111 ASSERT(!dst.is(src2));
1112 ASSERT(!dst.is(kScratchRegister));
1113 ASSERT(!src1.is(kScratchRegister));
1114 ASSERT(!src2.is(kScratchRegister));
1115
1116 if (dst.is(src1)) {
1117 NearLabel failure, zero_correct_result;
1118 movq(kScratchRegister, src1); // Create backup for later testing.
1119 SmiToInteger64(dst, src1);
1120 imul(dst, src2);
1121 j(overflow, &failure);
1122
1123 // Check for negative zero result. If product is zero, and one
1124 // argument is negative, go to slow case.
1125 NearLabel correct_result;
1126 testq(dst, dst);
1127 j(not_zero, &correct_result);
1128
1129 movq(dst, kScratchRegister);
1130 xor_(dst, src2);
1131 j(positive, &zero_correct_result); // Result was positive zero.
1132
1133 bind(&failure); // Reused failure exit, restores src1.
1134 movq(src1, kScratchRegister);
1135 jmp(on_not_smi_result);
1136
1137 bind(&zero_correct_result);
1138 xor_(dst, dst);
1139
1140 bind(&correct_result);
1141 } else {
1142 SmiToInteger64(dst, src1);
1143 imul(dst, src2);
1144 j(overflow, on_not_smi_result);
1145 // Check for negative zero result. If product is zero, and one
1146 // argument is negative, go to slow case.
1147 NearLabel correct_result;
1148 testq(dst, dst);
1149 j(not_zero, &correct_result);
1150 // One of src1 and src2 is zero, then check whether the other is
1151 // negative.
1152 movq(kScratchRegister, src1);
1153 xor_(kScratchRegister, src2);
1154 j(negative, on_not_smi_result);
1155 bind(&correct_result);
1156 }
1157 }
1158
1159
1160 template <typename LabelType>
1161 void MacroAssembler::SmiTryAddConstant(Register dst,
1162 Register src,
1163 Smi* constant,
1164 LabelType* on_not_smi_result) {
1165 // Does not assume that src is a smi.
1166 ASSERT_EQ(static_cast<int>(1), static_cast<int>(kSmiTagMask));
1167 ASSERT_EQ(0, kSmiTag);
1168 ASSERT(!dst.is(kScratchRegister));
1169 ASSERT(!src.is(kScratchRegister));
1170
1171 JumpIfNotSmi(src, on_not_smi_result);
1172 Register tmp = (dst.is(src) ? kScratchRegister : dst);
1173 LoadSmiConstant(tmp, constant);
1174 addq(tmp, src);
1175 j(overflow, on_not_smi_result);
1176 if (dst.is(src)) {
1177 movq(dst, tmp);
1178 }
1179 }
1180
1181
1182 template <typename LabelType>
1183 void MacroAssembler::SmiAddConstant(Register dst,
1184 Register src,
1185 Smi* constant,
1186 LabelType* on_not_smi_result) {
1187 if (constant->value() == 0) {
1188 if (!dst.is(src)) {
1189 movq(dst, src);
1190 }
1191 } else if (dst.is(src)) {
1192 ASSERT(!dst.is(kScratchRegister));
1193
1194 LoadSmiConstant(kScratchRegister, constant);
1195 addq(kScratchRegister, src);
1196 j(overflow, on_not_smi_result);
1197 movq(dst, kScratchRegister);
1198 } else {
1199 LoadSmiConstant(dst, constant);
1200 addq(dst, src);
1201 j(overflow, on_not_smi_result);
1202 }
1203 }
1204
1205
1206 template <typename LabelType>
1207 void MacroAssembler::SmiSubConstant(Register dst,
1208 Register src,
1209 Smi* constant,
1210 LabelType* on_not_smi_result) {
1211 if (constant->value() == 0) {
1212 if (!dst.is(src)) {
1213 movq(dst, src);
1214 }
1215 } else if (dst.is(src)) {
1216 ASSERT(!dst.is(kScratchRegister));
1217 if (constant->value() == Smi::kMinValue) {
1218 // Subtracting min-value from any non-negative value will overflow.
1219 // We test the non-negativeness before doing the subtraction.
1220 testq(src, src);
1221 j(not_sign, on_not_smi_result);
1222 LoadSmiConstant(kScratchRegister, constant);
1223 subq(dst, kScratchRegister);
1224 } else {
1225 // Subtract by adding the negation.
1226 LoadSmiConstant(kScratchRegister, Smi::FromInt(-constant->value()));
1227 addq(kScratchRegister, dst);
1228 j(overflow, on_not_smi_result);
1229 movq(dst, kScratchRegister);
1230 }
1231 } else {
1232 if (constant->value() == Smi::kMinValue) {
1233 // Subtracting min-value from any non-negative value will overflow.
1234 // We test the non-negativeness before doing the subtraction.
1235 testq(src, src);
1236 j(not_sign, on_not_smi_result);
1237 LoadSmiConstant(dst, constant);
1238 // Adding and subtracting the min-value gives the same result; it only
1239 // differs on the overflow bit, which we don't check here.
1240 addq(dst, src);
1241 } else {
1242 // Subtract by adding the negation.
1243 LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
1244 addq(dst, src);
1245 j(overflow, on_not_smi_result);
1246 }
1247 }
1248 }
1249
1250
1251 template <typename LabelType>
1252 void MacroAssembler::SmiDiv(Register dst,
1253 Register src1,
1254 Register src2,
1255 LabelType* on_not_smi_result) {
1256 ASSERT(!src1.is(kScratchRegister));
1257 ASSERT(!src2.is(kScratchRegister));
1258 ASSERT(!dst.is(kScratchRegister));
1259 ASSERT(!src2.is(rax));
1260 ASSERT(!src2.is(rdx));
1261 ASSERT(!src1.is(rdx));
1262
1263 // Check for 0 divisor (result is +/-Infinity).
1264 NearLabel positive_divisor;
1265 testq(src2, src2);
1266 j(zero, on_not_smi_result);
1267
1268 if (src1.is(rax)) {
1269 movq(kScratchRegister, src1);
1270 }
1271 SmiToInteger32(rax, src1);
1272 // We need to rule out dividing Smi::kMinValue by -1, since that would
1273 // overflow in idiv and raise an exception.
1274 // We combine this with negative zero test (negative zero only happens
1275 // when dividing zero by a negative number).
1276
1277 // We overshoot a little and go to slow case if we divide min-value
1278 // by any negative value, not just -1.
1279 NearLabel safe_div;
1280 testl(rax, Immediate(0x7fffffff));
1281 j(not_zero, &safe_div);
1282 testq(src2, src2);
1283 if (src1.is(rax)) {
1284 j(positive, &safe_div);
1285 movq(src1, kScratchRegister);
1286 jmp(on_not_smi_result);
1287 } else {
1288 j(negative, on_not_smi_result);
1289 }
1290 bind(&safe_div);
1291
1292 SmiToInteger32(src2, src2);
1293 // Sign extend src1 into edx:eax.
1294 cdq();
1295 idivl(src2);
1296 Integer32ToSmi(src2, src2);
1297 // Check that the remainder is zero.
1298 testl(rdx, rdx);
1299 if (src1.is(rax)) {
1300 NearLabel smi_result;
1301 j(zero, &smi_result);
1302 movq(src1, kScratchRegister);
1303 jmp(on_not_smi_result);
1304 bind(&smi_result);
1305 } else {
1306 j(not_zero, on_not_smi_result);
1307 }
1308 if (!dst.is(src1) && src1.is(rax)) {
1309 movq(src1, kScratchRegister);
1310 }
1311 Integer32ToSmi(dst, rax);
1312 }
1313
1314
1315 template <typename LabelType>
1316 void MacroAssembler::SmiMod(Register dst,
1317 Register src1,
1318 Register src2,
1319 LabelType* on_not_smi_result) {
1320 ASSERT(!dst.is(kScratchRegister));
1321 ASSERT(!src1.is(kScratchRegister));
1322 ASSERT(!src2.is(kScratchRegister));
1323 ASSERT(!src2.is(rax));
1324 ASSERT(!src2.is(rdx));
1325 ASSERT(!src1.is(rdx));
1326 ASSERT(!src1.is(src2));
1327
1328 testq(src2, src2);
1329 j(zero, on_not_smi_result);
1330
1331 if (src1.is(rax)) {
1332 movq(kScratchRegister, src1);
1333 }
1334 SmiToInteger32(rax, src1);
1335 SmiToInteger32(src2, src2);
1336
1337 // Test for the edge case of dividing Smi::kMinValue by -1 (will overflow).
1338 NearLabel safe_div;
1339 cmpl(rax, Immediate(Smi::kMinValue));
1340 j(not_equal, &safe_div);
1341 cmpl(src2, Immediate(-1));
1342 j(not_equal, &safe_div);
1343 // Retag inputs and go slow case.
1344 Integer32ToSmi(src2, src2);
1345 if (src1.is(rax)) {
1346 movq(src1, kScratchRegister);
1347 }
1348 jmp(on_not_smi_result);
1349 bind(&safe_div);
1350
1351 // Sign extend eax into edx:eax.
1352 cdq();
1353 idivl(src2);
1354 // Restore smi tags on inputs.
1355 Integer32ToSmi(src2, src2);
1356 if (src1.is(rax)) {
1357 movq(src1, kScratchRegister);
1358 }
1359 // Check for a negative zero result. If the result is zero, and the
1360 // dividend is negative, go slow to return a floating point negative zero.
1361 NearLabel smi_result;
1362 testl(rdx, rdx);
1363 j(not_zero, &smi_result);
1364 testq(src1, src1);
1365 j(negative, on_not_smi_result);
1366 bind(&smi_result);
1367 Integer32ToSmi(dst, rdx);
1368 }
1369
1370
1371 template <typename LabelType>
1372 void MacroAssembler::SmiShiftLogicalRightConstant(
1373 Register dst, Register src, int shift_value, LabelType* on_not_smi_result) {
1374 // Logical right shift interprets its result as an *unsigned* number.
1375 if (dst.is(src)) {
1376 UNIMPLEMENTED(); // Not used.
1377 } else {
1378 movq(dst, src);
1379 if (shift_value == 0) {
1380 testq(dst, dst);
1381 j(negative, on_not_smi_result);
1382 }
1383 shr(dst, Immediate(shift_value + kSmiShift));
1384 shl(dst, Immediate(kSmiShift));
1385 }
1386 }
1387
1388
1389 template <typename LabelType>
1390 void MacroAssembler::SmiShiftLogicalRight(Register dst,
1391 Register src1,
1392 Register src2,
1393 LabelType* on_not_smi_result) {
1394 ASSERT(!dst.is(kScratchRegister));
1395 ASSERT(!src1.is(kScratchRegister));
1396 ASSERT(!src2.is(kScratchRegister));
1397 ASSERT(!dst.is(rcx));
1398 NearLabel result_ok;
1399 if (src1.is(rcx) || src2.is(rcx)) {
1400 movq(kScratchRegister, rcx);
1401 }
1402 if (!dst.is(src1)) {
1403 movq(dst, src1);
1404 }
1405 SmiToInteger32(rcx, src2);
1406 orl(rcx, Immediate(kSmiShift));
1407 shr_cl(dst); // Shift is rcx modulo 0x1f + 32.
1408 shl(dst, Immediate(kSmiShift));
1409 testq(dst, dst);
1410 if (src1.is(rcx) || src2.is(rcx)) {
1411 NearLabel positive_result;
1412 j(positive, &positive_result);
1413 if (src1.is(rcx)) {
1414 movq(src1, kScratchRegister);
1415 } else {
1416 movq(src2, kScratchRegister);
1417 }
1418 jmp(on_not_smi_result);
1419 bind(&positive_result);
1420 } else {
1421 j(negative, on_not_smi_result); // src2 was zero and src1 negative.
1422 }
1423 }
1424
1425
1426 template <typename LabelType>
1427 void MacroAssembler::SelectNonSmi(Register dst,
1428 Register src1,
1429 Register src2,
1430 LabelType* on_not_smis) {
1431 ASSERT(!dst.is(kScratchRegister));
1432 ASSERT(!src1.is(kScratchRegister));
1433 ASSERT(!src2.is(kScratchRegister));
1434 ASSERT(!dst.is(src1));
1435 ASSERT(!dst.is(src2));
1436 // Both operands must not be smis.
1437 #ifdef DEBUG
1438 if (allow_stub_calls()) { // Check contains a stub call.
1439 Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
1440 Check(not_both_smis, "Both registers were smis in SelectNonSmi.");
1441 }
1442 #endif
1443 ASSERT_EQ(0, kSmiTag);
1444 ASSERT_EQ(0, Smi::FromInt(0));
1445 movl(kScratchRegister, Immediate(kSmiTagMask));
1446 and_(kScratchRegister, src1);
1447 testl(kScratchRegister, src2);
1448 // If non-zero then both are smis.
1449 j(not_zero, on_not_smis);
1450
1451 // Exactly one operand is a smi.
1452 ASSERT_EQ(1, static_cast<int>(kSmiTagMask));
1453 // kScratchRegister still holds src1 & kSmiTag, which is either zero or one.
1454 subq(kScratchRegister, Immediate(1));
1455 // If src1 is a smi, then the scratch register is all 1s, else it is all 0s.
1456 movq(dst, src1);
1457 xor_(dst, src2);
1458 and_(dst, kScratchRegister);
1459 // If src1 is a smi, dst holds src1 ^ src2, else it is zero.
1460 xor_(dst, src1);
1461 // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
1462 }
1463
1464
1465 template <typename LabelType>
1466 void MacroAssembler::JumpIfSmi(Register src, LabelType* on_smi) {
1467 ASSERT_EQ(0, kSmiTag);
1468 Condition smi = CheckSmi(src);
1469 j(smi, on_smi);
1470 }
1471
1472
1473 template <typename LabelType>
1474 void MacroAssembler::JumpIfNotSmi(Register src, LabelType* on_not_smi) {
1475 Condition smi = CheckSmi(src);
1476 j(NegateCondition(smi), on_not_smi);
1477 }
1478
1479
1480 template <typename LabelType>
1481 void MacroAssembler::JumpIfNotPositiveSmi(Register src,
1482 LabelType* on_not_positive_smi) {
1483 Condition positive_smi = CheckPositiveSmi(src);
1484 j(NegateCondition(positive_smi), on_not_positive_smi);
1485 }
1486
1487
1488 template <typename LabelType>
1489 void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
1490 Smi* constant,
1491 LabelType* on_equals) {
1492 SmiCompare(src, constant);
1493 j(equal, on_equals);
1494 }
1495
1496
1497 template <typename LabelType>
1498 void MacroAssembler::JumpIfNotValidSmiValue(Register src,
1499 LabelType* on_invalid) {
1500 Condition is_valid = CheckInteger32ValidSmiValue(src);
1501 j(NegateCondition(is_valid), on_invalid);
1502 }
1503
1504
1505 template <typename LabelType>
1506 void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src,
1507 LabelType* on_invalid) {
1508 Condition is_valid = CheckUInteger32ValidSmiValue(src);
1509 j(NegateCondition(is_valid), on_invalid);
1510 }
1511
1512
1513 template <typename LabelType>
1514 void MacroAssembler::JumpIfNotBothSmi(Register src1,
1515 Register src2,
1516 LabelType* on_not_both_smi) {
1517 Condition both_smi = CheckBothSmi(src1, src2);
1518 j(NegateCondition(both_smi), on_not_both_smi);
1519 }
1520
1521
1522 template <typename LabelType>
1523 void MacroAssembler::JumpIfNotBothPositiveSmi(Register src1,
1524 Register src2,
1525 LabelType* on_not_both_smi) {
1526 Condition both_smi = CheckBothPositiveSmi(src1, src2);
1527 j(NegateCondition(both_smi), on_not_both_smi);
1528 }
1529
1530
1531 template <typename LabelType>
1532 void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first_object,
1533 Register second_object,
1534 Register scratch1,
1535 Register scratch2,
1536 LabelType* on_fail) {
1537 // Check that both objects are not smis.
1538 Condition either_smi = CheckEitherSmi(first_object, second_object);
1539 j(either_smi, on_fail);
1540
1541 // Load instance type for both strings.
1542 movq(scratch1, FieldOperand(first_object, HeapObject::kMapOffset));
1543 movq(scratch2, FieldOperand(second_object, HeapObject::kMapOffset));
1544 movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
1545 movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
1546
1547 // Check that both are flat ascii strings.
1548 ASSERT(kNotStringTag != 0);
1549 const int kFlatAsciiStringMask =
1550 kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
1551 const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
1552
1553 andl(scratch1, Immediate(kFlatAsciiStringMask));
1554 andl(scratch2, Immediate(kFlatAsciiStringMask));
1555 // Interleave the bits to check both scratch1 and scratch2 in one test.
1556 ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
1557 lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
1558 cmpl(scratch1,
1559 Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
1560 j(not_equal, on_fail);
1561 }
1562
1563
1564 template <typename LabelType>
1565 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
1566 Register instance_type,
1567 Register scratch,
1568 LabelType *failure) {
1569 if (!scratch.is(instance_type)) {
1570 movl(scratch, instance_type);
1571 }
1572
1573 const int kFlatAsciiStringMask =
1574 kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
1575
1576 andl(scratch, Immediate(kFlatAsciiStringMask));
1577 cmpl(scratch, Immediate(kStringTag | kSeqStringTag | kAsciiStringTag));
1578 j(not_equal, failure);
1579 }
1580
1581
1582 template <typename LabelType>
1583 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
1584 Register first_object_instance_type,
1585 Register second_object_instance_type,
1586 Register scratch1,
1587 Register scratch2,
1588 LabelType* on_fail) {
1589 // Load instance type for both strings.
1590 movq(scratch1, first_object_instance_type);
1591 movq(scratch2, second_object_instance_type);
1592
1593 // Check that both are flat ascii strings.
1594 ASSERT(kNotStringTag != 0);
1595 const int kFlatAsciiStringMask =
1596 kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
1597 const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
1598
1599 andl(scratch1, Immediate(kFlatAsciiStringMask));
1600 andl(scratch2, Immediate(kFlatAsciiStringMask));
1601 // Interleave the bits to check both scratch1 and scratch2 in one test.
1602 ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
1603 lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
1604 cmpl(scratch1,
1605 Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
1606 j(not_equal, on_fail);
1607 }
1608
1609
1610 template <typename LabelType>
1611 void MacroAssembler::InNewSpace(Register object,
1612 Register scratch,
1613 Condition cc,
1614 LabelType* branch) {
1615 if (Serializer::enabled()) {
1616 // Can't do arithmetic on external references if it might get serialized.
1617 // The mask isn't really an address. We load it as an external reference in
1618 // case the size of the new space is different between the snapshot maker
1619 // and the running system.
1620 if (scratch.is(object)) {
1621 movq(kScratchRegister, ExternalReference::new_space_mask());
1622 and_(scratch, kScratchRegister);
1623 } else {
1624 movq(scratch, ExternalReference::new_space_mask());
1625 and_(scratch, object);
1626 }
1627 movq(kScratchRegister, ExternalReference::new_space_start());
1628 cmpq(scratch, kScratchRegister);
1629 j(cc, branch);
1630 } else {
1631 ASSERT(is_int32(static_cast<int64_t>(Heap::NewSpaceMask())));
1632 intptr_t new_space_start =
1633 reinterpret_cast<intptr_t>(Heap::NewSpaceStart());
1634 movq(kScratchRegister, -new_space_start, RelocInfo::NONE);
1635 if (scratch.is(object)) {
1636 addq(scratch, kScratchRegister);
1637 } else {
1638 lea(scratch, Operand(object, kScratchRegister, times_1, 0));
1639 }
1640 and_(scratch, Immediate(static_cast<int32_t>(Heap::NewSpaceMask())));
1641 j(cc, branch);
1642 }
1643 }
1644
1645
1646 template <typename LabelType>
1647 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
1648 const ParameterCount& actual,
1649 Handle<Code> code_constant,
1650 Register code_register,
1651 LabelType* done,
1652 InvokeFlag flag) {
1653 bool definitely_matches = false;
1654 NearLabel invoke;
1655 if (expected.is_immediate()) {
1656 ASSERT(actual.is_immediate());
1657 if (expected.immediate() == actual.immediate()) {
1658 definitely_matches = true;
1659 } else {
1660 Set(rax, actual.immediate());
1661 if (expected.immediate() ==
1662 SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
1663 // Don't worry about adapting arguments for built-ins that
1664 // don't want that done. Skip adaptation code by making it look
1665 // like we have a match between expected and actual number of
1666 // arguments.
1667 definitely_matches = true;
1668 } else {
1669 Set(rbx, expected.immediate());
1670 }
1671 }
1672 } else {
1673 if (actual.is_immediate()) {
1674 // Expected is in register, actual is immediate. This is the
1675 // case when we invoke function values without going through the
1676 // IC mechanism.
1677 cmpq(expected.reg(), Immediate(actual.immediate()));
1678 j(equal, &invoke);
1679 ASSERT(expected.reg().is(rbx));
1680 Set(rax, actual.immediate());
1681 } else if (!expected.reg().is(actual.reg())) {
1682 // Both expected and actual are in (different) registers. This
1683 // is the case when we invoke functions using call and apply.
1684 cmpq(expected.reg(), actual.reg());
1685 j(equal, &invoke);
1686 ASSERT(actual.reg().is(rax));
1687 ASSERT(expected.reg().is(rbx));
1688 }
1689 }
1690
1691 if (!definitely_matches) {
1692 Handle<Code> adaptor =
1693 Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
1694 if (!code_constant.is_null()) {
1695 movq(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
1696 addq(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
1697 } else if (!code_register.is(rdx)) {
1698 movq(rdx, code_register);
1699 }
1700
1701 if (flag == CALL_FUNCTION) {
1702 Call(adaptor, RelocInfo::CODE_TARGET);
1703 jmp(done);
1704 } else {
1705 Jump(adaptor, RelocInfo::CODE_TARGET);
1706 }
1707 bind(&invoke);
1708 }
1709 }
1710
964 1711
965 } } // namespace v8::internal 1712 } } // namespace v8::internal
966 1713
967 #endif // V8_X64_MACRO_ASSEMBLER_X64_H_ 1714 #endif // V8_X64_MACRO_ASSEMBLER_X64_H_
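
Besides the templating, the patch also adds label-free SmiAdd and SmiSub overloads for call sites where the result is already known to be a smi. A hedged usage sketch follows; the register choices are arbitrary examples and not taken from the patch:

// Checked overload: bails out to the label if the result overflows a smi.
NearLabel bailout;
masm->SmiAdd(rcx, rdx, r8, &bailout);   // instantiates SmiAdd<NearLabel>
// Unchecked overload: no overflow test, result must be known to be a smi.
masm->SmiAdd(rcx, rdx, r8);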