| OLD | NEW | 
|     1 // Copyright (c) 2013, the Dart project authors.  Please see the AUTHORS file |     1 // Copyright (c) 2013, the Dart project authors.  Please see the AUTHORS file | 
|     2 // for details. All rights reserved. Use of this source code is governed by a |     2 // for details. All rights reserved. Use of this source code is governed by a | 
|     3 // BSD-style license that can be found in the LICENSE file. |     3 // BSD-style license that can be found in the LICENSE file. | 
|     4  |     4  | 
|     5 #include "vm/globals.h"  // NOLINT |     5 #include "vm/globals.h"  // NOLINT | 
|     6 #if defined(TARGET_ARCH_X64) |     6 #if defined(TARGET_ARCH_X64) | 
|     7  |     7  | 
|     8 #include "vm/assembler.h" |     8 #include "vm/assembler.h" | 
|     9 #include "vm/cpu.h" |     9 #include "vm/cpu.h" | 
|    10 #include "vm/heap.h" |    10 #include "vm/heap.h" | 
| (...skipping 469 matching lines...) Expand 10 before | Expand all | Expand 10 after  Loading... | 
|   480 void Assembler::cmovlessq(Register dst, Register src) { |   480 void Assembler::cmovlessq(Register dst, Register src) { | 
|   481   AssemblerBuffer::EnsureCapacity ensured(&buffer_); |   481   AssemblerBuffer::EnsureCapacity ensured(&buffer_); | 
|   482   Operand operand(src); |   482   Operand operand(src); | 
|   483   EmitOperandREX(dst, operand, REX_W); |   483   EmitOperandREX(dst, operand, REX_W); | 
|   484   EmitUint8(0x0F); |   484   EmitUint8(0x0F); | 
|   485   EmitUint8(0x4C); |   485   EmitUint8(0x4C); | 
|   486   EmitOperand(dst & 7, operand); |   486   EmitOperand(dst & 7, operand); | 
|   487 } |   487 } | 
|   488  |   488  | 
|   489  |   489  | 
|   490  |  | 
|   491 void Assembler::movss(XmmRegister dst, const Address& src) { |   490 void Assembler::movss(XmmRegister dst, const Address& src) { | 
|   492   ASSERT(dst <= XMM15); |   491   ASSERT(dst <= XMM15); | 
|   493   AssemblerBuffer::EnsureCapacity ensured(&buffer_); |   492   AssemblerBuffer::EnsureCapacity ensured(&buffer_); | 
|   494   EmitUint8(0xF3); |   493   EmitUint8(0xF3); | 
|   495   EmitREX_RB(dst, src); |   494   EmitREX_RB(dst, src); | 
|   496   EmitUint8(0x0F); |   495   EmitUint8(0x0F); | 
|   497   EmitUint8(0x10); |   496   EmitUint8(0x10); | 
|   498   EmitOperand(dst & 7, src); |   497   EmitOperand(dst & 7, src); | 
|   499 } |   498 } | 
|   500  |   499  | 
| (...skipping 2284 matching lines...) Expand 10 before | Expand all | Expand 10 after  Loading... | 
|  2785  |  2784  | 
|  2786  |  2785  | 
|  2787 void Assembler::LoadObjectHelper(Register dst, |  2786 void Assembler::LoadObjectHelper(Register dst, | 
|  2788                                  const Object& object, |  2787                                  const Object& object, | 
|  2789                                  bool is_unique) { |  2788                                  bool is_unique) { | 
|  2790   ASSERT(!object.IsICData() || ICData::Cast(object).IsOriginal()); |  2789   ASSERT(!object.IsICData() || ICData::Cast(object).IsOriginal()); | 
|  2791   ASSERT(!object.IsField() || Field::Cast(object).IsOriginal()); |  2790   ASSERT(!object.IsField() || Field::Cast(object).IsOriginal()); | 
|  2792   if (Thread::CanLoadFromThread(object)) { |  2791   if (Thread::CanLoadFromThread(object)) { | 
|  2793     movq(dst, Address(THR, Thread::OffsetFromThread(object))); |  2792     movq(dst, Address(THR, Thread::OffsetFromThread(object))); | 
|  2794   } else if (CanLoadFromObjectPool(object)) { |  2793   } else if (CanLoadFromObjectPool(object)) { | 
|  2795     const intptr_t idx = |  2794     const intptr_t idx = is_unique ? object_pool_wrapper_.AddObject(object) | 
|  2796         is_unique ? object_pool_wrapper_.AddObject(object) |  2795                                    : object_pool_wrapper_.FindObject(object); | 
|  2797                   : object_pool_wrapper_.FindObject(object); |  | 
|  2798     const int32_t offset = ObjectPool::element_offset(idx); |  2796     const int32_t offset = ObjectPool::element_offset(idx); | 
|  2799     LoadWordFromPoolOffset(dst, offset - kHeapObjectTag); |  2797     LoadWordFromPoolOffset(dst, offset - kHeapObjectTag); | 
|  2800   } else { |  2798   } else { | 
|  2801     ASSERT(object.IsSmi()); |  2799     ASSERT(object.IsSmi()); | 
|  2802     LoadImmediate(dst, Immediate(reinterpret_cast<int64_t>(object.raw()))); |  2800     LoadImmediate(dst, Immediate(reinterpret_cast<int64_t>(object.raw()))); | 
|  2803   } |  2801   } | 
|  2804 } |  2802 } | 
|  2805  |  2803  | 
|  2806  |  2804  | 
|  2807 void Assembler::LoadFunctionFromCalleePool(Register dst, |  2805 void Assembler::LoadFunctionFromCalleePool(Register dst, | 
| (...skipping 48 matching lines...) Expand 10 before | Expand all | Expand 10 after  Loading... | 
|  2856 } |  2854 } | 
|  2857  |  2855  | 
|  2858  |  2856  | 
|  2859 void Assembler::CompareObject(Register reg, const Object& object) { |  2857 void Assembler::CompareObject(Register reg, const Object& object) { | 
|  2860   ASSERT(!object.IsICData() || ICData::Cast(object).IsOriginal()); |  2858   ASSERT(!object.IsICData() || ICData::Cast(object).IsOriginal()); | 
|  2861   ASSERT(!object.IsField() || Field::Cast(object).IsOriginal()); |  2859   ASSERT(!object.IsField() || Field::Cast(object).IsOriginal()); | 
|  2862   if (Thread::CanLoadFromThread(object)) { |  2860   if (Thread::CanLoadFromThread(object)) { | 
|  2863     cmpq(reg, Address(THR, Thread::OffsetFromThread(object))); |  2861     cmpq(reg, Address(THR, Thread::OffsetFromThread(object))); | 
|  2864   } else if (CanLoadFromObjectPool(object)) { |  2862   } else if (CanLoadFromObjectPool(object)) { | 
|  2865     const intptr_t idx = object_pool_wrapper_.FindObject(object, kNotPatchable); |  2863     const intptr_t idx = object_pool_wrapper_.FindObject(object, kNotPatchable); | 
|  2866     const int32_t offset =  ObjectPool::element_offset(idx); |  2864     const int32_t offset = ObjectPool::element_offset(idx); | 
|  2867     cmpq(reg, Address(PP, offset-kHeapObjectTag)); |  2865     cmpq(reg, Address(PP, offset - kHeapObjectTag)); | 
|  2868   } else { |  2866   } else { | 
|  2869     ASSERT(object.IsSmi()); |  2867     ASSERT(object.IsSmi()); | 
|  2870     CompareImmediate( |  2868     CompareImmediate(reg, Immediate(reinterpret_cast<int64_t>(object.raw()))); | 
|  2871         reg, Immediate(reinterpret_cast<int64_t>(object.raw()))); |  | 
|  2872   } |  2869   } | 
|  2873 } |  2870 } | 
|  2874  |  2871  | 
|  2875  |  2872  | 
|  2876 intptr_t Assembler::FindImmediate(int64_t imm) { |  2873 intptr_t Assembler::FindImmediate(int64_t imm) { | 
|  2877   return object_pool_wrapper_.FindImmediate(imm); |  2874   return object_pool_wrapper_.FindImmediate(imm); | 
|  2878 } |  2875 } | 
|  2879  |  2876  | 
|  2880  |  2877  | 
|  2881 void Assembler::LoadImmediate(Register reg, const Immediate& imm) { |  2878 void Assembler::LoadImmediate(Register reg, const Immediate& imm) { | 
| (...skipping 43 matching lines...) Expand 10 before | Expand all | Expand 10 after  Loading... | 
|  2925   // For the value we are only interested in the new/old bit and the tag bit. |  2922   // For the value we are only interested in the new/old bit and the tag bit. | 
|  2926   andl(value, Immediate(kNewObjectAlignmentOffset | kHeapObjectTag)); |  2923   andl(value, Immediate(kNewObjectAlignmentOffset | kHeapObjectTag)); | 
|  2927   // Shift the tag bit into the carry. |  2924   // Shift the tag bit into the carry. | 
|  2928   shrl(value, Immediate(1)); |  2925   shrl(value, Immediate(1)); | 
|  2929   // Add the tag bits together, if the value is not a Smi the addition will |  2926   // Add the tag bits together, if the value is not a Smi the addition will | 
|  2930   // overflow into the next bit, leaving us with a zero low bit. |  2927   // overflow into the next bit, leaving us with a zero low bit. | 
|  2931   adcl(value, object); |  2928   adcl(value, object); | 
|  2932   // Mask out higher, uninteresting bits which were polluted by dest. |  2929   // Mask out higher, uninteresting bits which were polluted by dest. | 
|  2933   andl(value, Immediate(kObjectAlignment - 1)); |  2930   andl(value, Immediate(kObjectAlignment - 1)); | 
|  2934   // Compare with the expected bit pattern. |  2931   // Compare with the expected bit pattern. | 
|  2935   cmpl(value, Immediate( |  2932   cmpl(value, Immediate((kNewObjectAlignmentOffset >> 1) + kHeapObjectTag + | 
|  2936       (kNewObjectAlignmentOffset >> 1) + kHeapObjectTag + |  2933                         kOldObjectAlignmentOffset + kHeapObjectTag)); | 
|  2937       kOldObjectAlignmentOffset + kHeapObjectTag)); |  | 
|  2938   j(NOT_ZERO, no_update, Assembler::kNearJump); |  2934   j(NOT_ZERO, no_update, Assembler::kNearJump); | 
|  2939 } |  2935 } | 
|  2940  |  2936  | 
|  2941  |  2937  | 
|  2942 void Assembler::StoreIntoObject(Register object, |  2938 void Assembler::StoreIntoObject(Register object, | 
|  2943                                 const Address& dest, |  2939                                 const Address& dest, | 
|  2944                                 Register value, |  2940                                 Register value, | 
|  2945                                 bool can_value_be_smi) { |  2941                                 bool can_value_be_smi) { | 
|  2946   ASSERT(object != value); |  2942   ASSERT(object != value); | 
|  2947   movq(dest, value); |  2943   movq(dest, value); | 
| (...skipping 417 matching lines...) Expand 10 before | Expand all | Expand 10 after  Loading... | 
|  3365       Isolate::class_table_offset() + ClassTable::TableOffsetFor(cid); |  3361       Isolate::class_table_offset() + ClassTable::TableOffsetFor(cid); | 
|  3366   movq(temp_reg, Address(temp_reg, table_offset)); |  3362   movq(temp_reg, Address(temp_reg, table_offset)); | 
|  3367   testb(Address(temp_reg, state_offset), |  3363   testb(Address(temp_reg, state_offset), | 
|  3368         Immediate(ClassHeapStats::TraceAllocationMask())); |  3364         Immediate(ClassHeapStats::TraceAllocationMask())); | 
|  3369   // We are tracing for this class, jump to the trace label which will use |  3365   // We are tracing for this class, jump to the trace label which will use | 
|  3370   // the allocation stub. |  3366   // the allocation stub. | 
|  3371   j(NOT_ZERO, trace, near_jump); |  3367   j(NOT_ZERO, trace, near_jump); | 
|  3372 } |  3368 } | 
|  3373  |  3369  | 
|  3374  |  3370  | 
|  3375 void Assembler::UpdateAllocationStats(intptr_t cid, |  3371 void Assembler::UpdateAllocationStats(intptr_t cid, Heap::Space space) { | 
|  3376                                       Heap::Space space) { |  | 
|  3377   ASSERT(cid > 0); |  3372   ASSERT(cid > 0); | 
|  3378   intptr_t counter_offset = |  3373   intptr_t counter_offset = | 
|  3379       ClassTable::CounterOffsetFor(cid, space == Heap::kNew); |  3374       ClassTable::CounterOffsetFor(cid, space == Heap::kNew); | 
|  3380   Register temp_reg = TMP; |  3375   Register temp_reg = TMP; | 
|  3381   LoadIsolate(temp_reg); |  3376   LoadIsolate(temp_reg); | 
|  3382   intptr_t table_offset = |  3377   intptr_t table_offset = | 
|  3383       Isolate::class_table_offset() + ClassTable::TableOffsetFor(cid); |  3378       Isolate::class_table_offset() + ClassTable::TableOffsetFor(cid); | 
|  3384   movq(temp_reg, Address(temp_reg, table_offset)); |  3379   movq(temp_reg, Address(temp_reg, table_offset)); | 
|  3385   incq(Address(temp_reg, counter_offset)); |  3380   incq(Address(temp_reg, counter_offset)); | 
|  3386 } |  3381 } | 
| (...skipping 113 matching lines...) Expand 10 before | Expand all | Expand 10 after  Loading... | 
|  3500     return; |  3495     return; | 
|  3501   } |  3496   } | 
|  3502   intptr_t bytes_needed = alignment - mod; |  3497   intptr_t bytes_needed = alignment - mod; | 
|  3503   while (bytes_needed > MAX_NOP_SIZE) { |  3498   while (bytes_needed > MAX_NOP_SIZE) { | 
|  3504     nop(MAX_NOP_SIZE); |  3499     nop(MAX_NOP_SIZE); | 
|  3505     bytes_needed -= MAX_NOP_SIZE; |  3500     bytes_needed -= MAX_NOP_SIZE; | 
|  3506   } |  3501   } | 
|  3507   if (bytes_needed) { |  3502   if (bytes_needed) { | 
|  3508     nop(bytes_needed); |  3503     nop(bytes_needed); | 
|  3509   } |  3504   } | 
|  3510   ASSERT(((offset + buffer_.GetPosition()) & (alignment-1)) == 0); |  3505   ASSERT(((offset + buffer_.GetPosition()) & (alignment - 1)) == 0); | 
|  3511 } |  3506 } | 
|  3512  |  3507  | 
|  3513  |  3508  | 
|  3514 void Assembler::EmitOperand(int rm, const Operand& operand) { |  3509 void Assembler::EmitOperand(int rm, const Operand& operand) { | 
|  3515   ASSERT(rm >= 0 && rm < 8); |  3510   ASSERT(rm >= 0 && rm < 8); | 
|  3516   const intptr_t length = operand.length_; |  3511   const intptr_t length = operand.length_; | 
|  3517   ASSERT(length > 0); |  3512   ASSERT(length > 0); | 
|  3518   // Emit the ModRM byte updated with the given RM value. |  3513   // Emit the ModRM byte updated with the given RM value. | 
|  3519   ASSERT((operand.encoding_[0] & 0x38) == 0); |  3514   ASSERT((operand.encoding_[0] & 0x38) == 0); | 
|  3520   EmitUint8(operand.encoding_[0] + (rm << 3)); |  3515   EmitUint8(operand.encoding_[0] + (rm << 3)); | 
| (...skipping 104 matching lines...) Expand 10 before | Expand all | Expand 10 after  Loading... | 
|  3625   } |  3620   } | 
|  3626   EmitUint8(0xD3); |  3621   EmitUint8(0xD3); | 
|  3627   EmitOperand(rm, Operand(operand)); |  3622   EmitOperand(rm, Operand(operand)); | 
|  3628 } |  3623 } | 
|  3629  |  3624  | 
|  3630  |  3625  | 
|  3631 void Assembler::LoadClassId(Register result, Register object) { |  3626 void Assembler::LoadClassId(Register result, Register object) { | 
|  3632   ASSERT(RawObject::kClassIdTagPos == kBitsPerInt32); |  3627   ASSERT(RawObject::kClassIdTagPos == kBitsPerInt32); | 
|  3633   ASSERT(RawObject::kClassIdTagSize == kBitsPerInt32); |  3628   ASSERT(RawObject::kClassIdTagSize == kBitsPerInt32); | 
|  3634   ASSERT(sizeof(classid_t) == sizeof(uint32_t)); |  3629   ASSERT(sizeof(classid_t) == sizeof(uint32_t)); | 
|  3635   const intptr_t class_id_offset = Object::tags_offset() + |  3630   const intptr_t class_id_offset = | 
|  3636       RawObject::kClassIdTagPos / kBitsPerByte; |  3631       Object::tags_offset() + RawObject::kClassIdTagPos / kBitsPerByte; | 
|  3637   movl(result, FieldAddress(object, class_id_offset)); |  3632   movl(result, FieldAddress(object, class_id_offset)); | 
|  3638 } |  3633 } | 
|  3639  |  3634  | 
|  3640  |  3635  | 
|  3641 void Assembler::LoadClassById(Register result, Register class_id) { |  3636 void Assembler::LoadClassById(Register result, Register class_id) { | 
|  3642   ASSERT(result != class_id); |  3637   ASSERT(result != class_id); | 
|  3643   LoadIsolate(result); |  3638   LoadIsolate(result); | 
|  3644   const intptr_t offset = |  3639   const intptr_t offset = | 
|  3645       Isolate::class_table_offset() + ClassTable::table_offset(); |  3640       Isolate::class_table_offset() + ClassTable::table_offset(); | 
|  3646   movq(result, Address(result, offset)); |  3641   movq(result, Address(result, offset)); | 
| (...skipping 13 matching lines...) Expand all  Loading... | 
|  3660 } |  3655 } | 
|  3661  |  3656  | 
|  3662  |  3657  | 
|  3663 void Assembler::SmiUntagOrCheckClass(Register object, |  3658 void Assembler::SmiUntagOrCheckClass(Register object, | 
|  3664                                      intptr_t class_id, |  3659                                      intptr_t class_id, | 
|  3665                                      Label* is_smi) { |  3660                                      Label* is_smi) { | 
|  3666   ASSERT(kSmiTagShift == 1); |  3661   ASSERT(kSmiTagShift == 1); | 
|  3667   ASSERT(RawObject::kClassIdTagPos == kBitsPerInt32); |  3662   ASSERT(RawObject::kClassIdTagPos == kBitsPerInt32); | 
|  3668   ASSERT(RawObject::kClassIdTagSize == kBitsPerInt32); |  3663   ASSERT(RawObject::kClassIdTagSize == kBitsPerInt32); | 
|  3669   ASSERT(sizeof(classid_t) == sizeof(uint32_t)); |  3664   ASSERT(sizeof(classid_t) == sizeof(uint32_t)); | 
|  3670   const intptr_t class_id_offset = Object::tags_offset() + |  3665   const intptr_t class_id_offset = | 
|  3671       RawObject::kClassIdTagPos / kBitsPerByte; |  3666       Object::tags_offset() + RawObject::kClassIdTagPos / kBitsPerByte; | 
|  3672  |  3667  | 
|  3673   // Untag optimistically. Tag bit is shifted into the CARRY. |  3668   // Untag optimistically. Tag bit is shifted into the CARRY. | 
|  3674   SmiUntag(object); |  3669   SmiUntag(object); | 
|  3675   j(NOT_CARRY, is_smi, kNearJump); |  3670   j(NOT_CARRY, is_smi, kNearJump); | 
|  3676   // Load cid: can't use LoadClassId, object is untagged. Use TIMES_2 scale |  3671   // Load cid: can't use LoadClassId, object is untagged. Use TIMES_2 scale | 
|  3677   // factor in the addressing mode to compensate for this. |  3672   // factor in the addressing mode to compensate for this. | 
|  3678   movl(TMP, Address(object, TIMES_2, class_id_offset)); |  3673   movl(TMP, Address(object, TIMES_2, class_id_offset)); | 
|  3679   cmpl(TMP, Immediate(class_id)); |  3674   cmpl(TMP, Immediate(class_id)); | 
|  3680 } |  3675 } | 
|  3681  |  3676  | 
| (...skipping 27 matching lines...) Expand all  Loading... | 
|  3709  |  3704  | 
|  3710 Address Assembler::ElementAddressForIntIndex(bool is_external, |  3705 Address Assembler::ElementAddressForIntIndex(bool is_external, | 
|  3711                                              intptr_t cid, |  3706                                              intptr_t cid, | 
|  3712                                              intptr_t index_scale, |  3707                                              intptr_t index_scale, | 
|  3713                                              Register array, |  3708                                              Register array, | 
|  3714                                              intptr_t index) { |  3709                                              intptr_t index) { | 
|  3715   if (is_external) { |  3710   if (is_external) { | 
|  3716     return Address(array, index * index_scale); |  3711     return Address(array, index * index_scale); | 
|  3717   } else { |  3712   } else { | 
|  3718     const int64_t disp = static_cast<int64_t>(index) * index_scale + |  3713     const int64_t disp = static_cast<int64_t>(index) * index_scale + | 
|  3719         Instance::DataOffsetFor(cid); |  3714                          Instance::DataOffsetFor(cid); | 
|  3720     ASSERT(Utils::IsInt(32, disp)); |  3715     ASSERT(Utils::IsInt(32, disp)); | 
|  3721     return FieldAddress(array, static_cast<int32_t>(disp)); |  3716     return FieldAddress(array, static_cast<int32_t>(disp)); | 
|  3722   } |  3717   } | 
|  3723 } |  3718 } | 
|  3724  |  3719  | 
|  3725  |  3720  | 
|  3726 static ScaleFactor ToScaleFactor(intptr_t index_scale) { |  3721 static ScaleFactor ToScaleFactor(intptr_t index_scale) { | 
|  3727   // Note that index is expected smi-tagged, (i.e., times 2) for all arrays with |  3722   // Note that index is expected smi-tagged, (i.e., times 2) for all arrays with | 
|  3728   // index scale factor > 1. E.g., for Uint8Array and OneByteString the index is |  3723   // index scale factor > 1. E.g., for Uint8Array and OneByteString the index is | 
|  3729   // expected to be untagged before accessing. |  3724   // expected to be untagged before accessing. | 
|  3730   ASSERT(kSmiTagShift == 1); |  3725   ASSERT(kSmiTagShift == 1); | 
|  3731   switch (index_scale) { |  3726   switch (index_scale) { | 
|  3732     case 1: return TIMES_1; |  3727     case 1: | 
|  3733     case 2: return TIMES_1; |  3728       return TIMES_1; | 
|  3734     case 4: return TIMES_2; |  3729     case 2: | 
|  3735     case 8: return TIMES_4; |  3730       return TIMES_1; | 
|  3736     case 16: return TIMES_8; |  3731     case 4: | 
 |  3732       return TIMES_2; | 
 |  3733     case 8: | 
 |  3734       return TIMES_4; | 
 |  3735     case 16: | 
 |  3736       return TIMES_8; | 
|  3737     default: |  3737     default: | 
|  3738       UNREACHABLE(); |  3738       UNREACHABLE(); | 
|  3739       return TIMES_1; |  3739       return TIMES_1; | 
|  3740   } |  3740   } | 
|  3741 } |  3741 } | 
|  3742  |  3742  | 
|  3743  |  3743  | 
|  3744 Address Assembler::ElementAddressForRegIndex(bool is_external, |  3744 Address Assembler::ElementAddressForRegIndex(bool is_external, | 
|  3745                                              intptr_t cid, |  3745                                              intptr_t cid, | 
|  3746                                              intptr_t index_scale, |  3746                                              intptr_t index_scale, | 
|  3747                                              Register array, |  3747                                              Register array, | 
|  3748                                              Register index) { |  3748                                              Register index) { | 
|  3749   if (is_external) { |  3749   if (is_external) { | 
|  3750     return Address(array, index, ToScaleFactor(index_scale), 0); |  3750     return Address(array, index, ToScaleFactor(index_scale), 0); | 
|  3751   } else { |  3751   } else { | 
|  3752     return FieldAddress(array, |  3752     return FieldAddress(array, index, ToScaleFactor(index_scale), | 
|  3753                         index, |  | 
|  3754                         ToScaleFactor(index_scale), |  | 
|  3755                         Instance::DataOffsetFor(cid)); |  3753                         Instance::DataOffsetFor(cid)); | 
|  3756   } |  3754   } | 
|  3757 } |  3755 } | 
|  3758  |  3756  | 
|  3759  |  3757  | 
|  3760 static const char* cpu_reg_names[kNumberOfCpuRegisters] = { |  3758 static const char* cpu_reg_names[kNumberOfCpuRegisters] = { | 
|  3761   "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi", |  3759     "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi", | 
|  3762   "r8", "r9", "r10", "r11", "r12", "r13", "thr", "pp" |  3760     "r8",  "r9",  "r10", "r11", "r12", "r13", "thr", "pp"}; | 
|  3763 }; |  | 
|  3764  |  3761  | 
|  3765  |  3762  | 
|  3766 const char* Assembler::RegisterName(Register reg) { |  3763 const char* Assembler::RegisterName(Register reg) { | 
|  3767   ASSERT((0 <= reg) && (reg < kNumberOfCpuRegisters)); |  3764   ASSERT((0 <= reg) && (reg < kNumberOfCpuRegisters)); | 
|  3768   return cpu_reg_names[reg]; |  3765   return cpu_reg_names[reg]; | 
|  3769 } |  3766 } | 
|  3770  |  3767  | 
|  3771  |  3768  | 
|  3772 static const char* xmm_reg_names[kNumberOfXmmRegisters] = { |  3769 static const char* xmm_reg_names[kNumberOfXmmRegisters] = { | 
|  3773   "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7", |  3770     "xmm0", "xmm1", "xmm2",  "xmm3",  "xmm4",  "xmm5",  "xmm6",  "xmm7", | 
|  3774   "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15" |  3771     "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15"}; | 
|  3775 }; |  | 
|  3776  |  3772  | 
|  3777  |  3773  | 
|  3778 const char* Assembler::FpuRegisterName(FpuRegister reg) { |  3774 const char* Assembler::FpuRegisterName(FpuRegister reg) { | 
|  3779   ASSERT((0 <= reg) && (reg < kNumberOfXmmRegisters)); |  3775   ASSERT((0 <= reg) && (reg < kNumberOfXmmRegisters)); | 
|  3780   return xmm_reg_names[reg]; |  3776   return xmm_reg_names[reg]; | 
|  3781 } |  3777 } | 
|  3782  |  3778  | 
|  3783 }  // namespace dart |  3779 }  // namespace dart | 
|  3784  |  3780  | 
|  3785 #endif  // defined TARGET_ARCH_X64 |  3781 #endif  // defined TARGET_ARCH_X64 | 
| OLD | NEW |