| OLD | NEW |
| 1 // Copyright 2009 the V8 project authors. All rights reserved. | 1 // Copyright 2009 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 20 matching lines...) |
| 31 #include "codegen-inl.h" | 31 #include "codegen-inl.h" |
| 32 #include "assembler-x64.h" | 32 #include "assembler-x64.h" |
| 33 #include "macro-assembler-x64.h" | 33 #include "macro-assembler-x64.h" |
| 34 #include "serialize.h" | 34 #include "serialize.h" |
| 35 #include "debug.h" | 35 #include "debug.h" |
| 36 | 36 |
| 37 namespace v8 { | 37 namespace v8 { |
| 38 namespace internal { | 38 namespace internal { |
| 39 | 39 |
| 40 MacroAssembler::MacroAssembler(void* buffer, int size) | 40 MacroAssembler::MacroAssembler(void* buffer, int size) |
| 41 : Assembler(buffer, size), | 41 : Assembler(buffer, size), |
| 42 unresolved_(0), | 42 unresolved_(0), |
| 43 generating_stub_(false), | 43 generating_stub_(false), |
| 44 allow_stub_calls_(true), | 44 allow_stub_calls_(true), |
| 45 code_object_(Heap::undefined_value()) { | 45 code_object_(Heap::undefined_value()) { |
| 46 } | 46 } |
| 47 | 47 |
| 48 | 48 |
| 49 void MacroAssembler::LoadRoot(Register destination, | 49 void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) { |
| 50 Heap::RootListIndex index) { | |
| 51 movq(destination, Operand(r13, index << kPointerSizeLog2)); | 50 movq(destination, Operand(r13, index << kPointerSizeLog2)); |
| 52 } | 51 } |
| 53 | 52 |
| 54 | 53 |
| 55 void MacroAssembler::PushRoot(Heap::RootListIndex index) { | 54 void MacroAssembler::PushRoot(Heap::RootListIndex index) { |
| 56 push(Operand(r13, index << kPointerSizeLog2)); | 55 push(Operand(r13, index << kPointerSizeLog2)); |
| 57 } | 56 } |
| 58 | 57 |
| 59 | 58 |
| 60 void MacroAssembler::CompareRoot(Register with, | 59 void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) { |
| 61 Heap::RootListIndex index) { | |
| 62 cmpq(with, Operand(r13, index << kPointerSizeLog2)); | 60 cmpq(with, Operand(r13, index << kPointerSizeLog2)); |
| 63 } | 61 } |
| 64 | 62 |
| 65 | 63 |
| 66 void MacroAssembler::CompareRoot(Operand with, | 64 void MacroAssembler::CompareRoot(Operand with, Heap::RootListIndex index) { |
| 67 Heap::RootListIndex index) { | |
| 68 LoadRoot(kScratchRegister, index); | 65 LoadRoot(kScratchRegister, index); |
| 69 cmpq(with, kScratchRegister); | 66 cmpq(with, kScratchRegister); |
| 70 } | 67 } |
| 71 | 68 |
| 72 | 69 |
| 73 static void RecordWriteHelper(MacroAssembler* masm, | 70 static void RecordWriteHelper(MacroAssembler* masm, |
| 74 Register object, | 71 Register object, |
| 75 Register addr, | 72 Register addr, |
| 76 Register scratch) { | 73 Register scratch) { |
| 77 Label fast; | 74 Label fast; |
| (...skipping 13 matching lines...) |
| 91 | 88 |
| 92 // If the bit offset lies beyond the normal remembered set range, it is in | 89 // If the bit offset lies beyond the normal remembered set range, it is in |
| 93 // the extra remembered set area of a large object. | 90 // the extra remembered set area of a large object. |
| 94 masm->cmpq(pointer_offset, Immediate(Page::kPageSize / kPointerSize)); | 91 masm->cmpq(pointer_offset, Immediate(Page::kPageSize / kPointerSize)); |
| 95 masm->j(less, &fast); | 92 masm->j(less, &fast); |
| 96 | 93 |
| 97 // Adjust 'page_start' so that addressing using 'pointer_offset' hits the | 94 // Adjust 'page_start' so that addressing using 'pointer_offset' hits the |
| 98 // extra remembered set after the large object. | 95 // extra remembered set after the large object. |
| 99 | 96 |
| 100 // Load the array length into 'scratch'. | 97 // Load the array length into 'scratch'. |
| 101 masm->movl(scratch, | 98 masm->movl(scratch, Operand(page_start, Page::kObjectStartOffset |
| 102 Operand(page_start, | 99 + FixedArray::kLengthOffset)); |
| 103 Page::kObjectStartOffset + FixedArray::kLengthOffset)); | |
| 104 Register array_length = scratch; | 100 Register array_length = scratch; |
| 105 | 101 |
| 106 // Extra remembered set starts right after the large object (a FixedArray), at | 102 // Extra remembered set starts right after the large object (a FixedArray), at |
| 107 // page_start + kObjectStartOffset + objectSize | 103 // page_start + kObjectStartOffset + objectSize |
| 108 // where objectSize is FixedArray::kHeaderSize + kPointerSize * array_length. | 104 // where objectSize is FixedArray::kHeaderSize + kPointerSize * array_length. |
| 109 // Add the delta between the end of the normal RSet and the start of the | 105 // Add the delta between the end of the normal RSet and the start of the |
| 110 // extra RSet to 'page_start', so that addressing the bit using | 106 // extra RSet to 'page_start', so that addressing the bit using |
| 111 // 'pointer_offset' hits the extra RSet words. | 107 // 'pointer_offset' hits the extra RSet words. |
| 112 masm->lea(page_start, | 108 masm->lea(page_start, Operand(page_start, array_length, times_pointer_size, |
| 113 Operand(page_start, array_length, times_pointer_size, | 109 Page::kObjectStartOffset |
| 114 Page::kObjectStartOffset + FixedArray::kHeaderSize | 110 + FixedArray::kHeaderSize |
| 115 - Page::kRSetEndOffset)); | 111 - Page::kRSetEndOffset)); |
| 116 | 112 |
| 117 // NOTE: For now, we use the bit-test-and-set (bts) x86 instruction | 113 // NOTE: For now, we use the bit-test-and-set (bts) x86 instruction |
| 118 // to limit code size. We should probably evaluate this decision by | 114 // to limit code size. We should probably evaluate this decision by |
| 119 // measuring the performance of an equivalent implementation using | 115 // measuring the performance of an equivalent implementation using |
| 120 // "simpler" instructions | 116 // "simpler" instructions |
| 121 masm->bind(&fast); | 117 masm->bind(&fast); |
| 122 masm->bts(Operand(page_start, Page::kRSetOffset), pointer_offset); | 118 masm->bts(Operand(page_start, Page::kRSetOffset), pointer_offset); |
| 123 } | 119 } |
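For reference, a rough sketch of the address arithmetic the helper relies on. The actual computation of page_start and pointer_offset happens in the lines elided above; this is only an illustration, assuming the usual meanings of Page::kPageAlignmentMask, kPointerSizeLog2 and Page::kRSetOffset:

    // Illustrative only: how a slot address maps to a remembered-set bit.
    uintptr_t page_start = addr & ~Page::kPageAlignmentMask;  // page containing the slot
    uintptr_t pointer_offset = (addr & Page::kPageAlignmentMask) >> kPointerSizeLog2;
    // Normal case: bit 'pointer_offset' of the bitmap at page_start + Page::kRSetOffset,
    // which is what bts(Operand(page_start, Page::kRSetOffset), pointer_offset) sets.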
| 124 | 120 |
| 125 | 121 |
| 126 class RecordWriteStub : public CodeStub { | 122 class RecordWriteStub : public CodeStub { |
| 127 public: | 123 public: |
| 128 RecordWriteStub(Register object, Register addr, Register scratch) | 124 RecordWriteStub(Register object, Register addr, Register scratch) |
| 129 : object_(object), addr_(addr), scratch_(scratch) { } | 125 : object_(object), addr_(addr), scratch_(scratch) { |
| 126 } |
| 130 | 127 |
| 131 void Generate(MacroAssembler* masm); | 128 void Generate(MacroAssembler* masm); |
| 132 | 129 |
| 133 private: | 130 private: |
| 134 Register object_; | 131 Register object_; |
| 135 Register addr_; | 132 Register addr_; |
| 136 Register scratch_; | 133 Register scratch_; |
| 137 | 134 |
| 138 #ifdef DEBUG | 135 #ifdef DEBUG |
| 139 void Print() { | 136 void Print() { |
| 140 PrintF("RecordWriteStub (object reg %d), (addr reg %d), (scratch reg %d)\n", | 137 PrintF("RecordWriteStub (object reg %d), (addr reg %d), (scratch reg %d)\n", |
| 141 object_.code(), addr_.code(), scratch_.code()); | 138 object_.code(), addr_.code(), scratch_.code()); |
| 142 } | 139 } |
| 143 #endif | 140 #endif |
| 144 | 141 |
| 145 // Minor key encoding in 12 bits of three registers (object, address and | 142 // Minor key encoding in 12 bits of three registers (object, address and |
| 146 // scratch) OOOOAAAASSSS. | 143 // scratch) OOOOAAAASSSS. |
| 147 class ScratchBits: public BitField<uint32_t, 0, 4> {}; | 144 class ScratchBits : public BitField<uint32_t, 0, 4> {}; |
| 148 class AddressBits: public BitField<uint32_t, 4, 4> {}; | 145 class AddressBits : public BitField<uint32_t, 4, 4> {}; |
| 149 class ObjectBits: public BitField<uint32_t, 8, 4> {}; | 146 class ObjectBits : public BitField<uint32_t, 8, 4> {}; |
| 150 | 147 |
| 151 Major MajorKey() { return RecordWrite; } | 148 Major MajorKey() { |
| 149 return RecordWrite; |
| 150 } |
| 152 | 151 |
| 153 int MinorKey() { | 152 int MinorKey() { |
| 154 // Encode the registers. | 153 // Encode the registers. |
| 155 return ObjectBits::encode(object_.code()) | | 154 return ObjectBits::encode(object_.code()) | |
| 156 AddressBits::encode(addr_.code()) | | 155 AddressBits::encode(addr_.code()) | |
| 157 ScratchBits::encode(scratch_.code()); | 156 ScratchBits::encode(scratch_.code()); |
| 158 } | 157 } |
| 159 }; | 158 }; |
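A quick illustration of the OOOOAAAASSSS packing, with register codes chosen arbitrarily (r8 = 8, rcx = 1, rbx = 3):

    // Hypothetical values, only to show the bit layout of MinorKey().
    int minor_key = ObjectBits::encode(8)    // object register code, bits 8-11
                  | AddressBits::encode(1)   // address register code, bits 4-7
                  | ScratchBits::encode(3);  // scratch register code, bits 0-3
    // minor_key == (8 << 8) | (1 << 4) | 3 == 0x813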
| 160 | 159 |
| 161 | |
| 162 void RecordWriteStub::Generate(MacroAssembler* masm) { | 160 void RecordWriteStub::Generate(MacroAssembler* masm) { |
| 163 RecordWriteHelper(masm, object_, addr_, scratch_); | 161 RecordWriteHelper(masm, object_, addr_, scratch_); |
| 164 masm->ret(0); | 162 masm->ret(0); |
| 165 } | 163 } |
| 166 | 164 |
| 167 | |
| 168 // Set the remembered set bit for [object+offset]. | 165 // Set the remembered set bit for [object+offset]. |
| 169 // object is the object being stored into, value is the object being stored. | 166 // object is the object being stored into, value is the object being stored. |
| 170 // If offset is zero, then the scratch register contains the array index into | 167 // If offset is zero, then the smi_index register contains the array index into |
| 171 // the elements array represented as a Smi. | 168 // the elements array represented as a smi. Otherwise it can be used as a |
| 169 // scratch register. |
| 172 // All registers are clobbered by the operation. | 170 // All registers are clobbered by the operation. |
| 173 void MacroAssembler::RecordWrite(Register object, | 171 void MacroAssembler::RecordWrite(Register object, |
| 174 int offset, | 172 int offset, |
| 175 Register value, | 173 Register value, |
| 176 Register scratch) { | 174 Register smi_index) { |
| 177 // First, check if a remembered set write is even needed. The tests below | 175 // First, check if a remembered set write is even needed. The tests below |
| 178 // catch stores of Smis and stores into young gen (which does not have space | 176 // catch stores of Smis and stores into young gen (which does not have space |
| 179 // for the remembered set bits). | 177 // for the remembered set bits). |
| 180 Label done; | 178 Label done; |
| 179 JumpIfSmi(value, &done); |
| 181 | 180 |
| 181 RecordWriteNonSmi(object, offset, value, smi_index); |
| 182 bind(&done); |
| 183 } |
| 184 |
| 185 |
| 186 void MacroAssembler::RecordWriteNonSmi(Register object, |
| 187 int offset, |
| 188 Register scratch, |
| 189 Register smi_index) { |
| 190 Label done; |
| 182 // Test that the object address is not in the new space. We cannot | 191 // Test that the object address is not in the new space. We cannot |
| 183 // set remembered set bits in the new space. | 192 // set remembered set bits in the new space. |
| 184 movq(value, object); | 193 movq(scratch, object); |
| 185 ASSERT(is_int32(static_cast<int64_t>(Heap::NewSpaceMask()))); | 194 ASSERT(is_int32(static_cast<int64_t>(Heap::NewSpaceMask()))); |
| 186 and_(value, Immediate(static_cast<int32_t>(Heap::NewSpaceMask()))); | 195 and_(scratch, Immediate(static_cast<int32_t>(Heap::NewSpaceMask()))); |
| 187 movq(kScratchRegister, ExternalReference::new_space_start()); | 196 movq(kScratchRegister, ExternalReference::new_space_start()); |
| 188 cmpq(value, kScratchRegister); | 197 cmpq(scratch, kScratchRegister); |
| 189 j(equal, &done); | 198 j(equal, &done); |
| 190 | 199 |
| 191 if ((offset > 0) && (offset < Page::kMaxHeapObjectSize)) { | 200 if ((offset > 0) && (offset < Page::kMaxHeapObjectSize)) { |
| 192 // Compute the bit offset in the remembered set, leave it in 'value'. | 201 // Compute the bit offset in the remembered set, leave it in 'scratch'. |
| 193 lea(value, Operand(object, offset)); | 202 lea(scratch, Operand(object, offset)); |
| 194 ASSERT(is_int32(Page::kPageAlignmentMask)); | 203 ASSERT(is_int32(Page::kPageAlignmentMask)); |
| 195 and_(value, Immediate(static_cast<int32_t>(Page::kPageAlignmentMask))); | 204 and_(scratch, Immediate(static_cast<int32_t>(Page::kPageAlignmentMask))); |
| 196 shr(value, Immediate(kObjectAlignmentBits)); | 205 shr(scratch, Immediate(kObjectAlignmentBits)); |
| 197 | 206 |
| 198 // Compute the page address from the heap object pointer, leave it in | 207 // Compute the page address from the heap object pointer, leave it in |
| 199 // 'object' (immediate value is sign extended). | 208 // 'object' (immediate value is sign extended). |
| 200 and_(object, Immediate(~Page::kPageAlignmentMask)); | 209 and_(object, Immediate(~Page::kPageAlignmentMask)); |
| 201 | 210 |
| 202 // NOTE: For now, we use the bit-test-and-set (bts) x86 instruction | 211 // NOTE: For now, we use the bit-test-and-set (bts) x86 instruction |
| 203 // to limit code size. We should probably evaluate this decision by | 212 // to limit code size. We should probably evaluate this decision by |
| 204 // measuring the performance of an equivalent implementation using | 213 // measuring the performance of an equivalent implementation using |
| 205 // "simpler" instructions | 214 // "simpler" instructions |
| 206 bts(Operand(object, Page::kRSetOffset), value); | 215 bts(Operand(object, Page::kRSetOffset), scratch); |
| 207 } else { | 216 } else { |
| 208 Register dst = scratch; | 217 Register dst = smi_index; |
| 209 if (offset != 0) { | 218 if (offset != 0) { |
| 210 lea(dst, Operand(object, offset)); | 219 lea(dst, Operand(object, offset)); |
| 211 } else { | 220 } else { |
| 212 // array access: calculate the destination address in the same manner as | 221 // array access: calculate the destination address in the same manner as |
| 213 // KeyedStoreIC::GenerateGeneric. Multiply a smi by 4 to get an offset | 222 // KeyedStoreIC::GenerateGeneric. |
| 214 // into an array of pointers. | 223 SmiIndex index = SmiToIndex(smi_index, smi_index, kPointerSizeLog2); |
| 215 lea(dst, Operand(object, dst, times_half_pointer_size, | 224 lea(dst, Operand(object, |
| 225 index.reg, |
| 226 index.scale, |
| 216 FixedArray::kHeaderSize - kHeapObjectTag)); | 227 FixedArray::kHeaderSize - kHeapObjectTag)); |
| 217 } | 228 } |
| 218 // If we are already generating a shared stub, not inlining the | 229 // If we are already generating a shared stub, not inlining the |
| 219 // record write code isn't going to save us any memory. | 230 // record write code isn't going to save us any memory. |
| 220 if (generating_stub()) { | 231 if (generating_stub()) { |
| 221 RecordWriteHelper(this, object, dst, value); | 232 RecordWriteHelper(this, object, dst, scratch); |
| 222 } else { | 233 } else { |
| 223 RecordWriteStub stub(object, dst, value); | 234 RecordWriteStub stub(object, dst, scratch); |
| 224 CallStub(&stub); | 235 CallStub(&stub); |
| 225 } | 236 } |
| 226 } | 237 } |
| 227 | 238 |
| 228 bind(&done); | 239 bind(&done); |
| 229 } | 240 } |
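A hedged usage sketch of the new interface, not taken from this patch; 'offset' and the register choices are placeholders, and per the comment above all registers involved are clobbered:

    // Fixed-offset store: the last argument is only used as a scratch register.
    masm->movq(FieldOperand(rbx, offset), rax);
    masm->RecordWrite(rbx, offset, rax, rcx);

    // Keyed store into the elements array: offset is 0 and the last argument
    // holds the element index as a smi.
    masm->RecordWrite(rbx, 0, rax, rdx);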
| 230 | 241 |
| 231 | 242 |
| 232 void MacroAssembler::Assert(Condition cc, const char* msg) { | 243 void MacroAssembler::Assert(Condition cc, const char* msg) { |
| 233 if (FLAG_debug_code) Check(cc, msg); | 244 if (FLAG_debug_code) |
| 245 Check(cc, msg); |
| 234 } | 246 } |
| 235 | 247 |
| 236 | 248 |
| 237 void MacroAssembler::Check(Condition cc, const char* msg) { | 249 void MacroAssembler::Check(Condition cc, const char* msg) { |
| 238 Label L; | 250 Label L; |
| 239 j(cc, &L); | 251 j(cc, &L); |
| 240 Abort(msg); | 252 Abort(msg); |
| 241 // will not return here | 253 // will not return here |
| 242 bind(&L); | 254 bind(&L); |
| 243 } | 255 } |
| 244 | 256 |
| 245 | 257 |
| 246 void MacroAssembler::NegativeZeroTest(Register result, | 258 void MacroAssembler::NegativeZeroTest(Register result, Register op, |
| 247 Register op, | |
| 248 Label* then_label) { | 259 Label* then_label) { |
| 249 Label ok; | 260 Label ok; |
| 250 testl(result, result); | 261 testl(result, result); |
| 251 j(not_zero, &ok); | 262 j(not_zero, &ok); |
| 252 testl(op, op); | 263 testl(op, op); |
| 253 j(sign, then_label); | 264 j(sign, then_label); |
| 254 bind(&ok); | 265 bind(&ok); |
| 255 } | 266 } |
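NegativeZeroTest guards the -0 case after an integer multiply or modulus: a zero result is only a valid smi if it did not come from a negative operand. In plain C++ terms the branch is roughly:

    // Illustrative equivalent of NegativeZeroTest.
    if (result == 0 && op < 0) {
      // The exact result is -0, which a smi cannot represent;
      // continue at then_label (the slow, heap-number path).
    }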
| 256 | 267 |
| 257 | 268 |
| (...skipping 60 matching lines...) |
| 318 } | 329 } |
| 319 | 330 |
| 320 Runtime::FunctionId function_id = | 331 Runtime::FunctionId function_id = |
| 321 static_cast<Runtime::FunctionId>(f->stub_id); | 332 static_cast<Runtime::FunctionId>(f->stub_id); |
| 322 RuntimeStub stub(function_id, num_arguments); | 333 RuntimeStub stub(function_id, num_arguments); |
| 323 CallStub(&stub); | 334 CallStub(&stub); |
| 324 } | 335 } |
| 325 | 336 |
| 326 | 337 |
| 327 void MacroAssembler::TailCallRuntime(ExternalReference const& ext, | 338 void MacroAssembler::TailCallRuntime(ExternalReference const& ext, |
| 328 int num_arguments, | 339 int num_arguments, int result_size) { |
| 329 int result_size) { | |
| 330 // ----------- S t a t e ------------- | 340 // ----------- S t a t e ------------- |
| 331 // -- rsp[0] : return address | 341 // -- rsp[0] : return address |
| 332 // -- rsp[8] : argument num_arguments - 1 | 342 // -- rsp[8] : argument num_arguments - 1 |
| 333 // ... | 343 // ... |
| 334 // -- rsp[8 * num_arguments] : argument 0 (receiver) | 344 // -- rsp[8 * num_arguments] : argument 0 (receiver) |
| 335 // ----------------------------------- | 345 // ----------------------------------- |
| 336 | 346 |
| 337 // TODO(1236192): Most runtime routines don't need the number of | 347 // TODO(1236192): Most runtime routines don't need the number of |
| 338 // arguments passed in because it is constant. At some point we | 348 // arguments passed in because it is constant. At some point we |
| 339 // should remove this need and make the runtime routine entry code | 349 // should remove this need and make the runtime routine entry code |
| (...skipping 23 matching lines...) |
| 363 if (!resolved) { | 373 if (!resolved) { |
| 364 uint32_t flags = | 374 uint32_t flags = |
| 365 Bootstrapper::FixupFlagsArgumentsCount::encode(argc) | | 375 Bootstrapper::FixupFlagsArgumentsCount::encode(argc) | |
| 366 Bootstrapper::FixupFlagsUseCodeObject::encode(true); | 376 Bootstrapper::FixupFlagsUseCodeObject::encode(true); |
| 367 Unresolved entry = { pc_offset() - sizeof(intptr_t), flags, name }; | 377 Unresolved entry = { pc_offset() - sizeof(intptr_t), flags, name }; |
| 368 unresolved_.Add(entry); | 378 unresolved_.Add(entry); |
| 369 } | 379 } |
| 370 addq(target, Immediate(Code::kHeaderSize - kHeapObjectTag)); | 380 addq(target, Immediate(Code::kHeaderSize - kHeapObjectTag)); |
| 371 } | 381 } |
| 372 | 382 |
| 373 | |
| 374 Handle<Code> MacroAssembler::ResolveBuiltin(Builtins::JavaScript id, | 383 Handle<Code> MacroAssembler::ResolveBuiltin(Builtins::JavaScript id, |
| 375 bool* resolved) { | 384 bool* resolved) { |
| 376 // Move the builtin function into the temporary function slot by | 385 // Move the builtin function into the temporary function slot by |
| 377 // reading it from the builtins object. NOTE: We should be able to | 386 // reading it from the builtins object. NOTE: We should be able to |
| 378 // reduce this to two instructions by putting the function table in | 387 // reduce this to two instructions by putting the function table in |
| 379 // the global object instead of the "builtins" object and by using a | 388 // the global object instead of the "builtins" object and by using a |
| 380 // real register for the function. | 389 // real register for the function. |
| 381 movq(rdx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX))); | 390 movq(rdx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX))); |
| 382 movq(rdx, FieldOperand(rdx, GlobalObject::kBuiltinsOffset)); | 391 movq(rdx, FieldOperand(rdx, GlobalObject::kBuiltinsOffset)); |
| 383 int builtins_offset = | 392 int builtins_offset = |
| 384 JSBuiltinsObject::kJSBuiltinsOffset + (id * kPointerSize); | 393 JSBuiltinsObject::kJSBuiltinsOffset + (id * kPointerSize); |
| 385 movq(rdi, FieldOperand(rdx, builtins_offset)); | 394 movq(rdi, FieldOperand(rdx, builtins_offset)); |
| 386 | 395 |
| 387 | |
| 388 return Builtins::GetCode(id, resolved); | 396 return Builtins::GetCode(id, resolved); |
| 389 } | 397 } |
| 390 | 398 |
| 391 | 399 |
| 392 void MacroAssembler::Set(Register dst, int64_t x) { | 400 void MacroAssembler::Set(Register dst, int64_t x) { |
| 393 if (x == 0) { | 401 if (x == 0) { |
| 394 xor_(dst, dst); | 402 xor_(dst, dst); |
| 395 } else if (is_int32(x)) { | 403 } else if (is_int32(x)) { |
| 396 movq(dst, Immediate(x)); | 404 movq(dst, Immediate(x)); |
| 397 } else if (is_uint32(x)) { | 405 } else if (is_uint32(x)) { |
| (...skipping 11 matching lines...) |
| 409 } else if (is_int32(x)) { | 417 } else if (is_int32(x)) { |
| 410 movq(dst, Immediate(x)); | 418 movq(dst, Immediate(x)); |
| 411 } else if (is_uint32(x)) { | 419 } else if (is_uint32(x)) { |
| 412 movl(dst, Immediate(x)); | 420 movl(dst, Immediate(x)); |
| 413 } else { | 421 } else { |
| 414 movq(kScratchRegister, x, RelocInfo::NONE); | 422 movq(kScratchRegister, x, RelocInfo::NONE); |
| 415 movq(dst, kScratchRegister); | 423 movq(dst, kScratchRegister); |
| 416 } | 424 } |
| 417 } | 425 } |
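Set picks the shortest encoding that still produces the full 64-bit constant. A few sample calls (illustrative; the register and constant choices are arbitrary):

    // Set(rax, 0)            -> xor rax, rax          (cheapest way to clear the register)
    // Set(rax, -1)           -> movq rax, imm32       (sign-extended 32-bit immediate)
    // Set(rax, 0x80000000)   -> movl eax, imm32       (zero-extends into the full register)
    // Set(rax, 0x123456789)  -> movq rax, imm64       (full 64-bit immediate)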
| 418 | 426 |
| 419 | |
| 420 // ---------------------------------------------------------------------------- | 427 // ---------------------------------------------------------------------------- |
| 421 // Smi tagging, untagging and tag detection. | 428 // Smi tagging, untagging and tag detection. |
| 422 | 429 |
| 430 #ifdef V8_LONG_SMI |
| 431 |
| 432 static int kSmiShift = kSmiTagSize + kSmiShiftSize; |
| 423 | 433 |
| 424 void MacroAssembler::Integer32ToSmi(Register dst, Register src) { | 434 void MacroAssembler::Integer32ToSmi(Register dst, Register src) { |
| 425 ASSERT_EQ(1, kSmiTagSize); | |
| 426 ASSERT_EQ(0, kSmiTag); | 435 ASSERT_EQ(0, kSmiTag); |
| 427 #ifdef DEBUG | 436 if (!dst.is(src)) { |
| 428 cmpq(src, Immediate(0xC0000000u)); | 437 movl(dst, src); |
| 429 Check(positive, "Smi conversion overflow"); | |
| 430 #endif | |
| 431 if (dst.is(src)) { | |
| 432 addl(dst, src); | |
| 433 } else { | |
| 434 lea(dst, Operand(src, src, times_1, 0)); | |
| 435 } | 438 } |
| 439 shl(dst, Immediate(kSmiShift)); |
| 436 } | 440 } |
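Under V8_LONG_SMI the 32-bit value is stored in the upper half of the word (kSmiShift == kSmiTagSize + kSmiShiftSize == 32), so tagging a 32-bit integer can never overflow. As arithmetic:

    // Long smi encoding (kSmiShift == 32):
    //   tagged(5)  == int64_t(5)  << 32 == 0x0000000500000000
    //   tagged(-1) == int64_t(-1) << 32 == 0xFFFFFFFF00000000
    // The low 32 bits, including the tag bit, are zero for every smi.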
| 437 | 441 |
| 438 | 442 |
| 439 void MacroAssembler::Integer32ToSmi(Register dst, | 443 void MacroAssembler::Integer32ToSmi(Register dst, |
| 440 Register src, | 444 Register src, |
| 441 Label* on_overflow) { | 445 Label* on_overflow) { |
| 442 ASSERT_EQ(1, kSmiTagSize); | |
| 443 ASSERT_EQ(0, kSmiTag); | 446 ASSERT_EQ(0, kSmiTag); |
| 447 // 32-bit integer always fits in a long smi. |
| 444 if (!dst.is(src)) { | 448 if (!dst.is(src)) { |
| 445 movl(dst, src); | 449 movl(dst, src); |
| 446 } | 450 } |
| 447 addl(dst, src); | 451 shl(dst, Immediate(kSmiShift)); |
| 448 j(overflow, on_overflow); | |
| 449 } | 452 } |
| 450 | 453 |
| 451 | 454 |
| 452 void MacroAssembler::Integer64AddToSmi(Register dst, | 455 void MacroAssembler::Integer64PlusConstantToSmi(Register dst, |
| 453 Register src, | 456 Register src, |
| 454 int constant) { | 457 int constant) { |
| 455 #ifdef DEBUG | 458 if (dst.is(src)) { |
| 456 movl(kScratchRegister, src); | 459 addq(dst, Immediate(constant)); |
| 457 addl(kScratchRegister, Immediate(constant)); | 460 } else { |
| 458 Check(no_overflow, "Add-and-smi-convert overflow"); | 461 lea(dst, Operand(src, constant)); |
| 459 Condition valid = CheckInteger32ValidSmiValue(kScratchRegister); | 462 } |
| 460 Check(valid, "Add-and-smi-convert overflow"); | 463 shl(dst, Immediate(kSmiShift)); |
| 461 #endif | |
| 462 lea(dst, Operand(src, src, times_1, constant << kSmiTagSize)); | |
| 463 } | 464 } |
| 464 | 465 |
| 465 | 466 |
| 466 void MacroAssembler::SmiToInteger32(Register dst, Register src) { | 467 void MacroAssembler::SmiToInteger32(Register dst, Register src) { |
| 467 ASSERT_EQ(1, kSmiTagSize); | |
| 468 ASSERT_EQ(0, kSmiTag); | 468 ASSERT_EQ(0, kSmiTag); |
| 469 if (!dst.is(src)) { | 469 if (!dst.is(src)) { |
| 470 movl(dst, src); | 470 movq(dst, src); |
| 471 } | 471 } |
| 472 sarl(dst, Immediate(kSmiTagSize)); | 472 shr(dst, Immediate(kSmiShift)); |
| 473 } | 473 } |
| 474 | 474 |
| 475 | 475 |
| 476 void MacroAssembler::SmiToInteger64(Register dst, Register src) { | 476 void MacroAssembler::SmiToInteger64(Register dst, Register src) { |
| 477 ASSERT_EQ(1, kSmiTagSize); | |
| 478 ASSERT_EQ(0, kSmiTag); | 477 ASSERT_EQ(0, kSmiTag); |
| 479 movsxlq(dst, src); | 478 if (!dst.is(src)) { |
| 480 sar(dst, Immediate(kSmiTagSize)); | 479 movq(dst, src); |
| 480 } |
| 481 sar(dst, Immediate(kSmiShift)); |
| 481 } | 482 } |
| 482 | 483 |
| 483 | 484 |
| 485 void MacroAssembler::SmiTest(Register src) { |
| 486 testq(src, src); |
| 487 } |
| 488 |
| 489 |
| 490 void MacroAssembler::SmiCompare(Register dst, Register src) { |
| 491 cmpq(dst, src); |
| 492 } |
| 493 |
| 494 |
| 495 void MacroAssembler::SmiCompare(Register dst, Smi* src) { |
| 496 ASSERT(!dst.is(kScratchRegister)); |
| 497 if (src->value() == 0) { |
| 498 testq(dst, dst); |
| 499 } else { |
| 500 Move(kScratchRegister, src); |
| 501 cmpq(dst, kScratchRegister); |
| 502 } |
| 503 } |
| 504 |
| 505 |
| 506 void MacroAssembler::SmiCompare(const Operand& dst, Register src) { |
| 507 cmpq(dst, src); |
| 508 } |
| 509 |
| 510 |
| 511 void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) { |
| 512 if (src->value() == 0) { |
| 513 // Only the tagged long smi zero has a 32-bit representation. |
| 514 cmpq(dst, Immediate(0)); |
| 515 } else { |
| 516 Move(kScratchRegister, src); |
| 517 cmpq(dst, kScratchRegister); |
| 518 } |
| 519 } |
| 520 |
| 521 |
| 484 void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst, | 522 void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst, |
| 485 Register src, | 523 Register src, |
| 486 int power) { | 524 int power) { |
| 487 ASSERT(power >= 0); | 525 ASSERT(power >= 0); |
| 488 ASSERT(power < 64); | 526 ASSERT(power < 64); |
| 489 if (power == 0) { | 527 if (power == 0) { |
| 490 SmiToInteger64(dst, src); | 528 SmiToInteger64(dst, src); |
| 491 return; | 529 return; |
| 492 } | 530 } |
| 531 if (!dst.is(src)) { |
| 532 movq(dst, src); |
| 533 } |
| 534 if (power < kSmiShift) { |
| 535 sar(dst, Immediate(kSmiShift - power)); |
| 536 } else if (power > kSmiShift) { |
| 537 shl(dst, Immediate(power - kSmiShift)); |
| 538 } |
| 539 } |
| 540 |
| 541 |
| 542 Condition MacroAssembler::CheckSmi(Register src) { |
| 543 ASSERT_EQ(0, kSmiTag); |
| 544 testb(src, Immediate(kSmiTagMask)); |
| 545 return zero; |
| 546 } |
| 547 |
| 548 |
| 549 Condition MacroAssembler::CheckPositiveSmi(Register src) { |
| 550 ASSERT_EQ(0, kSmiTag); |
| 551 movq(kScratchRegister, src); |
| 552 rol(kScratchRegister, Immediate(1)); |
| 553 testl(kScratchRegister, Immediate(0x03)); |
| 554 return zero; |
| 555 } |
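The rotate-by-one trick folds the two bits of interest, the sign bit (bit 63) and the tag bit (bit 0), into the low two bits so a single test covers both. Illustrative C++:

    // After rotating left by one, bit 0 is the old sign bit and bit 1 the old tag bit.
    uint64_t rotated = (x << 1) | (x >> 63);
    bool is_positive_smi = (rotated & 3) == 0;  // non-negative and smi-tagged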
| 556 |
| 557 |
| 558 Condition MacroAssembler::CheckBothSmi(Register first, Register second) { |
| 559 if (first.is(second)) { |
| 560 return CheckSmi(first); |
| 561 } |
| 562 movl(kScratchRegister, first); |
| 563 orl(kScratchRegister, second); |
| 564 testb(kScratchRegister, Immediate(kSmiTagMask)); |
| 565 return zero; |
| 566 } |
| 567 |
| 568 |
| 569 Condition MacroAssembler::CheckIsMinSmi(Register src) { |
| 570 ASSERT(kSmiTag == 0 && kSmiTagSize == 1); |
| 571 movq(kScratchRegister, src); |
| 572 rol(kScratchRegister, Immediate(1)); |
| 573 cmpq(kScratchRegister, Immediate(1)); |
| 574 return equal; |
| 575 } |
| 576 |
| 577 |
| 578 Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) { |
| 579 // A 32-bit integer value can always be converted to a smi. |
| 580 return always; |
| 581 } |
| 582 |
| 583 |
| 584 void MacroAssembler::SmiNeg(Register dst, Register src, Label* on_smi_result) { |
| 585 if (dst.is(src)) { |
| 586 ASSERT(!dst.is(kScratchRegister)); |
| 587 movq(kScratchRegister, src); |
| 588 neg(dst); // Low 32 bits are retained as zero by negation. |
| 589 // Test if result is zero or Smi::kMinValue. |
| 590 cmpq(dst, kScratchRegister); |
| 591 j(not_equal, on_smi_result); |
| 592 movq(src, kScratchRegister); |
| 593 } else { |
| 594 movq(dst, src); |
| 595 neg(dst); |
| 596 cmpq(dst, src); |
| 597 // If the result is zero or Smi::kMinValue, negation failed to create a smi. |
| 598 j(not_equal, on_smi_result); |
| 599 } |
| 600 } |
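Comparing the negated value against the original catches exactly the two smis that negation cannot handle, since they are the only values 'neg' leaves unchanged:

    // Illustrative:
    //   neg(0x0000000000000000) == 0x0000000000000000  // 0: the exact result would be -0
    //   neg(0x8000000000000000) == 0x8000000000000000  // Smi::kMinValue: negation overflows
    // Every other smi changes under negation, so 'result != original' means success.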
| 601 |
| 602 |
| 603 void MacroAssembler::SmiAdd(Register dst, |
| 604 Register src1, |
| 605 Register src2, |
| 606 Label* on_not_smi_result) { |
| 607 ASSERT(!dst.is(src2)); |
| 608 if (dst.is(src1)) { |
| 609 addq(dst, src2); |
| 610 Label smi_result; |
| 611 j(no_overflow, &smi_result); |
| 612 // Restore src1. |
| 613 subq(src1, src2); |
| 614 jmp(on_not_smi_result); |
| 615 bind(&smi_result); |
| 616 } else { |
| 617 movq(dst, src1); |
| 618 addq(dst, src2); |
| 619 j(overflow, on_not_smi_result); |
| 620 } |
| 621 } |
| 622 |
| 623 |
| 624 void MacroAssembler::SmiSub(Register dst, |
| 625 Register src1, |
| 626 Register src2, |
| 627 Label* on_not_smi_result) { |
| 628 ASSERT(!dst.is(src2)); |
| 629 if (dst.is(src1)) { |
| 630 subq(dst, src2); |
| 631 Label smi_result; |
| 632 j(no_overflow, &smi_result); |
| 633 // Restore src1. |
| 634 addq(src1, src2); |
| 635 jmp(on_not_smi_result); |
| 636 bind(&smi_result); |
| 637 } else { |
| 638 movq(dst, src1); |
| 639 subq(dst, src2); |
| 640 j(overflow, on_not_smi_result); |
| 641 } |
| 642 } |
| 643 |
| 644 |
| 645 void MacroAssembler::SmiMul(Register dst, |
| 646 Register src1, |
| 647 Register src2, |
| 648 Label* on_not_smi_result) { |
| 649 ASSERT(!dst.is(src2)); |
| 650 ASSERT(!dst.is(kScratchRegister)); |
| 651 ASSERT(!src1.is(kScratchRegister)); |
| 652 ASSERT(!src2.is(kScratchRegister)); |
| 653 |
| 654 if (dst.is(src1)) { |
| 655 Label failure, zero_correct_result; |
| 656 movq(kScratchRegister, src1); // Create backup for later testing. |
| 657 SmiToInteger64(dst, src1); |
| 658 imul(dst, src2); |
| 659 j(overflow, &failure); |
| 660 |
| 661 // Check for negative zero result. If product is zero, and one |
| 662 // argument is negative, go to slow case. |
| 663 Label correct_result; |
| 664 testq(dst, dst); |
| 665 j(not_zero, &correct_result); |
| 666 |
| 667 movq(dst, kScratchRegister); |
| 668 xor_(dst, src2); |
| 669 j(positive, &zero_correct_result); // Result was positive zero. |
| 670 |
| 671 bind(&failure); // Reused failure exit, restores src1. |
| 672 movq(src1, kScratchRegister); |
| 673 jmp(on_not_smi_result); |
| 674 |
| 675 bind(&zero_correct_result); |
| 676 xor_(dst, dst); |
| 677 |
| 678 bind(&correct_result); |
| 679 } else { |
| 680 SmiToInteger64(dst, src1); |
| 681 imul(dst, src2); |
| 682 j(overflow, on_not_smi_result); |
| 683 // Check for negative zero result. If product is zero, and one |
| 684 // argument is negative, go to slow case. |
| 685 Label correct_result; |
| 686 testq(dst, dst); |
| 687 j(not_zero, &correct_result); |
| 688 // One of src1 and src2 is zero, so check whether the other is |
| 689 // negative. |
| 690 movq(kScratchRegister, src1); |
| 691 xor_(kScratchRegister, src2); |
| 692 j(negative, on_not_smi_result); |
| 693 bind(&correct_result); |
| 694 } |
| 695 } |
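The negative-zero check works because a zero product means one operand was zero, and the xor of the operands is negative exactly when the other operand is negative. A C++ equivalent of the condition (illustrative only):

    // True when the exact result of the multiplication is -0 and the
    // operation must fall back to the slow (heap-number) path.
    bool NeedsSlowCaseForNegativeZero(int64_t a, int64_t b, int64_t product) {
      return product == 0 && ((a ^ b) < 0);
    }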
| 696 |
| 697 |
| 698 void MacroAssembler::SmiTryAddConstant(Register dst, |
| 699 Register src, |
| 700 Smi* constant, |
| 701 Label* on_not_smi_result) { |
| 702 // Does not assume that src is a smi. |
| 703 ASSERT_EQ(1, kSmiTagMask); |
| 704 ASSERT_EQ(0, kSmiTag); |
| 705 ASSERT(!dst.is(kScratchRegister)); |
| 706 ASSERT(!src.is(kScratchRegister)); |
| 707 |
| 708 JumpIfNotSmi(src, on_not_smi_result); |
| 709 Register tmp = (dst.is(src) ? kScratchRegister : dst); |
| 710 Move(tmp, constant); |
| 711 addq(tmp, src); |
| 712 j(overflow, on_not_smi_result); |
| 713 if (dst.is(src)) { |
| 714 movq(dst, tmp); |
| 715 } |
| 716 } |
| 717 |
| 718 |
| 719 void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) { |
| 720 if (constant->value() == 0) { |
| 721 if (!dst.is(src)) { |
| 722 movq(dst, src); |
| 723 } |
| 724 } else if (dst.is(src)) { |
| 725 ASSERT(!dst.is(kScratchRegister)); |
| 726 |
| 727 Move(kScratchRegister, constant); |
| 728 addq(dst, kScratchRegister); |
| 729 } else { |
| 730 Move(dst, constant); |
| 731 addq(dst, src); |
| 732 } |
| 733 } |
| 734 |
| 735 |
| 736 void MacroAssembler::SmiAddConstant(Register dst, |
| 737 Register src, |
| 738 Smi* constant, |
| 739 Label* on_not_smi_result) { |
| 740 if (constant->value() == 0) { |
| 741 if (!dst.is(src)) { |
| 742 movq(dst, src); |
| 743 } |
| 744 } else if (dst.is(src)) { |
| 745 ASSERT(!dst.is(kScratchRegister)); |
| 746 |
| 747 Move(kScratchRegister, constant); |
| 748 addq(dst, kScratchRegister); |
| 749 Label result_ok; |
| 750 j(no_overflow, &result_ok); |
| 751 subq(dst, kScratchRegister); |
| 752 jmp(on_not_smi_result); |
| 753 bind(&result_ok); |
| 754 } else { |
| 755 Move(dst, constant); |
| 756 addq(dst, src); |
| 757 j(overflow, on_not_smi_result); |
| 758 } |
| 759 } |
| 760 |
| 761 |
| 762 void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) { |
| 763 if (constant->value() == 0) { |
| 764 if (!dst.is(src)) { |
| 765 movq(dst, src); |
| 766 } |
| 767 } else if (dst.is(src)) { |
| 768 ASSERT(!dst.is(kScratchRegister)); |
| 769 |
| 770 Move(kScratchRegister, constant); |
| 771 subq(dst, kScratchRegister); |
| 772 } else { |
| 773 // Subtract by adding the negative, to do it in two operations. |
| 774 if (constant->value() == Smi::kMinValue) { |
| 775 Move(kScratchRegister, constant); |
| 776 movq(dst, src); |
| 777 subq(dst, kScratchRegister); |
| 778 } else { |
| 779 Move(dst, Smi::FromInt(-constant->value())); |
| 780 addq(dst, src); |
| 781 } |
| 782 } |
| 783 } |
| 784 |
| 785 |
| 786 void MacroAssembler::SmiSubConstant(Register dst, |
| 787 Register src, |
| 788 Smi* constant, |
| 789 Label* on_not_smi_result) { |
| 790 if (constant->value() == 0) { |
| 791 if (!dst.is(src)) { |
| 792 movq(dst, src); |
| 793 } |
| 794 } else if (dst.is(src)) { |
| 795 ASSERT(!dst.is(kScratchRegister)); |
| 796 |
| 797 Move(kScratchRegister, constant); |
| 798 subq(dst, kScratchRegister); |
| 799 Label sub_success; |
| 800 j(no_overflow, &sub_success); |
| 801 addq(src, kScratchRegister); |
| 802 jmp(on_not_smi_result); |
| 803 bind(&sub_success); |
| 804 } else { |
| 805 if (constant->value() == Smi::kMinValue) { |
| 806 Move(kScratchRegister, constant); |
| 807 movq(dst, src); |
| 808 subq(dst, kScratchRegister); |
| 809 j(overflow, on_not_smi_result); |
| 810 } else { |
| 811 Move(dst, Smi::FromInt(-(constant->value()))); |
| 812 addq(dst, src); |
| 813 j(overflow, on_not_smi_result); |
| 814 } |
| 815 } |
| 816 } |
| 817 |
| 818 |
| 819 void MacroAssembler::SmiDiv(Register dst, |
| 820 Register src1, |
| 821 Register src2, |
| 822 Label* on_not_smi_result) { |
| 823 ASSERT(!src1.is(kScratchRegister)); |
| 824 ASSERT(!src2.is(kScratchRegister)); |
| 825 ASSERT(!dst.is(kScratchRegister)); |
| 826 ASSERT(!src2.is(rax)); |
| 827 ASSERT(!src2.is(rdx)); |
| 828 ASSERT(!src1.is(rdx)); |
| 829 |
| 830 // Check for 0 divisor (result is +/-Infinity). |
| 831 Label positive_divisor; |
| 832 testq(src2, src2); |
| 833 j(zero, on_not_smi_result); |
| 834 |
| 835 if (src1.is(rax)) { |
| 836 movq(kScratchRegister, src1); |
| 837 } |
| 838 SmiToInteger32(rax, src1); |
| 839 // We need to rule out dividing Smi::kMinValue by -1, since that would |
| 840 // overflow in idiv and raise an exception. |
| 841 // We combine this with negative zero test (negative zero only happens |
| 842 // when dividing zero by a negative number). |
| 843 |
| 844 // We overshoot a little and go to slow case if we divide min-value |
| 845 // by any negative value, not just -1. |
| 846 Label safe_div; |
| 847 testl(rax, Immediate(0x7fffffff)); |
| 848 j(not_zero, &safe_div); |
| 849 testq(src2, src2); |
| 850 if (src1.is(rax)) { |
| 851 j(positive, &safe_div); |
| 852 movq(src1, kScratchRegister); |
| 853 jmp(on_not_smi_result); |
| 854 } else { |
| 855 j(negative, on_not_smi_result); |
| 856 } |
| 857 bind(&safe_div); |
| 858 |
| 859 SmiToInteger32(src2, src2); |
| 860 // Sign extend src1 into edx:eax. |
| 861 cdq(); |
| 862 idivl(src2); |
| 863 Integer32ToSmi(src2, src2); |
| 864 // Check that the remainder is zero. |
| 865 testl(rdx, rdx); |
| 866 if (src1.is(rax)) { |
| 867 Label smi_result; |
| 868 j(zero, &smi_result); |
| 869 movq(src1, kScratchRegister); |
| 870 jmp(on_not_smi_result); |
| 871 bind(&smi_result); |
| 872 } else { |
| 873 j(not_zero, on_not_smi_result); |
| 874 } |
| 875 if (!dst.is(src1) && src1.is(rax)) { |
| 876 movq(src1, kScratchRegister); |
| 877 } |
| 878 Integer32ToSmi(dst, rax); |
| 879 } |
| 880 |
| 881 |
| 882 void MacroAssembler::SmiMod(Register dst, |
| 883 Register src1, |
| 884 Register src2, |
| 885 Label* on_not_smi_result) { |
| 886 ASSERT(!dst.is(kScratchRegister)); |
| 887 ASSERT(!src1.is(kScratchRegister)); |
| 888 ASSERT(!src2.is(kScratchRegister)); |
| 889 ASSERT(!src2.is(rax)); |
| 890 ASSERT(!src2.is(rdx)); |
| 891 ASSERT(!src1.is(rdx)); |
| 892 ASSERT(!src1.is(src2)); |
| 893 |
| 894 testq(src2, src2); |
| 895 j(zero, on_not_smi_result); |
| 896 |
| 897 if (src1.is(rax)) { |
| 898 movq(kScratchRegister, src1); |
| 899 } |
| 900 SmiToInteger32(rax, src1); |
| 901 SmiToInteger32(src2, src2); |
| 902 |
| 903 // Test for the edge case of dividing Smi::kMinValue by -1 (will overflow). |
| 904 Label safe_div; |
| 905 cmpl(rax, Immediate(Smi::kMinValue)); |
| 906 j(not_equal, &safe_div); |
| 907 cmpl(src2, Immediate(-1)); |
| 908 j(not_equal, &safe_div); |
| 909 // Retag inputs and go slow case. |
| 910 Integer32ToSmi(src2, src2); |
| 911 if (src1.is(rax)) { |
| 912 movq(src1, kScratchRegister); |
| 913 } |
| 914 jmp(on_not_smi_result); |
| 915 bind(&safe_div); |
| 916 |
| 917 // Sign extend eax into edx:eax. |
| 918 cdq(); |
| 919 idivl(src2); |
| 920 // Restore smi tags on inputs. |
| 921 Integer32ToSmi(src2, src2); |
| 922 if (src1.is(rax)) { |
| 923 movq(src1, kScratchRegister); |
| 924 } |
| 925 // Check for a negative zero result. If the result is zero, and the |
| 926 // dividend is negative, go slow to return a floating point negative zero. |
| 927 Label smi_result; |
| 928 testl(rdx, rdx); |
| 929 j(not_zero, &smi_result); |
| 930 testq(src1, src1); |
| 931 j(negative, on_not_smi_result); |
| 932 bind(&smi_result); |
| 933 Integer32ToSmi(dst, rdx); |
| 934 } |
| 935 |
| 936 |
| 937 void MacroAssembler::SmiNot(Register dst, Register src) { |
| 938 ASSERT(!dst.is(kScratchRegister)); |
| 939 ASSERT(!src.is(kScratchRegister)); |
| 940 // Set tag and padding bits before negating, so that they are zero afterwards. |
| 941 movl(kScratchRegister, Immediate(~0)); |
| 942 if (dst.is(src)) { |
| 943 xor_(dst, kScratchRegister); |
| 944 } else { |
| 945 lea(dst, Operand(src, kScratchRegister, times_1, 0)); |
| 946 } |
| 947 not_(dst); |
| 948 } |
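The reason the low 32 bits are set to ones first (for the long smi encoding, where they start out as zero) is that the final not_ then returns them to zero while complementing the value bits:

    // Illustrative, for a long smi src == value << 32:
    //   step 1: src with low 32 bits set  ==  (value << 32) | 0xFFFFFFFF   (via xor or lea, no carry)
    //   step 2: ~(step 1)                 ==  (~value) << 32               (low 32 bits zero again)
    // i.e. dst ends up as the smi encoding of ~value.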
| 949 |
| 950 |
| 951 void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) { |
| 952 ASSERT(!dst.is(src2)); |
| 953 if (!dst.is(src1)) { |
| 954 movq(dst, src1); |
| 955 } |
| 956 and_(dst, src2); |
| 957 } |
| 958 |
| 959 |
| 960 void MacroAssembler::SmiAndConstant(Register dst, Register src, Smi* constant) { |
| 961 if (constant->value() == 0) { |
| 962 xor_(dst, dst); |
| 963 } else if (dst.is(src)) { |
| 964 ASSERT(!dst.is(kScratchRegister)); |
| 965 Move(kScratchRegister, constant); |
| 966 and_(dst, kScratchRegister); |
| 967 } else { |
| 968 Move(dst, constant); |
| 969 and_(dst, src); |
| 970 } |
| 971 } |
| 972 |
| 973 |
| 974 void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) { |
| 975 if (!dst.is(src1)) { |
| 976 movq(dst, src1); |
| 977 } |
| 978 or_(dst, src2); |
| 979 } |
| 980 |
| 981 |
| 982 void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) { |
| 983 if (dst.is(src)) { |
| 984 ASSERT(!dst.is(kScratchRegister)); |
| 985 Move(kScratchRegister, constant); |
| 986 or_(dst, kScratchRegister); |
| 987 } else { |
| 988 Move(dst, constant); |
| 989 or_(dst, src); |
| 990 } |
| 991 } |
| 992 |
| 993 |
| 994 void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) { |
| 995 if (!dst.is(src1)) { |
| 996 movq(dst, src1); |
| 997 } |
| 998 xor_(dst, src2); |
| 999 } |
| 1000 |
| 1001 |
| 1002 void MacroAssembler::SmiXorConstant(Register dst, Register src, Smi* constant) { |
| 1003 if (dst.is(src)) { |
| 1004 ASSERT(!dst.is(kScratchRegister)); |
| 1005 Move(kScratchRegister, constant); |
| 1006 xor_(dst, kScratchRegister); |
| 1007 } else { |
| 1008 Move(dst, constant); |
| 1009 xor_(dst, src); |
| 1010 } |
| 1011 } |
| 1012 |
| 1013 |
| 1014 void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst, |
| 1015 Register src, |
| 1016 int shift_value) { |
| 1017 ASSERT(is_uint5(shift_value)); |
| 1018 if (shift_value > 0) { |
| 1019 if (dst.is(src)) { |
| 1020 sar(dst, Immediate(shift_value + kSmiShift)); |
| 1021 shl(dst, Immediate(kSmiShift)); |
| 1022 } else { |
| 1023 UNIMPLEMENTED(); // Not used. |
| 1024 } |
| 1025 } |
| 1026 } |
| 1027 |
| 1028 |
| 1029 void MacroAssembler::SmiShiftLogicalRightConstant(Register dst, |
| 1030 Register src, |
| 1031 int shift_value, |
| 1032 Label* on_not_smi_result) { |
| 1033 // Logical right shift interprets its result as an *unsigned* number. |
| 1034 if (dst.is(src)) { |
| 1035 UNIMPLEMENTED(); // Not used. |
| 1036 } else { |
| 1037 movq(dst, src); |
| 1038 if (shift_value == 0) { |
| 1039 testq(dst, dst); |
| 1040 j(negative, on_not_smi_result); |
| 1041 } |
| 1042 shr(dst, Immediate(shift_value + kSmiShift)); |
| 1043 shl(dst, Immediate(kSmiShift)); |
| 1044 } |
| 1045 } |
| 1046 |
| 1047 |
| 1048 void MacroAssembler::SmiShiftLeftConstant(Register dst, |
| 1049 Register src, |
| 1050 int shift_value, |
| 1051 Label* on_not_smi_result) { |
| 1052 if (!dst.is(src)) { |
| 1053 movq(dst, src); |
| 1054 } |
| 1055 if (shift_value > 0) { |
| 1056 shl(dst, Immediate(shift_value)); |
| 1057 } |
| 1058 } |
| 1059 |
| 1060 |
| 1061 void MacroAssembler::SmiShiftLeft(Register dst, |
| 1062 Register src1, |
| 1063 Register src2, |
| 1064 Label* on_not_smi_result) { |
| 1065 ASSERT(!dst.is(rcx)); |
| 1066 Label result_ok; |
| 1067 // Untag shift amount. |
| 1068 if (!dst.is(src1)) { |
| 1069 movq(dst, src1); |
| 1070 } |
| 1071 SmiToInteger32(rcx, src2); |
| 1072 // Shift amount is specified by the lower 5 bits, not six as in the shl opcode. |
| 1073 and_(rcx, Immediate(0x1f)); |
| 1074 shl(dst); |
| 1075 } |
| 1076 |
| 1077 |
| 1078 void MacroAssembler::SmiShiftLogicalRight(Register dst, |
| 1079 Register src1, |
| 1080 Register src2, |
| 1081 Label* on_not_smi_result) { |
| 1082 ASSERT(!dst.is(kScratchRegister)); |
| 1083 ASSERT(!src1.is(kScratchRegister)); |
| 1084 ASSERT(!src2.is(kScratchRegister)); |
| 1085 ASSERT(!dst.is(rcx)); |
| 1086 Label result_ok; |
| 1087 if (src1.is(rcx) || src2.is(rcx)) { |
| 1088 movq(kScratchRegister, rcx); |
| 1089 } |
| 1090 if (!dst.is(src1)) { |
| 1091 movq(dst, src1); |
| 1092 } |
| 1093 SmiToInteger32(rcx, src2); |
| 1094 orl(rcx, Immediate(kSmiShift)); |
| 1095 shr(dst); // Shift amount is (rcx & 0x1f) + 32. |
| 1096 shl(dst, Immediate(kSmiShift)); |
| 1097 testq(dst, dst); |
| 1098 if (src1.is(rcx) || src2.is(rcx)) { |
| 1099 Label positive_result; |
| 1100 j(positive, &positive_result); |
| 1101 if (src1.is(rcx)) { |
| 1102 movq(src1, kScratchRegister); |
| 1103 } else { |
| 1104 movq(src2, kScratchRegister); |
| 1105 } |
| 1106 jmp(on_not_smi_result); |
| 1107 bind(&positive_result); |
| 1108 } else { |
| 1109 j(negative, on_not_smi_result); // src2 was zero and src1 negative. |
| 1110 } |
| 1111 } |
| 1112 |
| 1113 |
| 1114 void MacroAssembler::SmiShiftArithmeticRight(Register dst, |
| 1115 Register src1, |
| 1116 Register src2) { |
| 1117 ASSERT(!dst.is(kScratchRegister)); |
| 1118 ASSERT(!src1.is(kScratchRegister)); |
| 1119 ASSERT(!src2.is(kScratchRegister)); |
| 1120 ASSERT(!dst.is(rcx)); |
| 1121 if (src1.is(rcx)) { |
| 1122 movq(kScratchRegister, src1); |
| 1123 } else if (src2.is(rcx)) { |
| 1124 movq(kScratchRegister, src2); |
| 1125 } |
| 1126 if (!dst.is(src1)) { |
| 1127 movq(dst, src1); |
| 1128 } |
| 1129 SmiToInteger32(rcx, src2); |
| 1130 orl(rcx, Immediate(kSmiShift)); |
| 1131 sar(dst); // Shift 32 + original rcx & 0x1f. |
| 1132 shl(dst, Immediate(kSmiShift)); |
| 1133 if (src1.is(rcx)) { |
| 1134 movq(src1, kScratchRegister); |
| 1135 } else if (src2.is(rcx)) { |
| 1136 movq(src2, kScratchRegister); |
| 1137 } |
| 1138 } |
| 1139 |
| 1140 |
| 1141 void MacroAssembler::SelectNonSmi(Register dst, |
| 1142 Register src1, |
| 1143 Register src2, |
| 1144 Label* on_not_smis) { |
| 1145 ASSERT(!dst.is(kScratchRegister)); |
| 1146 ASSERT(!src1.is(kScratchRegister)); |
| 1147 ASSERT(!src2.is(kScratchRegister)); |
| 1148 ASSERT(!dst.is(src1)); |
| 1149 ASSERT(!dst.is(src2)); |
| 1150 // The operands must not both be smis. |
| 1151 #ifdef DEBUG |
| 1152 if (allow_stub_calls()) { // Check contains a stub call. |
| 1153 Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2)); |
| 1154 Check(not_both_smis, "Both registers were smis in SelectNonSmi."); |
| 1155 } |
| 1156 #endif |
| 1157 ASSERT_EQ(0, kSmiTag); |
| 1158 ASSERT_EQ(0, Smi::FromInt(0)); |
| 1159 movl(kScratchRegister, Immediate(kSmiTagMask)); |
| 1160 and_(kScratchRegister, src1); |
| 1161 testl(kScratchRegister, src2); |
| 1162 // If non-zero then neither operand is a smi. |
| 1163 j(not_zero, on_not_smis); |
| 1164 |
| 1165 // Exactly one operand is a smi. |
| 1166 ASSERT_EQ(1, static_cast<int>(kSmiTagMask)); |
| 1167 // kScratchRegister still holds src1 & kSmiTagMask, which is either zero or one. |
| 1168 subq(kScratchRegister, Immediate(1)); |
| 1169 // If src1 is a smi, then the scratch register is all 1s, else it is all 0s. |
| 1170 movq(dst, src1); |
| 1171 xor_(dst, src2); |
| 1172 and_(dst, kScratchRegister); |
| 1173 // If src1 is a smi, dst holds src1 ^ src2, else it is zero. |
| 1174 xor_(dst, src1); |
| 1175 // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi. |
| 1176 } |
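The branch-free selection builds a mask from src1's tag bit (the surrounding asserts guarantee kSmiTag == 0 and kSmiTagMask == 1). Illustrative C++ equivalent:

    uint64_t mask = (src1 & kSmiTagMask) - 1;     // all ones if src1 is a smi, else zero
    uint64_t dst  = ((src1 ^ src2) & mask) ^ src1;
    // src1 is a smi      -> dst == src2 (the non-smi)
    // src1 is not a smi  -> dst == src1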
| 1177 |
| 1178 SmiIndex MacroAssembler::SmiToIndex(Register dst, |
| 1179 Register src, |
| 1180 int shift) { |
| 1181 ASSERT(is_uint6(shift)); |
| 1182 // There is a possible optimization if shift is in the range 60-63, but that |
| 1183 // will (and must) never happen. |
| 1184 if (!dst.is(src)) { |
| 1185 movq(dst, src); |
| 1186 } |
| 1187 if (shift < kSmiShift) { |
| 1188 sar(dst, Immediate(kSmiShift - shift)); |
| 1189 } else { |
| 1190 shl(dst, Immediate(shift - kSmiShift)); |
| 1191 } |
| 1192 return SmiIndex(dst, times_1); |
| 1193 } |
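With the value stored at bit 32, turning a smi into a byte offset scaled by 2^shift is a single arithmetic shift by kSmiShift - shift. For example (kSmiShift == 32, shift == kPointerSizeLog2 == 3):

    // Illustrative: smi 7 indexes element 7 of an array of pointer-sized entries.
    //   tagged 7              == 7 << 32
    //   (7 << 32) >> (32 - 3) == 7 << 3 == 56   // byte offset, returned with scale times_1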
| 1194 |
| 1195 SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst, |
| 1196 Register src, |
| 1197 int shift) { |
| 1198 // Register src holds a positive smi. |
| 1199 ASSERT(is_uint6(shift)); |
| 1200 if (!dst.is(src)) { |
| 1201 movq(dst, src); |
| 1202 } |
| 1203 neg(dst); |
| 1204 if (shift < kSmiShift) { |
| 1205 sar(dst, Immediate(kSmiShift - shift)); |
| 1206 } else { |
| 1207 shl(dst, Immediate(shift - kSmiShift)); |
| 1208 } |
| 1209 return SmiIndex(dst, times_1); |
| 1210 } |
| 1211 |
| 1212 #else // ! V8_LONG_SMI |
| 1213 // 31 bit smi operations |
| 1214 |
| 1215 // Extracts the low 32 bits of a Smi pointer, where the tagged smi value |
| 1216 // is stored. |
| 1217 static int32_t SmiValue(Smi* smi) { |
| 1218 return static_cast<int32_t>(reinterpret_cast<intptr_t>(smi)); |
| 1219 } |
| 1220 |
| 1221 |
| 1222 void MacroAssembler::Integer32ToSmi(Register dst, Register src) { |
| 1223 ASSERT_EQ(1, kSmiTagSize); |
| 1224 ASSERT_EQ(0, kSmiTag); |
| 1225 #ifdef DEBUG |
| 1226 if (allow_stub_calls()) { |
| 1227 cmpl(src, Immediate(0xC0000000u)); |
| 1228 Check(positive, "Smi conversion overflow"); |
| 1229 } |
| 1230 #endif |
| 1231 if (dst.is(src)) { |
| 1232 addl(dst, src); |
| 1233 } else { |
| 1234 lea(dst, Operand(src, src, times_1, 0)); |
| 1235 } |
| 1236 } |
| 1237 |
| 1238 |
| 1239 void MacroAssembler::Integer32ToSmi(Register dst, |
| 1240 Register src, |
| 1241 Label* on_overflow) { |
| 1242 ASSERT_EQ(1, kSmiTagSize); |
| 1243 ASSERT_EQ(0, kSmiTag); |
| 1244 if (!dst.is(src)) { |
| 1245 movl(dst, src); |
| 1246 } |
| 1247 addl(dst, src); |
| 1248 j(overflow, on_overflow); |
| 1249 } |
| 1250 |
| 1251 |
| 1252 void MacroAssembler::Integer64PlusConstantToSmi(Register dst, |
| 1253 Register src, |
| 1254 int constant) { |
| 1255 #ifdef DEBUG |
| 1256 if (allow_stub_calls()) { |
| 1257 movl(kScratchRegister, src); |
| 1258 addl(kScratchRegister, Immediate(constant)); |
| 1259 Check(no_overflow, "Add-and-smi-convert overflow"); |
| 1260 Condition valid = CheckInteger32ValidSmiValue(kScratchRegister); |
| 1261 Check(valid, "Add-and-smi-convert overflow"); |
| 1262 } |
| 1263 #endif |
| 1264 lea(dst, Operand(src, src, times_1, constant << kSmiTagSize)); |
| 1265 } |
| 1266 |
| 1267 |
| 1268 void MacroAssembler::SmiToInteger32(Register dst, Register src) { |
| 1269 ASSERT_EQ(1, kSmiTagSize); |
| 1270 ASSERT_EQ(0, kSmiTag); |
| 1271 if (!dst.is(src)) { |
| 1272 movl(dst, src); |
| 1273 } |
| 1274 sarl(dst, Immediate(kSmiTagSize)); |
| 1275 } |
| 1276 |
| 1277 |
| 1278 void MacroAssembler::SmiToInteger64(Register dst, Register src) { |
| 1279 ASSERT_EQ(1, kSmiTagSize); |
| 1280 ASSERT_EQ(0, kSmiTag); |
| 1281 movsxlq(dst, src); |
| 1282 sar(dst, Immediate(kSmiTagSize)); |
| 1283 } |
| 1284 |
| 1285 |
| 1286 void MacroAssembler::SmiTest(Register src) { |
| 1287 testl(src, src); |
| 1288 } |
| 1289 |
| 1290 |
| 1291 void MacroAssembler::SmiCompare(Register dst, Register src) { |
| 1292 cmpl(dst, src); |
| 1293 } |
| 1294 |
| 1295 |
| 1296 void MacroAssembler::SmiCompare(Register dst, Smi* src) { |
| 1297 ASSERT(!dst.is(kScratchRegister)); |
| 1298 if (src->value() == 0) { |
| 1299 testl(dst, dst); |
| 1300 } else { |
| 1301 cmpl(dst, Immediate(SmiValue(src))); |
| 1302 } |
| 1303 } |
| 1304 |
| 1305 |
| 1306 void MacroAssembler::SmiCompare(const Operand& dst, Register src) { |
| 1307 cmpl(dst, src); |
| 1308 } |
| 1309 |
| 1310 |
| 1311 void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) { |
| 1312 if (src->value() == 0) { |
| 1313 movl(kScratchRegister, dst); |
| 1314 testl(kScratchRegister, kScratchRegister); |
| 1315 } else { |
| 1316 cmpl(dst, Immediate(SmiValue(src))); |
| 1317 } |
| 1318 } |
| 1319 |
| 1320 |
| 1321 void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst, |
| 1322 Register src, |
| 1323 int power) { |
| 1324 ASSERT(power >= 0); |
| 1325 ASSERT(power < 64); |
| 1326 if (power == 0) { |
| 1327 SmiToInteger64(dst, src); |
| 1328 return; |
| 1329 } |
| 493 movsxlq(dst, src); | 1330 movsxlq(dst, src); |
| 494 shl(dst, Immediate(power - 1)); | 1331 shl(dst, Immediate(power - 1)); |
| 495 } | 1332 } |
| 496 | 1333 |
| 497 void MacroAssembler::JumpIfSmi(Register src, Label* on_smi) { | |
| 498 ASSERT_EQ(0, kSmiTag); | |
| 499 testl(src, Immediate(kSmiTagMask)); | |
| 500 j(zero, on_smi); | |
| 501 } | |
| 502 | |
| 503 | |
| 504 void MacroAssembler::JumpIfNotSmi(Register src, Label* on_not_smi) { | |
| 505 Condition not_smi = CheckNotSmi(src); | |
| 506 j(not_smi, on_not_smi); | |
| 507 } | |
| 508 | |
| 509 | |
| 510 void MacroAssembler::JumpIfNotPositiveSmi(Register src, | |
| 511 Label* on_not_positive_smi) { | |
| 512 Condition not_positive_smi = CheckNotPositiveSmi(src); | |
| 513 j(not_positive_smi, on_not_positive_smi); | |
| 514 } | |
| 515 | |
| 516 | |
| 517 void MacroAssembler::JumpIfSmiEqualsConstant(Register src, | |
| 518 int constant, | |
| 519 Label* on_equals) { | |
| 520 if (Smi::IsValid(constant)) { | |
| 521 Condition are_equal = CheckSmiEqualsConstant(src, constant); | |
| 522 j(are_equal, on_equals); | |
| 523 } | |
| 524 } | |
| 525 | |
| 526 | |
| 527 void MacroAssembler::JumpIfSmiGreaterEqualsConstant(Register src, | |
| 528 int constant, | |
| 529 Label* on_greater_equals) { | |
| 530 if (Smi::IsValid(constant)) { | |
| 531 Condition are_greater_equal = CheckSmiGreaterEqualsConstant(src, constant); | |
| 532 j(are_greater_equal, on_greater_equals); | |
| 533 } else if (constant < Smi::kMinValue) { | |
| 534 jmp(on_greater_equals); | |
| 535 } | |
| 536 } | |
| 537 | |
| 538 | |
| 539 void MacroAssembler::JumpIfNotValidSmiValue(Register src, Label* on_invalid) { | |
| 540 Condition is_valid = CheckInteger32ValidSmiValue(src); | |
| 541 j(ReverseCondition(is_valid), on_invalid); | |
| 542 } | |
| 543 | |
| 544 | |
| 545 | |
| 546 void MacroAssembler::JumpIfNotBothSmi(Register src1, | |
| 547 Register src2, | |
| 548 Label* on_not_both_smi) { | |
| 549 Condition not_both_smi = CheckNotBothSmi(src1, src2); | |
| 550 j(not_both_smi, on_not_both_smi); | |
| 551 } | |
| 552 | |
| 553 Condition MacroAssembler::CheckSmi(Register src) { | 1334 Condition MacroAssembler::CheckSmi(Register src) { |
| 554 testb(src, Immediate(kSmiTagMask)); | 1335 testb(src, Immediate(kSmiTagMask)); |
| 555 return zero; | 1336 return zero; |
| 556 } | 1337 } |
| 557 | 1338 |
| 558 | |
| 559 Condition MacroAssembler::CheckNotSmi(Register src) { | |
| 560 ASSERT_EQ(0, kSmiTag); | |
| 561 testb(src, Immediate(kSmiTagMask)); | |
| 562 return not_zero; | |
| 563 } | |
| 564 | |
| 565 | |
| 566 Condition MacroAssembler::CheckPositiveSmi(Register src) { | 1339 Condition MacroAssembler::CheckPositiveSmi(Register src) { |
| 567 ASSERT_EQ(0, kSmiTag); | 1340 ASSERT_EQ(0, kSmiTag); |
| 568 testl(src, Immediate(static_cast<uint32_t>(0x80000000u | kSmiTagMask))); | 1341 testl(src, Immediate(static_cast<uint32_t>(0x80000000u | kSmiTagMask))); |
| 569 return zero; | 1342 return zero; |
| 570 } | 1343 } |
| 571 | 1344 |
| 572 | |
| 573 Condition MacroAssembler::CheckNotPositiveSmi(Register src) { | |
| 574 ASSERT_EQ(0, kSmiTag); | |
| 575 testl(src, Immediate(static_cast<uint32_t>(0x80000000u | kSmiTagMask))); | |
| 576 return not_zero; | |
| 577 } | |
| 578 | |
| 579 | |
| 580 Condition MacroAssembler::CheckBothSmi(Register first, Register second) { | 1345 Condition MacroAssembler::CheckBothSmi(Register first, Register second) { |
| 581 if (first.is(second)) { | 1346 if (first.is(second)) { |
| 582 return CheckSmi(first); | 1347 return CheckSmi(first); |
| 583 } | 1348 } |
| 584 movl(kScratchRegister, first); | 1349 movl(kScratchRegister, first); |
| 585 orl(kScratchRegister, second); | 1350 orl(kScratchRegister, second); |
| 586 return CheckSmi(kScratchRegister); | 1351 return CheckSmi(kScratchRegister); |
| 587 } | 1352 } |
| 588 | 1353 |
| 589 | |
| 590 Condition MacroAssembler::CheckNotBothSmi(Register first, Register second) { | |
| 591 ASSERT_EQ(0, kSmiTag); | |
| 592 if (first.is(second)) { | |
| 593 return CheckNotSmi(first); | |
| 594 } | |
| 595 movl(kScratchRegister, first); | |
| 596 or_(kScratchRegister, second); | |
| 597 return CheckNotSmi(kScratchRegister); | |
| 598 } | |
| 599 | |
| 600 | |
| 601 Condition MacroAssembler::CheckIsMinSmi(Register src) { | 1354 Condition MacroAssembler::CheckIsMinSmi(Register src) { |
| 602 ASSERT(kSmiTag == 0 && kSmiTagSize == 1); | 1355 ASSERT(kSmiTag == 0 && kSmiTagSize == 1); |
| 603 cmpl(src, Immediate(0x40000000)); | 1356 cmpl(src, Immediate(0x80000000u)); |
| 604 return equal; | 1357 return equal; |
| 605 } | 1358 } |
| 606 | 1359 |
| 607 Condition MacroAssembler::CheckSmiEqualsConstant(Register src, int constant) { | |
| 608 if (constant == 0) { | |
| 609 testl(src, src); | |
| 610 return zero; | |
| 611 } | |
| 612 if (Smi::IsValid(constant)) { | |
| 613 cmpl(src, Immediate(Smi::FromInt(constant))); | |
| 614 return zero; | |
| 615 } | |
| 616 // Can't be equal. | |
| 617 UNREACHABLE(); | |
| 618 return no_condition; | |
| 619 } | |
| 620 | |
| 621 | |
| 622 Condition MacroAssembler::CheckSmiGreaterEqualsConstant(Register src, | |
| 623 int constant) { | |
| 624 if (constant == 0) { | |
| 625 testl(src, Immediate(static_cast<uint32_t>(0x80000000u))); | |
| 626 return positive; | |
| 627 } | |
| 628 if (Smi::IsValid(constant)) { | |
| 629 cmpl(src, Immediate(Smi::FromInt(constant))); | |
| 630 return greater_equal; | |
| 631 } | |
| 632 // Can't be equal. | |
| 633 UNREACHABLE(); | |
| 634 return no_condition; | |
| 635 } | |
| 636 | |
| 637 | |
| 638 Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) { | 1360 Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) { |
| 639 // A 32-bit integer value can be converted to a smi if it is in the | 1361 // A 32-bit integer value can be converted to a smi if it is in the |
| 640 // range [-2^30 .. 2^30-1]. That is equivalent to having its 32-bit | 1362 // range [-2^30 .. 2^30-1]. That is equivalent to having its 32-bit |
| 641 // representation have bits 30 and 31 be equal. | 1363 // representation have bits 30 and 31 be equal. |
| 642 cmpl(src, Immediate(0xC0000000u)); | 1364 cmpl(src, Immediate(0xC0000000u)); |
| 643 return positive; | 1365 return positive; |
| 644 } | 1366 } |
| 645 | 1367 |
| 646 | 1368 |
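To make the single cmpl above concrete: a 32-bit value lies in [-2^30, 2^30 - 1] exactly when bits 30 and 31 agree, which is the same as the value plus 2^30 fitting in 31 unsigned bits. A minimal sketch of that equivalence (helper name invented, not part of the patch):

    #include <cstdint>

    // True iff an untagged 32-bit value can be represented as a smi, i.e. it
    // lies in [-2^30, 2^30 - 1]. Mirrors cmpl(src, Immediate(0xC0000000u))
    // followed by testing the "positive" (sign-flag clear) condition.
    inline bool FitsInSmi(int32_t value) {
      // Adding 2^30 maps the valid range onto [0, 2^31 - 1]; the sign bit of
      // the sum is clear exactly when bits 30 and 31 of the value are equal.
      return (static_cast<uint32_t>(value) + 0x40000000u) < 0x80000000u;
    }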
| 647 void MacroAssembler::SmiNeg(Register dst, | 1369 void MacroAssembler::SmiNeg(Register dst, |
| 648 Register src, | 1370 Register src, |
| 649 Label* on_not_smi_result) { | 1371 Label* on_smi_result) { |
| 650 if (!dst.is(src)) { | 1372 if (!dst.is(src)) { |
| 651 movl(dst, src); | 1373 movl(dst, src); |
| 652 } | 1374 } |
| 653 negl(dst); | 1375 negl(dst); |
| 654 testl(dst, Immediate(0x7fffffff)); | 1376 testl(dst, Immediate(0x7fffffff)); |
| 655 // If the result is zero or 0x80000000, negation failed to create a smi. | 1377 // If the result is zero or 0x80000000, negation failed to create a smi. |
| 656 j(equal, on_not_smi_result); | 1378 j(not_equal, on_smi_result); |
| 657 } | 1379 } |
| 658 | 1380 |
| 659 | 1381 |
| 660 void MacroAssembler::SmiAdd(Register dst, | 1382 void MacroAssembler::SmiAdd(Register dst, |
| 661 Register src1, | 1383 Register src1, |
| 662 Register src2, | 1384 Register src2, |
| 663 Label* on_not_smi_result) { | 1385 Label* on_not_smi_result) { |
| 664 ASSERT(!dst.is(src2)); | 1386 ASSERT(!dst.is(src2)); |
| 665 if (!dst.is(src1)) { | 1387 if (!dst.is(src1)) { |
| 666 movl(dst, src1); | 1388 movl(dst, src1); |
| 667 } | 1389 } |
| 668 addl(dst, src2); | 1390 addl(dst, src2); |
| 669 if (!dst.is(src1)) { | 1391 if (!dst.is(src1)) { |
| 670 j(overflow, on_not_smi_result); | 1392 j(overflow, on_not_smi_result); |
| 671 } else { | 1393 } else { |
| 672 Label smi_result; | 1394 Label smi_result; |
| 673 j(no_overflow, &smi_result); | 1395 j(no_overflow, &smi_result); |
| 674 // Restore src1. | 1396 // Restore src1. |
| 675 subl(src1, src2); | 1397 subl(src1, src2); |
| 676 jmp(on_not_smi_result); | 1398 jmp(on_not_smi_result); |
| 677 bind(&smi_result); | 1399 bind(&smi_result); |
| 678 } | 1400 } |
| 679 } | 1401 } |
| 680 | 1402 |
| 681 | 1403 |
| 682 | |
| 683 void MacroAssembler::SmiSub(Register dst, | 1404 void MacroAssembler::SmiSub(Register dst, |
| 684 Register src1, | 1405 Register src1, |
| 685 Register src2, | 1406 Register src2, |
| 686 Label* on_not_smi_result) { | 1407 Label* on_not_smi_result) { |
| 687 ASSERT(!dst.is(src2)); | 1408 ASSERT(!dst.is(src2)); |
| 688 if (!dst.is(src1)) { | 1409 if (!dst.is(src1)) { |
| 689 movl(dst, src1); | 1410 movl(dst, src1); |
| 690 } | 1411 } |
| 691 subl(dst, src2); | 1412 subl(dst, src2); |
| 692 if (!dst.is(src1)) { | 1413 if (!dst.is(src1)) { |
| 693 j(overflow, on_not_smi_result); | 1414 j(overflow, on_not_smi_result); |
| 694 } else { | 1415 } else { |
| 695 Label smi_result; | 1416 Label smi_result; |
| 696 j(no_overflow, &smi_result); | 1417 j(no_overflow, &smi_result); |
| 697 // Restore src1. | 1418 // Restore src1. |
| 698 addl(src1, src2); | 1419 addl(src1, src2); |
| 699 jmp(on_not_smi_result); | 1420 jmp(on_not_smi_result); |
| 700 bind(&smi_result); | 1421 bind(&smi_result); |
| 701 } | 1422 } |
| 702 } | 1423 } |
| 703 | 1424 |
| 704 | 1425 |
| 705 void MacroAssembler::SmiMul(Register dst, | 1426 void MacroAssembler::SmiMul(Register dst, |
| 706 Register src1, | 1427 Register src1, |
| 707 Register src2, | 1428 Register src2, |
| 708 Label* on_not_smi_result) { | 1429 Label* on_not_smi_result) { |
| 709 ASSERT(!dst.is(src2)); | 1430 ASSERT(!dst.is(src2)); |
| 710 | 1431 |
| 711 if (dst.is(src1)) { | 1432 if (dst.is(src1)) { |
| 1433 // Copy src1 before overwriting. |
| 712 movq(kScratchRegister, src1); | 1434 movq(kScratchRegister, src1); |
| 713 } | 1435 } |
| 714 SmiToInteger32(dst, src1); | 1436 SmiToInteger32(dst, src1); |
| 715 | 1437 |
| 716 imull(dst, src2); | 1438 imull(dst, src2); |
| 717 j(overflow, on_not_smi_result); | 1439 j(overflow, on_not_smi_result); |
| 718 | 1440 |
| 719 // Check for negative zero result. If product is zero, and one | 1441 // Check for negative zero result. If product is zero, and one |
| 720 // argument is negative, go to slow case. The frame is unchanged | 1442 // argument is negative, go to slow case. The frame is unchanged |
| 721 // in this block, so local control flow can use a Label rather | 1443 // in this block, so local control flow can use a Label rather |
| 722 // than a JumpTarget. | 1444 // than a JumpTarget. |
| 723 Label non_zero_result; | 1445 Label non_zero_result; |
| 724 testl(dst, dst); | 1446 testl(dst, dst); |
| 725 j(not_zero, &non_zero_result); | 1447 j(not_zero, &non_zero_result); |
| 726 | 1448 |
| 727 // Test whether either operand is negative (the other must be zero). | 1449 // Test whether either operand is negative (the other must be zero). |
| 1450 if (!dst.is(src1)) { |
| 1451 movl(kScratchRegister, src1); |
| 1452 } |
| 728 orl(kScratchRegister, src2); | 1453 orl(kScratchRegister, src2); |
| 729 j(negative, on_not_smi_result); | 1454 j(negative, on_not_smi_result); |
| 1455 |
| 730 bind(&non_zero_result); | 1456 bind(&non_zero_result); |
| 731 } | 1457 } |
| 732 | 1458 |
| 733 | 1459 |
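The zero-product branch above exists because JavaScript distinguishes -0 from +0: if the product is zero but either operand was negative, the correct result is -0.0, which no smi can encode, so the code falls through to on_not_smi_result. A rough C++ model of the whole decision, working on untagged values and using an invented name:

    #include <cstdint>

    // Returns true and stores the product when a * b is representable as a smi;
    // returns false on range overflow and in the would-be -0 case.
    inline bool SmiMulValue(int32_t a, int32_t b, int32_t* result) {
      int64_t product = static_cast<int64_t>(a) * static_cast<int64_t>(b);
      if (product < -(int64_t{1} << 30) || product > (int64_t{1} << 30) - 1) {
        return false;  // result does not fit in the smi range
      }
      if (product == 0 && (a < 0 || b < 0)) {
        return false;  // JavaScript result would be -0, which is not a smi
      }
      *result = static_cast<int32_t>(product);
      return true;
    }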
| 734 void MacroAssembler::SmiTryAddConstant(Register dst, | 1460 void MacroAssembler::SmiTryAddConstant(Register dst, |
| 735 Register src, | 1461 Register src, |
| 736 int32_t constant, | 1462 Smi* constant, |
| 737 Label* on_not_smi_result) { | 1463 Label* on_not_smi_result) { |
| 738 // Does not assume that src is a smi. | 1464 // Does not assume that src is a smi. |
| 739 ASSERT_EQ(1, kSmiTagMask); | 1465 ASSERT_EQ(1, kSmiTagMask); |
| 740 ASSERT_EQ(0, kSmiTag); | 1466 ASSERT_EQ(0, kSmiTag); |
| 741 ASSERT(Smi::IsValid(constant)); | |
| 742 | 1467 |
| 743 Register tmp = (src.is(dst) ? kScratchRegister : dst); | 1468 Register tmp = (src.is(dst) ? kScratchRegister : dst); |
| 744 movl(tmp, src); | 1469 movl(tmp, src); |
| 745 addl(tmp, Immediate(Smi::FromInt(constant))); | 1470 addl(tmp, Immediate(SmiValue(constant))); |
| 746 if (tmp.is(kScratchRegister)) { | 1471 if (tmp.is(kScratchRegister)) { |
| 747 j(overflow, on_not_smi_result); | 1472 j(overflow, on_not_smi_result); |
| 748 testl(tmp, Immediate(kSmiTagMask)); | 1473 testl(tmp, Immediate(kSmiTagMask)); |
| 749 j(not_zero, on_not_smi_result); | 1474 j(not_zero, on_not_smi_result); |
| 750 movl(dst, tmp); | 1475 movl(dst, tmp); |
| 751 } else { | 1476 } else { |
| 752 movl(kScratchRegister, Immediate(kSmiTagMask)); | 1477 movl(kScratchRegister, Immediate(kSmiTagMask)); |
| 753 cmovl(overflow, dst, kScratchRegister); | 1478 cmovl(overflow, dst, kScratchRegister); |
| 754 testl(dst, kScratchRegister); | 1479 testl(dst, kScratchRegister); |
| 755 j(not_zero, on_not_smi_result); | 1480 j(not_zero, on_not_smi_result); |
| 756 } | 1481 } |
| 757 } | 1482 } |
| 758 | 1483 |
| 759 | 1484 |
| 760 void MacroAssembler::SmiAddConstant(Register dst, | 1485 void MacroAssembler::SmiAddConstant(Register dst, |
| 761 Register src, | 1486 Register src, |
| 762 int32_t constant, | 1487 Smi* constant) { |
| 1488 ASSERT_EQ(1, kSmiTagMask); |
| 1489 ASSERT_EQ(0, kSmiTag); |
| 1490 int32_t smi_value = SmiValue(constant); |
| 1491 if (dst.is(src)) { |
| 1492 addl(dst, Immediate(smi_value)); |
| 1493 } else { |
| 1494 lea(dst, Operand(src, smi_value)); |
| 1495 } |
| 1496 } |
| 1497 |
| 1498 |
| 1499 void MacroAssembler::SmiAddConstant(Register dst, |
| 1500 Register src, |
| 1501 Smi* constant, |
| 763 Label* on_not_smi_result) { | 1502 Label* on_not_smi_result) { |
| 764 ASSERT(Smi::IsValid(constant)); | 1503 ASSERT_EQ(1, kSmiTagMask); |
| 765 if (on_not_smi_result == NULL) { | 1504 ASSERT_EQ(0, kSmiTag); |
| 766 if (dst.is(src)) { | 1505 int32_t smi_value = SmiValue(constant); |
| 767 movl(dst, src); | 1506 if (!dst.is(src)) { |
| 768 } else { | 1507 movl(dst, src); |
| 769 lea(dst, Operand(src, constant << kSmiTagSize)); | 1508 addl(dst, Immediate(smi_value)); |
| 770 } | 1509 j(overflow, on_not_smi_result); |
| 771 } else { | 1510 } else { |
| 772 if (!dst.is(src)) { | 1511 addl(dst, Immediate(smi_value)); |
| 773 movl(dst, src); | 1512 Label result_ok; |
| 774 } | 1513 j(no_overflow, &result_ok); |
| 775 addl(dst, Immediate(Smi::FromInt(constant))); | 1514 subl(dst, Immediate(smi_value)); |
| 776 if (!dst.is(src)) { | 1515 jmp(on_not_smi_result); |
| 777 j(overflow, on_not_smi_result); | 1516 bind(&result_ok); |
| 778 } else { | |
| 779 Label result_ok; | |
| 780 j(no_overflow, &result_ok); | |
| 781 subl(dst, Immediate(Smi::FromInt(constant))); | |
| 782 jmp(on_not_smi_result); | |
| 783 bind(&result_ok); | |
| 784 } | |
| 785 } | 1517 } |
| 786 } | 1518 } |
| 787 | 1519 |
| 788 | 1520 |
| 789 void MacroAssembler::SmiSubConstant(Register dst, | 1521 void MacroAssembler::SmiSubConstant(Register dst, |
| 790 Register src, | 1522 Register src, |
| 791 int32_t constant, | 1523 Smi* constant) { |
| 1524 ASSERT_EQ(1, kSmiTagMask); |
| 1525 ASSERT_EQ(0, kSmiTag); |
| 1526 if (!dst.is(src)) { |
| 1527 movl(dst, src); |
| 1528 } |
| 1529 subl(dst, Immediate(SmiValue(constant))); |
| 1530 } |
| 1531 |
| 1532 |
| 1533 void MacroAssembler::SmiSubConstant(Register dst, |
| 1534 Register src, |
| 1535 Smi* constant, |
| 792 Label* on_not_smi_result) { | 1536 Label* on_not_smi_result) { |
| 793 ASSERT(Smi::IsValid(constant)); | 1537 ASSERT_EQ(1, kSmiTagMask); |
| 794 Smi* smi_value = Smi::FromInt(constant); | 1538 ASSERT_EQ(0, kSmiTag); |
| 1539 int32_t smi_value = SmiValue(constant); |
| 795 if (dst.is(src)) { | 1540 if (dst.is(src)) { |
| 796 // Optimistic subtract - may change value of dst register, | 1541 // Optimistic subtract - may change value of dst register, |
| 797 // if it has garbage bits in the higher half, but will not change | 1542 // if it has garbage bits in the higher half, but will not change |
| 798 // the value as a tagged smi. | 1543 // the value as a tagged smi. |
| 799 subl(dst, Immediate(smi_value)); | 1544 subl(dst, Immediate(smi_value)); |
| 800 if (on_not_smi_result != NULL) { | 1545 Label add_success; |
| 801 Label add_success; | 1546 j(no_overflow, &add_success); |
| 802 j(no_overflow, &add_success); | 1547 addl(dst, Immediate(smi_value)); |
| 803 addl(dst, Immediate(smi_value)); | 1548 jmp(on_not_smi_result); |
| 804 jmp(on_not_smi_result); | 1549 bind(&add_success); |
| 805 bind(&add_success); | |
| 806 } | |
| 807 } else { | 1550 } else { |
| 808 UNIMPLEMENTED(); // Not used yet. | 1551 movl(dst, src); |
| 1552 subl(dst, Immediate(smi_value)); |
| 1553 j(overflow, on_not_smi_result); |
| 809 } | 1554 } |
| 810 } | 1555 } |
| 811 | 1556 |
| 812 | 1557 |
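SmiAddConstant and SmiSubConstant above share one "optimistic" pattern when dst aliases src: perform the 32-bit operation in place, and if it overflows, reverse it so the caller still holds the original tagged operand before jumping to the slow case. Only the low 32 bits of the register matter here, which is what the "garbage bits in the higher half" comment refers to. A simplified stand-alone sketch of the pattern (names invented for illustration):

    #include <cstdint>

    // In-place optimistic add of two tagged smi words, mirroring the
    // addl / j(overflow) / subl sequence used above when dst aliases src.
    inline bool OptimisticSmiAdd(int32_t* tagged_dst, int32_t tagged_constant) {
      uint32_t old_value = static_cast<uint32_t>(*tagged_dst);
      uint32_t sum = old_value + static_cast<uint32_t>(tagged_constant);
      *tagged_dst = static_cast<int32_t>(sum);  // addl: add in place
      // Signed overflow (the OF flag) occurs iff both operands have the same
      // sign and the result's sign differs from it.
      bool overflow = ((old_value ^ sum) &
                       (static_cast<uint32_t>(tagged_constant) ^ sum) &
                       0x80000000u) != 0;
      if (overflow) {
        *tagged_dst = static_cast<int32_t>(old_value);  // subl: restore the operand
        return false;  // caller jumps to on_not_smi_result
      }
      return true;
    }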
| 813 void MacroAssembler::SmiDiv(Register dst, | 1558 void MacroAssembler::SmiDiv(Register dst, |
| 814 Register src1, | 1559 Register src1, |
| 815 Register src2, | 1560 Register src2, |
| 816 Label* on_not_smi_result) { | 1561 Label* on_not_smi_result) { |
| 817 ASSERT(!src2.is(rax)); | 1562 ASSERT(!src2.is(rax)); |
| 818 ASSERT(!src2.is(rdx)); | 1563 ASSERT(!src2.is(rdx)); |
| (...skipping 87 matching lines...) |
| 906 | 1651 |
| 907 | 1652 |
| 908 void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) { | 1653 void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) { |
| 909 if (!dst.is(src1)) { | 1654 if (!dst.is(src1)) { |
| 910 movl(dst, src1); | 1655 movl(dst, src1); |
| 911 } | 1656 } |
| 912 and_(dst, src2); | 1657 and_(dst, src2); |
| 913 } | 1658 } |
| 914 | 1659 |
| 915 | 1660 |
| 916 void MacroAssembler::SmiAndConstant(Register dst, Register src, int constant) { | 1661 void MacroAssembler::SmiAndConstant(Register dst, Register src, Smi* constant) { |
| 917 ASSERT(Smi::IsValid(constant)); | |
| 918 if (!dst.is(src)) { | 1662 if (!dst.is(src)) { |
| 919 movl(dst, src); | 1663 movl(dst, src); |
| 920 } | 1664 } |
| 921 and_(dst, Immediate(Smi::FromInt(constant))); | 1665 int32_t smi_value = SmiValue(constant); |
| 1666 and_(dst, Immediate(smi_value)); |
| 922 } | 1667 } |
| 923 | 1668 |
| 924 | 1669 |
| 925 void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) { | 1670 void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) { |
| 926 if (!dst.is(src1)) { | 1671 if (!dst.is(src1)) { |
| 927 movl(dst, src1); | 1672 movl(dst, src1); |
| 928 } | 1673 } |
| 929 or_(dst, src2); | 1674 or_(dst, src2); |
| 930 } | 1675 } |
| 931 | 1676 |
| 932 | 1677 |
| 933 void MacroAssembler::SmiOrConstant(Register dst, Register src, int constant) { | 1678 void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) { |
| 934 ASSERT(Smi::IsValid(constant)); | |
| 935 if (!dst.is(src)) { | 1679 if (!dst.is(src)) { |
| 936 movl(dst, src); | 1680 movl(dst, src); |
| 937 } | 1681 } |
| 938 or_(dst, Immediate(Smi::FromInt(constant))); | 1682 int32_t smi_value = SmiValue(constant); |
| 1683 or_(dst, Immediate(smi_value)); |
| 939 } | 1684 } |
| 940 | 1685 |
| 1686 |
| 941 void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) { | 1687 void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) { |
| 942 if (!dst.is(src1)) { | 1688 if (!dst.is(src1)) { |
| 943 movl(dst, src1); | 1689 movl(dst, src1); |
| 944 } | 1690 } |
| 945 xor_(dst, src2); | 1691 xor_(dst, src2); |
| 946 } | 1692 } |
| 947 | 1693 |
| 948 | 1694 |
| 949 void MacroAssembler::SmiXorConstant(Register dst, Register src, int constant) { | 1695 void MacroAssembler::SmiXorConstant(Register dst, Register src, Smi* constant) { |
| 950 ASSERT(Smi::IsValid(constant)); | |
| 951 if (!dst.is(src)) { | 1696 if (!dst.is(src)) { |
| 952 movl(dst, src); | 1697 movl(dst, src); |
| 953 } | 1698 } |
| 954 xor_(dst, Immediate(Smi::FromInt(constant))); | 1699 int32_t smi_value = SmiValue(constant); |
| 1700 xor_(dst, Immediate(smi_value)); |
| 955 } | 1701 } |
| 956 | 1702 |
| 957 | 1703 |
| 958 | |
| 959 void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst, | 1704 void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst, |
| 960 Register src, | 1705 Register src, |
| 961 int shift_value) { | 1706 int shift_value) { |
| 962 if (shift_value > 0) { | 1707 if (shift_value > 0) { |
| 963 if (dst.is(src)) { | 1708 if (dst.is(src)) { |
| 964 sarl(dst, Immediate(shift_value)); | 1709 sarl(dst, Immediate(shift_value)); |
| 965 and_(dst, Immediate(~kSmiTagMask)); | 1710 and_(dst, Immediate(~kSmiTagMask)); |
| 966 } else { | 1711 } else { |
| 967 UNIMPLEMENTED(); // Not used. | 1712 UNIMPLEMENTED(); // Not used. |
| 968 } | 1713 } |
| (...skipping 25 matching lines...) |
| 994 addl(dst, dst); | 1739 addl(dst, dst); |
| 995 } | 1740 } |
| 996 } | 1741 } |
| 997 | 1742 |
| 998 | 1743 |
| 999 void MacroAssembler::SmiShiftLeftConstant(Register dst, | 1744 void MacroAssembler::SmiShiftLeftConstant(Register dst, |
| 1000 Register src, | 1745 Register src, |
| 1001 int shift_value, | 1746 int shift_value, |
| 1002 Label* on_not_smi_result) { | 1747 Label* on_not_smi_result) { |
| 1003 if (dst.is(src)) { | 1748 if (dst.is(src)) { |
| 1004 UNIMPLEMENTED(); // Not used. | 1749 if (shift_value > 0) { |
| 1750 movq(kScratchRegister, src); |
| 1751 // Treat scratch as an untagged integer value equal to two times the |
| 1752 // smi value of src, i.e., already shifted left by one. |
| 1753 if (shift_value > 1) { |
| 1754 shll(kScratchRegister, Immediate(shift_value - 1)); |
| 1755 } |
| 1756 JumpIfNotValidSmiValue(kScratchRegister, on_not_smi_result); |
| 1757 // Convert int result to Smi, checking that it is in smi range. |
| 1758 ASSERT(kSmiTagSize == 1); // adjust code if not the case |
| 1759 Integer32ToSmi(dst, kScratchRegister); |
| 1760 } |
| 1005 } else { | 1761 } else { |
| 1006 movl(dst, src); | 1762 movl(dst, src); |
| 1007 if (shift_value > 0) { | 1763 if (shift_value > 0) { |
| 1008 // Treat dst as an untagged integer value equal to two times the | 1764 // Treat dst as an untagged integer value equal to two times the |
| 1009 // smi value of src, i.e., already shifted left by one. | 1765 // smi value of src, i.e., already shifted left by one. |
| 1010 if (shift_value > 1) { | 1766 if (shift_value > 1) { |
| 1011 shll(dst, Immediate(shift_value - 1)); | 1767 shll(dst, Immediate(shift_value - 1)); |
| 1012 } | 1768 } |
| 1013 // Convert int result to Smi, checking that it is in smi range. | 1769 // Convert int result to Smi, checking that it is in smi range. |
| 1014 ASSERT(kSmiTagSize == 1); // adjust code if not the case | 1770 ASSERT(kSmiTagSize == 1); // adjust code if not the case |
| 1015 Integer32ToSmi(dst, dst, on_not_smi_result); | 1771 Integer32ToSmi(dst, dst, on_not_smi_result); |
| 1016 } | 1772 } |
| 1017 } | 1773 } |
| 1018 } | 1774 } |
| 1019 | 1775 |
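The "already shifted left by one" comments above exploit the tag: the tagged word equals 2 * value, so shifting it left by shift_value - 1 yields value << shift_value as an untagged integer, which is then range-checked and re-tagged. A hedged value-level model (invented name; the real code works on the 32-bit register and relies on the smi range check):

    #include <cstdint>

    // Value-level model of SmiShiftLeftConstant for shift_value >= 1.
    inline bool SmiShiftLeftConstantValue(int32_t tagged, int shift_value,
                                          int32_t* tagged_out) {
      // tagged == 2 * value, so one fewer doubling gives value << shift_value.
      int64_t untagged =
          static_cast<int64_t>(tagged) * (int64_t{1} << (shift_value - 1));
      if (untagged < -(int64_t{1} << 30) || untagged > (int64_t{1} << 30) - 1) {
        return false;  // not a valid smi value: take on_not_smi_result
      }
      *tagged_out = static_cast<int32_t>(untagged) * 2;  // Integer32ToSmi: re-tag
      return true;
    }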
| 1020 | 1776 |
| 1021 void MacroAssembler::SmiShiftLeft(Register dst, | 1777 void MacroAssembler::SmiShiftLeft(Register dst, |
| 1022 Register src1, | 1778 Register src1, |
| 1023 Register src2, | 1779 Register src2, |
| 1024 Label* on_not_smi_result) { | 1780 Label* on_not_smi_result) { |
| 1025 ASSERT(!dst.is(rcx)); | 1781 ASSERT(!dst.is(rcx)); |
| 1026 Label result_ok; | 1782 Label result_ok; |
| 1027 // Untag both operands. | 1783 // Untag both operands. |
| 1784 if (dst.is(src1) || src1.is(rcx)) { |
| 1785 movq(kScratchRegister, src1); |
| 1786 } |
| 1028 SmiToInteger32(dst, src1); | 1787 SmiToInteger32(dst, src1); |
| 1029 SmiToInteger32(rcx, src2); | 1788 SmiToInteger32(rcx, src2); |
| 1030 shll(dst); | 1789 shll(dst); |
| 1031 // Check that the *signed* result fits in a smi. | 1790 // Check that the *signed* result fits in a smi. |
| 1032 Condition is_valid = CheckInteger32ValidSmiValue(dst); | 1791 Condition is_valid = CheckInteger32ValidSmiValue(dst); |
| 1033 j(is_valid, &result_ok); | 1792 j(is_valid, &result_ok); |
| 1034 // Restore the relevant bits of the source registers | 1793 // Restore the relevant bits of the source registers |
| 1035 // and call the slow version. | 1794 // and call the slow version. |
| 1036 if (dst.is(src1)) { | 1795 if (dst.is(src1) || src1.is(rcx)) { |
| 1037 shrl(dst); | 1796 movq(src1, kScratchRegister); |
| 1038 Integer32ToSmi(dst, dst); | |
| 1039 } | 1797 } |
| 1040 Integer32ToSmi(rcx, rcx); | 1798 if (src2.is(rcx)) { |
| 1799 Integer32ToSmi(rcx, rcx); |
| 1800 } |
| 1041 jmp(on_not_smi_result); | 1801 jmp(on_not_smi_result); |
| 1042 bind(&result_ok); | 1802 bind(&result_ok); |
| 1043 Integer32ToSmi(dst, dst); | 1803 Integer32ToSmi(dst, dst); |
| 1044 } | 1804 } |
| 1045 | 1805 |
| 1046 | 1806 |
| 1047 void MacroAssembler::SmiShiftLogicalRight(Register dst, | 1807 void MacroAssembler::SmiShiftLogicalRight(Register dst, |
| 1048 Register src1, | 1808 Register src1, |
| 1049 Register src2, | 1809 Register src2, |
| 1050 Label* on_not_smi_result) { | 1810 Label* on_not_smi_result) { |
| 1811 ASSERT(!dst.is(kScratchRegister)); |
| 1812 ASSERT(!src1.is(kScratchRegister)); |
| 1813 ASSERT(!src2.is(kScratchRegister)); |
| 1051 ASSERT(!dst.is(rcx)); | 1814 ASSERT(!dst.is(rcx)); |
| 1052 Label result_ok; | 1815 Label result_ok; |
| 1053 // Untag both operands. | 1816 // Untag both operands. |
| 1817 if (src1.is(rcx)) { |
| 1818 movq(kScratchRegister, src1); |
| 1819 } |
| 1054 SmiToInteger32(dst, src1); | 1820 SmiToInteger32(dst, src1); |
| 1055 SmiToInteger32(rcx, src2); | 1821 SmiToInteger32(rcx, src2); |
| 1056 | 1822 |
| 1057 shrl(dst); | 1823 shrl(dst); |
| 1058 // Check that the *unsigned* result fits in a smi. | 1824 // Check that the *unsigned* result fits in a smi. |
| 1059 // I.e., that it is a valid positive smi value. The positive smi | 1825 // I.e., that it is a valid positive smi value. The positive smi |
| 1060 // values are 0..0x3fffffff, i.e., neither of the top-most two | 1826 // values are 0..0x3fffffff, i.e., neither of the top-most two |
| 1061 // bits can be set. | 1827 // bits can be set. |
| 1062 // | 1828 // |
| 1063 // These two cases can only happen with shifts by 0 or 1 when | 1829 // These two cases can only happen with shifts by 0 or 1 when |
| 1064 // handed a valid smi. If the answer cannot be represented by a | 1830 // handed a valid smi. If the answer cannot be represented by a |
| 1065 // smi, restore the left and right arguments, and jump to slow | 1831 // smi, restore the left and right arguments, and jump to slow |
| 1066 // case. The low bit of the left argument may be lost, but only | 1832 // case. The low bit of the left argument may be lost, but only |
| 1067 // in a case where it is dropped anyway. | 1833 // in a case where it is dropped anyway. |
| 1068 testl(dst, Immediate(0xc0000000)); | 1834 testl(dst, Immediate(0xc0000000)); |
| 1069 j(zero, &result_ok); | 1835 j(zero, &result_ok); |
| 1070 if (dst.is(src1)) { | 1836 if (dst.is(src1)) { |
| 1071 shll(dst); | 1837 shll(dst); |
| 1072 Integer32ToSmi(dst, dst); | 1838 Integer32ToSmi(dst, dst); |
| 1839 } else if (src1.is(rcx)) { |
| 1840 movq(rcx, kScratchRegister); |
| 1841 } else if (src2.is(rcx)) { |
| 1842 Integer32ToSmi(src2, src2); |
| 1073 } | 1843 } |
| 1074 Integer32ToSmi(rcx, rcx); | |
| 1075 jmp(on_not_smi_result); | 1844 jmp(on_not_smi_result); |
| 1076 bind(&result_ok); | 1845 bind(&result_ok); |
| 1077 // Smi-tag the result in answer. | 1846 // Smi-tag the result in answer. |
| 1078 Integer32ToSmi(dst, dst); | 1847 Integer32ToSmi(dst, dst); |
| 1079 } | 1848 } |
| 1080 | 1849 |
| 1081 | 1850 |
| 1082 void MacroAssembler::SmiShiftArithmeticRight(Register dst, | 1851 void MacroAssembler::SmiShiftArithmeticRight(Register dst, |
| 1083 Register src1, | 1852 Register src1, |
| 1084 Register src2) { | 1853 Register src2) { |
| 1085 ASSERT(!dst.is(rcx)); | 1854 ASSERT(!dst.is(rcx)); |
| 1086 // Untag both operands. | 1855 // Untag both operands. |
| 1087 SmiToInteger32(dst, src1); | 1856 SmiToInteger32(dst, src1); |
| 1088 SmiToInteger32(rcx, src2); | 1857 SmiToInteger32(rcx, src2); |
| 1089 // Shift as integer. | 1858 // Shift as integer. |
| 1090 sarl(dst); | 1859 sarl(dst); |
| 1091 // Retag result. | 1860 // Retag result. |
| 1092 Integer32ToSmi(dst, dst); | 1861 Integer32ToSmi(dst, dst); |
| 1093 } | 1862 } |
| 1094 | 1863 |
| 1095 | 1864 |
| 1096 void MacroAssembler::SelectNonSmi(Register dst, | 1865 void MacroAssembler::SelectNonSmi(Register dst, |
| 1097 Register src1, | 1866 Register src1, |
| 1098 Register src2, | 1867 Register src2, |
| 1099 Label* on_not_smis) { | 1868 Label* on_not_smis) { |
| 1100 ASSERT(!dst.is(src1)); | 1869 ASSERT(!dst.is(src1)); |
| 1101 ASSERT(!dst.is(src2)); | 1870 ASSERT(!dst.is(src2)); |
| 1102 // Both operands must not be smis. | 1871 // Both operands must not be smis. |
| 1103 #ifdef DEBUG | 1872 #ifdef DEBUG |
| 1104 Condition not_both_smis = CheckNotBothSmi(src1, src2); | 1873 if (allow_stub_calls()) { |
| 1105 Check(not_both_smis, "Both registers were smis."); | 1874 Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2)); |
| 1875 Check(not_both_smis, "Both registers were smis."); |
| 1876 } |
| 1106 #endif | 1877 #endif |
| 1107 ASSERT_EQ(0, kSmiTag); | 1878 ASSERT_EQ(0, kSmiTag); |
| 1108 ASSERT_EQ(0, Smi::FromInt(0)); | 1879 ASSERT_EQ(0, Smi::FromInt(0)); |
| 1109 movq(kScratchRegister, Immediate(kSmiTagMask)); | 1880 movq(kScratchRegister, Immediate(kSmiTagMask)); |
| 1110 and_(kScratchRegister, src1); | 1881 and_(kScratchRegister, src1); |
| 1111 testl(kScratchRegister, src2); | 1882 testl(kScratchRegister, src2); |
| 1112 j(not_zero, on_not_smis); | 1883 j(not_zero, on_not_smis); |
| 1113 // One operand is a smi. | 1884 // One operand is a smi. |
| 1114 | 1885 |
| 1115 ASSERT_EQ(1, static_cast<int>(kSmiTagMask)); | 1886 ASSERT_EQ(1, static_cast<int>(kSmiTagMask)); |
| (...skipping 43 matching lines...) |
| 1159 neg(dst); | 1930 neg(dst); |
| 1160 return SmiIndex(dst, static_cast<ScaleFactor>(shift - kSmiTagSize)); | 1931 return SmiIndex(dst, static_cast<ScaleFactor>(shift - kSmiTagSize)); |
| 1161 } | 1932 } |
| 1162 // Shift by shift-kSmiTagSize. | 1933 // Shift by shift-kSmiTagSize. |
| 1163 movl(dst, src); | 1934 movl(dst, src); |
| 1164 neg(dst); | 1935 neg(dst); |
| 1165 shl(dst, Immediate(shift - kSmiTagSize)); | 1936 shl(dst, Immediate(shift - kSmiTagSize)); |
| 1166 return SmiIndex(dst, times_1); | 1937 return SmiIndex(dst, times_1); |
| 1167 } | 1938 } |
| 1168 | 1939 |
| 1940 #endif // V8_LONG_SMI |
| 1169 | 1941 |
| 1170 | 1942 |
| 1943 void MacroAssembler::JumpIfSmi(Register src, Label* on_smi) { |
| 1944 ASSERT_EQ(0, kSmiTag); |
| 1945 Condition smi = CheckSmi(src); |
| 1946 j(smi, on_smi); |
| 1947 } |
| 1948 |
| 1949 |
| 1950 void MacroAssembler::JumpIfNotSmi(Register src, Label* on_not_smi) { |
| 1951 Condition smi = CheckSmi(src); |
| 1952 j(NegateCondition(smi), on_not_smi); |
| 1953 } |
| 1954 |
| 1955 |
| 1956 void MacroAssembler::JumpIfNotPositiveSmi(Register src, |
| 1957 Label* on_not_positive_smi) { |
| 1958 Condition positive_smi = CheckPositiveSmi(src); |
| 1959 j(NegateCondition(positive_smi), on_not_positive_smi); |
| 1960 } |
| 1961 |
| 1962 |
| 1963 void MacroAssembler::JumpIfSmiEqualsConstant(Register src, |
| 1964 Smi* constant, |
| 1965 Label* on_equals) { |
| 1966 SmiCompare(src, constant); |
| 1967 j(equal, on_equals); |
| 1968 } |
| 1969 |
| 1970 |
| 1971 void MacroAssembler::JumpIfNotValidSmiValue(Register src, Label* on_invalid) { |
| 1972 Condition is_valid = CheckInteger32ValidSmiValue(src); |
| 1973 j(NegateCondition(is_valid), on_invalid); |
| 1974 } |
| 1975 |
| 1976 |
| 1977 void MacroAssembler::JumpIfNotBothSmi(Register src1, Register src2, |
| 1978 Label* on_not_both_smi) { |
| 1979 Condition both_smi = CheckBothSmi(src1, src2); |
| 1980 j(NegateCondition(both_smi), on_not_both_smi); |
| 1981 } |
| 1982 |
| 1171 bool MacroAssembler::IsUnsafeSmi(Smi* value) { | 1983 bool MacroAssembler::IsUnsafeSmi(Smi* value) { |
| 1172 return false; | 1984 return false; |
| 1173 } | 1985 } |
| 1174 | 1986 |
| 1987 |
| 1175 void MacroAssembler::LoadUnsafeSmi(Register dst, Smi* source) { | 1988 void MacroAssembler::LoadUnsafeSmi(Register dst, Smi* source) { |
| 1176 UNIMPLEMENTED(); | 1989 UNIMPLEMENTED(); |
| 1177 } | 1990 } |
| 1178 | 1991 |
| 1179 | 1992 |
| 1993 void MacroAssembler::Move(Register dst, Smi* source) { |
| 1994 if (IsUnsafeSmi(source)) { |
| 1995 LoadUnsafeSmi(dst, source); |
| 1996 } else { |
| 1997 Set(dst, reinterpret_cast<int64_t>(source)); |
| 1998 } |
| 1999 } |
| 2000 |
| 2001 |
| 2002 void MacroAssembler::Move(const Operand& dst, Smi* source) { |
| 2003 if (IsUnsafeSmi(source)) { |
| 2004 LoadUnsafeSmi(kScratchRegister, source); |
| 2005 movq(dst, kScratchRegister); |
| 2006 } else { |
| 2007 Set(dst, reinterpret_cast<int64_t>(source)); |
| 2008 } |
| 2009 } |
| 2010 |
| 2011 |
| 1180 void MacroAssembler::Move(Register dst, Handle<Object> source) { | 2012 void MacroAssembler::Move(Register dst, Handle<Object> source) { |
| 1181 ASSERT(!source->IsFailure()); | 2013 ASSERT(!source->IsFailure()); |
| 1182 if (source->IsSmi()) { | 2014 if (source->IsSmi()) { |
| 1183 if (IsUnsafeSmi(source)) { | 2015 Move(dst, Smi::cast(*source)); |
| 1184 LoadUnsafeSmi(dst, source); | |
| 1185 } else { | |
| 1186 int32_t smi = static_cast<int32_t>(reinterpret_cast<intptr_t>(*source)); | |
| 1187 movq(dst, Immediate(smi)); | |
| 1188 } | |
| 1189 } else { | 2016 } else { |
| 1190 movq(dst, source, RelocInfo::EMBEDDED_OBJECT); | 2017 movq(dst, source, RelocInfo::EMBEDDED_OBJECT); |
| 1191 } | 2018 } |
| 1192 } | 2019 } |
| 1193 | 2020 |
| 1194 | 2021 |
| 1195 void MacroAssembler::Move(const Operand& dst, Handle<Object> source) { | 2022 void MacroAssembler::Move(const Operand& dst, Handle<Object> source) { |
| 2023 ASSERT(!source->IsFailure()); |
| 1196 if (source->IsSmi()) { | 2024 if (source->IsSmi()) { |
| 1197 int32_t smi = static_cast<int32_t>(reinterpret_cast<intptr_t>(*source)); | 2025 Move(dst, Smi::cast(*source)); |
| 1198 movq(dst, Immediate(smi)); | |
| 1199 } else { | 2026 } else { |
| 1200 movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT); | 2027 movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT); |
| 1201 movq(dst, kScratchRegister); | 2028 movq(dst, kScratchRegister); |
| 1202 } | 2029 } |
| 1203 } | 2030 } |
| 1204 | 2031 |
| 1205 | 2032 |
| 1206 void MacroAssembler::Cmp(Register dst, Handle<Object> source) { | 2033 void MacroAssembler::Cmp(Register dst, Handle<Object> source) { |
| 1207 Move(kScratchRegister, source); | 2034 if (source->IsSmi()) { |
| 1208 cmpq(dst, kScratchRegister); | 2035 SmiCompare(dst, Smi::cast(*source)); |
| 2036 } else { |
| 2037 Move(kScratchRegister, source); |
| 2038 cmpq(dst, kScratchRegister); |
| 2039 } |
| 1209 } | 2040 } |
| 1210 | 2041 |
| 1211 | 2042 |
| 1212 void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) { | 2043 void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) { |
| 1213 if (source->IsSmi()) { | 2044 if (source->IsSmi()) { |
| 1214 if (IsUnsafeSmi(source)) { | 2045 SmiCompare(dst, Smi::cast(*source)); |
| 1215 LoadUnsafeSmi(kScratchRegister, source); | |
| 1216 cmpl(dst, kScratchRegister); | |
| 1217 } else { | |
| 1218 // For smi-comparison, it suffices to compare the low 32 bits. | |
| 1219 int32_t smi = static_cast<int32_t>(reinterpret_cast<intptr_t>(*source)); | |
| 1220 cmpl(dst, Immediate(smi)); | |
| 1221 } | |
| 1222 } else { | 2046 } else { |
| 1223 ASSERT(source->IsHeapObject()); | 2047 ASSERT(source->IsHeapObject()); |
| 1224 movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT); | 2048 movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT); |
| 1225 cmpq(dst, kScratchRegister); | 2049 cmpq(dst, kScratchRegister); |
| 1226 } | 2050 } |
| 1227 } | 2051 } |
| 1228 | 2052 |
| 1229 | 2053 |
| 1230 void MacroAssembler::Push(Handle<Object> source) { | 2054 void MacroAssembler::Push(Handle<Object> source) { |
| 1231 if (source->IsSmi()) { | 2055 if (source->IsSmi()) { |
| 1232 if (IsUnsafeSmi(source)) { | 2056 Push(Smi::cast(*source)); |
| 1233 LoadUnsafeSmi(kScratchRegister, source); | |
| 1234 push(kScratchRegister); | |
| 1235 } else { | |
| 1236 int32_t smi = static_cast<int32_t>(reinterpret_cast<intptr_t>(*source)); | |
| 1237 push(Immediate(smi)); | |
| 1238 } | |
| 1239 } else { | 2057 } else { |
| 1240 ASSERT(source->IsHeapObject()); | 2058 ASSERT(source->IsHeapObject()); |
| 1241 movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT); | 2059 movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT); |
| 1242 push(kScratchRegister); | 2060 push(kScratchRegister); |
| 1243 } | 2061 } |
| 1244 } | 2062 } |
| 1245 | 2063 |
| 1246 | 2064 |
| 1247 void MacroAssembler::Push(Smi* source) { | 2065 void MacroAssembler::Push(Smi* source) { |
| 1248 if (IsUnsafeSmi(source)) { | 2066 if (IsUnsafeSmi(source)) { |
| 1249 LoadUnsafeSmi(kScratchRegister, source); | 2067 LoadUnsafeSmi(kScratchRegister, source); |
| 1250 push(kScratchRegister); | 2068 push(kScratchRegister); |
| 1251 } else { | 2069 } else { |
| 1252 int32_t smi = static_cast<int32_t>(reinterpret_cast<intptr_t>(source)); | 2070 intptr_t smi = reinterpret_cast<intptr_t>(source); |
| 1253 push(Immediate(smi)); | 2071 if (is_int32(smi)) { |
| 2072 push(Immediate(static_cast<int32_t>(smi))); |
| 2073 } else { |
| 2074 Set(kScratchRegister, smi); |
| 2075 push(kScratchRegister); |
| 2076 } |
| 1254 } | 2077 } |
| 1255 } | 2078 } |
| 1256 | 2079 |
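Push(Smi*) above, and Test(Operand, Smi*) below, share one dispatch: if the smi's raw bit pattern fits in a signed 32-bit immediate it is encoded directly, otherwise it is first materialized in kScratchRegister. The predicate amounts to the following (illustrative sketch mirroring the is_int32() check used in the new code; helper name invented):

    #include <cstdint>

    // True iff a smi's raw bits can be encoded as a 32-bit immediate operand.
    inline bool FitsInInt32Immediate(intptr_t raw_smi_bits) {
      return raw_smi_bits >= INT32_MIN && raw_smi_bits <= INT32_MAX;
    }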
| 2080 |
| 2081 void MacroAssembler::Test(const Operand& src, Smi* source) { |
| 2082 if (IsUnsafeSmi(source)) { |
| 2083 LoadUnsafeSmi(kScratchRegister, source); |
| 2084 testq(src, kScratchRegister); |
| 2085 } else { |
| 2086 intptr_t smi = reinterpret_cast<intptr_t>(source); |
| 2087 if (is_int32(smi)) { |
| 2088 testl(src, Immediate(static_cast<int32_t>(smi))); |
| 2089 } else { |
| 2090 Move(kScratchRegister, source); |
| 2091 testq(src, kScratchRegister); |
| 2092 } |
| 2093 } |
| 2094 } |
| 2095 |
| 1257 | 2096 |
| 1258 void MacroAssembler::Jump(ExternalReference ext) { | 2097 void MacroAssembler::Jump(ExternalReference ext) { |
| 1259 movq(kScratchRegister, ext); | 2098 movq(kScratchRegister, ext); |
| 1260 jmp(kScratchRegister); | 2099 jmp(kScratchRegister); |
| 1261 } | 2100 } |
| 1262 | 2101 |
| 1263 | 2102 |
| 1264 void MacroAssembler::Jump(Address destination, RelocInfo::Mode rmode) { | 2103 void MacroAssembler::Jump(Address destination, RelocInfo::Mode rmode) { |
| 1265 movq(kScratchRegister, destination, rmode); | 2104 movq(kScratchRegister, destination, rmode); |
| 1266 jmp(kScratchRegister); | 2105 jmp(kScratchRegister); |
| (...skipping 27 matching lines...) |
| 1294 | 2133 |
| 1295 void MacroAssembler::PushTryHandler(CodeLocation try_location, | 2134 void MacroAssembler::PushTryHandler(CodeLocation try_location, |
| 1296 HandlerType type) { | 2135 HandlerType type) { |
| 1297 // Adjust this code if not the case. | 2136 // Adjust this code if not the case. |
| 1298 ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize); | 2137 ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize); |
| 1299 | 2138 |
| 1300 // The pc (return address) is already on TOS. This code pushes state, | 2139 // The pc (return address) is already on TOS. This code pushes state, |
| 1301 // frame pointer and current handler. Check that they are expected | 2140 // frame pointer and current handler. Check that they are expected |
| 1302 // next on the stack, in that order. | 2141 // next on the stack, in that order. |
| 1303 ASSERT_EQ(StackHandlerConstants::kStateOffset, | 2142 ASSERT_EQ(StackHandlerConstants::kStateOffset, |
| 1304 StackHandlerConstants::kPCOffset - kPointerSize); | 2143 StackHandlerConstants::kPCOffset - kPointerSize); |
| 1305 ASSERT_EQ(StackHandlerConstants::kFPOffset, | 2144 ASSERT_EQ(StackHandlerConstants::kFPOffset, |
| 1306 StackHandlerConstants::kStateOffset - kPointerSize); | 2145 StackHandlerConstants::kStateOffset - kPointerSize); |
| 1307 ASSERT_EQ(StackHandlerConstants::kNextOffset, | 2146 ASSERT_EQ(StackHandlerConstants::kNextOffset, |
| 1308 StackHandlerConstants::kFPOffset - kPointerSize); | 2147 StackHandlerConstants::kFPOffset - kPointerSize); |
| 1309 | 2148 |
| 1310 if (try_location == IN_JAVASCRIPT) { | 2149 if (try_location == IN_JAVASCRIPT) { |
| 1311 if (type == TRY_CATCH_HANDLER) { | 2150 if (type == TRY_CATCH_HANDLER) { |
| 1312 push(Immediate(StackHandler::TRY_CATCH)); | 2151 push(Immediate(StackHandler::TRY_CATCH)); |
| 1313 } else { | 2152 } else { |
| 1314 push(Immediate(StackHandler::TRY_FINALLY)); | 2153 push(Immediate(StackHandler::TRY_FINALLY)); |
| 1315 } | 2154 } |
| 1316 push(rbp); | 2155 push(rbp); |
| 1317 } else { | 2156 } else { |
| 1318 ASSERT(try_location == IN_JS_ENTRY); | 2157 ASSERT(try_location == IN_JS_ENTRY); |
| (...skipping 25 matching lines...) |
| 1344 } else { | 2183 } else { |
| 1345 shrl(rax, Immediate(8)); | 2184 shrl(rax, Immediate(8)); |
| 1346 and_(rax, Immediate(0xFF)); | 2185 and_(rax, Immediate(0xFF)); |
| 1347 push(rax); | 2186 push(rax); |
| 1348 popfq(); | 2187 popfq(); |
| 1349 } | 2188 } |
| 1350 pop(rax); | 2189 pop(rax); |
| 1351 } | 2190 } |
| 1352 | 2191 |
| 1353 | 2192 |
| 1354 void MacroAssembler::CmpObjectType(Register heap_object, | 2193 void MacroAssembler::CmpObjectType(Register heap_object, InstanceType type, |
| 1355 InstanceType type, | |
| 1356 Register map) { | 2194 Register map) { |
| 1357 movq(map, FieldOperand(heap_object, HeapObject::kMapOffset)); | 2195 movq(map, FieldOperand(heap_object, HeapObject::kMapOffset)); |
| 1358 CmpInstanceType(map, type); | 2196 CmpInstanceType(map, type); |
| 1359 } | 2197 } |
| 1360 | 2198 |
| 1361 | 2199 |
| 1362 void MacroAssembler::CmpInstanceType(Register map, InstanceType type) { | 2200 void MacroAssembler::CmpInstanceType(Register map, InstanceType type) { |
| 1363 cmpb(FieldOperand(map, Map::kInstanceTypeOffset), | 2201 cmpb(FieldOperand(map, Map::kInstanceTypeOffset), |
| 1364 Immediate(static_cast<int8_t>(type))); | 2202 Immediate(static_cast<int8_t>(type))); |
| 1365 } | 2203 } |
| 1366 | 2204 |
| 1367 | 2205 |
| 1368 void MacroAssembler::TryGetFunctionPrototype(Register function, | 2206 void MacroAssembler::TryGetFunctionPrototype(Register function, |
| 1369 Register result, | 2207 Register result, Label* miss) { |
| 1370 Label* miss) { | |
| 1371 // Check that the receiver isn't a smi. | 2208 // Check that the receiver isn't a smi. |
| 1372 testl(function, Immediate(kSmiTagMask)); | 2209 testl(function, Immediate(kSmiTagMask)); |
| 1373 j(zero, miss); | 2210 j(zero, miss); |
| 1374 | 2211 |
| 1375 // Check that the function really is a function. | 2212 // Check that the function really is a function. |
| 1376 CmpObjectType(function, JS_FUNCTION_TYPE, result); | 2213 CmpObjectType(function, JS_FUNCTION_TYPE, result); |
| 1377 j(not_equal, miss); | 2214 j(not_equal, miss); |
| 1378 | 2215 |
| 1379 // Make sure that the function has an instance prototype. | 2216 // Make sure that the function has an instance prototype. |
| 1380 Label non_instance; | 2217 Label non_instance; |
| 1381 testb(FieldOperand(result, Map::kBitFieldOffset), | 2218 testb(FieldOperand(result, Map::kBitFieldOffset), Immediate(1 |
| 1382 Immediate(1 << Map::kHasNonInstancePrototype)); | 2219 << Map::kHasNonInstancePrototype)); |
| 1383 j(not_zero, &non_instance); | 2220 j(not_zero, &non_instance); |
| 1384 | 2221 |
| 1385 // Get the prototype or initial map from the function. | 2222 // Get the prototype or initial map from the function. |
| 1386 movq(result, | 2223 movq(result, |
| 1387 FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); | 2224 FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); |
| 1388 | 2225 |
| 1389 // If the prototype or initial map is the hole, don't return it and | 2226 // If the prototype or initial map is the hole, don't return it and |
| 1390 // simply miss the cache instead. This will allow us to allocate a | 2227 // simply miss the cache instead. This will allow us to allocate a |
| 1391 // prototype object on-demand in the runtime system. | 2228 // prototype object on-demand in the runtime system. |
| 1392 CompareRoot(result, Heap::kTheHoleValueRootIndex); | 2229 CompareRoot(result, Heap::kTheHoleValueRootIndex); |
| (...skipping 46 matching lines...) |
| 1439 movq(kScratchRegister, ExternalReference(counter)); | 2276 movq(kScratchRegister, ExternalReference(counter)); |
| 1440 Operand operand(kScratchRegister, 0); | 2277 Operand operand(kScratchRegister, 0); |
| 1441 if (value == 1) { | 2278 if (value == 1) { |
| 1442 decl(operand); | 2279 decl(operand); |
| 1443 } else { | 2280 } else { |
| 1444 subl(operand, Immediate(value)); | 2281 subl(operand, Immediate(value)); |
| 1445 } | 2282 } |
| 1446 } | 2283 } |
| 1447 } | 2284 } |
| 1448 | 2285 |
| 1449 | |
| 1450 #ifdef ENABLE_DEBUGGER_SUPPORT | 2286 #ifdef ENABLE_DEBUGGER_SUPPORT |
| 1451 | 2287 |
| 1452 void MacroAssembler::PushRegistersFromMemory(RegList regs) { | 2288 void MacroAssembler::PushRegistersFromMemory(RegList regs) { |
| 1453 ASSERT((regs & ~kJSCallerSaved) == 0); | 2289 ASSERT((regs & ~kJSCallerSaved) == 0); |
| 1454 // Push the content of the memory location to the stack. | 2290 // Push the content of the memory location to the stack. |
| 1455 for (int i = 0; i < kNumJSCallerSaved; i++) { | 2291 for (int i = 0; i < kNumJSCallerSaved; i++) { |
| 1456 int r = JSCallerSavedCode(i); | 2292 int r = JSCallerSavedCode(i); |
| 1457 if ((regs & (1 << r)) != 0) { | 2293 if ((regs & (1 << r)) != 0) { |
| 1458 ExternalReference reg_addr = | 2294 ExternalReference reg_addr = |
| 1459 ExternalReference(Debug_Address::Register(i)); | 2295 ExternalReference(Debug_Address::Register(i)); |
| 1460 movq(kScratchRegister, reg_addr); | 2296 movq(kScratchRegister, reg_addr); |
| 1461 push(Operand(kScratchRegister, 0)); | 2297 push(Operand(kScratchRegister, 0)); |
| 1462 } | 2298 } |
| 1463 } | 2299 } |
| 1464 } | 2300 } |
| 1465 | 2301 |
| 2302 |
| 1466 void MacroAssembler::SaveRegistersToMemory(RegList regs) { | 2303 void MacroAssembler::SaveRegistersToMemory(RegList regs) { |
| 1467 ASSERT((regs & ~kJSCallerSaved) == 0); | 2304 ASSERT((regs & ~kJSCallerSaved) == 0); |
| 1468 // Copy the content of registers to memory location. | 2305 // Copy the content of registers to memory location. |
| 1469 for (int i = 0; i < kNumJSCallerSaved; i++) { | 2306 for (int i = 0; i < kNumJSCallerSaved; i++) { |
| 1470 int r = JSCallerSavedCode(i); | 2307 int r = JSCallerSavedCode(i); |
| 1471 if ((regs & (1 << r)) != 0) { | 2308 if ((regs & (1 << r)) != 0) { |
| 1472 Register reg = { r }; | 2309 Register reg = {r}; |
| 1473 ExternalReference reg_addr = | 2310 ExternalReference reg_addr = |
| 1474 ExternalReference(Debug_Address::Register(i)); | 2311 ExternalReference(Debug_Address::Register(i)); |
| 1475 movq(kScratchRegister, reg_addr); | 2312 movq(kScratchRegister, reg_addr); |
| 1476 movq(Operand(kScratchRegister, 0), reg); | 2313 movq(Operand(kScratchRegister, 0), reg); |
| 1477 } | 2314 } |
| 1478 } | 2315 } |
| 1479 } | 2316 } |
| 1480 | 2317 |
| 1481 | 2318 |
| 1482 void MacroAssembler::RestoreRegistersFromMemory(RegList regs) { | 2319 void MacroAssembler::RestoreRegistersFromMemory(RegList regs) { |
| 1483 ASSERT((regs & ~kJSCallerSaved) == 0); | 2320 ASSERT((regs & ~kJSCallerSaved) == 0); |
| 1484 // Copy the content of memory location to registers. | 2321 // Copy the content of memory location to registers. |
| 1485 for (int i = kNumJSCallerSaved - 1; i >= 0; i--) { | 2322 for (int i = kNumJSCallerSaved - 1; i >= 0; i--) { |
| 1486 int r = JSCallerSavedCode(i); | 2323 int r = JSCallerSavedCode(i); |
| 1487 if ((regs & (1 << r)) != 0) { | 2324 if ((regs & (1 << r)) != 0) { |
| 1488 Register reg = { r }; | 2325 Register reg = {r}; |
| 1489 ExternalReference reg_addr = | 2326 ExternalReference reg_addr = |
| 1490 ExternalReference(Debug_Address::Register(i)); | 2327 ExternalReference(Debug_Address::Register(i)); |
| 1491 movq(kScratchRegister, reg_addr); | 2328 movq(kScratchRegister, reg_addr); |
| 1492 movq(reg, Operand(kScratchRegister, 0)); | 2329 movq(reg, Operand(kScratchRegister, 0)); |
| 1493 } | 2330 } |
| 1494 } | 2331 } |
| 1495 } | 2332 } |
| 1496 | 2333 |
| 1497 | 2334 |
| 1498 void MacroAssembler::PopRegistersToMemory(RegList regs) { | 2335 void MacroAssembler::PopRegistersToMemory(RegList regs) { |
| 1499 ASSERT((regs & ~kJSCallerSaved) == 0); | 2336 ASSERT((regs & ~kJSCallerSaved) == 0); |
| 1500 // Pop the content from the stack to the memory location. | 2337 // Pop the content from the stack to the memory location. |
| 1501 for (int i = kNumJSCallerSaved - 1; i >= 0; i--) { | 2338 for (int i = kNumJSCallerSaved - 1; i >= 0; i--) { |
| 1502 int r = JSCallerSavedCode(i); | 2339 int r = JSCallerSavedCode(i); |
| 1503 if ((regs & (1 << r)) != 0) { | 2340 if ((regs & (1 << r)) != 0) { |
| 1504 ExternalReference reg_addr = | 2341 ExternalReference reg_addr = |
| 1505 ExternalReference(Debug_Address::Register(i)); | 2342 ExternalReference(Debug_Address::Register(i)); |
| 1506 movq(kScratchRegister, reg_addr); | 2343 movq(kScratchRegister, reg_addr); |
| 1507 pop(Operand(kScratchRegister, 0)); | 2344 pop(Operand(kScratchRegister, 0)); |
| 1508 } | 2345 } |
| 1509 } | 2346 } |
| 1510 } | 2347 } |
| 1511 | 2348 |
| 1512 | 2349 |
| 1513 void MacroAssembler::CopyRegistersFromStackToMemory(Register base, | 2350 void MacroAssembler::CopyRegistersFromStackToMemory(Register base, |
| 1514 Register scratch, | 2351 Register scratch, |
| 1515 RegList regs) { | 2352 RegList regs) { |
| 1516 ASSERT(!scratch.is(kScratchRegister)); | 2353 ASSERT(!scratch.is(kScratchRegister)); |
| 1517 ASSERT(!base.is(kScratchRegister)); | 2354 ASSERT(!base.is(kScratchRegister)); |
| 1518 ASSERT(!base.is(scratch)); | 2355 ASSERT(!base.is(scratch)); |
| 1519 ASSERT((regs & ~kJSCallerSaved) == 0); | 2356 ASSERT((regs & ~kJSCallerSaved) == 0); |
| 1520 // Copy the content of the stack to the memory location and adjust base. | 2357 // Copy the content of the stack to the memory location and adjust base. |
| 1521 for (int i = kNumJSCallerSaved - 1; i >= 0; i--) { | 2358 for (int i = kNumJSCallerSaved - 1; i >= 0; i--) { |
| 1522 int r = JSCallerSavedCode(i); | 2359 int r = JSCallerSavedCode(i); |
| 1523 if ((regs & (1 << r)) != 0) { | 2360 if ((regs & (1 << r)) != 0) { |
| 1524 movq(scratch, Operand(base, 0)); | 2361 movq(scratch, Operand(base, 0)); |
| 1525 ExternalReference reg_addr = | 2362 ExternalReference reg_addr = |
| 1526 ExternalReference(Debug_Address::Register(i)); | 2363 ExternalReference(Debug_Address::Register(i)); |
| 1527 movq(kScratchRegister, reg_addr); | 2364 movq(kScratchRegister, reg_addr); |
| 1528 movq(Operand(kScratchRegister, 0), scratch); | 2365 movq(Operand(kScratchRegister, 0), scratch); |
| 1529 lea(base, Operand(base, kPointerSize)); | 2366 lea(base, Operand(base, kPointerSize)); |
| 1530 } | 2367 } |
| 1531 } | 2368 } |
| 1532 } | 2369 } |
| 1533 | 2370 |
| 1534 #endif // ENABLE_DEBUGGER_SUPPORT | 2371 #endif // ENABLE_DEBUGGER_SUPPORT |
| 1535 | 2372 |
| 1536 | 2373 |
| 1537 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag) { | 2374 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag) { |
| 1538 bool resolved; | 2375 bool resolved; |
| 1539 Handle<Code> code = ResolveBuiltin(id, &resolved); | 2376 Handle<Code> code = ResolveBuiltin(id, &resolved); |
| 1540 | 2377 |
| 1541 // Calls are not allowed in some stubs. | 2378 // Calls are not allowed in some stubs. |
| 1542 ASSERT(flag == JUMP_FUNCTION || allow_stub_calls()); | 2379 ASSERT(flag == JUMP_FUNCTION || allow_stub_calls()); |
| 1543 | 2380 |
| 1544 // Rely on the assertion to check that the number of provided | 2381 // Rely on the assertion to check that the number of provided |
| 1545 // arguments match the expected number of arguments. Fake a | 2382 // arguments match the expected number of arguments. Fake a |
| 1546 // parameter count to avoid emitting code to do the check. | 2383 // parameter count to avoid emitting code to do the check. |
| 1547 ParameterCount expected(0); | 2384 ParameterCount expected(0); |
| 1548 InvokeCode(Handle<Code>(code), expected, expected, | 2385 InvokeCode(Handle<Code>(code), expected, expected, RelocInfo::CODE_TARGET, |
| 1549 RelocInfo::CODE_TARGET, flag); | 2386 flag); |
| 1550 | 2387 |
| 1551 const char* name = Builtins::GetName(id); | 2388 const char* name = Builtins::GetName(id); |
| 1552 int argc = Builtins::GetArgumentsCount(id); | 2389 int argc = Builtins::GetArgumentsCount(id); |
| 1553 // The target address for the jump is stored as an immediate at offset | 2390 // The target address for the jump is stored as an immediate at offset |
| 1554 // kInvokeCodeAddressOffset. | 2391 // kInvokeCodeAddressOffset. |
| 1555 if (!resolved) { | 2392 if (!resolved) { |
| 1556 uint32_t flags = | 2393 uint32_t flags = |
| 1557 Bootstrapper::FixupFlagsArgumentsCount::encode(argc) | | 2394 Bootstrapper::FixupFlagsArgumentsCount::encode(argc) | |
| 1558 Bootstrapper::FixupFlagsUseCodeObject::encode(false); | 2395 Bootstrapper::FixupFlagsUseCodeObject::encode(false); |
| 1559 Unresolved entry = | 2396 Unresolved entry = |
| (...skipping 10 matching lines...) |
| 1570 Label* done, | 2407 Label* done, |
| 1571 InvokeFlag flag) { | 2408 InvokeFlag flag) { |
| 1572 bool definitely_matches = false; | 2409 bool definitely_matches = false; |
| 1573 Label invoke; | 2410 Label invoke; |
| 1574 if (expected.is_immediate()) { | 2411 if (expected.is_immediate()) { |
| 1575 ASSERT(actual.is_immediate()); | 2412 ASSERT(actual.is_immediate()); |
| 1576 if (expected.immediate() == actual.immediate()) { | 2413 if (expected.immediate() == actual.immediate()) { |
| 1577 definitely_matches = true; | 2414 definitely_matches = true; |
| 1578 } else { | 2415 } else { |
| 1579 movq(rax, Immediate(actual.immediate())); | 2416 movq(rax, Immediate(actual.immediate())); |
| 1580 if (expected.immediate() == | 2417 if (expected.immediate() |
| 1581 SharedFunctionInfo::kDontAdaptArgumentsSentinel) { | 2418 == SharedFunctionInfo::kDontAdaptArgumentsSentinel) { |
| 1582 // Don't worry about adapting arguments for built-ins that | 2419 // Don't worry about adapting arguments for built-ins that |
| 1583 // don't want that done. Skip adaptation code by making it look | 2420 // don't want that done. Skip adaptation code by making it look |
| 1584 // like we have a match between expected and actual number of | 2421 // like we have a match between expected and actual number of |
| 1585 // arguments. | 2422 // arguments. |
| 1586 definitely_matches = true; | 2423 definitely_matches = true; |
| 1587 } else { | 2424 } else { |
| 1588 movq(rbx, Immediate(expected.immediate())); | 2425 movq(rbx, Immediate(expected.immediate())); |
| 1589 } | 2426 } |
| 1590 } | 2427 } |
| 1591 } else { | 2428 } else { |
| (...skipping 69 matching lines...) |
| 1661 bind(&done); | 2498 bind(&done); |
| 1662 } | 2499 } |
| 1663 | 2500 |
| 1664 | 2501 |
| 1665 void MacroAssembler::InvokeFunction(Register function, | 2502 void MacroAssembler::InvokeFunction(Register function, |
| 1666 const ParameterCount& actual, | 2503 const ParameterCount& actual, |
| 1667 InvokeFlag flag) { | 2504 InvokeFlag flag) { |
| 1668 ASSERT(function.is(rdi)); | 2505 ASSERT(function.is(rdi)); |
| 1669 movq(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset)); | 2506 movq(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset)); |
| 1670 movq(rsi, FieldOperand(function, JSFunction::kContextOffset)); | 2507 movq(rsi, FieldOperand(function, JSFunction::kContextOffset)); |
| 1671 movsxlq(rbx, | 2508 movsxlq(rbx, FieldOperand(rdx, |
| 1672 FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset)); | 2509 SharedFunctionInfo::kFormalParameterCountOffset)); |
| 1673 movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset)); | 2510 movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset)); |
| 1674 // Advances rdx to the end of the Code object header, to the start of | 2511 // Advances rdx to the end of the Code object header, to the start of |
| 1675 // the executable code. | 2512 // the executable code. |
| 1676 lea(rdx, FieldOperand(rdx, Code::kHeaderSize)); | 2513 lea(rdx, FieldOperand(rdx, Code::kHeaderSize)); |
| 1677 | 2514 |
| 1678 ParameterCount expected(rbx); | 2515 ParameterCount expected(rbx); |
| 1679 InvokeCode(rdx, expected, actual, flag); | 2516 InvokeCode(rdx, expected, actual, flag); |
| 1680 } | 2517 } |
| 1681 | 2518 |
| 1682 | 2519 |
| 1683 void MacroAssembler::EnterFrame(StackFrame::Type type) { | 2520 void MacroAssembler::EnterFrame(StackFrame::Type type) { |
| 1684 push(rbp); | 2521 push(rbp); |
| 1685 movq(rbp, rsp); | 2522 movq(rbp, rsp); |
| 1686 push(rsi); // Context. | 2523 push(rsi); // Context. |
| 1687 push(Immediate(Smi::FromInt(type))); | 2524 Push(Smi::FromInt(type)); |
| 1688 movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT); | 2525 movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT); |
| 1689 push(kScratchRegister); | 2526 push(kScratchRegister); |
| 1690 if (FLAG_debug_code) { | 2527 if (FLAG_debug_code) { |
| 1691 movq(kScratchRegister, | 2528 movq(kScratchRegister, Factory::undefined_value(), |
| 1692 Factory::undefined_value(), | |
| 1693 RelocInfo::EMBEDDED_OBJECT); | 2529 RelocInfo::EMBEDDED_OBJECT); |
| 1694 cmpq(Operand(rsp, 0), kScratchRegister); | 2530 cmpq(Operand(rsp, 0), kScratchRegister); |
| 1695 Check(not_equal, "code object not properly patched"); | 2531 Check(not_equal, "code object not properly patched"); |
| 1696 } | 2532 } |
| 1697 } | 2533 } |
| 1698 | 2534 |
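After the pushes in EnterFrame above, the frame has a fixed layout relative to rbp, which is what LeaveFrame's debug check below relies on when it compares the marker slot. Sketched below with kPointerSize == 8; the constant names are invented here as a reading aid:

    // Frame slot offsets from rbp after EnterFrame (kPointerSize == 8):
    const int kReturnAddressOffset = +8;   // pushed by the caller's call
    const int kCallerFPOffset      =  0;   // caller's rbp
    const int kContextOffset       = -8;   // rsi
    const int kMarkerOffset        = -16;  // Smi::FromInt(type), checked by LeaveFrame
    const int kCodeObjectOffset    = -24;  // code object for the generated code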
| 1699 | 2535 |
| 1700 void MacroAssembler::LeaveFrame(StackFrame::Type type) { | 2536 void MacroAssembler::LeaveFrame(StackFrame::Type type) { |
| 1701 if (FLAG_debug_code) { | 2537 if (FLAG_debug_code) { |
| 1702 movq(kScratchRegister, Immediate(Smi::FromInt(type))); | 2538 Move(kScratchRegister, Smi::FromInt(type)); |
| 1703 cmpq(Operand(rbp, StandardFrameConstants::kMarkerOffset), kScratchRegister); | 2539 cmpq(Operand(rbp, StandardFrameConstants::kMarkerOffset), kScratchRegister); |
| 1704 Check(equal, "stack frame types must match"); | 2540 Check(equal, "stack frame types must match"); |
| 1705 } | 2541 } |
| 1706 movq(rsp, rbp); | 2542 movq(rsp, rbp); |
| 1707 pop(rbp); | 2543 pop(rbp); |
| 1708 } | 2544 } |
| 1709 | 2545 |
| 1710 | 2546 |
| 1711 | |
| 1712 void MacroAssembler::EnterExitFrame(StackFrame::Type type, int result_size) { | 2547 void MacroAssembler::EnterExitFrame(StackFrame::Type type, int result_size) { |
| 1713 ASSERT(type == StackFrame::EXIT || type == StackFrame::EXIT_DEBUG); | 2548 ASSERT(type == StackFrame::EXIT || type == StackFrame::EXIT_DEBUG); |
| 1714 | 2549 |
| 1715 // Setup the frame structure on the stack. | 2550 // Setup the frame structure on the stack. |
| 1716 // All constants are relative to the frame pointer of the exit frame. | 2551 // All constants are relative to the frame pointer of the exit frame. |
| 1717 ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize); | 2552 ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize); |
| 1718 ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize); | 2553 ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize); |
| 1719 ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize); | 2554 ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize); |
| 1720 push(rbp); | 2555 push(rbp); |
| 1721 movq(rbp, rsp); | 2556 movq(rbp, rsp); |
| 1722 | 2557 |
| 1723 // Reserve room for entry stack pointer and push the debug marker. | 2558 // Reserve room for entry stack pointer and push the debug marker. |
| 1724 ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize); | 2559 ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize); |
| 1725 push(Immediate(0)); // saved entry sp, patched before call | 2560 push(Immediate(0)); // saved entry sp, patched before call |
| 1726 push(Immediate(type == StackFrame::EXIT_DEBUG ? 1 : 0)); | 2561 push(Immediate(type == StackFrame::EXIT_DEBUG ? 1 : 0)); |
| 1727 | 2562 |
| 1728 // Save the frame pointer and the context in top. | 2563 // Save the frame pointer and the context in top. |
| 1729 ExternalReference c_entry_fp_address(Top::k_c_entry_fp_address); | 2564 ExternalReference c_entry_fp_address(Top::k_c_entry_fp_address); |
| 1730 ExternalReference context_address(Top::k_context_address); | 2565 ExternalReference context_address(Top::k_context_address); |
| 1731 movq(r14, rax); // Backup rax before we use it. | 2566 movq(r14, rax); // Backup rax before we use it. |
| 1732 | 2567 |
| 1733 movq(rax, rbp); | 2568 movq(rax, rbp); |
| 1734 store_rax(c_entry_fp_address); | 2569 store_rax(c_entry_fp_address); |
| (...skipping 23 matching lines...) |
| 1758 // Reserve space on stack for result and argument structures, if necessary. | 2593 // Reserve space on stack for result and argument structures, if necessary. |
| 1759 int result_stack_space = (result_size < 2) ? 0 : result_size * kPointerSize; | 2594 int result_stack_space = (result_size < 2) ? 0 : result_size * kPointerSize; |
| 1760 // Reserve space for the Arguments object. The Windows 64-bit ABI | 2595 // Reserve space for the Arguments object. The Windows 64-bit ABI |
| 1761 // requires us to pass this structure as a pointer to its location on | 2596 // requires us to pass this structure as a pointer to its location on |
| 1762 // the stack. The structure contains 2 values. | 2597 // the stack. The structure contains 2 values. |
| 1763 int argument_stack_space = 2 * kPointerSize; | 2598 int argument_stack_space = 2 * kPointerSize; |
| 1764 // We also need backing space for 4 parameters, even though | 2599 // We also need backing space for 4 parameters, even though |
| 1765 // we only pass one or two parameters, and they are passed in registers. | 2600 // we only pass one or two parameters, and they are passed in registers. |
| 1766 int argument_mirror_space = 4 * kPointerSize; | 2601 int argument_mirror_space = 4 * kPointerSize; |
| 1767 int total_stack_space = | 2602 int total_stack_space = |
| 1768 argument_mirror_space + argument_stack_space + result_stack_space; | 2603 argument_mirror_space + argument_stack_space + result_stack_space; |
| 1769 subq(rsp, Immediate(total_stack_space)); | 2604 subq(rsp, Immediate(total_stack_space)); |
| 1770 #endif | 2605 #endif |
| 1771 | 2606 |
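The Windows reservation above is just arithmetic on three chunks of stack; a small, hedged restatement as standalone C++ (kPointerSize assumed to be 8, result_size being the number of pointer-sized values returned through memory):

#include <cstdio>

static int TotalStackSpace(int result_size) {
  const int kPointerSize = 8;
  // Results of more than one pointer go through a buffer on the stack.
  int result_stack_space = (result_size < 2) ? 0 : result_size * kPointerSize;
  int argument_stack_space = 2 * kPointerSize;   // the two-slot Arguments struct
  int argument_mirror_space = 4 * kPointerSize;  // shadow space for four register params
  return argument_mirror_space + argument_stack_space + result_stack_space;
}

int main() {
  // 48 bytes for a single-pointer result, 64 when two pointers come back in memory.
  std::printf("result_size 1 -> %d, result_size 2 -> %d\n",
              TotalStackSpace(1), TotalStackSpace(2));
  return 0;
}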
| 1772 // Get the required frame alignment for the OS. | 2607 // Get the required frame alignment for the OS. |
| 1773 static const int kFrameAlignment = OS::ActivationFrameAlignment(); | 2608 static const int kFrameAlignment = OS::ActivationFrameAlignment(); |
| 1774 if (kFrameAlignment > 0) { | 2609 if (kFrameAlignment > 0) { |
| 1775 ASSERT(IsPowerOf2(kFrameAlignment)); | 2610 ASSERT(IsPowerOf2(kFrameAlignment)); |
| 1776 movq(kScratchRegister, Immediate(-kFrameAlignment)); | 2611 movq(kScratchRegister, Immediate(-kFrameAlignment)); |
| 1777 and_(rsp, kScratchRegister); | 2612 and_(rsp, kScratchRegister); |
| 1778 } | 2613 } |
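The masking above rounds rsp down to the nearest multiple of the alignment; a minimal sketch, assuming the alignment is a power of two such as 16:

#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t kFrameAlignment = 16;             // assumed OS requirement
  uint64_t sp = 0x7fffffffd358;                    // hypothetical unaligned rsp
  uint64_t aligned = sp & ~(kFrameAlignment - 1);  // same effect as and_(rsp, -kFrameAlignment)
  std::printf("%#llx rounds down to %#llx\n",
              (unsigned long long)sp, (unsigned long long)aligned);
  return 0;
}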
| (...skipping 48 matching lines...) |
| 1827 // Push the return address to get ready to return. | 2662 // Push the return address to get ready to return. |
| 1828 push(rcx); | 2663 push(rcx); |
| 1829 | 2664 |
| 1830 // Clear the top frame. | 2665 // Clear the top frame. |
| 1831 ExternalReference c_entry_fp_address(Top::k_c_entry_fp_address); | 2666 ExternalReference c_entry_fp_address(Top::k_c_entry_fp_address); |
| 1832 movq(kScratchRegister, c_entry_fp_address); | 2667 movq(kScratchRegister, c_entry_fp_address); |
| 1833 movq(Operand(kScratchRegister, 0), Immediate(0)); | 2668 movq(Operand(kScratchRegister, 0), Immediate(0)); |
| 1834 } | 2669 } |
| 1835 | 2670 |
| 1836 | 2671 |
| 1837 Register MacroAssembler::CheckMaps(JSObject* object, Register object_reg, | 2672 Register MacroAssembler::CheckMaps(JSObject* object, |
| 1838 JSObject* holder, Register holder_reg, | 2673 Register object_reg, |
| 2674 JSObject* holder, |
| 2675 Register holder_reg, |
| 1839 Register scratch, | 2676 Register scratch, |
| 1840 Label* miss) { | 2677 Label* miss) { |
| 1841 // Make sure there's no overlap between scratch and the other | 2678 // Make sure there's no overlap between scratch and the other |
| 1842 // registers. | 2679 // registers. |
| 1843 ASSERT(!scratch.is(object_reg) && !scratch.is(holder_reg)); | 2680 ASSERT(!scratch.is(object_reg) && !scratch.is(holder_reg)); |
| 1844 | 2681 |
| 1845 // Keep track of the current object in register reg. On the first | 2682 // Keep track of the current object in register reg. On the first |
| 1846 // iteration, reg is an alias for object_reg; on later iterations, | 2683 // iteration, reg is an alias for object_reg; on later iterations, |
| 1847 // it is an alias for holder_reg. | 2684 // it is an alias for holder_reg. |
| 1848 Register reg = object_reg; | 2685 Register reg = object_reg; |
| (...skipping 45 matching lines...) |
| 1894 // The prototype is in old space; load it directly. | 2731 // The prototype is in old space; load it directly. |
| 1895 reg = holder_reg; // from now on the object is in holder_reg | 2732 reg = holder_reg; // from now on the object is in holder_reg |
| 1896 Move(reg, Handle<JSObject>(prototype)); | 2733 Move(reg, Handle<JSObject>(prototype)); |
| 1897 } | 2734 } |
| 1898 | 2735 |
| 1899 // Go to the next object in the prototype chain. | 2736 // Go to the next object in the prototype chain. |
| 1900 object = prototype; | 2737 object = prototype; |
| 1901 } | 2738 } |
| 1902 | 2739 |
| 1903 // Check the holder map. | 2740 // Check the holder map. |
| 1904 Cmp(FieldOperand(reg, HeapObject::kMapOffset), | 2741 Cmp(FieldOperand(reg, HeapObject::kMapOffset), Handle<Map>(holder->map())); |
| 1905 Handle<Map>(holder->map())); | |
| 1906 j(not_equal, miss); | 2742 j(not_equal, miss); |
| 1907 | 2743 |
| 1908 // Log the check depth. | 2744 // Log the check depth. |
| 1909 LOG(IntEvent("check-maps-depth", depth)); | 2745 LOG(IntEvent("check-maps-depth", depth)); |
| 1910 | 2746 |
| 1911 // Perform security check for access to the global object and return | 2747 // Perform security check for access to the global object and return |
| 1912 // the holder register. | 2748 // the holder register. |
| 1913 ASSERT(object == holder); | 2749 ASSERT(object == holder); |
| 1914 ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded()); | 2750 ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded()); |
| 1915 if (object->IsJSGlobalProxy()) { | 2751 if (object->IsJSGlobalProxy()) { |
| 1916 CheckAccessGlobalProxy(reg, scratch, miss); | 2752 CheckAccessGlobalProxy(reg, scratch, miss); |
| 1917 } | 2753 } |
| 1918 return reg; | 2754 return reg; |
| 1919 } | 2755 } |
| 1920 | 2756 |
| 1921 | 2757 |
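What the emitted checks amount to, as a hedged, hand-written sketch rather than the generated code (it omits the new-space-prototype and global-proxy special cases that live in the elided hunk):

struct Map {};
struct ObjectSketch {
  const Map* map;
  ObjectSketch* prototype;
};

// True when every object from 'object' up to and including 'holder' still has
// the map recorded at compile time; false corresponds to j(not_equal, miss).
static bool ChainMapsMatch(ObjectSketch* object,
                           ObjectSketch* holder,
                           const Map* const* expected_maps) {
  int depth = 0;
  while (object != holder) {
    if (object->map != expected_maps[depth]) return false;
    object = object->prototype;  // go to the next object in the prototype chain
    ++depth;
  }
  return object->map == expected_maps[depth];  // the final holder-map check
}

int main() {
  Map object_map, holder_map;
  ObjectSketch holder = { &holder_map, 0 };
  ObjectSketch object = { &object_map, &holder };
  const Map* expected[] = { &object_map, &holder_map };
  return ChainMapsMatch(&object, &holder, expected) ? 0 : 1;
}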
| 1922 | |
| 1923 | |
| 1924 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, | 2758 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, |
| 1925 Register scratch, | 2759 Register scratch, |
| 1926 Label* miss) { | 2760 Label* miss) { |
| 1927 Label same_contexts; | 2761 Label same_contexts; |
| 1928 | 2762 |
| 1929 ASSERT(!holder_reg.is(scratch)); | 2763 ASSERT(!holder_reg.is(scratch)); |
| 1930 ASSERT(!scratch.is(kScratchRegister)); | 2764 ASSERT(!scratch.is(kScratchRegister)); |
| 1931 // Load current lexical context from the stack frame. | 2765 // Load current lexical context from the stack frame. |
| 1932 movq(scratch, Operand(rbp, StandardFrameConstants::kContextOffset)); | 2766 movq(scratch, Operand(rbp, StandardFrameConstants::kContextOffset)); |
| 1933 | 2767 |
| (...skipping 33 matching lines...) |
| 1967 | 2801 |
| 1968 // Read the first word and compare to global_context_map(). | 2802 // Read the first word and compare to global_context_map(). |
| 1969 movq(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset)); | 2803 movq(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset)); |
| 1970 CompareRoot(holder_reg, Heap::kGlobalContextMapRootIndex); | 2804 CompareRoot(holder_reg, Heap::kGlobalContextMapRootIndex); |
| 1971 Check(equal, "JSGlobalObject::global_context should be a global context."); | 2805 Check(equal, "JSGlobalObject::global_context should be a global context."); |
| 1972 pop(holder_reg); | 2806 pop(holder_reg); |
| 1973 } | 2807 } |
| 1974 | 2808 |
| 1975 movq(kScratchRegister, | 2809 movq(kScratchRegister, |
| 1976 FieldOperand(holder_reg, JSGlobalProxy::kContextOffset)); | 2810 FieldOperand(holder_reg, JSGlobalProxy::kContextOffset)); |
| 1977 int token_offset = Context::kHeaderSize + | 2811 int token_offset = Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX |
| 1978 Context::SECURITY_TOKEN_INDEX * kPointerSize; | 2812 * kPointerSize; |
| 1979 movq(scratch, FieldOperand(scratch, token_offset)); | 2813 movq(scratch, FieldOperand(scratch, token_offset)); |
| 1980 cmpq(scratch, FieldOperand(kScratchRegister, token_offset)); | 2814 cmpq(scratch, FieldOperand(kScratchRegister, token_offset)); |
| 1981 j(not_equal, miss); | 2815 j(not_equal, miss); |
| 1982 | 2816 |
| 1983 bind(&same_contexts); | 2817 bind(&same_contexts); |
| 1984 } | 2818 } |
| 1985 | 2819 |
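The fast path above reduces to a single pointer comparison between the two contexts' security tokens; a minimal sketch with placeholder types, not the real Context layout:

#include <cstdio>

struct ContextSketch { void* security_token; };

// The check succeeds when the current lexical context and the global proxy's
// context carry the same security token; anything else jumps to miss.
static bool SameSecurityToken(const ContextSketch* current,
                              const ContextSketch* proxy_context) {
  return current->security_token == proxy_context->security_token;
}

int main() {
  int token = 0;
  ContextSketch a = { &token };
  ContextSketch b = { &token };
  std::printf("same token: %d\n", SameSecurityToken(&a, &b) ? 1 : 0);  // prints 1
  return 0;
}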
| 1986 | 2820 |
| 1987 void MacroAssembler::LoadAllocationTopHelper(Register result, | 2821 void MacroAssembler::LoadAllocationTopHelper(Register result, |
| 1988 Register result_end, | 2822 Register result_end, |
| (...skipping 164 matching lines...) |
| 2153 | 2987 |
| 2154 CodePatcher::~CodePatcher() { | 2988 CodePatcher::~CodePatcher() { |
| 2155 // Indicate that code has changed. | 2989 // Indicate that code has changed. |
| 2156 CPU::FlushICache(address_, size_); | 2990 CPU::FlushICache(address_, size_); |
| 2157 | 2991 |
| 2158 // Check that the code was patched as expected. | 2992 // Check that the code was patched as expected. |
| 2159 ASSERT(masm_.pc_ == address_ + size_); | 2993 ASSERT(masm_.pc_ == address_ + size_); |
| 2160 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); | 2994 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); |
| 2161 } | 2995 } |
| 2162 | 2996 |
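A hypothetical usage sketch, within this file's context, of how a CodePatcher is meant to be driven so that the destructor's ASSERTs hold (masm() and int3() are assumed from the surrounding assembler; the helper itself is invented for illustration):

// Invented helper for illustration only; not part of the patch.
static void PatchWithBreakpoints(byte* address, int size) {
  CodePatcher patcher(address, size);  // takes over 'size' bytes at 'address'
  for (int i = 0; i < size; i++) {
    patcher.masm()->int3();            // int3 is one byte, so the region is filled exactly
  }
}  // ~CodePatcher flushes the instruction cache and ASSERTs that exactly 'size' bytes were emitted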
| 2163 | |
| 2164 } } // namespace v8::internal | 2997 } } // namespace v8::internal |