Chromium Code Reviews

Diff: src/x64/codegen-x64.cc

Issue 105503006: Replace movq with movp for X64 when the operand size is kPointerSize (Closed)
Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Rebased with bleeding_edge. Created 6 years, 11 months ago.
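A note on the substance of the change: in V8's x64 assembler, movq always emits a 64-bit move, while movp emits a move whose width is kPointerSize. On a standard LP64 x64 build the two coincide, so the patch is behavior-preserving there; the split prepares the code for builds where pointers are narrower than 64 bits (such as the x32 ABI). A minimal plain-C++ sketch of the distinction (illustrative names, not V8 code):

// Minimal sketch (plain C++, not V8 code) of the movq/movp distinction.
// StorePointer copies exactly sizeof(void*) bytes -- 8 on LP64, 4 on an
// x32-style ABI -- while Store64 always copies 8 bytes and could overrun
// a 4-byte slot on a narrow-pointer build.
#include <cstdint>
#include <cstring>

void Store64(void* slot, uint64_t value) {    // "movq"
  std::memcpy(slot, &value, sizeof(uint64_t));
}

void StorePointer(void* slot, void* value) {  // "movp"
  std::memcpy(slot, &value, sizeof(void*));   // kPointerSize bytes
}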
   1  // Copyright 2012 the V8 project authors. All rights reserved.
   2  // Redistribution and use in source and binary forms, with or without
   3  // modification, are permitted provided that the following conditions are
   4  // met:
   5  //
   6  //     * Redistributions of source code must retain the above copyright
   7  //       notice, this list of conditions and the following disclaimer.
   8  //     * Redistributions in binary form must reproduce the above
   9  //       copyright notice, this list of conditions and the following
  10  //       disclaimer in the documentation and/or other materials provided
(...skipping 152 matching lines...)
 163
 164    Label valid_result;
 165    Label return_result;
 166    // If Invalid Operand or Zero Division exceptions are set,
 167    // return NaN.
 168    __ testb(rax, Immediate(5));
 169    __ j(zero, &valid_result);
 170    __ fstp(0);  // Drop result in st(0).
 171    int64_t kNaNValue = V8_INT64_C(0x7ff8000000000000);
 172    __ movq(rcx, kNaNValue);
-173    __ movq(Operand(rsp, kPointerSize), rcx);
+173    __ movp(Operand(rsp, kPointerSize), rcx);
 174    __ movsd(xmm0, Operand(rsp, kPointerSize));
 175    __ jmp(&return_result);
 176
 177    // If result is valid, return that.
 178    __ bind(&valid_result);
 179    __ fstp_d(Operand(rsp, kPointerSize));
 180    __ movsd(xmm0, Operand(rsp, kPointerSize));
 181
 182    // Clean up FPU stack and exceptions and return xmm0
 183    __ bind(&return_result);
(...skipping 32 matching lines...)
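The constant stored above, 0x7ff8000000000000, is the canonical quiet-NaN bit pattern for an IEEE-754 double (all exponent bits set, top mantissa bit set); the stub materializes it in rcx, spills it to a stack slot, and reloads it into xmm0. A small self-contained check of that round trip:

// Sketch verifying the spill/reload above: the 64-bit pattern
// 0x7ff8000000000000, reinterpreted as a double, is a quiet NaN.
#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>

int main() {
  const uint64_t kNaNValue = 0x7ff8000000000000ULL;
  double d;
  std::memcpy(&d, &kNaNValue, sizeof(d));  // the movq + movsd round trip
  assert(std::isnan(d));
  return 0;
}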
 216    //  -- rcx    : key
 217    //  -- rdx    : receiver
 218    //  -- rsp[0] : return address
 219    // -----------------------------------
 220    if (mode == TRACK_ALLOCATION_SITE) {
 221      ASSERT(allocation_memento_found != NULL);
 222      __ JumpIfJSArrayHasAllocationMemento(rdx, rdi, allocation_memento_found);
 223    }
 224
 225    // Set transitioned map.
-226    __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
+226    __ movp(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
 227    __ RecordWriteField(rdx,
 228                        HeapObject::kMapOffset,
 229                        rbx,
 230                        rdi,
 231                        kDontSaveFPRegs,
 232                        EMIT_REMEMBERED_SET,
 233                        OMIT_SMI_CHECK);
 234  }
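Throughout this file, a pointer store into a heap object (now a movp) is paired with RecordWriteField, which tells the generational GC about a possible old-to-new pointer. A rough conceptual sketch, not V8's actual implementation (the Heap type and InNewSpace predicate here are stand-ins):

// Conceptual write-barrier sketch: after storing a pointer into an object
// field (the movp), record the slot so a later scavenge can find pointers
// from old space into new space. All names here are illustrative.
#include <unordered_set>

struct Heap {
  std::unordered_set<void**> remembered_set;       // slots to revisit at GC
  bool InNewSpace(void*) const { return false; }   // stand-in predicate
};

inline void WriteFieldWithBarrier(Heap* heap, void** slot, void* value) {
  *slot = value;                  // the movp store
  if (heap->InNewSpace(value)) {  // roughly what RecordWriteField checks
    heap->remembered_set.insert(slot);
  }
}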
 235
 236
 237  void ElementsTransitionGenerator::GenerateSmiToDouble(
 238      MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
 239    // ----------- S t a t e -------------
 240    //  -- rax    : value
 241    //  -- rbx    : target map
 242    //  -- rcx    : key
 243    //  -- rdx    : receiver
 244    //  -- rsp[0] : return address
 245    // -----------------------------------
 246    // The fail label is not actually used since we do not allocate.
 247    Label allocated, new_backing_store, only_change_map, done;
 248
 249    if (mode == TRACK_ALLOCATION_SITE) {
 250      __ JumpIfJSArrayHasAllocationMemento(rdx, rdi, fail);
 251    }
 252
 253    // Check for empty arrays, which only require a map transition and no changes
 254    // to the backing store.
-255    __ movq(r8, FieldOperand(rdx, JSObject::kElementsOffset));
+255    __ movp(r8, FieldOperand(rdx, JSObject::kElementsOffset));
 256    __ CompareRoot(r8, Heap::kEmptyFixedArrayRootIndex);
 257    __ j(equal, &only_change_map);
 258
 259    // Check backing store for COW-ness. For COW arrays we have to
 260    // allocate a new backing store.
 261    __ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset));
 262    __ CompareRoot(FieldOperand(r8, HeapObject::kMapOffset),
 263                   Heap::kFixedCOWArrayMapRootIndex);
 264    __ j(equal, &new_backing_store);
 265    // Check if the backing store is in new-space. If not, we need to allocate
 266    // a new one since the old one is in pointer-space.
 267    // If in new space, we can reuse the old backing store because it is
 268    // the same size.
 269    __ JumpIfNotInNewSpace(r8, rdi, &new_backing_store);
 270
-271    __ movq(r14, r8);  // Destination array equals source array.
+271    __ movp(r14, r8);  // Destination array equals source array.
 272
 273    // r8 : source FixedArray
 274    // r9 : elements array length
 275    // r14: destination FixedDoubleArray
 276    // Set backing store's map
 277    __ LoadRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
-278    __ movq(FieldOperand(r14, HeapObject::kMapOffset), rdi);
+278    __ movp(FieldOperand(r14, HeapObject::kMapOffset), rdi);
 279
 280    __ bind(&allocated);
 281    // Set transitioned map.
-282    __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
+282    __ movp(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
 283    __ RecordWriteField(rdx,
 284                        HeapObject::kMapOffset,
 285                        rbx,
 286                        rdi,
 287                        kDontSaveFPRegs,
 288                        EMIT_REMEMBERED_SET,
 289                        OMIT_SMI_CHECK);
 290
 291    // Convert smis to doubles and holes to hole NaNs. The Array's length
 292    // remains unchanged.
 293    STATIC_ASSERT(FixedDoubleArray::kLengthOffset == FixedArray::kLengthOffset);
 294    STATIC_ASSERT(FixedDoubleArray::kHeaderSize == FixedArray::kHeaderSize);
 295
 296    Label loop, entry, convert_hole;
 297    __ movq(r15, BitCast<int64_t, uint64_t>(kHoleNanInt64));
 298    // r15: the-hole NaN
 299    __ jmp(&entry);
 300
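The conversion loop (bound at &loop further down) untags each smi element to an integer, converts it to a double, and writes the dedicated hole-NaN bit pattern for holes. In C++ terms the per-element work is roughly the following, assuming the classic x64 smi encoding of that era (payload in the upper 32 bits, low tag bit clear); kHoleBits stands in for V8's kHoleNanInt64:

// Illustrative model of the smi-to-double conversion loop; the encoding
// details and names are assumptions for illustration, not V8 source.
#include <cstdint>
#include <cstring>

bool IsSmi(uint64_t tagged) { return (tagged & 1) == 0; }

int32_t SmiToInteger32(uint64_t tagged) {  // arithmetic untag
  return static_cast<int32_t>(static_cast<int64_t>(tagged) >> 32);
}

void ConvertElement(uint64_t tagged, uint64_t kHoleBits, double* out) {
  if (IsSmi(tagged)) {
    *out = static_cast<double>(SmiToInteger32(tagged));  // Cvtlsi2sd + movsd
  } else {
    std::memcpy(out, &kHoleBits, sizeof(*out));          // hole -> hole NaN
  }
}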
 301    // Allocate new backing store.
 302    __ bind(&new_backing_store);
 303    __ lea(rdi, Operand(r9, times_8, FixedArray::kHeaderSize));
 304    __ Allocate(rdi, r14, r11, r15, fail, TAG_OBJECT);
 305    // Set backing store's map
 306    __ LoadRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
-307    __ movq(FieldOperand(r14, HeapObject::kMapOffset), rdi);
+307    __ movp(FieldOperand(r14, HeapObject::kMapOffset), rdi);
 308    // Set receiver's backing store.
-309    __ movq(FieldOperand(rdx, JSObject::kElementsOffset), r14);
+309    __ movp(FieldOperand(rdx, JSObject::kElementsOffset), r14);
-310    __ movq(r11, r14);
+310    __ movp(r11, r14);
 311    __ RecordWriteField(rdx,
 312                        JSObject::kElementsOffset,
 313                        r11,
 314                        r15,
 315                        kDontSaveFPRegs,
 316                        EMIT_REMEMBERED_SET,
 317                        OMIT_SMI_CHECK);
 318    // Set backing store's length.
 319    __ Integer32ToSmi(r11, r9);
-320    __ movq(FieldOperand(r14, FixedDoubleArray::kLengthOffset), r11);
+320    __ movp(FieldOperand(r14, FixedDoubleArray::kLengthOffset), r11);
 321    __ jmp(&allocated);
 322
 323    __ bind(&only_change_map);
 324    // Set transitioned map.
-325    __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
+325    __ movp(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
 326    __ RecordWriteField(rdx,
 327                        HeapObject::kMapOffset,
 328                        rbx,
 329                        rdi,
 330                        kDontSaveFPRegs,
 331                        OMIT_REMEMBERED_SET,
 332                        OMIT_SMI_CHECK);
 333    __ jmp(&done);
 334
 335    // Conversion loop.
 336    __ bind(&loop);
-337    __ movq(rbx,
+337    __ movp(rbx,
 338            FieldOperand(r8, r9, times_pointer_size, FixedArray::kHeaderSize));
 339    // r9 : current element's index
 340    // rbx: current element (smi-tagged)
 341    __ JumpIfNotSmi(rbx, &convert_hole);
 342    __ SmiToInteger32(rbx, rbx);
 343    __ Cvtlsi2sd(xmm0, rbx);
 344    __ movsd(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize),
 345             xmm0);
 346    __ jmp(&entry);
 347    __ bind(&convert_hole);
(...skipping 22 matching lines...)
 370    //  -- rsp[0] : return address
 371    // -----------------------------------
 372    Label loop, entry, convert_hole, gc_required, only_change_map;
 373
 374    if (mode == TRACK_ALLOCATION_SITE) {
 375      __ JumpIfJSArrayHasAllocationMemento(rdx, rdi, fail);
 376    }
 377
 378    // Check for empty arrays, which only require a map transition and no changes
 379    // to the backing store.
-380    __ movq(r8, FieldOperand(rdx, JSObject::kElementsOffset));
+380    __ movp(r8, FieldOperand(rdx, JSObject::kElementsOffset));
 381    __ CompareRoot(r8, Heap::kEmptyFixedArrayRootIndex);
 382    __ j(equal, &only_change_map);
 383
 384    __ push(rax);
 385
-386    __ movq(r8, FieldOperand(rdx, JSObject::kElementsOffset));
+386    __ movp(r8, FieldOperand(rdx, JSObject::kElementsOffset));
 387    __ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset));
 388    // r8 : source FixedDoubleArray
 389    // r9 : number of elements
 390    __ lea(rdi, Operand(r9, times_pointer_size, FixedArray::kHeaderSize));
 391    __ Allocate(rdi, r11, r14, r15, &gc_required, TAG_OBJECT);
 392    // r11: destination FixedArray
 393    __ LoadRoot(rdi, Heap::kFixedArrayMapRootIndex);
-394    __ movq(FieldOperand(r11, HeapObject::kMapOffset), rdi);
+394    __ movp(FieldOperand(r11, HeapObject::kMapOffset), rdi);
 395    __ Integer32ToSmi(r14, r9);
-396    __ movq(FieldOperand(r11, FixedArray::kLengthOffset), r14);
+396    __ movp(FieldOperand(r11, FixedArray::kLengthOffset), r14);
 397
 398    // Prepare for conversion loop.
 399    __ movq(rsi, BitCast<int64_t, uint64_t>(kHoleNanInt64));
 400    __ LoadRoot(rdi, Heap::kTheHoleValueRootIndex);
 401    // rsi: the-hole NaN
 402    // rdi: pointer to the-hole
 403    __ jmp(&entry);
 404
 405    // Call into runtime if GC is required.
 406    __ bind(&gc_required);
 407    __ pop(rax);
-408    __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+408    __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
 409    __ jmp(fail);
 410
 411    // Box doubles into heap numbers.
 412    __ bind(&loop);
 413    __ movq(r14, FieldOperand(r8,
 414                              r9,
 415                              times_8,
 416                              FixedDoubleArray::kHeaderSize));
 417    // r9 : current element's index
 418    // r14: current element
 419    __ cmpq(r14, rsi);
 420    __ j(equal, &convert_hole);
 421
 422    // Non-hole double, copy value into a heap number.
 423    __ AllocateHeapNumber(rax, r15, &gc_required);
 424    // rax: new heap number
 425    __ MoveDouble(FieldOperand(rax, HeapNumber::kValueOffset), r14);
-426    __ movq(FieldOperand(r11,
+426    __ movp(FieldOperand(r11,
 427                         r9,
 428                         times_pointer_size,
 429                         FixedArray::kHeaderSize),
 430            rax);
-431    __ movq(r15, r9);
+431    __ movp(r15, r9);
 432    __ RecordWriteArray(r11,
 433                        rax,
 434                        r15,
 435                        kDontSaveFPRegs,
 436                        EMIT_REMEMBERED_SET,
 437                        OMIT_SMI_CHECK);
 438    __ jmp(&entry, Label::kNear);
 439
 440    // Replace the-hole NaN with the-hole pointer.
 441    __ bind(&convert_hole);
-442    __ movq(FieldOperand(r11,
+442    __ movp(FieldOperand(r11,
 443                         r9,
 444                         times_pointer_size,
 445                         FixedArray::kHeaderSize),
 446            rdi);
 447
 448    __ bind(&entry);
 449    __ decq(r9);
 450    __ j(not_sign, &loop);
 451
 452    // Replace receiver's backing store with newly created and filled FixedArray.
-453    __ movq(FieldOperand(rdx, JSObject::kElementsOffset), r11);
+453    __ movp(FieldOperand(rdx, JSObject::kElementsOffset), r11);
 454    __ RecordWriteField(rdx,
 455                        JSObject::kElementsOffset,
 456                        r11,
 457                        r15,
 458                        kDontSaveFPRegs,
 459                        EMIT_REMEMBERED_SET,
 460                        OMIT_SMI_CHECK);
 461    __ pop(rax);
-462    __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+462    __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
 463
 464    __ bind(&only_change_map);
 465    // Set transitioned map.
-466    __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
+466    __ movp(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
 467    __ RecordWriteField(rdx,
 468                        HeapObject::kMapOffset,
 469                        rbx,
 470                        rdi,
 471                        kDontSaveFPRegs,
 472                        OMIT_REMEMBERED_SET,
 473                        OMIT_SMI_CHECK);
 474  }
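One detail worth noting in the loop above: the hole test is cmpq r14, rsi, an integer compare of raw bit patterns rather than a floating-point compare. That is deliberate, because the hole is a NaN and a NaN never compares equal to itself. A small sketch (the hole constant here is illustrative, not necessarily V8's kHoleNanInt64):

// Why the hole check compares integer bits: an FP compare against a NaN
// sentinel can never succeed. kHoleBits is an illustrative NaN pattern.
#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  const uint64_t kHoleBits = 0x7ff7ffffffffffffULL;  // a non-canonical NaN
  double hole;
  std::memcpy(&hole, &kHoleBits, sizeof(hole));
  assert(hole != hole);       // FP compare is useless: NaN != NaN
  uint64_t bits;
  std::memcpy(&bits, &hole, sizeof(bits));
  assert(bits == kHoleBits);  // the cmpq-style bit compare works
  return 0;
}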
 475
 476
 477  void StringCharLoadGenerator::Generate(MacroAssembler* masm,
 478                                         Register string,
 479                                         Register index,
 480                                         Register result,
 481                                         Label* call_runtime) {
 482    // Fetch the instance type of the receiver into result register.
-483    __ movq(result, FieldOperand(string, HeapObject::kMapOffset));
+483    __ movp(result, FieldOperand(string, HeapObject::kMapOffset));
 484    __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));
 485
 486    // We need special handling for indirect strings.
 487    Label check_sequential;
 488    __ testb(result, Immediate(kIsIndirectStringMask));
 489    __ j(zero, &check_sequential, Label::kNear);
 490
 491    // Dispatch on the indirect string shape: slice or cons.
 492    Label cons_string;
 493    __ testb(result, Immediate(kSlicedNotConsMask));
 494    __ j(zero, &cons_string, Label::kNear);
 495
 496    // Handle slices.
 497    Label indirect_string_loaded;
 498    __ SmiToInteger32(result, FieldOperand(string, SlicedString::kOffsetOffset));
 499    __ addq(index, result);
-500    __ movq(string, FieldOperand(string, SlicedString::kParentOffset));
+500    __ movp(string, FieldOperand(string, SlicedString::kParentOffset));
 501    __ jmp(&indirect_string_loaded, Label::kNear);
 502
 503    // Handle cons strings.
 504    // Check whether the right hand side is the empty string (i.e. if
 505    // this is really a flat string in a cons string). If that is not
 506    // the case we would rather go to the runtime system now to flatten
 507    // the string.
 508    __ bind(&cons_string);
 509    __ CompareRoot(FieldOperand(string, ConsString::kSecondOffset),
 510                   Heap::kempty_stringRootIndex);
 511    __ j(not_equal, call_runtime);
-512    __ movq(string, FieldOperand(string, ConsString::kFirstOffset));
+512    __ movp(string, FieldOperand(string, ConsString::kFirstOffset));
 513
 514    __ bind(&indirect_string_loaded);
-515    __ movq(result, FieldOperand(string, HeapObject::kMapOffset));
+515    __ movp(result, FieldOperand(string, HeapObject::kMapOffset));
 516    __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));
 517
 518    // Distinguish sequential and external strings. Only these two string
 519    // representations can reach here (slices and flat cons strings have been
 520    // reduced to the underlying sequential or external string).
 521    Label seq_string;
 522    __ bind(&check_sequential);
 523    STATIC_ASSERT(kSeqStringTag == 0);
 524    __ testb(result, Immediate(kStringRepresentationMask));
 525    __ j(zero, &seq_string, Label::kNear);
 526
 527    // Handle external strings.
 528    Label ascii_external, done;
 529    if (FLAG_debug_code) {
 530      // Assert that we do not have a cons or slice (indirect strings) here.
 531      // Sequential strings have already been ruled out.
 532      __ testb(result, Immediate(kIsIndirectStringMask));
 533      __ Assert(zero, kExternalStringExpectedButNotFound);
 534    }
 535    // Rule out short external strings.
 536    STATIC_CHECK(kShortExternalStringTag != 0);
 537    __ testb(result, Immediate(kShortExternalStringTag));
 538    __ j(not_zero, call_runtime);
 539    // Check encoding.
 540    STATIC_ASSERT(kTwoByteStringTag == 0);
 541    __ testb(result, Immediate(kStringEncodingMask));
-542    __ movq(result, FieldOperand(string, ExternalString::kResourceDataOffset));
+542    __ movp(result, FieldOperand(string, ExternalString::kResourceDataOffset));
 543    __ j(not_equal, &ascii_external, Label::kNear);
 544    // Two-byte string.
 545    __ movzxwl(result, Operand(result, index, times_2, 0));
 546    __ jmp(&done, Label::kNear);
 547    __ bind(&ascii_external);
 548    // Ascii string.
 549    __ movzxbl(result, Operand(result, index, times_1, 0));
 550    __ jmp(&done, Label::kNear);
 551
 552    // Dispatch on the encoding: ASCII or two-byte.
(...skipping 81 matching lines...)
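The dispatch above works entirely off bit masks in the string's instance type: one bit distinguishes direct from indirect strings, another separates sliced from cons, and a separate bit carries the encoding. A conceptual sketch of the classification; the mask and tag values mirror V8's objects.h of that era but should be treated as illustrative:

// Illustrative classification mirroring the testb-based dispatch above.
#include <cstdint>

const uint32_t kIsIndirectStringMask     = 0x1;  // cons or sliced
const uint32_t kSlicedNotConsMask        = 0x2;  // sliced vs. cons
const uint32_t kStringRepresentationMask = 0x3;
const uint32_t kSeqStringTag             = 0x0;
const uint32_t kStringEncodingMask       = 0x4;  // clear = two-byte

const char* ClassifyString(uint32_t instance_type) {
  if (instance_type & kIsIndirectStringMask) {
    return (instance_type & kSlicedNotConsMask) ? "sliced" : "cons";
  }
  if ((instance_type & kStringRepresentationMask) == kSeqStringTag) {
    return "sequential";
  }
  return (instance_type & kStringEncodingMask) ? "external ASCII"
                                               : "external two-byte";
}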
 634  static byte* GetNoCodeAgeSequence(uint32_t* length) {
 635    static bool initialized = false;
 636    static byte sequence[kNoCodeAgeSequenceLength];
 637    *length = kNoCodeAgeSequenceLength;
 638    if (!initialized) {
 639      // The sequence of instructions that is patched out for aging code is the
 640      // following boilerplate stack-building prologue that is found both in
 641      // FUNCTION and OPTIMIZED_FUNCTION code:
 642      CodePatcher patcher(sequence, kNoCodeAgeSequenceLength);
 643      patcher.masm()->push(rbp);
-644      patcher.masm()->movq(rbp, rsp);
+644      patcher.masm()->movp(rbp, rsp);
 645      patcher.masm()->push(rsi);
 646      patcher.masm()->push(rdi);
 647      initialized = true;
 648    }
 649    return sequence;
 650  }
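For reference, the young-code prologue assembled here has a fixed x86-64 encoding, which is what Code::IsYoungSequence below compares against byte for byte. The instruction encodings are standard x86-64; the array name is illustrative:

// Standard x86-64 encodings of the prologue built by GetNoCodeAgeSequence.
#include <cstdint>

const uint8_t kYoungSequence[] = {
    0x55,              // push rbp
    0x48, 0x89, 0xe5,  // mov rbp, rsp (REX.W form; the movp above)
    0x56,              // push rsi (the context register)
    0x57,              // push rdi (the JSFunction register)
};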
 651
 652
 653  bool Code::IsYoungSequence(byte* sequence) {
 654    uint32_t young_length;
(...skipping 55 matching lines...)
 710    //   argument_count_reg_ * times_pointer_size + (receiver - 1) * kPointerSize.
 711    return Operand(base_reg_, argument_count_reg_, times_pointer_size,
 712        displacement_to_last_argument + (receiver - 1 - index) * kPointerSize);
 713    }
 714  }
 715
 716
 717  } }  // namespace v8::internal
 718
 719  #endif  // V8_TARGET_ARCH_X64
