Chromium Code Reviews

Unified Diff: src/x64/macro-assembler-x64.cc

Issue 8139027: Version 3.6.5 (Closed) Base URL: http://v8.googlecode.com/svn/trunk/
Patch Set: '' Created 9 years, 2 months ago
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 26 matching lines...)
 #include "debug.h"
 #include "heap.h"

 namespace v8 {
 namespace internal {

 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
     : Assembler(arg_isolate, buffer, size),
       generating_stub_(false),
       allow_stub_calls_(true),
+      has_frame_(false),
       root_array_available_(true) {
   if (isolate() != NULL) {
     code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
                                   isolate());
   }
 }


 static intptr_t RootRegisterDelta(ExternalReference other, Isolate* isolate) {
   Address roots_register_value = kRootRegisterBias +
(...skipping 132 matching lines...)

 void MacroAssembler::CompareRoot(const Operand& with,
                                  Heap::RootListIndex index) {
   ASSERT(root_array_available_);
   ASSERT(!with.AddressUsesRegister(kScratchRegister));
   LoadRoot(kScratchRegister, index);
   cmpq(with, kScratchRegister);
 }


-void MacroAssembler::RecordWriteHelper(Register object,
-                                       Register addr,
-                                       Register scratch) {
-  if (emit_debug_code()) {
-    // Check that the object is not in new space.
-    Label not_in_new_space;
-    InNewSpace(object, scratch, not_equal, &not_in_new_space, Label::kNear);
-    Abort("new-space object passed to RecordWriteHelper");
-    bind(&not_in_new_space);
-  }
-
-  // Compute the page start address from the heap object pointer, and reuse
-  // the 'object' register for it.
-  and_(object, Immediate(~Page::kPageAlignmentMask));
-
-  // Compute number of region covering addr. See Page::GetRegionNumberForAddress
-  // method for more details.
-  shrl(addr, Immediate(Page::kRegionSizeLog2));
-  andl(addr, Immediate(Page::kPageAlignmentMask >> Page::kRegionSizeLog2));
-
-  // Set dirty mark for region.
-  bts(Operand(object, Page::kDirtyFlagOffset), addr);
-}
+void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
+                                         Register addr,
+                                         Register scratch,
+                                         SaveFPRegsMode save_fp,
+                                         RememberedSetFinalAction and_then) {
+  if (FLAG_debug_code) {
+    Label ok;
+    JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear);
+    int3();
+    bind(&ok);
+  }
+  // Load store buffer top.
+  LoadRoot(scratch, Heap::kStoreBufferTopRootIndex);
+  // Store pointer to buffer.
+  movq(Operand(scratch, 0), addr);
+  // Increment buffer top.
+  addq(scratch, Immediate(kPointerSize));
+  // Write back new top of buffer.
+  StoreRoot(scratch, Heap::kStoreBufferTopRootIndex);
+  // Call stub on end of buffer.
+  Label done;
+  // Check for end of buffer.
+  testq(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
+  if (and_then == kReturnAtEnd) {
+    Label buffer_overflowed;
+    j(not_equal, &buffer_overflowed, Label::kNear);
+    ret(0);
+    bind(&buffer_overflowed);
+  } else {
+    ASSERT(and_then == kFallThroughAtEnd);
+    j(equal, &done, Label::kNear);
+  }
+  StoreBufferOverflowStub store_buffer_overflow =
+      StoreBufferOverflowStub(save_fp);
+  CallStub(&store_buffer_overflow);
+  if (and_then == kReturnAtEnd) {
+    ret(0);
+  } else {
+    ASSERT(and_then == kFallThroughAtEnd);
+    bind(&done);
+  }
+}
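
In C++ terms, the new helper implements a bump-pointer remembered set. A minimal sketch, assuming the store buffer is a flat array of slot addresses whose top pointer gains a dedicated overflow bit once the buffer fills up (the real layout lives in StoreBuffer; kOverflowBit below is a stand-in for StoreBuffer::kStoreBufferOverflowBit, and its position is an assumption):

    #include <cstdint>

    struct StoreBufferModel {
      uintptr_t* top;  // Next free slot (what kStoreBufferTopRootIndex holds).
      static const uintptr_t kOverflowBit = uintptr_t(1) << 15;  // Assumed.

      // Record the address of a slot that may now point into new space.
      void Record(uintptr_t* slot, void (*overflow_stub)()) {
        *top = reinterpret_cast<uintptr_t>(slot);  // Store pointer to buffer.
        ++top;                                     // Increment buffer top.
        // The buffer is positioned so that filling it sets this bit in the
        // top address, which is what the testq above detects.
        if (reinterpret_cast<uintptr_t>(top) & kOverflowBit) {
          overflow_stub();  // Slow path: StoreBufferOverflowStub.
        }
      }
    };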


 void MacroAssembler::InNewSpace(Register object,
                                 Register scratch,
                                 Condition cc,
                                 Label* branch,
-                                Label::Distance near_jump) {
+                                Label::Distance distance) {
   if (Serializer::enabled()) {
     // Can't do arithmetic on external references if it might get serialized.
     // The mask isn't really an address. We load it as an external reference in
     // case the size of the new space is different between the snapshot maker
     // and the running system.
     if (scratch.is(object)) {
       movq(kScratchRegister, ExternalReference::new_space_mask(isolate()));
       and_(scratch, kScratchRegister);
     } else {
       movq(scratch, ExternalReference::new_space_mask(isolate()));
       and_(scratch, object);
     }
     movq(kScratchRegister, ExternalReference::new_space_start(isolate()));
     cmpq(scratch, kScratchRegister);
-    j(cc, branch, near_jump);
+    j(cc, branch, distance);
   } else {
     ASSERT(is_int32(static_cast<int64_t>(HEAP->NewSpaceMask())));
     intptr_t new_space_start =
         reinterpret_cast<intptr_t>(HEAP->NewSpaceStart());
     movq(kScratchRegister, -new_space_start, RelocInfo::NONE);
     if (scratch.is(object)) {
       addq(scratch, kScratchRegister);
     } else {
       lea(scratch, Operand(object, kScratchRegister, times_1, 0));
     }
     and_(scratch, Immediate(static_cast<int32_t>(HEAP->NewSpaceMask())));
-    j(cc, branch, near_jump);
+    j(cc, branch, distance);
   }
 }
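
Both branches of InNewSpace compute the same predicate. A sketch of the underlying arithmetic, assuming new space is a contiguous region whose start is aligned to its size, with mask keeping the address bits above that size (the roles of HEAP->NewSpaceStart() and HEAP->NewSpaceMask()):

    #include <cstdint>

    // An address is in new space iff it agrees with the space's start on
    // every bit kept by the mask.
    bool InNewSpaceModel(uintptr_t addr, uintptr_t start, uintptr_t mask) {
      // Matches the emitted code: lea computes addr + (-start), and_ masks,
      // and the resulting zero flag answers the question (cc == equal means
      // "is in new space").
      return ((addr - start) & mask) == 0;
    }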


-void MacroAssembler::RecordWrite(Register object,
-                                 int offset,
-                                 Register value,
-                                 Register index) {
+void MacroAssembler::RecordWriteField(
+    Register object,
+    int offset,
+    Register value,
+    Register dst,
+    SaveFPRegsMode save_fp,
+    RememberedSetAction remembered_set_action,
+    SmiCheck smi_check) {
   // The compiled code assumes that record write doesn't change the
   // context register, so we check that none of the clobbered
   // registers are rsi.
-  ASSERT(!object.is(rsi) && !value.is(rsi) && !index.is(rsi));
+  ASSERT(!value.is(rsi) && !dst.is(rsi));

   // First, check if a write barrier is even needed. The tests below
-  // catch stores of smis and stores into the young generation.
+  // catch stores of Smis.
   Label done;
-  JumpIfSmi(value, &done);

-  RecordWriteNonSmi(object, offset, value, index);
+  // Skip barrier if writing a smi.
+  if (smi_check == INLINE_SMI_CHECK) {
+    JumpIfSmi(value, &done);
+  }
+
+  // Although the object register is tagged, the offset is relative to the
+  // start of the object, so the offset must be a multiple of kPointerSize.
+  ASSERT(IsAligned(offset, kPointerSize));
+
+  lea(dst, FieldOperand(object, offset));
+  if (emit_debug_code()) {
+    Label ok;
+    testb(dst, Immediate((1 << kPointerSizeLog2) - 1));
+    j(zero, &ok, Label::kNear);
+    int3();
+    bind(&ok);
+  }
+
+  RecordWrite(
+      object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);
+
   bind(&done);

-  // Clobber all input registers when running with the debug-code flag
-  // turned on to provoke errors. This clobbering repeats the
-  // clobbering done inside RecordWriteNonSmi but it's necessary to
-  // avoid having the fast case for smis leave the registers
-  // unchanged.
+  // Clobber clobbered input registers when running with the debug-code flag
+  // turned on to provoke errors.
   if (emit_debug_code()) {
-    movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
     movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
-    movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+    movq(dst, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
   }
 }


 void MacroAssembler::RecordWrite(Register object,
                                  Register address,
-                                 Register value) {
+                                 Register value,
+                                 SaveFPRegsMode fp_mode,
+                                 RememberedSetAction remembered_set_action,
+                                 SmiCheck smi_check) {
   // The compiled code assumes that record write doesn't change the
   // context register, so we check that none of the clobbered
   // registers are rsi.
-  ASSERT(!object.is(rsi) && !value.is(rsi) && !address.is(rsi));
+  ASSERT(!value.is(rsi) && !address.is(rsi));
+
+  ASSERT(!object.is(value));
+  ASSERT(!object.is(address));
+  ASSERT(!value.is(address));
+  if (emit_debug_code()) {
+    AbortIfSmi(object);
+  }
+
+  if (remembered_set_action == OMIT_REMEMBERED_SET &&
+      !FLAG_incremental_marking) {
+    return;
+  }
+
+  if (FLAG_debug_code) {
+    Label ok;
+    cmpq(value, Operand(address, 0));
+    j(equal, &ok, Label::kNear);
+    int3();
+    bind(&ok);
+  }

   // First, check if a write barrier is even needed. The tests below
   // catch stores of smis and stores into the young generation.
   Label done;
-  JumpIfSmi(value, &done);

-  InNewSpace(object, value, equal, &done);
+  if (smi_check == INLINE_SMI_CHECK) {
+    // Skip barrier if writing a smi.
+    JumpIfSmi(value, &done);
+  }

-  RecordWriteHelper(object, address, value);
+  CheckPageFlag(value,
+                value,  // Used as scratch.
+                MemoryChunk::kPointersToHereAreInterestingMask,
+                zero,
+                &done,
+                Label::kNear);
+
+  CheckPageFlag(object,
+                value,  // Used as scratch.
+                MemoryChunk::kPointersFromHereAreInterestingMask,
+                zero,
+                &done,
+                Label::kNear);
+
+  RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
+  CallStub(&stub);

   bind(&done);

-  // Clobber all input registers when running with the debug-code flag
+  // Clobber clobbered registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
     movq(address, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
     movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
   }
 }
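
The restructured barrier replaces the old region-dirtying helper with two page-flag filters and a stub call. Schematically, and only as a sketch (PageOf, the flag-bit positions, and the 1 MB page size are illustrative assumptions standing in for MemoryChunk):

    #include <cstdint>

    const uintptr_t kPageSize = uintptr_t(1) << 20;           // Assumed size.
    const uint32_t kPointersToHereAreInteresting = 1 << 0;    // Assumed bit.
    const uint32_t kPointersFromHereAreInteresting = 1 << 1;  // Assumed bit.

    struct PageHeader { uint32_t flags; };

    // Pages are kPageSize-aligned, so masking low bits finds the header;
    // the low tag bits of a tagged pointer are far below page alignment.
    inline PageHeader* PageOf(const void* p) {
      return reinterpret_cast<PageHeader*>(
          reinterpret_cast<uintptr_t>(p) & ~(kPageSize - 1));
    }

    void RecordWriteModel(void* object, void** slot, void* value,
                          void (*stub)(void*, void**, void*)) {
      // Smis have a zero tag bit and carry no heap pointer: no barrier.
      if ((reinterpret_cast<uintptr_t>(value) & 1) == 0) return;
      // Nobody tracks pointers into value's page (e.g. old data space).
      if (!(PageOf(value)->flags & kPointersToHereAreInteresting)) return;
      // Stores out of object's page are never interesting (e.g. new space).
      if (!(PageOf(object)->flags & kPointersFromHereAreInteresting)) return;
      stub(object, slot, value);  // Slow path: RecordWriteStub.
    }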


-void MacroAssembler::RecordWriteNonSmi(Register object,
-                                       int offset,
-                                       Register scratch,
-                                       Register index) {
-  Label done;
-
-  if (emit_debug_code()) {
-    Label okay;
-    JumpIfNotSmi(object, &okay, Label::kNear);
-    Abort("MacroAssembler::RecordWriteNonSmi cannot deal with smis");
-    bind(&okay);
-
-    if (offset == 0) {
-      // index must be int32.
-      Register tmp = index.is(rax) ? rbx : rax;
-      push(tmp);
-      movl(tmp, index);
-      cmpq(tmp, index);
-      Check(equal, "Index register for RecordWrite must be untagged int32.");
-      pop(tmp);
-    }
-  }
-
-  // Test that the object address is not in the new space. We cannot
-  // update page dirty marks for new space pages.
-  InNewSpace(object, scratch, equal, &done);
-
-  // The offset is relative to a tagged or untagged HeapObject pointer,
-  // so either offset or offset + kHeapObjectTag must be a
-  // multiple of kPointerSize.
-  ASSERT(IsAligned(offset, kPointerSize) ||
-         IsAligned(offset + kHeapObjectTag, kPointerSize));
-
-  Register dst = index;
-  if (offset != 0) {
-    lea(dst, Operand(object, offset));
-  } else {
-    // array access: calculate the destination address in the same manner as
-    // KeyedStoreIC::GenerateGeneric.
-    lea(dst, FieldOperand(object,
-                          index,
-                          times_pointer_size,
-                          FixedArray::kHeaderSize));
-  }
-  RecordWriteHelper(object, dst, scratch);
-
-  bind(&done);
-
-  // Clobber all input registers when running with the debug-code flag
-  // turned on to provoke errors.
-  if (emit_debug_code()) {
-    movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
-    movq(scratch, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
-    movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
-  }
-}
-
 void MacroAssembler::Assert(Condition cc, const char* msg) {
   if (emit_debug_code()) Check(cc, msg);
 }


 void MacroAssembler::AssertFastElements(Register elements) {
   if (emit_debug_code()) {
     Label ok;
     CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
                 Heap::kFixedArrayMapRootIndex);
     j(equal, &ok, Label::kNear);
     CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
                 Heap::kFixedDoubleArrayMapRootIndex);
     j(equal, &ok, Label::kNear);
     CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
                 Heap::kFixedCOWArrayMapRootIndex);
     j(equal, &ok, Label::kNear);
     Abort("JSObject with fast elements map has slow elements");
     bind(&ok);
   }
 }


 void MacroAssembler::Check(Condition cc, const char* msg) {
   Label L;
   j(cc, &L, Label::kNear);
   Abort(msg);
-  // will not return here
+  // Control will not return here.
   bind(&L);
 }


 void MacroAssembler::CheckStackAlignment() {
   int frame_alignment = OS::ActivationFrameAlignment();
   int frame_alignment_mask = frame_alignment - 1;
   if (frame_alignment > kPointerSize) {
     ASSERT(IsPowerOf2(frame_alignment));
     Label alignment_as_expected;
(...skipping 27 matching lines...)
   intptr_t p1 = reinterpret_cast<intptr_t>(msg);
   intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
   // Note: p0 might not be a valid Smi *value*, but it has a valid Smi tag.
   ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
 #ifdef DEBUG
   if (msg != NULL) {
     RecordComment("Abort message: ");
     RecordComment(msg);
   }
 #endif
-  // Disable stub call restrictions to always allow calls to abort.
-  AllowStubCallsScope allow_scope(this, true);
-
   push(rax);
   movq(kScratchRegister, p0, RelocInfo::NONE);
   push(kScratchRegister);
   movq(kScratchRegister,
        reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(p1 - p0))),
        RelocInfo::NONE);
   push(kScratchRegister);
-  CallRuntime(Runtime::kAbort, 2);
-  // will not return here
+
+  if (!has_frame_) {
+    // We don't actually want to generate a pile of code for this, so just
+    // claim there is a stack frame, without generating one.
+    FrameScope scope(this, StackFrame::NONE);
+    CallRuntime(Runtime::kAbort, 2);
+  } else {
+    CallRuntime(Runtime::kAbort, 2);
+  }
+  // Control will not return here.
   int3();
 }


 void MacroAssembler::CallStub(CodeStub* stub, unsigned ast_id) {
-  ASSERT(allow_stub_calls());  // calls are not allowed in some stubs
+  ASSERT(AllowThisStubCall(stub));  // Calls are not allowed in some stubs
   Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
 }


 MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub) {
-  ASSERT(allow_stub_calls());  // Calls are not allowed in some stubs.
+  ASSERT(AllowThisStubCall(stub));  // Calls are not allowed in some stubs.
   MaybeObject* result = stub->TryGetCode();
   if (!result->IsFailure()) {
     call(Handle<Code>(Code::cast(result->ToObjectUnchecked())),
          RelocInfo::CODE_TARGET);
   }
   return result;
 }


 void MacroAssembler::TailCallStub(CodeStub* stub) {
-  ASSERT(allow_stub_calls());  // Calls are not allowed in some stubs.
+  ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
   Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
 }


 MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub) {
-  ASSERT(allow_stub_calls());  // Calls are not allowed in some stubs.
   MaybeObject* result = stub->TryGetCode();
   if (!result->IsFailure()) {
     jmp(Handle<Code>(Code::cast(result->ToObjectUnchecked())),
         RelocInfo::CODE_TARGET);
   }
   return result;
 }


 void MacroAssembler::StubReturn(int argc) {
   ASSERT(argc >= 1 && generating_stub());
   ret((argc - 1) * kPointerSize);
 }


+bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
+  if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
+  return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe();
+}
+
+
 void MacroAssembler::IllegalOperation(int num_arguments) {
   if (num_arguments > 0) {
     addq(rsp, Immediate(num_arguments * kPointerSize));
   }
   LoadRoot(rax, Heap::kUndefinedValueRootIndex);
 }


 void MacroAssembler::IndexFromHash(Register hash, Register index) {
   // The assert checks that the constants for the maximum number of digits
(...skipping 16 matching lines...)

 void MacroAssembler::CallRuntime(Runtime::FunctionId id, int num_arguments) {
   CallRuntime(Runtime::FunctionForId(id), num_arguments);
 }


 void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
   const Runtime::Function* function = Runtime::FunctionForId(id);
   Set(rax, function->nargs);
   LoadAddress(rbx, ExternalReference(function, isolate()));
-  CEntryStub ces(1);
-  ces.SaveDoubles();
+  CEntryStub ces(1, kSaveFPRegs);
   CallStub(&ces);
 }


 MaybeObject* MacroAssembler::TryCallRuntime(Runtime::FunctionId id,
                                             int num_arguments) {
   return TryCallRuntime(Runtime::FunctionForId(id), num_arguments);
 }


(...skipping 233 matching lines...)
   // Set the entry point and jump to the C entry runtime stub.
   LoadAddress(rbx, ext);
   CEntryStub ces(result_size);
   return TryTailCallStub(&ces);
 }


 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
-  // Calls are not allowed in some stubs.
-  ASSERT(flag == JUMP_FUNCTION || allow_stub_calls());
+  // You can't call a builtin without a valid frame.
+  ASSERT(flag == JUMP_FUNCTION || has_frame());

   // Rely on the assertion to check that the number of provided
   // arguments matches the expected number of arguments. Fake a
   // parameter count to avoid emitting code to do the check.
   ParameterCount expected(0);
   GetBuiltinEntry(rdx, id);
   InvokeCode(rdx, expected, expected, flag, call_wrapper, CALL_AS_METHOD);
 }


 void MacroAssembler::GetBuiltinFunction(Register target,
                                         Builtins::JavaScript id) {
   // Load the builtins object into target register.
   movq(target, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
   movq(target, FieldOperand(target, GlobalObject::kBuiltinsOffset));
   movq(target, FieldOperand(target,
                             JSBuiltinsObject::OffsetOfFunctionWithId(id)));
 }


 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
   ASSERT(!target.is(rdi));
   // Load the JavaScript builtin function from the builtins object.
   GetBuiltinFunction(rdi, id);
   movq(target, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
 }


+static const Register saved_regs[] =
+    { rax, rcx, rdx, rbx, rbp, rsi, rdi, r8, r9, r10, r11 };
+static const int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Register);
+
+
+void MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
+                                     Register exclusion1,
+                                     Register exclusion2,
+                                     Register exclusion3) {
+  // We don't allow a GC during a store buffer overflow so there is no need to
+  // store the registers in any particular way, but we do have to store and
+  // restore them.
+  for (int i = 0; i < kNumberOfSavedRegs; i++) {
+    Register reg = saved_regs[i];
+    if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
+      push(reg);
+    }
+  }
+  // r12 to r15 are callee-saved on all platforms.
+  if (fp_mode == kSaveFPRegs) {
+    CpuFeatures::Scope scope(SSE2);
+    subq(rsp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
+    for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
+      XMMRegister reg = XMMRegister::from_code(i);
+      movsd(Operand(rsp, i * kDoubleSize), reg);
+    }
+  }
+}
+
+
+void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode,
+                                    Register exclusion1,
+                                    Register exclusion2,
+                                    Register exclusion3) {
+  if (fp_mode == kSaveFPRegs) {
+    CpuFeatures::Scope scope(SSE2);
+    for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
+      XMMRegister reg = XMMRegister::from_code(i);
+      movsd(reg, Operand(rsp, i * kDoubleSize));
+    }
+    addq(rsp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
+  }
+  for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
+    Register reg = saved_regs[i];
+    if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
+      pop(reg);
+    }
+  }
+}
+
+
 void MacroAssembler::Set(Register dst, int64_t x) {
   if (x == 0) {
     xorl(dst, dst);
   } else if (is_uint32(x)) {
     movl(dst, Immediate(static_cast<uint32_t>(x)));
   } else if (is_int32(x)) {
     movq(dst, Immediate(static_cast<int32_t>(x)));
   } else {
     movq(dst, x, RelocInfo::NONE);
   }
(...skipping 1722 matching lines...)

 void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
   cmpb(FieldOperand(map, Map::kInstanceTypeOffset),
        Immediate(static_cast<int8_t>(type)));
 }


 void MacroAssembler::CheckFastElements(Register map,
                                        Label* fail,
                                        Label::Distance distance) {
-  STATIC_ASSERT(FAST_ELEMENTS == 0);
+  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+  STATIC_ASSERT(FAST_ELEMENTS == 1);
   cmpb(FieldOperand(map, Map::kBitField2Offset),
        Immediate(Map::kMaximumBitField2FastElementValue));
   j(above, fail, distance);
 }


+void MacroAssembler::CheckFastObjectElements(Register map,
+                                             Label* fail,
+                                             Label::Distance distance) {
+  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+  STATIC_ASSERT(FAST_ELEMENTS == 1);
+  cmpb(FieldOperand(map, Map::kBitField2Offset),
+       Immediate(Map::kMaximumBitField2FastSmiOnlyElementValue));
+  j(below_equal, fail, distance);
+  cmpb(FieldOperand(map, Map::kBitField2Offset),
+       Immediate(Map::kMaximumBitField2FastElementValue));
+  j(above, fail, distance);
+}
+
+
+void MacroAssembler::CheckFastSmiOnlyElements(Register map,
+                                              Label* fail,
+                                              Label::Distance distance) {
+  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+  cmpb(FieldOperand(map, Map::kBitField2Offset),
+       Immediate(Map::kMaximumBitField2FastSmiOnlyElementValue));
+  j(above, fail, distance);
+}
+
+
+void MacroAssembler::StoreNumberToDoubleElements(
+    Register maybe_number,
+    Register elements,
+    Register key,
+    XMMRegister xmm_scratch,
+    Label* fail) {
+  Label smi_value, is_nan, maybe_nan, not_nan, have_double_value, done;
+
+  JumpIfSmi(maybe_number, &smi_value, Label::kNear);
+
+  CheckMap(maybe_number,
+           isolate()->factory()->heap_number_map(),
+           fail,
+           DONT_DO_SMI_CHECK);
+
+  // Double value, canonicalize NaN.
+  uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
+  cmpl(FieldOperand(maybe_number, offset),
+       Immediate(kNaNOrInfinityLowerBoundUpper32));
+  j(greater_equal, &maybe_nan, Label::kNear);
+
+  bind(&not_nan);
+  movsd(xmm_scratch, FieldOperand(maybe_number, HeapNumber::kValueOffset));
+  bind(&have_double_value);
+  movsd(FieldOperand(elements, key, times_8, FixedDoubleArray::kHeaderSize),
+        xmm_scratch);
+  jmp(&done);
+
+  bind(&maybe_nan);
+  // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
+  // it's an Infinity, and the non-NaN code path applies.
+  j(greater, &is_nan, Label::kNear);
+  cmpl(FieldOperand(maybe_number, HeapNumber::kValueOffset), Immediate(0));
+  j(zero, &not_nan);
+  bind(&is_nan);
+  // Convert all NaNs to the same canonical NaN value when they are stored in
+  // the double array.
+  Set(kScratchRegister, BitCast<uint64_t>(
+      FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
+  movq(xmm_scratch, kScratchRegister);
+  jmp(&have_double_value, Label::kNear);
+
+  bind(&smi_value);
+  // Value is a smi. Convert to a double and store.
+  // Preserve original value.
+  SmiToInteger32(kScratchRegister, maybe_number);
+  cvtlsi2sd(xmm_scratch, kScratchRegister);
+  movsd(FieldOperand(elements, key, times_8, FixedDoubleArray::kHeaderSize),
+        xmm_scratch);
+  bind(&done);
+}
+
+
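The NaN handling above works on the raw IEEE-754 bits: a signed compare of the upper 32 bits against kNaNOrInfinityLowerBoundUpper32 (the upper half of +Infinity, 0x7FF00000) routes NaNs and Infinities to the slow path, and every NaN is rewritten to one canonical pattern before it is stored. A standalone sketch of that classification; the canonical pattern used below is an assumption, the real value comes from FixedDoubleArray::canonical_not_the_hole_nan_as_double():

    #include <cstdint>
    #include <cstring>

    uint64_t CanonicalizeForDoubleArray(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);  // Portable BitCast<uint64_t>.
      const int32_t upper = static_cast<int32_t>(bits >> 32);
      const uint32_t lower = static_cast<uint32_t>(bits);
      // Signed >=, matching cmpl / j(greater_equal) in the emitted code.
      if (upper >= 0x7FF00000) {
        // Strictly greater upper half, or nonzero low fraction bits: NaN.
        if (upper > 0x7FF00000 || lower != 0) {
          bits = 0x7FF8000000000000ull;  // Assumed canonical quiet NaN.
        }
      }
      return bits;  // Now safe to store into the double array.
    }
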
 void MacroAssembler::CheckMap(Register obj,
                               Handle<Map> map,
                               Label* fail,
                               SmiCheckType smi_check_type) {
   if (smi_check_type == DO_SMI_CHECK) {
     JumpIfSmi(obj, fail);
   }
   Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
   j(not_equal, fail);
 }
(...skipping 193 matching lines...)
       decl(counter_operand);
     } else {
       subl(counter_operand, Immediate(value));
     }
   }
 }


 #ifdef ENABLE_DEBUGGER_SUPPORT
 void MacroAssembler::DebugBreak() {
-  ASSERT(allow_stub_calls());
   Set(rax, 0);  // No arguments.
   LoadAddress(rbx, ExternalReference(Runtime::kDebugBreak, isolate()));
   CEntryStub ces(1);
+  ASSERT(AllowThisStubCall(&ces));
   Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
 }
 #endif  // ENABLE_DEBUGGER_SUPPORT


 void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
   // This macro takes the dst register to make the code more readable
   // at the call sites. However, the dst register has to be rcx to
   // follow the calling convention which requires the call type to be
   // in rcx.
   ASSERT(dst.is(rcx));
   if (call_kind == CALL_AS_FUNCTION) {
     LoadSmiConstant(dst, Smi::FromInt(1));
   } else {
     LoadSmiConstant(dst, Smi::FromInt(0));
   }
 }


 void MacroAssembler::InvokeCode(Register code,
                                 const ParameterCount& expected,
                                 const ParameterCount& actual,
                                 InvokeFlag flag,
                                 const CallWrapper& call_wrapper,
                                 CallKind call_kind) {
+  // You can't call a function without a valid frame.
+  ASSERT(flag == JUMP_FUNCTION || has_frame());
+
   Label done;
   InvokePrologue(expected,
                  actual,
                  Handle<Code>::null(),
                  code,
                  &done,
                  flag,
                  Label::kNear,
                  call_wrapper,
                  call_kind);
(...skipping 11 matching lines...)
 }


 void MacroAssembler::InvokeCode(Handle<Code> code,
                                 const ParameterCount& expected,
                                 const ParameterCount& actual,
                                 RelocInfo::Mode rmode,
                                 InvokeFlag flag,
                                 const CallWrapper& call_wrapper,
                                 CallKind call_kind) {
+  // You can't call a function without a valid frame.
+  ASSERT(flag == JUMP_FUNCTION || has_frame());
+
   Label done;
   Register dummy = rax;
   InvokePrologue(expected,
                  actual,
                  code,
                  dummy,
                  &done,
                  flag,
                  Label::kNear,
                  call_wrapper,
(...skipping 10 matching lines...)
   }
   bind(&done);
 }


 void MacroAssembler::InvokeFunction(Register function,
                                     const ParameterCount& actual,
                                     InvokeFlag flag,
                                     const CallWrapper& call_wrapper,
                                     CallKind call_kind) {
+  // You can't call a function without a valid frame.
+  ASSERT(flag == JUMP_FUNCTION || has_frame());
+
   ASSERT(function.is(rdi));
   movq(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
   movq(rsi, FieldOperand(function, JSFunction::kContextOffset));
   movsxlq(rbx,
           FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset));
   // Advances rdx to the end of the Code object header, to the start of
   // the executable code.
   movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));

   ParameterCount expected(rbx);
   InvokeCode(rdx, expected, actual, flag, call_wrapper, call_kind);
 }


 void MacroAssembler::InvokeFunction(JSFunction* function,
                                     const ParameterCount& actual,
                                     InvokeFlag flag,
                                     const CallWrapper& call_wrapper,
                                     CallKind call_kind) {
+  // You can't call a function without a valid frame.
+  ASSERT(flag == JUMP_FUNCTION || has_frame());
+
   ASSERT(function->is_compiled());
   // Get the function and setup the context.
   Move(rdi, Handle<JSFunction>(function));
   movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));

   if (V8::UseCrankshaft()) {
     // Since Crankshaft can recompile a function, we need to load
     // the Code object every time we call the function.
     movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
     ParameterCount expected(function->shared()->formal_parameter_count());
(...skipping 843 matching lines...)
     incq(source);
     incq(destination);
     cmpq(destination, scratch);
     j(not_equal, &short_loop);

     bind(&done);
   }
 }


+void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
+                                                Register end_offset,
+                                                Register filler) {
+  Label loop, entry;
+  jmp(&entry);
+  bind(&loop);
+  movq(Operand(start_offset, 0), filler);
+  addq(start_offset, Immediate(kPointerSize));
+  bind(&entry);
+  cmpq(start_offset, end_offset);
+  j(less, &loop);
+}
+
+
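InitializeFieldsWithFiller is a plain pointer-sized fill; the emitted jump-into-the-middle loop corresponds to a test-first loop in C++ (a sketch, treating the registers as untagged addresses):

    #include <cstdint>

    void InitializeFieldsWithFillerModel(uintptr_t* start, uintptr_t* end,
                                         uintptr_t filler) {
      // jmp(&entry) makes the emitted loop test before the first store,
      // so an empty range writes nothing, exactly like this for loop.
      for (uintptr_t* p = start; p < end; ++p) {
        *p = filler;
      }
    }
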
 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
   if (context_chain_length > 0) {
     // Move up the chain of contexts to the context containing the slot.
     movq(dst, Operand(rsi, Context::SlotOffset(Context::PREVIOUS_INDEX)));
     for (int i = 1; i < context_chain_length; i++) {
       movq(dst, Operand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
     }
   } else {
     // Slot is in the current function context. Move it into the
     // destination register in case we store into it (the write barrier
(...skipping 79 matching lines...)


 void MacroAssembler::CallCFunction(ExternalReference function,
                                    int num_arguments) {
   LoadAddress(rax, function);
   CallCFunction(rax, num_arguments);
 }


 void MacroAssembler::CallCFunction(Register function, int num_arguments) {
+  ASSERT(has_frame());
   // Check stack alignment.
   if (emit_debug_code()) {
     CheckStackAlignment();
   }

   call(function);
   ASSERT(OS::ActivationFrameAlignment() != 0);
   ASSERT(num_arguments >= 0);
   int argument_slots_on_stack =
       ArgumentStackSlotsForCFunctionCall(num_arguments);
   movq(rsp, Operand(rsp, argument_slots_on_stack * kPointerSize));
 }


+bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
+  if (r1.is(r2)) return true;
+  if (r1.is(r3)) return true;
+  if (r1.is(r4)) return true;
+  if (r2.is(r3)) return true;
+  if (r2.is(r4)) return true;
+  if (r3.is(r4)) return true;
+  return false;
+}
+
+
 CodePatcher::CodePatcher(byte* address, int size)
     : address_(address),
       size_(size),
       masm_(Isolate::Current(), address, size + Assembler::kGap) {
   // Create a new macro assembler pointing to the address of the code to patch.
   // The size is adjusted with kGap in order for the assembler to generate size
   // bytes of instructions without failing with buffer size constraints.
   ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
 }


 CodePatcher::~CodePatcher() {
   // Indicate that code has changed.
   CPU::FlushICache(address_, size_);

   // Check that the code was patched as expected.
   ASSERT(masm_.pc_ == address_ + size_);
   ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
 }


+void MacroAssembler::CheckPageFlag(
+    Register object,
+    Register scratch,
+    int mask,
+    Condition cc,
+    Label* condition_met,
+    Label::Distance condition_met_distance) {
+  ASSERT(cc == zero || cc == not_zero);
+  if (scratch.is(object)) {
+    and_(scratch, Immediate(~Page::kPageAlignmentMask));
+  } else {
+    movq(scratch, Immediate(~Page::kPageAlignmentMask));
+    and_(scratch, object);
+  }
+  if (mask < (1 << kBitsPerByte)) {
+    testb(Operand(scratch, MemoryChunk::kFlagsOffset),
+          Immediate(static_cast<uint8_t>(mask)));
+  } else {
+    testl(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
+  }
+  j(cc, condition_met, condition_met_distance);
+}
+
+
+void MacroAssembler::JumpIfBlack(Register object,
+                                 Register bitmap_scratch,
+                                 Register mask_scratch,
+                                 Label* on_black,
+                                 Label::Distance on_black_distance) {
+  ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, rcx));
+  GetMarkBits(object, bitmap_scratch, mask_scratch);
+
+  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+  // The mask_scratch register contains a 1 at the position of the first bit
+  // and a 0 at all other positions, including the position of the second bit.
+  movq(rcx, mask_scratch);
+  // Make rcx into a mask that covers both marking bits using the operation
+  // rcx = mask | (mask << 1).
+  lea(rcx, Operand(mask_scratch, mask_scratch, times_2, 0));
+  // Note that we are using a 4-byte aligned 8-byte load.
+  and_(rcx, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
+  cmpq(mask_scratch, rcx);
+  j(equal, on_black, on_black_distance);
+}
+
+
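JumpIfBlack tests a two-bit mark pair: the lea with times_2 scaling computes mask | (mask << 1) in a single instruction, producing a mask that covers both bits. A sketch of the bit logic (bitmap_cell stands for the 8-byte load from the page's mark bitmap):

    #include <cstdint>

    // Black is encoded as "10": first mark bit set, second bit clear.
    bool IsBlackModel(uint64_t bitmap_cell, uint64_t first_bit_mask) {
      uint64_t pair_mask = first_bit_mask | (first_bit_mask << 1);  // lea trick.
      // Keep just the two mark bits; equality with first_bit_mask
      // means exactly the "10" pattern.
      return (bitmap_cell & pair_mask) == first_bit_mask;
    }
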
+// Detect some, but not all, common pointer-free objects. This is used by the
+// incremental write barrier which doesn't care about oddballs (they are always
+// marked black immediately so this code is not hit).
+void MacroAssembler::JumpIfDataObject(
+    Register value,
+    Register scratch,
+    Label* not_data_object,
+    Label::Distance not_data_object_distance) {
+  Label is_data_object;
+  movq(scratch, FieldOperand(value, HeapObject::kMapOffset));
+  CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
+  j(equal, &is_data_object, Label::kNear);
+  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+  // If it's a string and it's not a cons string then it's an object containing
+  // no GC pointers.
+  testb(FieldOperand(scratch, Map::kInstanceTypeOffset),
+        Immediate(kIsIndirectStringMask | kIsNotStringMask));
+  j(not_zero, not_data_object, not_data_object_distance);
+  bind(&is_data_object);
+}
+
+
+void MacroAssembler::GetMarkBits(Register addr_reg,
+                                 Register bitmap_reg,
+                                 Register mask_reg) {
+  ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, rcx));
+  movq(bitmap_reg, addr_reg);
+  // Sign extended 32 bit immediate.
+  and_(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
+  movq(rcx, addr_reg);
+  int shift =
+      Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
+  shrl(rcx, Immediate(shift));
+  and_(rcx,
+       Immediate((Page::kPageAlignmentMask >> shift) &
+                 ~(Bitmap::kBytesPerCell - 1)));
+
+  addq(bitmap_reg, rcx);
+  movq(rcx, addr_reg);
+  shrl(rcx, Immediate(kPointerSizeLog2));
+  and_(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1));
+  movl(mask_reg, Immediate(1));
+  shl_cl(mask_reg);
+}
+
+
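GetMarkBits converts an address into bitmap coordinates: the page-relative word index selects a 32-bit bitmap cell (bitmap_reg ends up pointing at it) and the low five bits of the word index select the bit within that cell (built in mask_reg via shl_cl). A sketch of the same arithmetic, assuming 64-bit pointers, 32-bit bitmap cells, and 1 MB pages:

    #include <cstdint>

    const int kPointerSizeLog2 = 3;   // 8-byte words.
    const int kBitsPerCellLog2 = 5;   // 32 mark bits per bitmap cell.
    const uintptr_t kPageAlignmentMask = (uintptr_t(1) << 20) - 1;  // Assumed.

    struct MarkBitLocation {
      uintptr_t cell_index;  // Which bitmap cell within the page.
      uint32_t mask;         // Bit within that cell for this word.
    };

    MarkBitLocation GetMarkBitsModel(uintptr_t addr) {
      uintptr_t word_index = (addr & kPageAlignmentMask) >> kPointerSizeLog2;
      MarkBitLocation loc;
      loc.cell_index = word_index >> kBitsPerCellLog2;
      loc.mask = uint32_t(1) << (word_index & ((1 << kBitsPerCellLog2) - 1));
      return loc;
    }
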
+void MacroAssembler::EnsureNotWhite(
+    Register value,
+    Register bitmap_scratch,
+    Register mask_scratch,
+    Label* value_is_white_and_not_data,
+    Label::Distance distance) {
+  ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, rcx));
+  GetMarkBits(value, bitmap_scratch, mask_scratch);
+
+  // If the value is black or grey we don't need to do anything.
+  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
+  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+
+  Label done;
+
+  // Since both black and grey have a 1 in the first position and white does
+  // not have a 1 there we only need to check one bit.
+  testq(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
+  j(not_zero, &done, Label::kNear);
+
+  if (FLAG_debug_code) {
+    // Check for impossible bit pattern.
+    Label ok;
+    push(mask_scratch);
+    // shl. May overflow making the check conservative.
+    addq(mask_scratch, mask_scratch);
+    testq(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
+    j(zero, &ok, Label::kNear);
+    int3();
+    bind(&ok);
+    pop(mask_scratch);
+  }
+
+  // Value is white. We check whether it is data that doesn't need scanning.
+  // Currently only checks for HeapNumber and non-cons strings.
+  Register map = rcx;  // Holds map while checking type.
+  Register length = rcx;  // Holds length of object after checking type.
+  Label not_heap_number;
+  Label is_data_object;
+
+  // Check for heap-number.
+  movq(map, FieldOperand(value, HeapObject::kMapOffset));
+  CompareRoot(map, Heap::kHeapNumberMapRootIndex);
+  j(not_equal, &not_heap_number, Label::kNear);
+  movq(length, Immediate(HeapNumber::kSize));
+  jmp(&is_data_object, Label::kNear);
+
+  bind(&not_heap_number);
+  // Check for strings.
+  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+  // If it's a string and it's not a cons string then it's an object containing
+  // no GC pointers.
+  Register instance_type = rcx;
+  movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
+  testb(instance_type, Immediate(kIsIndirectStringMask | kIsNotStringMask));
+  j(not_zero, value_is_white_and_not_data);
+  // It's a non-indirect (non-cons and non-slice) string.
+  // If it's external, the length is just ExternalString::kSize.
+  // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
+  Label not_external;
+  // External strings are the only ones with the kExternalStringTag bit
+  // set.
+  ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
+  ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
+  testb(instance_type, Immediate(kExternalStringTag));
+  j(zero, &not_external, Label::kNear);
+  movq(length, Immediate(ExternalString::kSize));
+  jmp(&is_data_object, Label::kNear);
+
+  bind(&not_external);
+  // Sequential string, either ASCII or UC16.
+  ASSERT(kAsciiStringTag == 0x04);
+  and_(length, Immediate(kStringEncodingMask));
+  xor_(length, Immediate(kStringEncodingMask));
+  addq(length, Immediate(0x04));
+  // Value is now either 4 (if ASCII) or 8 (if UC16), i.e. char-size shifted
+  // by 2.
+  imul(length, FieldOperand(value, String::kLengthOffset));
+  shr(length, Immediate(2 + kSmiTagSize + kSmiShiftSize));
+  addq(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
+  and_(length, Immediate(~kObjectAlignmentMask));
+
+  bind(&is_data_object);
+  // Value is a data object, and it is white. Mark it black. Since we know
+  // that the object is white we can make it black by flipping one bit.
+  or_(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
+
+  and_(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
+  addl(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset), length);
+
+  bind(&done);
+}
+
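The string-size computation at the end of EnsureNotWhite is worth unpacking: with kAsciiStringTag == kStringEncodingMask == 0x04, the and/xor/add triple maps the instance type to 4 for ASCII and 8 for two-byte strings, i.e. the character size pre-shifted left by 2; multiplying by the smi-tagged length and shifting right by 2 + kSmiTagSize + kSmiShiftSize then cancels both the extra factor of 4 and the smi shift. In plain integer terms (a sketch; the header-size constant is an assumption for illustration):

    #include <cstdint>

    int64_t SeqStringSizeModel(bool is_ascii, int64_t length_in_chars) {
      const int64_t kHeaderSize = 24;        // Assumed SeqString::kHeaderSize.
      const int64_t kAlignMask = 8 - 1;      // kObjectAlignmentMask on x64.
      int64_t char_size = is_ascii ? 1 : 2;  // The 4/8 trick, undone.
      int64_t size = kHeaderSize + length_in_chars * char_size;
      return (size + kAlignMask) & ~kAlignMask;  // Round up to alignment.
    }
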
 } }  // namespace v8::internal

 #endif  // V8_TARGET_ARCH_X64