Chromium Code Reviews

Unified Diff: src/ia32/lithium-codegen-ia32.cc

Issue 11817017: Additional work to get array literal allocation tracking working, even with --always-opt (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Code cleanup, created 7 years, 11 months ago
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 5319 matching lines...)
     ASSERT(instr->hydrogen()->depth() == 1);
     FastCloneShallowArrayStub::Mode mode =
         FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
     FastCloneShallowArrayStub stub(mode, length);
     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   } else if (instr->hydrogen()->depth() > 1) {
     CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
   } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
     CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
   } else {
+    // TODO(mvstanton): I'm doing more work than necessary here by running
+    // CLONE_ANY_ELEMENTS instead of the more specific stub, but I'm doing it
+    // just because I want to track allocation info. Alternative approach: quit
+    // baking allocation tracking info into this field, instead just have it on
+    // all the time?
     FastCloneShallowArrayStub::Mode mode =
+        FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS_WITH_ALLOCATION_SITE_INFO;
+    /*
         boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS
             ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
             : FastCloneShallowArrayStub::CLONE_ELEMENTS;
+    */
     FastCloneShallowArrayStub stub(mode, length);
     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   }
 }

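Note on the hunk above: it chooses among four cloning strategies for an array literal. The guard of the first branch falls outside the visible context; given the ASSERT and the COPY_ON_WRITE_ELEMENTS mode, it presumably tests for a copy-on-write boilerplate. A minimal standalone sketch of the dispatch order follows; the function and enum names are assumptions for illustration, not V8's, and only mirror the if/else chain shown in the diff.

#include <cassert>

enum class CloneStrategy {
  kCopyOnWriteStub,   // FastCloneShallowArrayStub(COPY_ON_WRITE_ELEMENTS)
  kRuntimeDeep,       // Runtime::kCreateArrayLiteral, handles nested literals
  kRuntimeShallow,    // Runtime::kCreateArrayLiteralShallow, literal too long
  kTrackingStub,      // CLONE_ANY_ELEMENTS_WITH_ALLOCATION_SITE_INFO
};

// Mirrors the if/else chain above; is_cow_boilerplate stands in for the
// guard that is outside the visible diff context.
CloneStrategy ChooseStrategy(bool is_cow_boilerplate, int depth, int length,
                             int max_cloned_length) {
  if (is_cow_boilerplate) {
    assert(depth == 1);  // the ASSERT in the first branch
    return CloneStrategy::kCopyOnWriteStub;
  }
  if (depth > 1) return CloneStrategy::kRuntimeDeep;
  if (length > max_cloned_length) return CloneStrategy::kRuntimeShallow;
  return CloneStrategy::kTrackingStub;  // extra cost, but tracks allocation
}

int main() {
  // A short, shallow literal with a mutable boilerplate takes the
  // tracking-stub path, which is the case this patch changes.
  return ChooseStrategy(false, 1, 4, 8) == CloneStrategy::kTrackingStub ? 0 : 1;
}

The TODO in the patch acknowledges the trade-off: the generic tracking stub does more work than the elements-kind-specific stubs it replaces, in exchange for allocation tracking.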
 void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
                             Register result,
                             Register source,
-                            int* offset) {
+                            int* offset,
+                            bool create_allocation_site_info) {
   ASSERT(!source.is(ecx));
   ASSERT(!result.is(ecx));

   if (FLAG_debug_code) {
     __ LoadHeapObject(ecx, object);
     __ cmp(source, ecx);
     __ Assert(equal, "Unexpected object literal boilerplate");
     __ mov(ecx, FieldOperand(source, HeapObject::kMapOffset));
     __ cmp(ecx, Handle<Map>(object->map()));
     __ Assert(equal, "Unexpected boilerplate map");
     __ mov(ecx, FieldOperand(ecx, Map::kBitField2Offset));
     __ and_(ecx, Map::kElementsKindMask);
     __ cmp(ecx, object->GetElementsKind() << Map::kElementsKindShift);
     __ Assert(equal, "Unexpected boilerplate elements kind");
   }

   // Only elements backing stores for non-COW arrays need to be copied.
   Handle<FixedArrayBase> elements(object->elements());
   bool has_elements = elements->length() > 0 &&
       elements->map() != isolate()->heap()->fixed_cow_array_map();

   // Increase the offset so that subsequent objects end up right after
   // this object and its backing store.
   int object_offset = *offset;
   int object_size = object->map()->instance_size();
   int elements_offset = *offset + object_size;
+  if (create_allocation_site_info) {
+    elements_offset += AllocationSiteInfo::kSize;
+  }
   int elements_size = has_elements ? elements->Size() : 0;
   *offset += object_size + elements_size;
+  if (create_allocation_site_info) {
+    *offset += AllocationSiteInfo::kSize;
+  }

   // Copy object header.
   ASSERT(object->properties()->length() == 0);
   int inobject_properties = object->map()->inobject_properties();
   int header_size = object_size - inobject_properties * kPointerSize;
   for (int i = 0; i < header_size; i += kPointerSize) {
     if (has_elements && i == JSObject::kElementsOffset) {
       __ lea(ecx, Operand(result, elements_offset));
     } else {
       __ mov(ecx, FieldOperand(source, i));
(...skipping 12 matching lines...)
       __ LoadHeapObject(source, value_object);
       EmitDeepCopy(value_object, result, source, offset);
     } else if (value->IsHeapObject()) {
       __ LoadHeapObject(ecx, Handle<HeapObject>::cast(value));
       __ mov(FieldOperand(result, total_offset), ecx);
     } else {
       __ mov(FieldOperand(result, total_offset), Immediate(value));
     }
   }

+  // Build Allocation Site Info if desired
+  if (create_allocation_site_info) {
danno 2013/01/10 22:58:59 Here and other platforms, only do this if and "cre
mvstanton 2013/01/11 13:43:01 Done.
+    __ mov(FieldOperand(result, object_size),
+           Immediate(Handle<Map>(isolate()->heap()->
+                                 allocation_site_info_map())));
+    __ mov(FieldOperand(result, object_size + kPointerSize), source);
+  }
+
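When create_allocation_site_info is set, the block above writes a two-word record (a map pointer, then a back-pointer to the boilerplate in "source") immediately after the copied object; elements_offset was bumped earlier so the backing store lands after that record. A small standalone sketch of the resulting offsets, where the concrete sizes are illustrative assumptions rather than V8 constants:

#include <cstdio>

int main() {
  const int kPointerSize = 4;                            // ia32 word size
  const int kAllocationSiteInfoSize = 2 * kPointerSize;  // map + back-pointer
                                                         // (assumed)
  const int object_size = 16;                            // example instance size

  // Mirrors EmitDeepCopy's bookkeeping when the flag is on.
  int info_map_offset = object_size;                     // allocation_site_info_map
  int info_payload_offset = object_size + kPointerSize;  // boilerplate pointer
  int elements_offset = object_size + kAllocationSiteInfoSize;

  std::printf("site info map at +%d, payload at +%d, elements at +%d\n",
              info_map_offset, info_payload_offset, elements_offset);
  return 0;
}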
   if (has_elements) {
     // Copy elements backing store header.
     __ LoadHeapObject(source, elements);
     for (int i = 0; i < FixedArray::kHeaderSize; i += kPointerSize) {
       __ mov(ecx, FieldOperand(source, i));
       __ mov(FieldOperand(result, elements_offset + i), ecx);
     }

     // Copy elements backing store content.
     int elements_length = elements->length();
(...skipping 62 matching lines...)
   __ AllocateInNewSpace(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
   __ jmp(&allocated);

   __ bind(&runtime_allocate);
   __ push(Immediate(Smi::FromInt(size)));
   CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);

   __ bind(&allocated);
   int offset = 0;
   __ LoadHeapObject(ebx, instr->hydrogen()->boilerplate());
-  EmitDeepCopy(instr->hydrogen()->boilerplate(), eax, ebx, &offset);
+  EmitDeepCopy(instr->hydrogen()->boilerplate(), eax, ebx, &offset,
+               instr->hydrogen()->create_allocation_site_info());
   ASSERT_EQ(size, offset);
 }

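The ASSERT_EQ(size, offset) above explains why EmitDeepCopy must also bump *offset by AllocationSiteInfo::kSize: the caller allocates "size" bytes up front, so presumably the hydrogen side folds the extra record into that size when create_allocation_site_info is true, and the deep copy has to account for the same bytes or the assert fires in debug builds. A sketch of the invariant with assumed sizes, not V8's:

#include <cassert>

// Assumed sizes, for illustration only.
const int kObjectSize = 16;
const int kElementsSize = 24;
const int kAllocationSiteInfoSize = 8;

// Models the *offset accounting in EmitDeepCopy for a single,
// non-nested boilerplate.
int DeepCopiedBytes(bool create_allocation_site_info) {
  int offset = 0;
  offset += kObjectSize + kElementsSize;  // object + backing store
  if (create_allocation_site_info) {
    offset += kAllocationSiteInfoSize;    // the tracking record
  }
  return offset;
}

int main() {
  // If the allocation size includes the record, the copy must account
  // for it too; otherwise ASSERT_EQ(size, offset) would fail.
  int size = kObjectSize + kElementsSize + kAllocationSiteInfoSize;
  assert(size == DeepCopiedBytes(true));
  return 0;
}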
 void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
   ASSERT(ToRegister(instr->context()).is(esi));
   Handle<FixedArray> literals(instr->environment()->closure()->literals());
   Handle<FixedArray> constant_properties =
       instr->hydrogen()->constant_properties();

(...skipping 448 matching lines...)
                                     FixedArray::kHeaderSize - kPointerSize));
   __ bind(&done);
 }


 #undef __

 } }  // namespace v8::internal

 #endif  // V8_TARGET_ARCH_IA32
