Chromium Code Reviews

Unified diff: src/x64/codegen-x64.cc

Issue 2121022: Refactor x64 named loads to agree with ia32 implementation. Remove dead code... (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: created 10 years, 7 months ago
 // Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 585 matching lines...)
       && (allocator()->count(r15) == (frame()->is_used(r15) ? 1 : 0))
       && (allocator()->count(r12) == (frame()->is_used(r12) ? 1 : 0));
 }
 #endif
 
 
 class DeferredReferenceGetKeyedValue: public DeferredCode {
  public:
   explicit DeferredReferenceGetKeyedValue(Register dst,
                                           Register receiver,
-                                          Register key,
-                                          bool is_global)
-      : dst_(dst), receiver_(receiver), key_(key), is_global_(is_global) {
+                                          Register key)
+      : dst_(dst), receiver_(receiver), key_(key) {
     set_comment("[ DeferredReferenceGetKeyedValue");
   }
 
   virtual void Generate();
 
   Label* patch_site() { return &patch_site_; }
 
  private:
   Label patch_site_;
   Register dst_;
   Register receiver_;
   Register key_;
-  bool is_global_;
 };
 
 
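DeferredReferenceGetKeyedValue above follows V8's DeferredCode pattern: the inlined fast path branches to an out-of-line deferred block when a check fails, and the deferred block calls the generic IC stub and rejoins at BindExit, keeping the common case compact. A minimal standalone sketch of that control flow (plain C++ with a map lookup standing in for the patched map check; not V8 code):

#include <functional>
#include <iostream>
#include <string>
#include <unordered_map>

// Fast path plus out-of-line fallback, in the shape of DeferredCode.
int LoadWithDeferred(const std::unordered_map<std::string, int>& object,
                     const std::string& key,
                     const std::function<int()>& deferred) {
  auto it = object.find(key);                  // stands in for the map check
  if (it == object.end()) return deferred();   // jump to deferred->entry_label()
  return it->second;                           // fast path; deferred->BindExit()
}

int main() {
  std::unordered_map<std::string, int> obj{{"x", 42}};
  auto slow = [] {
    std::cout << "deferred: calling the generic IC stub\n";
    return -1;
  };
  std::cout << LoadWithDeferred(obj, "x", slow) << "\n";  // hits the fast path
  std::cout << LoadWithDeferred(obj, "y", slow) << "\n";  // takes the deferred path
}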
 void DeferredReferenceGetKeyedValue::Generate() {
   __ push(receiver_);  // First IC argument.
   __ push(key_);  // Second IC argument.
 
   // Calculate the delta from the IC call instruction to the map check
   // movq instruction in the inlined version. This delta is stored in
   // a test(rax, delta) instruction after the call so that we can find
   // it in the IC initialization code and patch the movq instruction.
   // This means that we cannot allow test instructions after calls to
   // KeyedLoadIC stubs in other places.
   Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
-  RelocInfo::Mode mode = is_global_
-      ? RelocInfo::CODE_TARGET_CONTEXT
-      : RelocInfo::CODE_TARGET;
-  __ Call(ic, mode);
+  __ Call(ic, RelocInfo::CODE_TARGET);
   // The delta from the start of the map-compare instruction to the
   // test instruction. We use masm_-> directly here instead of the __
   // macro because the macro sometimes uses macro expansion to turn
   // into something that can't return a value. This is encountered
   // when doing generated code coverage tests.
   int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
   // Here we use masm_-> instead of the __ macro because this is the
   // instruction that gets patched and coverage code gets in the way.
   // TODO(X64): Consider whether it's worth switching the test to a
   // 7-byte NOP with non-zero immediate (0f 1f 80 xxxxxxxx) which won't
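The comments above describe how the patch site is found again at runtime: the delta from the patchable movq to the point just after the IC call is encoded in the immediate of a test instruction placed right after the call. A small standalone simulation of the bookkeeping (invented byte sequences, not real x64 encoding; V8 stores the delta negated, which this sketch ignores):

#include <cstdint>
#include <cstring>
#include <iostream>
#include <vector>

int main() {
  std::vector<uint8_t> code;
  // Patchable "movq" map check; remember where it starts.
  size_t patch_site = code.size();
  code.insert(code.end(), {0x48, 0xB8});        // placeholder movq opcode bytes
  code.insert(code.end(), 8, 0x00);             // 64-bit immediate (invalid map)
  code.insert(code.end(), {0xE8, 0, 0, 0, 0});  // placeholder call to the IC stub
  // Marker: a "test eax, imm32" whose immediate is the distance back to the
  // patch site, measured from the call's return address.
  size_t delta = code.size() - patch_site;
  code.push_back(0xA9);
  uint32_t imm = static_cast<uint32_t>(delta);
  const uint8_t* p = reinterpret_cast<const uint8_t*>(&imm);
  code.insert(code.end(), p, p + 4);
  // At IC initialization time, walk back from the return address (which
  // points at the marker), read its immediate, and locate the movq to patch.
  uint32_t stored;
  std::memcpy(&stored, &code[code.size() - 4], 4);
  size_t return_address = code.size() - 5;      // the marker starts here
  std::cout << "patch site found at offset " << (return_address - stored) << "\n";
}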
(...skipping 5036 matching lines...)
           // variables. Then load the argument from the arguments
           // object using keyed load.
           Result arguments = allocator()->Allocate();
           ASSERT(arguments.is_valid());
           __ movq(arguments.reg(),
                   ContextSlotOperandCheckExtensions(obj_proxy->var()->slot(),
                                                     arguments,
                                                     slow));
           frame_->Push(&arguments);
           frame_->Push(key_literal->handle());
-          *result = EmitKeyedLoad(false);
+          *result = EmitKeyedLoad();
           frame_->Drop(2);  // Drop key and receiver.
           done->Jump(result);
         }
       }
     }
   }
 }
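Note the calling convention this call site relies on: EmitKeyedLoad() leaves receiver and key on the virtual frame and hands the loaded value back as a separate Result, so a caller that wants only the value drops both inputs afterwards. A trivial stack-discipline sketch (stand-in types, not V8 code):

#include <iostream>
#include <vector>

int main() {
  std::vector<const char*> frame;           // stand-in for the virtual frame
  frame.push_back("arguments object");      // frame_->Push(&arguments)
  frame.push_back("key literal");           // frame_->Push(key_literal->handle())
  const char* result = "loaded element";    // EmitKeyedLoad() keeps both inputs
  frame.pop_back();                         // frame_->Drop(2): key...
  frame.pop_back();                         // ...and receiver
  std::cout << result << ", frame height " << frame.size() << "\n";
}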
 
 
 void CodeGenerator::LoadGlobal() {
(...skipping 1474 matching lines...)
       break;
   }
   deferred->BindExit();
   left->Unuse();
   right->Unuse();
   ASSERT(answer.is_valid());
   return answer;
 }
 
 
-Result CodeGenerator::EmitKeyedLoad(bool is_global) {
-  Comment cmnt(masm_, "[ Load from keyed Property");
+Result CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
+#ifdef DEBUG
+  int original_height = frame()->height();
+#endif
+  Result result;
+  // Do not inline the inobject property case for loads from the global
+  // object. Also do not inline for unoptimized code. This saves time
+  // in the code generator. Unoptimized code is toplevel code or code
+  // that is not in a loop.
+  if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
+    Comment cmnt(masm(), "[ Load from named Property");
+    frame()->Push(name);
+
+    RelocInfo::Mode mode = is_contextual
+        ? RelocInfo::CODE_TARGET_CONTEXT
+        : RelocInfo::CODE_TARGET;
+    result = frame()->CallLoadIC(mode);
+    // A test rax instruction following the call signals that the
+    // inobject property case was inlined. Ensure that there is not
+    // a test rax instruction here.
+    __ nop();
+  } else {
+    // Inline the inobject property case.
+    Comment cmnt(masm(), "[ Inlined named property load");
+    Result receiver = frame()->Pop();
+    receiver.ToRegister();
+    result = allocator()->Allocate();
+    ASSERT(result.is_valid());
+
+    // Cannot use r12 for receiver, because that changes
+    // the distance between a call and a fixup location,
+    // due to a special encoding of r12 as r/m in a ModR/M byte.
+    if (receiver.reg().is(r12)) {
+      frame()->Spill(receiver.reg());  // It will be overwritten with result.
+      // Swap receiver and value.
+      __ movq(result.reg(), receiver.reg());
+      Result temp = receiver;
+      receiver = result;
+      result = temp;
+    }
+
+    DeferredReferenceGetNamedValue* deferred =
+        new DeferredReferenceGetNamedValue(result.reg(), receiver.reg(), name);
+
+    // Check that the receiver is a heap object.
+    __ JumpIfSmi(receiver.reg(), deferred->entry_label());
+
+    __ bind(deferred->patch_site());
+    // This is the map check instruction that will be patched (so we can't
+    // use the double underscore macro that may insert instructions).
+    // Initially use an invalid map to force a failure.
+    masm()->Move(kScratchRegister, Factory::null_value());
+    masm()->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
+                 kScratchRegister);
+    // This branch is always a forwards branch so it's always a fixed
+    // size which allows the assert below to succeed and patching to work.
+    // Don't use deferred->Branch(...), since that might add coverage code.
+    masm()->j(not_equal, deferred->entry_label());
+
+    // The delta from the patch label to the load offset must be
+    // statically known.
+    ASSERT(masm()->SizeOfCodeGeneratedSince(deferred->patch_site()) ==
+           LoadIC::kOffsetToLoadInstruction);
+    // The initial (invalid) offset has to be large enough to force
+    // a 32-bit instruction encoding to allow patching with an
+    // arbitrary offset. Use kMaxInt (minus kHeapObjectTag).
+    int offset = kMaxInt;
+    masm()->movq(result.reg(), FieldOperand(receiver.reg(), offset));
+
+    __ IncrementCounter(&Counters::named_load_inline, 1);
+    deferred->BindExit();
+    frame()->Push(&receiver);
+  }
+  ASSERT(frame()->height() == original_height);
+  return result;
+}
+
+
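Two x64 encoding details drive the choices in EmitNamedLoad above. The placeholder offset is kMaxInt so the assembler is forced into the 4-byte displacement form, which a later patch can overwrite with any real field offset; and r12 is avoided as the receiver because its low ModR/M bits (100, shared with rsp) require an extra SIB byte, which would change the call-to-patch-site distance. A standalone sketch of those size rules (my summary of the encoding, not the V8 assembler):

#include <climits>
#include <cstdint>
#include <iostream>

// Bytes needed for a [base + disp] memory operand after the opcode.
int MemOperandBytes(int base_low3, int32_t disp) {
  int bytes = 1;                                  // ModR/M byte
  if (base_low3 == 0b100) bytes += 1;             // SIB byte for rsp/r12 bases
  bytes += (disp >= INT8_MIN && disp <= INT8_MAX) ? 1 : 4;  // disp8 or disp32
  return bytes;
}

int main() {
  std::cout << "[rax + 8]       : " << MemOperandBytes(0b000, 8) << " bytes\n";
  std::cout << "[rax + kMaxInt] : " << MemOperandBytes(0b000, INT_MAX) << " bytes\n";
  std::cout << "[r12 + kMaxInt] : " << MemOperandBytes(0b100, INT_MAX) << " bytes\n";
}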
+Result CodeGenerator::EmitKeyedLoad() {
+#ifdef DEBUG
+  int original_height = frame()->height();
+#endif
+  Result result;
   // Inline array load code if inside of a loop. We do not know
   // the receiver map yet, so we initially generate the code with
   // a check against an invalid map. In the inline cache code, we
   // patch the map check if appropriate.
   if (loop_nesting() > 0) {
     Comment cmnt(masm_, "[ Inlined load from keyed Property");
 
+    // Use a fresh temporary to load the elements without destroying
+    // the receiver which is needed for the deferred slow case.
+    // Allocate the temporary early so that we use rax if it is free.
+    Result elements = allocator()->Allocate();
+    ASSERT(elements.is_valid());
+
+
     Result key = frame_->Pop();
     Result receiver = frame_->Pop();
     key.ToRegister();
     receiver.ToRegister();
 
-    // Use a fresh temporary to load the elements without destroying
-    // the receiver which is needed for the deferred slow case.
-    Result elements = allocator()->Allocate();
-    ASSERT(elements.is_valid());
-
-    // Use a fresh temporary for the index and later the loaded
-    // value.
+    // Use a fresh temporary for the index.
     Result index = allocator()->Allocate();
     ASSERT(index.is_valid());
 
     DeferredReferenceGetKeyedValue* deferred =
-        new DeferredReferenceGetKeyedValue(index.reg(),
+        new DeferredReferenceGetKeyedValue(elements.reg(),
                                            receiver.reg(),
-                                           key.reg(),
-                                           is_global);
+                                           key.reg());
 
-    // Check that the receiver is not a smi (only needed if this
-    // is not a load from the global context) and that it has the
-    // expected map.
-    if (!is_global) {
-      __ JumpIfSmi(receiver.reg(), deferred->entry_label());
-    }
+    __ JumpIfSmi(receiver.reg(), deferred->entry_label());
 
+    // Check that the receiver has the expected map.
     // Initially, use an invalid map. The map is patched in the IC
     // initialization code.
     __ bind(deferred->patch_site());
     // Use masm-> here instead of the double underscore macro since extra
     // coverage code can interfere with the patching. Do not use
     // root array to load null_value, since it must be patched with
     // the expected receiver map.
     masm_->movq(kScratchRegister, Factory::null_value(),
                 RelocInfo::EMBEDDED_OBJECT);
     masm_->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
(...skipping 10 matching lines...)
     __ Cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
            Factory::fixed_array_map());
     deferred->Branch(not_equal);
 
     // Shift the key to get the actual index value and check that
     // it is within bounds.
     __ SmiToInteger32(index.reg(), key.reg());
     __ cmpl(index.reg(),
             FieldOperand(elements.reg(), FixedArray::kLengthOffset));
     deferred->Branch(above_equal);
-
     // The index register holds the un-smi-tagged key. It has been
     // zero-extended to 64-bits, so it can be used directly as index in the
     // operand below.
     // Load and check that the result is not the hole. We could
     // reuse the index or elements register for the value.
     //
     // TODO(206): Consider whether it makes sense to try some
     // heuristic about which register to reuse. For example, if
     // one is rax, then we can reuse that one because the value
     // coming from the deferred code will be in rax.
-    Result value = index;
-    __ movq(value.reg(),
+    __ movq(elements.reg(),
             Operand(elements.reg(),
                     index.reg(),
                     times_pointer_size,
                     FixedArray::kHeaderSize - kHeapObjectTag));
+    result = elements;
     elements.Unuse();
     index.Unuse();
-    __ CompareRoot(value.reg(), Heap::kTheHoleValueRootIndex);
+    __ CompareRoot(result.reg(), Heap::kTheHoleValueRootIndex);
     deferred->Branch(equal);
     __ IncrementCounter(&Counters::keyed_load_inline, 1);
 
     deferred->BindExit();
-    // Restore the receiver and key to the frame and push the
-    // result on top of it.
     frame_->Push(&receiver);
     frame_->Push(&key);
-    return value;
-
   } else {
     Comment cmnt(masm_, "[ Load from keyed Property");
-    RelocInfo::Mode mode = is_global
-        ? RelocInfo::CODE_TARGET_CONTEXT
-        : RelocInfo::CODE_TARGET;
-    Result answer = frame_->CallKeyedLoadIC(mode);
+    result = frame_->CallKeyedLoadIC(RelocInfo::CODE_TARGET);
     // Make sure that we do not have a test instruction after the
     // call. A test instruction after the call is used to
     // indicate that we have generated an inline version of the
     // keyed load. The explicit nop instruction is here because
     // the push that follows might be peep-hole optimized away.
     __ nop();
-    return answer;
   }
+  ASSERT(frame()->height() == original_height);
+  return result;
 }
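A standalone sketch (not V8 code) of the guard sequence the inlined keyed load emits: a smi check on the key, a one-shift untag that leaves a zero-extended index, an unsigned bounds check against the backing-store length, and a bail-out when the loaded slot is the hole sentinel, since the real value may live on the prototype chain. The smi layout and sentinel value here are assumptions for illustration:

#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

constexpr int kSmiShift = 32;              // assumed x64 smi layout: value in high half
constexpr uint64_t kTheHole = 0xDEADBEEF;  // stand-in for the hole sentinel

uint64_t SmiTag(int32_t n) {
  return static_cast<uint64_t>(static_cast<uint32_t>(n)) << kSmiShift;
}

bool FastKeyedLoad(const std::vector<uint64_t>& elements, uint64_t key,
                   uint64_t* value) {
  if (key & 1) return false;                       // not a smi: deferred path
  int32_t index = static_cast<int32_t>(key >> kSmiShift);  // SmiToInteger32
  if (static_cast<uint32_t>(index) >= elements.size()) {
    return false;                                  // deferred->Branch(above_equal)
  }
  uint64_t v = elements[index];
  if (v == kTheHole) return false;                 // deferred->Branch(equal)
  *value = v;                                      // inline fast path hit
  return true;
}

int main() {
  std::vector<uint64_t> elements = {10, kTheHole, 30};
  for (int i = 0; i < 4; ++i) {
    uint64_t v;
    bool hit = FastKeyedLoad(elements, SmiTag(i), &v);
    std::cout << "load[" << i << "]: "
              << (hit ? std::to_string(v) : std::string("miss -> KeyedLoadIC"))
              << "\n";
  }
}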
 
 
 #undef __
 #define __ ACCESS_MASM(masm)
 
 
 Handle<String> Reference::GetName() {
   ASSERT(type_ == NAMED);
   Property* property = expression_->AsProperty();
(...skipping 22 matching lines...)
   if (property != NULL) {
     cgen_->CodeForSourcePosition(property->position());
   }
 
   switch (type_) {
     case SLOT: {
       Comment cmnt(masm, "[ Load from Slot");
       Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
       ASSERT(slot != NULL);
       cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
+      if (!persist_after_get_) set_unloaded();
       break;
     }
 
     case NAMED: {
       Variable* var = expression_->AsVariableProxy()->AsVariable();
       bool is_global = var != NULL;
       ASSERT(!is_global || var->is_global());
-
-      // Do not inline the inobject property case for loads from the global
-      // object. Also do not inline for unoptimized code. This saves time
-      // in the code generator. Unoptimized code is toplevel code or code
-      // that is not in a loop.
-      if (is_global ||
-          cgen_->scope()->is_global_scope() ||
-          cgen_->loop_nesting() == 0) {
-        Comment cmnt(masm, "[ Load from named Property");
-        cgen_->frame()->Push(GetName());
-
-        RelocInfo::Mode mode = is_global
-            ? RelocInfo::CODE_TARGET_CONTEXT
-            : RelocInfo::CODE_TARGET;
-        Result answer = cgen_->frame()->CallLoadIC(mode);
-        // A test rax instruction following the call signals that the
-        // inobject property case was inlined. Ensure that there is not
-        // a test rax instruction here.
-        __ nop();
-        cgen_->frame()->Push(&answer);
-      } else {
-        // Inline the inobject property case.
-        Comment cmnt(masm, "[ Inlined named property load");
-        Result receiver = cgen_->frame()->Pop();
-        receiver.ToRegister();
-        Result value = cgen_->allocator()->Allocate();
-        ASSERT(value.is_valid());
-        // Cannot use r12 for receiver, because that changes
-        // the distance between a call and a fixup location,
-        // due to a special encoding of r12 as r/m in a ModR/M byte.
-        if (receiver.reg().is(r12)) {
-          // Swap receiver and value.
-          __ movq(value.reg(), receiver.reg());
-          Result temp = receiver;
-          receiver = value;
-          value = temp;
-          cgen_->frame()->Spill(value.reg());  // r12 may have been shared.
-        }
-
-        DeferredReferenceGetNamedValue* deferred =
-            new DeferredReferenceGetNamedValue(value.reg(),
-                                               receiver.reg(),
-                                               GetName());
-
-        // Check that the receiver is a heap object.
-        __ JumpIfSmi(receiver.reg(), deferred->entry_label());
-
-        __ bind(deferred->patch_site());
-        // This is the map check instruction that will be patched (so we can't
-        // use the double underscore macro that may insert instructions).
-        // Initially use an invalid map to force a failure.
-        masm->Move(kScratchRegister, Factory::null_value());
-        masm->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
-                   kScratchRegister);
-        // This branch is always a forwards branch so it's always a fixed
-        // size which allows the assert below to succeed and patching to work.
-        // Don't use deferred->Branch(...), since that might add coverage code.
-        masm->j(not_equal, deferred->entry_label());
-
-        // The delta from the patch label to the load offset must be
-        // statically known.
-        ASSERT(masm->SizeOfCodeGeneratedSince(deferred->patch_site()) ==
-               LoadIC::kOffsetToLoadInstruction);
-        // The initial (invalid) offset has to be large enough to force
-        // a 32-bit instruction encoding to allow patching with an
-        // arbitrary offset. Use kMaxInt (minus kHeapObjectTag).
-        int offset = kMaxInt;
-        masm->movq(value.reg(), FieldOperand(receiver.reg(), offset));
-
-        __ IncrementCounter(&Counters::named_load_inline, 1);
-        deferred->BindExit();
-        cgen_->frame()->Push(&receiver);
-        cgen_->frame()->Push(&value);
+      Result result = cgen_->EmitNamedLoad(GetName(), is_global);
+      cgen_->frame()->Push(&result);
+      if (!persist_after_get_) {
+        cgen_->UnloadReference(this);
       }
       break;
     }
 
     case KEYED: {
-      Comment cmnt(masm, "[ Load from keyed Property");
-      Variable* var = expression_->AsVariableProxy()->AsVariable();
-      bool is_global = var != NULL;
-      ASSERT(!is_global || var->is_global());
+      // A load of a bare identifier (load from global) cannot be keyed.
+      ASSERT(expression_->AsVariableProxy()->AsVariable() == NULL);
 
-      Result value = cgen_->EmitKeyedLoad(is_global);
+      Result value = cgen_->EmitKeyedLoad();
       cgen_->frame()->Push(&value);
+      if (!persist_after_get_) {
+        cgen_->UnloadReference(this);
+      }
       break;
     }
 
     default:
       UNREACHABLE();
   }
-
-  if (!persist_after_get_) {
-    cgen_->UnloadReference(this);
-  }
 }
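GetValue now unloads the reference itself unless persist_after_get_ is set. A reference must stay loaded when the same property is read and then written, as in a compound assignment such as obj.x += 1, which is presumably why the unload is conditional per case rather than unconditional at the end as before. A minimal model of that lifecycle (hypothetical types, not V8 code):

#include <cassert>
#include <iostream>
#include <string>
#include <unordered_map>

// Toy stand-in for Reference: loaded state plus the persist flag.
struct Reference {
  std::unordered_map<std::string, int>* object;
  std::string name;
  bool persist_after_get;
  bool loaded = true;

  int GetValue() {
    assert(loaded);
    int value = (*object)[name];
    if (!persist_after_get) loaded = false;  // a one-shot read unloads the reference
    return value;
  }
  void SetValue(int v) {
    assert(loaded);                          // a compound op still holds the reference
    (*object)[name] = v;
    loaded = false;
  }
};

int main() {
  std::unordered_map<std::string, int> obj{{"x", 41}};
  Reference ref{&obj, "x", /*persist_after_get=*/true};  // as in obj.x += 1
  ref.SetValue(ref.GetValue() + 1);                      // read, then write
  std::cout << "obj.x = " << obj["x"] << "\n";           // prints 42
}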
 
 
 void Reference::TakeValue() {
   // TODO(X64): This function is completely architecture independent. Move
   // it somewhere shared.
 
   // For non-constant frame-allocated slots, we invalidate the value in the
   // slot. For all others, we fall back on GetValue.
   ASSERT(!cgen_->in_spilled_code());
(...skipping 4209 matching lines...)
 }
 
 #endif
 
 
 #undef __
 
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_X64