Chromium Code Reviews

Diff: src/x64/codegen-x64.cc

Issue 6723014: Avoid TLS access for counters. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Fix lint (created 9 years, 9 months ago)
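Every changed hunk below applies the same pattern: the COUNTERS macro reaches the current Isolate through a thread-local lookup on every use, so each __ IncrementCounter(...) in the code generator paid for a TLS access. The patch instead fetches the Counters table once from the isolate the assembler already knows about (masm()->isolate()->counters()) and bumps counters through that local pointer. What follows is a minimal, self-contained sketch of the before/after pattern, not V8 code; the names Isolate, Counters, and current_isolate are hypothetical stand-ins.

// Minimal sketch (not V8 code) of "avoid TLS access for counters".
// Assumed/hypothetical names: Isolate, Counters, current_isolate.
#include <cstdio>

struct Counters {
  int cow_arrays_created_stub = 0;  // stand-in for one stats counter
};

struct Isolate {
  Counters counters_;
  Counters* counters() { return &counters_; }
};

// The "current isolate" lives in thread-local storage.
thread_local Isolate* current_isolate = nullptr;

// Old style: a COUNTERS-like macro that performs a TLS read on every use.
#define COUNTERS (current_isolate->counters())

void bump_old_style() {
  COUNTERS->cow_arrays_created_stub += 1;  // TLS access on each increment
}

void bump_new_style(Isolate* isolate) {
  Counters* counters = isolate->counters();  // one lookup, no TLS afterwards
  counters->cow_arrays_created_stub += 1;
}

int main() {
  Isolate isolate;
  current_isolate = &isolate;
  bump_old_style();
  bump_new_style(&isolate);
  std::printf("%d\n", isolate.counters()->cow_arrays_created_stub);  // prints 2
  return 0;
}

With the isolate already in hand, the counter bump becomes a plain pointer dereference and increment; the thread-local read disappears from the generated-code hot path, which is the point of the change.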
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 4964 matching lines...)

   frame_->Push(&literals);
   frame_->Push(Smi::FromInt(node->literal_index()));
   frame_->Push(node->constant_elements());
   int length = node->values()->length();
   Result clone;
   if (node->constant_elements()->map() == HEAP->fixed_cow_array_map()) {
     FastCloneShallowArrayStub stub(
         FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
     clone = frame_->CallStub(&stub, 3);
-    __ IncrementCounter(COUNTERS->cow_arrays_created_stub(), 1);
+    Counters* counters = masm()->isolate()->counters();
+    __ IncrementCounter(counters->cow_arrays_created_stub(), 1);
   } else if (node->depth() > 1) {
     clone = frame_->CallRuntime(Runtime::kCreateArrayLiteral, 3);
   } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
     clone = frame_->CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
   } else {
     FastCloneShallowArrayStub stub(
         FastCloneShallowArrayStub::CLONE_ELEMENTS, length);
     clone = frame_->CallStub(&stub, 3);
   }
   frame_->Push(&clone);
(...skipping 3008 matching lines...)
   // The call must be followed by a test rax instruction to indicate
   // that the inobject property case was inlined.
   //
   // Store the delta to the map check instruction here in the test
   // instruction.  Use masm_-> instead of the __ macro since the
   // latter can't return a value.
   int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
   // Here we use masm_-> instead of the __ macro because this is the
   // instruction that gets patched and coverage code gets in the way.
   masm_->testl(rax, Immediate(-delta_to_patch_site));
-  __ IncrementCounter(COUNTERS->named_load_inline_miss(), 1);
+  Counters* counters = masm()->isolate()->counters();
+  __ IncrementCounter(counters->named_load_inline_miss(), 1);

   if (!dst_.is(rax)) __ movq(dst_, rax);
 }


 class DeferredReferenceGetKeyedValue: public DeferredCode {
  public:
   explicit DeferredReferenceGetKeyedValue(Register dst,
                                           Register receiver,
                                           Register key)
(...skipping 47 matching lines...)
   // macro because the macro sometimes uses macro expansion to turn
   // into something that can't return a value.  This is encountered
   // when doing generated code coverage tests.
   int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
   // Here we use masm_-> instead of the __ macro because this is the
   // instruction that gets patched and coverage code gets in the way.
   // TODO(X64): Consider whether it's worth switching the test to a
   // 7-byte NOP with non-zero immediate (0f 1f 80 xxxxxxxx) which won't
   // be generated normally.
   masm_->testl(rax, Immediate(-delta_to_patch_site));
-  __ IncrementCounter(COUNTERS->keyed_load_inline_miss(), 1);
+  Counters* counters = masm()->isolate()->counters();
+  __ IncrementCounter(counters->keyed_load_inline_miss(), 1);

   if (!dst_.is(rax)) __ movq(dst_, rax);
 }


 class DeferredReferenceSetKeyedValue: public DeferredCode {
  public:
   DeferredReferenceSetKeyedValue(Register value,
                                  Register key,
                                  Register receiver,
(...skipping 12 matching lines...)
  private:
   Register value_;
   Register key_;
   Register receiver_;
   Label patch_site_;
   StrictModeFlag strict_mode_;
 };


 void DeferredReferenceSetKeyedValue::Generate() {
-  __ IncrementCounter(COUNTERS->keyed_store_inline_miss(), 1);
+  Counters* counters = masm()->isolate()->counters();
+  __ IncrementCounter(counters->keyed_store_inline_miss(), 1);
   // Move value, receiver, and key to registers rax, rdx, and rcx, as
   // the IC stub expects.
   // Move value to rax, using xchg if the receiver or key is in rax.
   if (!value_.is(rax)) {
     if (!receiver_.is(rax) && !key_.is(rax)) {
       __ movq(rax, value_);
     } else {
       __ xchg(rax, value_);
       // Update receiver_ and key_ if they are affected by the swap.
       if (receiver_.is(rax)) {
(...skipping 114 matching lines...)
     // The delta from the patch label to the load offset must be
     // statically known.
     ASSERT(masm()->SizeOfCodeGeneratedSince(deferred->patch_site()) ==
            LoadIC::kOffsetToLoadInstruction);
     // The initial (invalid) offset has to be large enough to force
     // a 32-bit instruction encoding to allow patching with an
     // arbitrary offset.  Use kMaxInt (minus kHeapObjectTag).
     int offset = kMaxInt;
     masm()->movq(result.reg(), FieldOperand(receiver.reg(), offset));

-    __ IncrementCounter(COUNTERS->named_load_inline(), 1);
+    Counters* counters = masm()->isolate()->counters();
+    __ IncrementCounter(counters->named_load_inline(), 1);
     deferred->BindExit();
   }
   ASSERT(frame()->height() == original_height - 1);
   return result;
 }


 Result CodeGenerator::EmitNamedStore(Handle<String> name, bool is_contextual) {
 #ifdef DEBUG
   int expected_height = frame()->height() - (is_contextual ? 1 : 2);
(...skipping 186 matching lines...)
     SmiIndex index =
         masm_->SmiToIndex(kScratchRegister, key.reg(), kPointerSizeLog2);
     __ movq(elements.reg(),
             FieldOperand(elements.reg(),
                          index.reg,
                          index.scale,
                          FixedArray::kHeaderSize));
     result = elements;
     __ CompareRoot(result.reg(), Heap::kTheHoleValueRootIndex);
     deferred->Branch(equal);
-    __ IncrementCounter(COUNTERS->keyed_load_inline(), 1);
+    Counters* counters = masm()->isolate()->counters();
+    __ IncrementCounter(counters->keyed_load_inline(), 1);

     deferred->BindExit();
   } else {
     Comment cmnt(masm_, "[ Load from keyed Property");
     result = frame_->CallKeyedLoadIC(RelocInfo::CODE_TARGET);
     // Make sure that we do not have a test instruction after the
     // call.  A test instruction after the call is used to
     // indicate that we have generated an inline version of the
     // keyed load.  The explicit nop instruction is here because
     // the push that follows might be peep-hole optimized away.
(...skipping 90 matching lines...)
     deferred->Branch(below_equal);

     // Store the value.
     SmiIndex index =
         masm()->SmiToIndex(kScratchRegister, key.reg(), kPointerSizeLog2);
     __ movq(FieldOperand(tmp.reg(),
                          index.reg,
                          index.scale,
                          FixedArray::kHeaderSize),
             result.reg());
-    __ IncrementCounter(COUNTERS->keyed_store_inline(), 1);
+    Counters* counters = masm()->isolate()->counters();
+    __ IncrementCounter(counters->keyed_store_inline(), 1);

     deferred->BindExit();
   } else {
     result = frame()->CallKeyedStoreIC(strict_mode_flag());
     // Make sure that we do not have a test instruction after the
     // call.  A test instruction after the call is used to
     // indicate that we have generated an inline version of the
     // keyed store.
     __ nop();
   }
(...skipping 262 matching lines...)
 }

 #endif


 #undef __

 } }  // namespace v8::internal

 #endif  // V8_TARGET_ARCH_X64