Chromium Code Reviews

Side by Side Diff: src/x64/lithium-codegen-x64.cc

Issue 228073004: Guard 32-bit SMI load/store optimization with SmiValuesAre32Bits predicate. (Closed)
Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 6 years, 8 months ago
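Context for the new guard: the fast paths below read or write the untagged Integer32 value of a smi field or element directly at offset + kPointerSize / 2, i.e. in the upper half of the 64-bit slot. That layout is only valid when smi values are 32 bits wide (kSmiTag == 0 and kSmiTagSize + kSmiShiftSize == 32, as the assertions in the code check); with 31-bit smis the payload does not occupy the upper half. Each optimized path is therefore gated on SmiValuesAre32Bits(), and the compile-time STATIC_ASSERT on the shift width becomes a runtime ASSERT inside the guarded branch, presumably because the condition no longer holds in every build configuration. The following is a minimal standalone sketch of the layout assumption, not V8 code; the helper name and the little-endian byte handling are illustrative assumptions.

// Sketch (not V8 code): with 32-bit smi values on x64, a smi is stored as
// (value << 32), so the untagged int32 payload sits in the upper half of
// the 64-bit word and can be loaded with a 4-byte access at offset +4.
#include <cassert>
#include <cstdint>
#include <cstring>

// Hypothetical helper: read the int32 payload of a 32-bit-smi slot by
// loading four bytes at half-word offset (kPointerSize / 2 == 4 on x64,
// little-endian assumed).
int32_t LoadInt32FromSmiSlot(const uint8_t* slot) {
  int32_t value;
  std::memcpy(&value, slot + 4, sizeof(value));
  return value;
}

int main() {
  // Encode 42 as a 32-bit-payload smi: tag and shift fill the low 32 bits.
  uint64_t smi_word = static_cast<uint64_t>(42) << 32;
  uint8_t slot[8];
  std::memcpy(slot, &smi_word, sizeof(smi_word));
  // Holds only under the SmiValuesAre32Bits() layout; with 31-bit smis the
  // payload would not be in the upper half and this shortcut would break.
  assert(LoadInt32FromSmiSlot(slot) == 42);
  return 0;
}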
1    // Copyright 2013 the V8 project authors. All rights reserved.
2    // Redistribution and use in source and binary forms, with or without
3    // modification, are permitted provided that the following conditions are
4    // met:
5    //
6    //     * Redistributions of source code must retain the above copyright
7    //       notice, this list of conditions and the following disclaimer.
8    //     * Redistributions in binary form must reproduce the above
9    //       copyright notice, this list of conditions and the following
10   //       disclaimer in the documentation and/or other materials provided
(...skipping 2878 matching lines...)
2889       return;
2890     }
2891
2892     Register result = ToRegister(instr->result());
2893     if (!access.IsInobject()) {
2894       __ movp(result, FieldOperand(object, JSObject::kPropertiesOffset));
2895       object = result;
2896     }
2897
2898     Representation representation = access.representation();
2899 -   if (representation.IsSmi() &&
2899 +   if (representation.IsSmi() && SmiValuesAre32Bits() &&
2900         instr->hydrogen()->representation().IsInteger32()) {
2901   #ifdef DEBUG
2902       Register scratch = kScratchRegister;
2903       __ Load(scratch, FieldOperand(object, offset), representation);
2904       __ AssertSmi(scratch);
2905   #endif
2906
2907       // Read int value directly from upper half of the smi.
2908       STATIC_ASSERT(kSmiTag == 0);
2909 -     STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
2909 +     ASSERT(kSmiTagSize + kSmiShiftSize == 32);
2910       offset += kPointerSize / 2;
2911       representation = Representation::Integer32();
2912     }
2913     __ Load(result, FieldOperand(object, offset), representation);
2914   }
2915
2916
2917   void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
2918     ASSERT(ToRegister(instr->context()).is(rsi));
2919     ASSERT(ToRegister(instr->object()).is(rax));
(...skipping 183 matching lines...)
3103
3104
3105   void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
3106     HLoadKeyed* hinstr = instr->hydrogen();
3107     Register result = ToRegister(instr->result());
3108     LOperand* key = instr->key();
3109     bool requires_hole_check = hinstr->RequiresHoleCheck();
3110     int offset = FixedArray::kHeaderSize - kHeapObjectTag;
3111     Representation representation = hinstr->representation();
3112
3113 -   if (representation.IsInteger32() &&
3113 +   if (representation.IsInteger32() && SmiValuesAre32Bits() &&
3114         hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
3115       ASSERT(!requires_hole_check);
3116   #ifdef DEBUG
3117       Register scratch = kScratchRegister;
3118       __ Load(scratch,
3119               BuildFastArrayOperand(instr->elements(),
3120                                     key,
3121                                     FAST_ELEMENTS,
3122                                     offset,
3123                                     instr->additional_index()),
3124               Representation::Smi());
3125       __ AssertSmi(scratch);
3126   #endif
3127       // Read int value directly from upper half of the smi.
3128       STATIC_ASSERT(kSmiTag == 0);
3129 -     STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
3129 +     ASSERT(kSmiTagSize + kSmiShiftSize == 32);
3130       offset += kPointerSize / 2;
3131     }
3132
3133     __ Load(result,
3134             BuildFastArrayOperand(instr->elements(),
3135                                   key,
3136                                   FAST_ELEMENTS,
3137                                   offset,
3138                                   instr->additional_index()),
3139             representation);
(...skipping 887 matching lines...)
4027     }
4028   }
4029
4030   // Do the store.
4031   Register write_register = object;
4032   if (!access.IsInobject()) {
4033     write_register = ToRegister(instr->temp());
4034     __ movp(write_register, FieldOperand(object, JSObject::kPropertiesOffset));
4035   }
4036
4037 - if (representation.IsSmi() &&
4037 + if (representation.IsSmi() && SmiValuesAre32Bits() &&
4038       hinstr->value()->representation().IsInteger32()) {
4039     ASSERT(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
4040 #ifdef DEBUG
4041     Register scratch = kScratchRegister;
4042     __ Load(scratch, FieldOperand(write_register, offset), representation);
4043     __ AssertSmi(scratch);
4044 #endif
4045     // Store int value directly to upper half of the smi.
4046     STATIC_ASSERT(kSmiTag == 0);
4047 -   STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
4047 +   ASSERT(kSmiTagSize + kSmiShiftSize == 32);
4048     offset += kPointerSize / 2;
4049     representation = Representation::Integer32();
4050   }
4051
4052   Operand operand = FieldOperand(write_register, offset);
4053
4054   if (instr->value()->IsRegister()) {
4055     Register value = ToRegister(instr->value());
4056     __ Store(operand, value, representation);
4057   } else {
(...skipping 192 matching lines...)
4250     __ movsd(double_store_operand, value);
4251   }
4252
4253
4254   void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
4255     HStoreKeyed* hinstr = instr->hydrogen();
4256     LOperand* key = instr->key();
4257     int offset = FixedArray::kHeaderSize - kHeapObjectTag;
4258     Representation representation = hinstr->value()->representation();
4259
4260 -   if (representation.IsInteger32()) {
4260 +   if (representation.IsInteger32() && SmiValuesAre32Bits()) {
4261       ASSERT(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
4262       ASSERT(hinstr->elements_kind() == FAST_SMI_ELEMENTS);
4263   #ifdef DEBUG
4264       Register scratch = kScratchRegister;
4265       __ Load(scratch,
4266               BuildFastArrayOperand(instr->elements(),
4267                                     key,
4268                                     FAST_ELEMENTS,
4269                                     offset,
4270                                     instr->additional_index()),
4271               Representation::Smi());
4272       __ AssertSmi(scratch);
4273   #endif
4274       // Store int value directly to upper half of the smi.
4275       STATIC_ASSERT(kSmiTag == 0);
4276 -     STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
4276 +     ASSERT(kSmiTagSize + kSmiShiftSize == 32);
4277       offset += kPointerSize / 2;
4278     }
4279
4280     Operand operand =
4281         BuildFastArrayOperand(instr->elements(),
4282                               key,
4283                               FAST_ELEMENTS,
4284                               offset,
4285                               instr->additional_index());
4286
(...skipping 1394 matching lines...)
5681     __ bind(deferred->exit());
5682     __ bind(&done);
5683   }
5684
5685
5686   #undef __
5687
5688   } }  // namespace v8::internal
5689
5690   #endif  // V8_TARGET_ARCH_X64