Chromium Code Reviews

Diff: src/arm64/lithium-codegen-arm64.cc

Issue 378503003: ARM64: Use UntagSmi helpers and clean up assertions. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Replace one '32' with 'kWRegSizeInBits'. Created 6 years, 5 months ago
 // Copyright 2013 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "src/v8.h"

 #include "src/arm64/lithium-codegen-arm64.h"
 #include "src/arm64/lithium-gap-resolver-arm64.h"
 #include "src/code-stubs.h"
 #include "src/hydrogen-osr.h"
(...skipping 3507 matching lines...)
 }


 MemOperand LCodeGen::PrepareKeyedArrayOperand(Register base,
                                               Register elements,
                                               Register key,
                                               bool key_is_tagged,
                                               ElementsKind elements_kind,
                                               Representation representation,
                                               int base_offset) {
-  STATIC_ASSERT((kSmiValueSize == 32) && (kSmiShift == 32) && (kSmiTag == 0));
+  STATIC_ASSERT((kSmiValueSize == kWRegSizeInBits) && (kSmiTag == 0));
   int element_size_shift = ElementsKindToShiftSize(elements_kind);

   // Even though the HLoad/StoreKeyed instructions force the input
   // representation for the key to be an integer, the input gets replaced during
   // bounds check elimination with the index argument to the bounds check, which
   // can be tagged, so that case must be handled here, too.
   if (key_is_tagged) {
     __ Add(base, elements, Operand::UntagSmiAndScale(key, element_size_shift));
     if (representation.IsInteger32()) {
       ASSERT(elements_kind == FAST_SMI_ELEMENTS);
-      // Read or write only the most-significant 32 bits in the case of fast smi
-      // arrays.
+      // Read or write only the smi payload in the case of fast smi arrays.
       return UntagSmiMemOperand(base, base_offset);
     } else {
       return MemOperand(base, base_offset);
     }
   } else {
     // Sign extend key because it could be a 32-bit negative value or contain
     // garbage in the top 32-bits. The address computation happens in 64-bit.
     ASSERT((element_size_shift >= 0) && (element_size_shift <= 4));
     if (representation.IsInteger32()) {
       ASSERT(elements_kind == FAST_SMI_ELEMENTS);
-      // Read or write only the most-significant 32 bits in the case of fast smi
-      // arrays.
+      // Read or write only the smi payload in the case of fast smi arrays.
       __ Add(base, elements, Operand(key, SXTW, element_size_shift));
       return UntagSmiMemOperand(base, base_offset);
     } else {
       __ Add(base, elements, base_offset);
       return MemOperand(base, key, SXTW, element_size_shift);
     }
   }
 }


(...skipping 42 matching lines...)
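A note on the PrepareKeyedArrayOperand hunk above: the helper produces one of two element addresses depending on whether the key register still holds a tagged smi (bounds check elimination can reintroduce a tagged index) or a raw int32 whose upper 32 bits may be garbage. Below is a minimal standalone sketch, not V8 code, of the two computations under the assumed arm64 smi layout (32-bit payload in the upper half of a 64-bit word, kSmiShift == 32, kSmiTag == 0); names such as AddressFromTaggedKey are made up for illustration.

#include <cassert>
#include <cstdint>

constexpr int kSmiShiftSketch = 32;  // payload lives in the upper 32 bits

// Tagged key: untag (arithmetic shift right by the smi shift) and scale by
// the element size, mirroring Operand::UntagSmiAndScale(key, shift).
uint64_t AddressFromTaggedKey(uint64_t elements, int64_t tagged_key,
                              int element_size_shift, int base_offset) {
  int64_t index = tagged_key >> kSmiShiftSketch;
  return elements + (static_cast<uint64_t>(index) << element_size_shift) +
         base_offset;
}

// Untagged key: the W register may hold a negative value or garbage in its
// top 32 bits, so it is sign-extended from 32 bits (modelling SXTW) first.
uint64_t AddressFromInt32Key(uint64_t elements, uint64_t key_register,
                             int element_size_shift, int base_offset) {
  int64_t index = static_cast<int32_t>(key_register);  // SXTW
  return elements + (static_cast<uint64_t>(index) << element_size_shift) +
         base_offset;
}

int main() {
  const uint64_t elements = 0x10000;
  const int shift = 3;   // 8-byte (tagged pointer) elements
  const int offset = 16;
  const int64_t tagged_key = int64_t{5} << kSmiShiftSketch;  // smi 5
  const uint64_t w_key = 0xdeadbeef00000005ull;  // int32 5, dirty upper half
  assert(AddressFromTaggedKey(elements, tagged_key, shift, offset) ==
         AddressFromInt32Key(elements, w_key, shift, offset));
  return 0;
}

Both paths land on the same address for the same logical index; they differ only in how the index is recovered from the register.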
   MemOperand mem_op;

   Representation representation = instr->hydrogen()->representation();
   if (instr->key()->IsConstantOperand()) {
     ASSERT(instr->temp() == NULL);
     LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
     int offset = instr->base_offset() +
         ToInteger32(const_operand) * kPointerSize;
     if (representation.IsInteger32()) {
       ASSERT(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
-      STATIC_ASSERT((kSmiValueSize == 32) && (kSmiShift == 32) &&
-                    (kSmiTag == 0));
+      STATIC_ASSERT((kSmiValueSize == kWRegSizeInBits) && (kSmiTag == 0));
       mem_op = UntagSmiMemOperand(elements, offset);
     } else {
       mem_op = MemOperand(elements, offset);
     }
   } else {
     Register load_base = ToRegister(instr->temp());
     Register key = ToRegister(instr->key());
     bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();

     mem_op = PrepareKeyedArrayOperand(load_base, elements, key, key_is_tagged,
(...skipping 49 matching lines...)
     source = object;
   } else {
     // Load the properties array, using result as a scratch register.
     __ Ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
     source = result;
   }

   if (access.representation().IsSmi() &&
       instr->hydrogen()->representation().IsInteger32()) {
     // Read int value directly from upper half of the smi.
-    STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0);
+    STATIC_ASSERT((kSmiValueSize == kWRegSizeInBits) && (kSmiTag == 0));
     __ Load(result, UntagSmiFieldMemOperand(source, offset),
             Representation::Integer32());
   } else {
     __ Load(result, FieldMemOperand(source, offset), access.representation());
   }
 }


 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
   ASSERT(ToRegister(instr->context()).is(cp));
(...skipping 1582 matching lines...)
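On the named-field load hunk above: the UntagSmiFieldMemOperand path reads the int32 value straight out of the upper half of the smi field, which is exactly what the rewritten assertion (kSmiValueSize == kWRegSizeInBits and kSmiTag == 0) is guarding. A rough standalone sketch of that idea follows, assuming a little-endian target where the payload half sits 4 bytes past the field address; LoadUntaggedSmi is an illustrative name, not the real helper.

#include <cassert>
#include <cstdint>
#include <cstring>

// Read the 32-bit payload of a smi slot without untagging in a register:
// on a little-endian target the payload is simply the word at +4 bytes.
int32_t LoadUntaggedSmi(const void* field_address) {
  int32_t payload;
  std::memcpy(&payload, static_cast<const char*>(field_address) + 4,
              sizeof(payload));
  return payload;
}

int main() {
  // smi encoding of -42: payload in the upper 32 bits, zero tag below.
  const uint64_t smi = static_cast<uint64_t>(static_cast<uint32_t>(-42)) << 32;
  assert(LoadUntaggedSmi(&smi) == -42);
  return 0;
}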

   Representation representation = instr->hydrogen()->value()->representation();
   if (instr->key()->IsConstantOperand()) {
     LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
     int offset = instr->base_offset() +
         ToInteger32(const_operand) * kPointerSize;
     store_base = elements;
     if (representation.IsInteger32()) {
       ASSERT(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
       ASSERT(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
-      STATIC_ASSERT((kSmiValueSize == 32) && (kSmiShift == 32) &&
-                    (kSmiTag == 0));
+      STATIC_ASSERT((kSmiValueSize == kWRegSizeInBits) && (kSmiTag == 0));
       mem_op = UntagSmiMemOperand(store_base, offset);
     } else {
       mem_op = MemOperand(store_base, offset);
     }
   } else {
     store_base = scratch;
     key = ToRegister(instr->key());
     bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();

     mem_op = PrepareKeyedArrayOperand(store_base, elements, key, key_is_tagged,
(...skipping 98 matching lines...)
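On the keyed-store hunk above: because the tag is zero and occupies the low half of the word, a W-sized store of just the payload into an already-initialized smi slot (the STORE_TO_INITIALIZED_ENTRY case) leaves a well-formed smi behind. The following standalone sketch of that invariant again assumes little-endian layout; StoreUntaggedSmi is an illustrative name, not the real helper.

#include <cassert>
#include <cstdint>
#include <cstring>

// Overwrite only the 32-bit payload half of an initialized smi slot; the
// low half keeps its zero tag, so the slot still holds a valid smi.
void StoreUntaggedSmi(void* field_address, int32_t value) {
  std::memcpy(static_cast<char*>(field_address) + 4, &value, sizeof(value));
}

int main() {
  uint64_t slot = static_cast<uint64_t>(7) << 32;  // initialized with smi 7
  StoreUntaggedSmi(&slot, 123);
  assert(slot == (static_cast<uint64_t>(123) << 32));  // now smi 123, tag 0
  return 0;
}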
     Register temp0 = ToRegister(instr->temp0());
     __ Ldr(temp0, FieldMemOperand(destination, offset));
     __ AssertSmi(temp0);
     // If destination aliased temp0, restore it to the address calculated
     // earlier.
     if (destination.Is(temp0)) {
       ASSERT(!access.IsInobject());
       __ Ldr(destination, FieldMemOperand(object, JSObject::kPropertiesOffset));
     }
 #endif
-    STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0);
+    STATIC_ASSERT((kSmiValueSize == kWRegSizeInBits) && (kSmiTag == 0));
     __ Store(value, UntagSmiFieldMemOperand(destination, offset),
              Representation::Integer32());
   } else {
     __ Store(value, FieldMemOperand(destination, offset), representation);
   }
   if (instr->hydrogen()->NeedsWriteBarrier()) {
     __ RecordWriteField(destination,
                         offset,
                         value,                       // Clobbered.
                         ToRegister(instr->temp1()),  // Clobbered.
(...skipping 622 matching lines...)
   Handle<ScopeInfo> scope_info = instr->scope_info();
   __ Push(scope_info);
   __ Push(ToRegister(instr->function()));
   CallRuntime(Runtime::kPushBlockContext, 2, instr);
   RecordSafepoint(Safepoint::kNoLazyDeopt);
 }



 } }  // namespace v8::internal