Chromium Code Reviews

Side by Side Diff: src/arm/lithium-codegen-arm.cc

Issue 153913002: A64: Synchronize with r16756. (Closed) Base URL: https://v8.googlecode.com/svn/branches/experimental/a64
Patch Set: Created 6 years, 10 months ago
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 1365 matching lines...)
1376 // it gets implemented. 1376 // it gets implemented.
1377 __ mul(scratch, result, ip); 1377 __ mul(scratch, result, ip);
1378 __ sub(remainder, dividend, scratch); 1378 __ sub(remainder, dividend, scratch);
1379 } 1379 }
1380 } 1380 }
1381 } 1381 }
1382 1382
1383 1383
1384 void LCodeGen::DoDivI(LDivI* instr) { 1384 void LCodeGen::DoDivI(LDivI* instr) {
1385 if (instr->hydrogen()->HasPowerOf2Divisor()) { 1385 if (instr->hydrogen()->HasPowerOf2Divisor()) {
1386 Register dividend = ToRegister(instr->left()); 1386 const Register dividend = ToRegister(instr->left());
1387 const Register result = ToRegister(instr->result());
1387 int32_t divisor = instr->hydrogen()->right()->GetInteger32Constant(); 1388 int32_t divisor = instr->hydrogen()->right()->GetInteger32Constant();
1388 int32_t test_value = 0; 1389 int32_t test_value = 0;
1389 int32_t power = 0; 1390 int32_t power = 0;
1390 1391
1391 if (divisor > 0) { 1392 if (divisor > 0) {
1392 test_value = divisor - 1; 1393 test_value = divisor - 1;
1393 power = WhichPowerOf2(divisor); 1394 power = WhichPowerOf2(divisor);
1394 } else { 1395 } else {
1395 // Check for (0 / -x) that will produce negative zero. 1396 // Check for (0 / -x) that will produce negative zero.
1396 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 1397 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1397 __ tst(dividend, Operand(dividend)); 1398 __ cmp(dividend, Operand::Zero());
1398 DeoptimizeIf(eq, instr->environment()); 1399 DeoptimizeIf(eq, instr->environment());
1399 } 1400 }
1400 // Check for (kMinInt / -1). 1401 // Check for (kMinInt / -1).
1401 if (divisor == -1 && instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { 1402 if (divisor == -1 && instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1402 __ cmp(dividend, Operand(kMinInt)); 1403 __ cmp(dividend, Operand(kMinInt));
1403 DeoptimizeIf(eq, instr->environment()); 1404 DeoptimizeIf(eq, instr->environment());
1404 } 1405 }
1405 test_value = - divisor - 1; 1406 test_value = - divisor - 1;
1406 power = WhichPowerOf2(-divisor); 1407 power = WhichPowerOf2(-divisor);
1407 } 1408 }
1408 1409
1409 if (test_value != 0) { 1410 if (test_value != 0) {
1410 if (instr->hydrogen()->CheckFlag( 1411 if (instr->hydrogen()->CheckFlag(
1411 HInstruction::kAllUsesTruncatingToInt32)) { 1412 HInstruction::kAllUsesTruncatingToInt32)) {
1412 __ cmp(dividend, Operand(0)); 1413 __ sub(result, dividend, Operand::Zero(), SetCC);
1413 __ rsb(dividend, dividend, Operand(0), LeaveCC, lt); 1414 __ rsb(result, result, Operand::Zero(), LeaveCC, lt);
1414 __ mov(dividend, Operand(dividend, ASR, power)); 1415 __ mov(result, Operand(result, ASR, power));
1415 if (divisor > 0) __ rsb(dividend, dividend, Operand(0), LeaveCC, lt); 1416 if (divisor > 0) __ rsb(result, result, Operand::Zero(), LeaveCC, lt);
1416 if (divisor < 0) __ rsb(dividend, dividend, Operand(0), LeaveCC, gt); 1417 if (divisor < 0) __ rsb(result, result, Operand::Zero(), LeaveCC, gt);
1417 return; // Don't fall through to "__ rsb" below. 1418 return; // Don't fall through to "__ rsb" below.
1418 } else { 1419 } else {
1419 // Deoptimize if remainder is not 0. 1420 // Deoptimize if remainder is not 0.
1420 __ tst(dividend, Operand(test_value)); 1421 __ tst(dividend, Operand(test_value));
1421 DeoptimizeIf(ne, instr->environment()); 1422 DeoptimizeIf(ne, instr->environment());
1422 __ mov(dividend, Operand(dividend, ASR, power)); 1423 __ mov(result, Operand(dividend, ASR, power));
1424 if (divisor < 0) __ rsb(result, result, Operand(0));
1425 }
1426 } else {
1427 if (divisor < 0) {
1428 __ rsb(result, dividend, Operand(0));
1429 } else {
1430 __ Move(result, dividend);
1423 } 1431 }
1424 } 1432 }
1425 if (divisor < 0) __ rsb(dividend, dividend, Operand(0));
1426 1433
1427 return; 1434 return;
1428 } 1435 }
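Note on the power-of-2 path above: when all uses truncate to int32, the generated code negates a negative dividend, shifts right by the power, and restores the sign, because a plain arithmetic shift would round toward negative infinity instead of toward zero. A minimal C++ sketch of that computation (a hypothetical standalone helper, not code from this patch):

#include <stdint.h>

// Hypothetical helper. Assumes divisor is a non-zero power of two and that
// the kMinInt / -1 case has already been rejected (the patch deoptimizes
// on it before reaching this point).
static int32_t DivByPowerOf2Truncating(int32_t dividend, int32_t divisor) {
  uint32_t abs_divisor = divisor > 0 ? (uint32_t)divisor : -(uint32_t)divisor;
  int power = 0;
  while ((abs_divisor >> power) != 1u) power++;
  // Shift the magnitude, then restore the sign: an arithmetic shift of a
  // negative dividend would round toward -infinity, not toward zero.
  uint32_t magnitude = dividend < 0 ? -(uint32_t)dividend : (uint32_t)dividend;
  int32_t quotient = (int32_t)(magnitude >> power);
  if ((dividend < 0) != (divisor < 0)) quotient = -quotient;
  return quotient;
}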
1429 1436
1430 const Register left = ToRegister(instr->left()); 1437 const Register left = ToRegister(instr->left());
1431 const Register right = ToRegister(instr->right()); 1438 const Register right = ToRegister(instr->right());
1432 const Register result = ToRegister(instr->result()); 1439 const Register result = ToRegister(instr->result());
1433 1440
1434 // Check for x / 0. 1441 // Check for x / 0.
1435 if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) { 1442 if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
1436 __ cmp(right, Operand::Zero()); 1443 __ cmp(right, Operand::Zero());
1437 DeoptimizeIf(eq, instr->environment()); 1444 DeoptimizeIf(eq, instr->environment());
1438 } 1445 }
1439 1446
1440 // Check for (0 / -x) that will produce negative zero. 1447 // Check for (0 / -x) that will produce negative zero.
1441 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 1448 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1442 Label left_not_zero; 1449 Label positive;
1450 if (!instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
1451 // Do the test only if it hadn't been done above.
1452 __ cmp(right, Operand::Zero());
1453 }
1454 __ b(pl, &positive);
1443 __ cmp(left, Operand::Zero()); 1455 __ cmp(left, Operand::Zero());
1444 __ b(ne, &left_not_zero); 1456 DeoptimizeIf(eq, instr->environment());
1445 __ cmp(right, Operand::Zero()); 1457 __ bind(&positive);
1446 DeoptimizeIf(mi, instr->environment());
1447 __ bind(&left_not_zero);
1448 } 1458 }
1449 1459
1450 // Check for (kMinInt / -1). 1460 // Check for (kMinInt / -1).
1451 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { 1461 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1452 Label left_not_min_int; 1462 Label left_not_min_int;
1453 __ cmp(left, Operand(kMinInt)); 1463 __ cmp(left, Operand(kMinInt));
1454 __ b(ne, &left_not_min_int); 1464 __ b(ne, &left_not_min_int);
1455 __ cmp(right, Operand(-1)); 1465 __ cmp(right, Operand(-1));
1456 DeoptimizeIf(eq, instr->environment()); 1466 DeoptimizeIf(eq, instr->environment());
1457 __ bind(&left_not_min_int); 1467 __ bind(&left_not_min_int);
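The deoptimization checks above reflect JavaScript semantics that cannot be represented in an int32 result: x / 0, 0 / -x (which is -0), and kMinInt / -1 (which overflows). A rough sketch of those conditions (illustrative only, not the patch's code):

#include <limits.h>
#include <stdint.h>

// Returns false in exactly the cases where the generated code deoptimizes,
// assuming the corresponding hydrogen flags are set.
static bool Int32DivideStaysInt32(int32_t left, int32_t right,
                                  bool bailout_on_minus_zero) {
  if (right == 0) return false;                        // x / 0
  if (bailout_on_minus_zero && left == 0 && right < 0)
    return false;                                      // 0 / -x would be -0
  if (left == INT32_MIN && right == -1) return false;  // kMinInt / -1 overflows
  return true;
}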
(...skipping 510 matching lines...)
1968 __ PrepareCallCFunction(2, scratch); 1978 __ PrepareCallCFunction(2, scratch);
1969 __ mov(r1, Operand(index)); 1979 __ mov(r1, Operand(index));
1970 __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2); 1980 __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
1971 __ bind(&done); 1981 __ bind(&done);
1972 } 1982 }
1973 } 1983 }
1974 1984
1975 1985
1976 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) { 1986 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
1977 Register string = ToRegister(instr->string()); 1987 Register string = ToRegister(instr->string());
1978 Register index = ToRegister(instr->index()); 1988 LOperand* index_op = instr->index();
1979 Register value = ToRegister(instr->value()); 1989 Register value = ToRegister(instr->value());
1990 Register scratch = scratch0();
1980 String::Encoding encoding = instr->encoding(); 1991 String::Encoding encoding = instr->encoding();
1981 1992
1982 if (FLAG_debug_code) { 1993 if (FLAG_debug_code) {
1983 __ ldr(ip, FieldMemOperand(string, HeapObject::kMapOffset)); 1994 __ ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
1984 __ ldrb(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset)); 1995 __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1985 1996
1986 __ and_(ip, ip, Operand(kStringRepresentationMask | kStringEncodingMask)); 1997 __ and_(scratch, scratch,
1998 Operand(kStringRepresentationMask | kStringEncodingMask));
1987 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; 1999 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
1988 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; 2000 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
1989 __ cmp(ip, Operand(encoding == String::ONE_BYTE_ENCODING 2001 __ cmp(scratch, Operand(encoding == String::ONE_BYTE_ENCODING
1990 ? one_byte_seq_type : two_byte_seq_type)); 2002 ? one_byte_seq_type : two_byte_seq_type));
1991 __ Check(eq, kUnexpectedStringType); 2003 __ Check(eq, kUnexpectedStringType);
1992 } 2004 }
1993 2005
1994 __ add(ip, 2006 if (index_op->IsConstantOperand()) {
1995 string, 2007 int constant_index = ToInteger32(LConstantOperand::cast(index_op));
1996 Operand(SeqString::kHeaderSize - kHeapObjectTag)); 2008 if (encoding == String::ONE_BYTE_ENCODING) {
1997 if (encoding == String::ONE_BYTE_ENCODING) { 2009 __ strb(value,
1998 __ strb(value, MemOperand(ip, index)); 2010 FieldMemOperand(string, SeqString::kHeaderSize + constant_index));
2011 } else {
2012 __ strh(value,
2013 FieldMemOperand(string, SeqString::kHeaderSize + constant_index * 2));
2014 }
1999 } else { 2015 } else {
2000 // MemOperand with ip as the base register is not allowed for strh, so 2016 Register index = ToRegister(index_op);
2001 // we do the address calculation explicitly. 2017 if (encoding == String::ONE_BYTE_ENCODING) {
2002 __ add(ip, ip, Operand(index, LSL, 1)); 2018 __ add(scratch, string, Operand(index));
2003 __ strh(value, MemOperand(ip)); 2019 __ strb(value, FieldMemOperand(scratch, SeqString::kHeaderSize));
2020 } else {
2021 __ add(scratch, string, Operand(index, LSL, 1));
2022 __ strh(value, FieldMemOperand(scratch, SeqString::kHeaderSize));
2023 }
2004 } 2024 }
2005 } 2025 }
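The rewritten DoSeqStringSetChar computes the character slot from the tagged string pointer: the sequential-string header offset is adjusted for the heap-object tag, and two-byte strings scale the index by two. A sketch of the effective address, with constants that are illustrative assumptions rather than values taken from this patch:

#include <stdint.h>

// Illustrative constants; the real values come from V8's object layout.
static const intptr_t kAssumedHeapObjectTag = 1;
static const intptr_t kAssumedSeqStringHeaderSize = 12;

static intptr_t SeqStringCharAddress(intptr_t tagged_string, int32_t index,
                                     bool one_byte_encoding) {
  intptr_t char_size = one_byte_encoding ? 1 : 2;
  return tagged_string - kAssumedHeapObjectTag + kAssumedSeqStringHeaderSize +
         (intptr_t)index * char_size;
}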
2006 2026
2007 2027
2008 void LCodeGen::DoThrow(LThrow* instr) { 2028 void LCodeGen::DoThrow(LThrow* instr) {
2009 Register input_reg = EmitLoadRegister(instr->value(), ip); 2029 Register input_reg = EmitLoadRegister(instr->value(), ip);
2010 __ push(input_reg); 2030 __ push(input_reg);
2011 CallRuntime(Runtime::kThrow, 1, instr); 2031 CallRuntime(Runtime::kThrow, 1, instr);
2012 2032
2013 if (FLAG_debug_code) { 2033 if (FLAG_debug_code) {
(...skipping 1870 matching lines...)
3884 __ CallStub(&stub); 3904 __ CallStub(&stub);
3885 } else { 3905 } else {
3886 ASSERT(exponent_type.IsDouble()); 3906 ASSERT(exponent_type.IsDouble());
3887 MathPowStub stub(MathPowStub::DOUBLE); 3907 MathPowStub stub(MathPowStub::DOUBLE);
3888 __ CallStub(&stub); 3908 __ CallStub(&stub);
3889 } 3909 }
3890 } 3910 }
3891 3911
3892 3912
3893 void LCodeGen::DoRandom(LRandom* instr) { 3913 void LCodeGen::DoRandom(LRandom* instr) {
3894 class DeferredDoRandom V8_FINAL : public LDeferredCode { 3914 // Assert that the register size is indeed the size of each seed.
3895 public:
3896 DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
3897 : LDeferredCode(codegen), instr_(instr) { }
3898 virtual void Generate() V8_OVERRIDE { codegen()->DoDeferredRandom(instr_); }
3899 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
3900 private:
3901 LRandom* instr_;
3902 };
3903
3904 DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr);
3905
3906 // Having marked this instruction as a call we can use any
3907 // registers.
3908 ASSERT(ToDoubleRegister(instr->result()).is(d7));
3909 ASSERT(ToRegister(instr->global_object()).is(r0));
3910
3911 static const int kSeedSize = sizeof(uint32_t); 3915 static const int kSeedSize = sizeof(uint32_t);
3912 STATIC_ASSERT(kPointerSize == kSeedSize); 3916 STATIC_ASSERT(kPointerSize == kSeedSize);
3913 3917
3914 __ ldr(r0, FieldMemOperand(r0, GlobalObject::kNativeContextOffset)); 3918 // Load native context
3919 Register global_object = ToRegister(instr->global_object());
3920 Register native_context = global_object;
3921 __ ldr(native_context, FieldMemOperand(
3922 global_object, GlobalObject::kNativeContextOffset));
3923
3924 // Load state (FixedArray of the native context's random seeds)
3915 static const int kRandomSeedOffset = 3925 static const int kRandomSeedOffset =
3916 FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize; 3926 FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
3917 __ ldr(r2, FieldMemOperand(r0, kRandomSeedOffset)); 3927 Register state = native_context;
3918 // r2: FixedArray of the native context's random seeds 3928 __ ldr(state, FieldMemOperand(native_context, kRandomSeedOffset));
3919 3929
3920 // Load state[0]. 3930 // Load state[0].
3921 __ ldr(r1, FieldMemOperand(r2, ByteArray::kHeaderSize)); 3931 Register state0 = ToRegister(instr->scratch());
3922 __ cmp(r1, Operand::Zero()); 3932 __ ldr(state0, FieldMemOperand(state, ByteArray::kHeaderSize));
3923 __ b(eq, deferred->entry());
3924 // Load state[1]. 3933 // Load state[1].
3925 __ ldr(r0, FieldMemOperand(r2, ByteArray::kHeaderSize + kSeedSize)); 3934 Register state1 = ToRegister(instr->scratch2());
3926 // r1: state[0]. 3935 __ ldr(state1, FieldMemOperand(state, ByteArray::kHeaderSize + kSeedSize));
3927 // r0: state[1].
3928 3936
3929 // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16) 3937 // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
3930 __ and_(r3, r1, Operand(0xFFFF)); 3938 Register scratch3 = ToRegister(instr->scratch3());
3931 __ mov(r4, Operand(18273)); 3939 Register scratch4 = scratch0();
3932 __ mul(r3, r3, r4); 3940 __ and_(scratch3, state0, Operand(0xFFFF));
3933 __ add(r1, r3, Operand(r1, LSR, 16)); 3941 __ mov(scratch4, Operand(18273));
3942 __ mul(scratch3, scratch3, scratch4);
3943 __ add(state0, scratch3, Operand(state0, LSR, 16));
3934 // Save state[0]. 3944 // Save state[0].
3935 __ str(r1, FieldMemOperand(r2, ByteArray::kHeaderSize)); 3945 __ str(state0, FieldMemOperand(state, ByteArray::kHeaderSize));
3936 3946
3937 // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16) 3947 // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
3938 __ and_(r3, r0, Operand(0xFFFF)); 3948 __ and_(scratch3, state1, Operand(0xFFFF));
3939 __ mov(r4, Operand(36969)); 3949 __ mov(scratch4, Operand(36969));
3940 __ mul(r3, r3, r4); 3950 __ mul(scratch3, scratch3, scratch4);
3941 __ add(r0, r3, Operand(r0, LSR, 16)); 3951 __ add(state1, scratch3, Operand(state1, LSR, 16));
3942 // Save state[1]. 3952 // Save state[1].
3943 __ str(r0, FieldMemOperand(r2, ByteArray::kHeaderSize + kSeedSize)); 3953 __ str(state1, FieldMemOperand(state, ByteArray::kHeaderSize + kSeedSize));
3944 3954
3945 // Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF) 3955 // Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
3946 __ and_(r0, r0, Operand(0x3FFFF)); 3956 Register random = scratch4;
3947 __ add(r0, r0, Operand(r1, LSL, 14)); 3957 __ and_(random, state1, Operand(0x3FFFF));
3958 __ add(random, random, Operand(state0, LSL, 14));
3948 3959
3949 __ bind(deferred->exit());
3950 // 0x41300000 is the top half of 1.0 x 2^20 as a double. 3960 // 0x41300000 is the top half of 1.0 x 2^20 as a double.
3951 // Create this constant using mov/orr to avoid PC relative load. 3961 // Create this constant using mov/orr to avoid PC relative load.
3952 __ mov(r1, Operand(0x41000000)); 3962 __ mov(scratch3, Operand(0x41000000));
3953 __ orr(r1, r1, Operand(0x300000)); 3963 __ orr(scratch3, scratch3, Operand(0x300000));
3954 // Move 0x41300000xxxxxxxx (x = random bits) to VFP. 3964 // Move 0x41300000xxxxxxxx (x = random bits) to VFP.
3955 __ vmov(d7, r0, r1); 3965 DwVfpRegister result = ToDoubleRegister(instr->result());
3966 __ vmov(result, random, scratch3);
3956 // Move 0x4130000000000000 to VFP. 3967 // Move 0x4130000000000000 to VFP.
3957 __ mov(r0, Operand::Zero()); 3968 __ mov(scratch4, Operand::Zero());
3958 __ vmov(d8, r0, r1); 3969 DwVfpRegister scratch5 = double_scratch0();
3959 // Subtract and store the result in the heap number. 3970 __ vmov(scratch5, scratch4, scratch3);
3960 __ vsub(d7, d7, d8); 3971 __ vsub(result, result, scratch5);
3961 } 3972 }
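DoRandom keeps the same generator as before, now written against named scratch registers instead of fixed r0-r4: two 16-bit multiply-with-carry steps update the seeds, 32 random bits are assembled, and the bits become a double in [0, 1) by placing them in the low mantissa word of 1.0 * 2^20 and subtracting 2^20. A standalone sketch of the same arithmetic (assumed C++ form, not the patch's code):

#include <stdint.h>
#include <string.h>

static double RandomFromSeeds(uint32_t* state0, uint32_t* state1) {
  // state[i] = multiplier * (state[i] & 0xFFFF) + (state[i] >> 16)
  *state0 = 18273u * (*state0 & 0xFFFF) + (*state0 >> 16);
  *state1 = 36969u * (*state1 & 0xFFFF) + (*state1 >> 16);
  uint32_t random_bits = (*state0 << 14) + (*state1 & 0x3FFFF);

  // 0x4130000000000000 is 1.0 * 2^20; with the random bits in the low word,
  // the difference below is random_bits / 2^32, a double in [0, 1).
  uint64_t bits = ((uint64_t)0x41300000 << 32) | random_bits;
  double result;
  memcpy(&result, &bits, sizeof(result));
  return result - 1048576.0;  // subtract 2^20
}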
3962 3973
3963 3974
3964 void LCodeGen::DoDeferredRandom(LRandom* instr) {
3965 __ PrepareCallCFunction(1, scratch0());
3966 __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
3967 // Return value is in r0.
3968 }
3969
3970
3971 void LCodeGen::DoMathExp(LMathExp* instr) { 3975 void LCodeGen::DoMathExp(LMathExp* instr) {
3972 DwVfpRegister input = ToDoubleRegister(instr->value()); 3976 DwVfpRegister input = ToDoubleRegister(instr->value());
3973 DwVfpRegister result = ToDoubleRegister(instr->result()); 3977 DwVfpRegister result = ToDoubleRegister(instr->result());
3974 DwVfpRegister double_scratch1 = ToDoubleRegister(instr->double_temp()); 3978 DwVfpRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
3975 DwVfpRegister double_scratch2 = double_scratch0(); 3979 DwVfpRegister double_scratch2 = double_scratch0();
3976 Register temp1 = ToRegister(instr->temp1()); 3980 Register temp1 = ToRegister(instr->temp1());
3977 Register temp2 = ToRegister(instr->temp2()); 3981 Register temp2 = ToRegister(instr->temp2());
3978 3982
3979 MathExpGenerator::EmitMathExp( 3983 MathExpGenerator::EmitMathExp(
3980 masm(), input, result, double_scratch1, double_scratch2, 3984 masm(), input, result, double_scratch1, double_scratch2,
(...skipping 1015 matching lines...)
4996 private: 5000 private:
4997 LTaggedToI* instr_; 5001 LTaggedToI* instr_;
4998 }; 5002 };
4999 5003
5000 LOperand* input = instr->value(); 5004 LOperand* input = instr->value();
5001 ASSERT(input->IsRegister()); 5005 ASSERT(input->IsRegister());
5002 ASSERT(input->Equals(instr->result())); 5006 ASSERT(input->Equals(instr->result()));
5003 5007
5004 Register input_reg = ToRegister(input); 5008 Register input_reg = ToRegister(input);
5005 5009
5006 DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr); 5010 if (instr->hydrogen()->value()->representation().IsSmi()) {
5011 __ SmiUntag(input_reg);
5012 } else {
5013 DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
5007 5014
5008 // Optimistically untag the input. 5015 // Optimistically untag the input.
5009 // If the input is a HeapObject, SmiUntag will set the carry flag. 5016 // If the input is a HeapObject, SmiUntag will set the carry flag.
5010 __ SmiUntag(input_reg, SetCC); 5017 __ SmiUntag(input_reg, SetCC);
5011 // Branch to deferred code if the input was tagged. 5018 // Branch to deferred code if the input was tagged.
5012 // The deferred code will take care of restoring the tag. 5019 // The deferred code will take care of restoring the tag.
5013 __ b(cs, deferred->entry()); 5020 __ b(cs, deferred->entry());
5014 __ bind(deferred->exit()); 5021 __ bind(deferred->exit());
5022 }
5015 } 5023 }
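The smi fast path added above leans on ARM's tagging scheme: a smi carries its payload shifted left by one with a zero tag bit, so SmiUntag with SetCC both untags the value and, through the carry flag, detects a heap-object pointer (whose low bit is one) that must go to the deferred path. A rough model of that test (not V8 code):

#include <stdint.h>

// Returns true and writes the untagged value when 'tagged' encodes a smi;
// returns false when the low bit marks a heap object, the case the
// generated code hands off to the deferred conversion.
static bool TrySmiUntag(int32_t tagged, int32_t* untagged) {
  bool is_heap_object = (tagged & 1) != 0;  // the bit SmiUntag shifts into C
  *untagged = tagged >> 1;
  return !is_heap_object;
}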
5016 5024
5017 5025
5018 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { 5026 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
5019 LOperand* input = instr->value(); 5027 LOperand* input = instr->value();
5020 ASSERT(input->IsRegister()); 5028 ASSERT(input->IsRegister());
5021 LOperand* result = instr->result(); 5029 LOperand* result = instr->result();
5022 ASSERT(result->IsDoubleRegister()); 5030 ASSERT(result->IsDoubleRegister());
5023 5031
5024 Register input_reg = ToRegister(input); 5032 Register input_reg = ToRegister(input);
(...skipping 768 matching lines...)
5793 __ sub(scratch, result, Operand::PointerOffsetFromSmiKey(index)); 5801 __ sub(scratch, result, Operand::PointerOffsetFromSmiKey(index));
5794 __ ldr(result, FieldMemOperand(scratch, 5802 __ ldr(result, FieldMemOperand(scratch,
5795 FixedArray::kHeaderSize - kPointerSize)); 5803 FixedArray::kHeaderSize - kPointerSize));
5796 __ bind(&done); 5804 __ bind(&done);
5797 } 5805 }
5798 5806
5799 5807
5800 #undef __ 5808 #undef __
5801 5809
5802 } } // namespace v8::internal 5810 } } // namespace v8::internal