OLD | NEW |
1 // Copyright 2010 the V8 project authors. All rights reserved. | 1 // Copyright 2010 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 1496 matching lines...)
1507 | 1507 |
1508 // Stack and frame now have 4 elements. | 1508 // Stack and frame now have 4 elements. |
1509 __ bind(&slow); | 1509 __ bind(&slow); |
1510 | 1510 |
1511 // Generic computation of x.apply(y, args) with no special optimization. | 1511 // Generic computation of x.apply(y, args) with no special optimization. |
1512 // Flip applicand.apply and applicand on the stack, so | 1512 // Flip applicand.apply and applicand on the stack, so |
1513 // applicand looks like the receiver of the applicand.apply call. | 1513 // applicand looks like the receiver of the applicand.apply call. |
1514 // Then process it as a normal function call. | 1514 // Then process it as a normal function call. |
1515 __ ldr(r0, MemOperand(sp, 3 * kPointerSize)); | 1515 __ ldr(r0, MemOperand(sp, 3 * kPointerSize)); |
1516 __ ldr(r1, MemOperand(sp, 2 * kPointerSize)); | 1516 __ ldr(r1, MemOperand(sp, 2 * kPointerSize)); |
1517 __ strd(r0, r1, MemOperand(sp, 2 * kPointerSize)); | 1517 __ Strd(r0, r1, MemOperand(sp, 2 * kPointerSize)); |
1518 | 1518 |
1519 CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS); | 1519 CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS); |
1520 frame_->CallStub(&call_function, 3); | 1520 frame_->CallStub(&call_function, 3); |
1521 // The function and its two arguments have been dropped. | 1521 // The function and its two arguments have been dropped. |
1522 frame_->Drop(); // Drop the receiver as well. | 1522 frame_->Drop(); // Drop the receiver as well. |
1523 frame_->EmitPush(r0); | 1523 frame_->EmitPush(r0); |
1524 // Stack now has 1 element: | 1524 // Stack now has 1 element: |
1525 // sp[0]: result | 1525 // sp[0]: result |
1526 __ bind(&done); | 1526 __ bind(&done); |
1527 | 1527 |
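The substantive change in this hunk (and throughout the CL) is the move from the raw ldrd/strd emitters to the capitalized MacroAssembler wrappers, which stay correct on pre-ARMv7 cores. Below is a minimal sketch of what the load-side wrapper is assumed to look like; the asserts, the CpuFeatures check, and MemOperand::set_offset are illustrative, not quoted from macro-assembler-arm.cc:

    // Sketch: load two consecutive words into an even/odd register pair,
    // using one ARMv7 ldrd when available and two plain ldr otherwise.
    void MacroAssembler::Ldrd(Register dst1, Register dst2,
                              const MemOperand& src, Condition cond) {
      ASSERT(src.rm().is(no_reg));              // immediate-offset addressing only
      ASSERT_EQ(0, dst1.code() % 2);            // ldrd wants an even first register
      ASSERT_EQ(dst1.code() + 1, dst2.code());  // ...and its odd successor
      if (CpuFeatures::IsSupported(ARMv7)) {
        CpuFeatures::Scope scope(ARMv7);
        ldrd(dst1, dst2, src, cond);
      } else {
        // Two word loads; if dst1 aliases the base register, load it second
        // so the address is not clobbered before the other half is read.
        MemOperand src2(src);
        src2.set_offset(src2.offset() + 4);
        if (dst1.is(src.rn())) {
          ldr(dst2, src2, cond);
          ldr(dst1, src, cond);
        } else {
          ldr(dst1, src, cond);
          ldr(dst2, src2, cond);
        }
      }
    }

Strd presumably mirrors this with two str instructions; the aliasing concern disappears there because stores only read their source registers.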
(...skipping 772 matching lines...)
2300 // sp[1] : array/enum cache length | 2300 // sp[1] : array/enum cache length |
2301 // sp[2] : array or enum cache | 2301 // sp[2] : array or enum cache |
2302 // sp[3] : 0 or map | 2302 // sp[3] : 0 or map |
2303 // sp[4] : enumerable | 2303 // sp[4] : enumerable |
2304 // Grab the current frame's height for the break and continue | 2304 // Grab the current frame's height for the break and continue |
2305 // targets only after all the state is pushed on the frame. | 2305 // targets only after all the state is pushed on the frame. |
2306 node->break_target()->SetExpectedHeight(); | 2306 node->break_target()->SetExpectedHeight(); |
2307 node->continue_target()->SetExpectedHeight(); | 2307 node->continue_target()->SetExpectedHeight(); |
2308 | 2308 |
2309 // Load the current count to r0, load the length to r1. | 2309 // Load the current count to r0, load the length to r1. |
2310 __ ldrd(r0, r1, frame_->ElementAt(0)); | 2310 __ Ldrd(r0, r1, frame_->ElementAt(0)); |
2311 __ cmp(r0, r1); // compare to the array length | 2311 __ cmp(r0, r1); // compare to the array length |
2312 node->break_target()->Branch(hs); | 2312 node->break_target()->Branch(hs); |
2313 | 2313 |
2314 // Get the i'th entry of the array. | 2314 // Get the i'th entry of the array. |
2315 __ ldr(r2, frame_->ElementAt(2)); | 2315 __ ldr(r2, frame_->ElementAt(2)); |
2316 __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 2316 __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
2317 __ ldr(r3, MemOperand(r2, r0, LSL, kPointerSizeLog2 - kSmiTagSize)); | 2317 __ ldr(r3, MemOperand(r2, r0, LSL, kPointerSizeLog2 - kSmiTagSize)); |
2318 | 2318 |
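The scaled-index load at 2317 leans on the smi encoding; a worked check of the shift amount, using the 32-bit constants this file assumes (kSmiTagSize == 1, kPointerSizeLog2 == 2):

    // r0 holds the loop index still encoded as a smi: smi(i) == i << 1.
    // The addressing mode shifts it left by kPointerSizeLog2 - kSmiTagSize:
    //   (i << 1) << (2 - 1)  ==  i << 2  ==  i * kPointerSize
    // which is exactly the byte offset of element i in the FixedArray, so
    // the index never needs to be untagged explicitly before the load.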
2319 // Get Map or 0. | 2319 // Get Map or 0. |
2320 __ ldr(r2, frame_->ElementAt(3)); | 2320 __ ldr(r2, frame_->ElementAt(3)); |
(...skipping 4051 matching lines...)
6372 // Load the double from rhs, tagged HeapNumber r0, to d6. | 6372 // Load the double from rhs, tagged HeapNumber r0, to d6. |
6373 __ sub(r7, r0, Operand(kHeapObjectTag)); | 6373 __ sub(r7, r0, Operand(kHeapObjectTag)); |
6374 __ vldr(d6, r7, HeapNumber::kValueOffset); | 6374 __ vldr(d6, r7, HeapNumber::kValueOffset); |
6375 } else { | 6375 } else { |
6376 __ push(lr); | 6376 __ push(lr); |
6377 // Convert lhs to a double in r2, r3. | 6377 // Convert lhs to a double in r2, r3. |
6378 __ mov(r7, Operand(r1)); | 6378 __ mov(r7, Operand(r1)); |
6379 ConvertToDoubleStub stub1(r3, r2, r7, r6); | 6379 ConvertToDoubleStub stub1(r3, r2, r7, r6); |
6380 __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET); | 6380 __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET); |
6381 // Load rhs to a double in r0, r1. | 6381 // Load rhs to a double in r0, r1. |
6382 __ ldrd(r0, r1, FieldMemOperand(r0, HeapNumber::kValueOffset)); | 6382 __ Ldrd(r0, r1, FieldMemOperand(r0, HeapNumber::kValueOffset)); |
6383 __ pop(lr); | 6383 __ pop(lr); |
6384 } | 6384 } |
6385 | 6385 |
6386 // We now have both loaded as doubles but we can skip the lhs nan check | 6386 // We now have both loaded as doubles but we can skip the lhs nan check |
6387 // since it's a smi. | 6387 // since it's a smi. |
6388 __ jmp(lhs_not_nan); | 6388 __ jmp(lhs_not_nan); |
6389 | 6389 |
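The sub(r7, r0, Operand(kHeapObjectTag)) before the vldr is an alignment fix, not a spare copy; summarized below under the standard V8 tagging scheme (kHeapObjectTag == 1):

    // vldr encodes its immediate offset in words, so the effective address
    // must be 4-byte aligned. A tagged HeapNumber pointer is the object's
    // address plus kHeapObjectTag, which breaks that alignment; untagging
    // into r7 first makes r7 + HeapNumber::kValueOffset word aligned. The
    // non-VFP path gets the same correction through FieldMemOperand, which
    // folds -kHeapObjectTag into the offset instead.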
6390 __ bind(&rhs_is_smi); | 6390 __ bind(&rhs_is_smi); |
6391 // Rhs is a smi. Check whether the non-smi lhs is a heap number. | 6391 // Rhs is a smi. Check whether the non-smi lhs is a heap number. |
6392 __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE); | 6392 __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE); |
(...skipping 14 matching lines...)
6407 CpuFeatures::Scope scope(VFP3); | 6407 CpuFeatures::Scope scope(VFP3); |
6408 // Load the double from lhs, tagged HeapNumber r1, to d7. | 6408 // Load the double from lhs, tagged HeapNumber r1, to d7. |
6409 __ sub(r7, r1, Operand(kHeapObjectTag)); | 6409 __ sub(r7, r1, Operand(kHeapObjectTag)); |
6410 __ vldr(d7, r7, HeapNumber::kValueOffset); | 6410 __ vldr(d7, r7, HeapNumber::kValueOffset); |
6411 __ mov(r7, Operand(r0, ASR, kSmiTagSize)); | 6411 __ mov(r7, Operand(r0, ASR, kSmiTagSize)); |
6412 __ vmov(s13, r7); | 6412 __ vmov(s13, r7); |
6413 __ vcvt_f64_s32(d6, s13); | 6413 __ vcvt_f64_s32(d6, s13); |
6414 } else { | 6414 } else { |
6415 __ push(lr); | 6415 __ push(lr); |
6416 // Load lhs to a double in r2, r3. | 6416 // Load lhs to a double in r2, r3. |
6417 __ ldrd(r2, r3, FieldMemOperand(r1, HeapNumber::kValueOffset)); | 6417 __ Ldrd(r2, r3, FieldMemOperand(r1, HeapNumber::kValueOffset)); |
6418 // Convert rhs to a double in r0, r1. | 6418 // Convert rhs to a double in r0, r1. |
6419 __ mov(r7, Operand(r0)); | 6419 __ mov(r7, Operand(r0)); |
6420 ConvertToDoubleStub stub2(r1, r0, r7, r6); | 6420 ConvertToDoubleStub stub2(r1, r0, r7, r6); |
6421 __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET); | 6421 __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET); |
6422 __ pop(lr); | 6422 __ pop(lr); |
6423 } | 6423 } |
6424 // Fall through to both_loaded_as_doubles. | 6424 // Fall through to both_loaded_as_doubles. |
6425 } | 6425 } |
6426 | 6426 |
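One detail in the VFP3 branch above that is easy to misread is the interplay of s13 and d6; a short note, assuming the standard VFP register file layout:

    // In VFP, double register d6 overlaps the single-precision pair s12:s13.
    // vmov(s13, r7) therefore plants the untagged integer inside d6's own
    // storage, and vcvt_f64_s32(d6, s13) reads s13 before it writes the
    // converted double over the full pair; that read-then-write order is
    // what makes the in-place conversion safe.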
6427 | 6427 |
(...skipping 143 matching lines...)
6571 | 6571 |
6572 // Both are heap numbers. Load them up then jump to the code we have | 6572 // Both are heap numbers. Load them up then jump to the code we have |
6573 // for that. | 6573 // for that. |
6574 if (CpuFeatures::IsSupported(VFP3)) { | 6574 if (CpuFeatures::IsSupported(VFP3)) { |
6575 CpuFeatures::Scope scope(VFP3); | 6575 CpuFeatures::Scope scope(VFP3); |
6576 __ sub(r7, r0, Operand(kHeapObjectTag)); | 6576 __ sub(r7, r0, Operand(kHeapObjectTag)); |
6577 __ vldr(d6, r7, HeapNumber::kValueOffset); | 6577 __ vldr(d6, r7, HeapNumber::kValueOffset); |
6578 __ sub(r7, r1, Operand(kHeapObjectTag)); | 6578 __ sub(r7, r1, Operand(kHeapObjectTag)); |
6579 __ vldr(d7, r7, HeapNumber::kValueOffset); | 6579 __ vldr(d7, r7, HeapNumber::kValueOffset); |
6580 } else { | 6580 } else { |
6581 __ ldrd(r2, r3, FieldMemOperand(r1, HeapNumber::kValueOffset)); | 6581 __ Ldrd(r2, r3, FieldMemOperand(r1, HeapNumber::kValueOffset)); |
6582 __ ldrd(r0, r1, FieldMemOperand(r0, HeapNumber::kValueOffset)); | 6582 __ Ldrd(r0, r1, FieldMemOperand(r0, HeapNumber::kValueOffset)); |
6583 } | 6583 } |
6584 __ jmp(both_loaded_as_doubles); | 6584 __ jmp(both_loaded_as_doubles); |
6585 } | 6585 } |
6586 | 6586 |
6587 | 6587 |
6588 // Fast negative check for symbol-to-symbol equality. | 6588 // Fast negative check for symbol-to-symbol equality. |
6589 static void EmitCheckForSymbols(MacroAssembler* masm, Label* slow) { | 6589 static void EmitCheckForSymbols(MacroAssembler* masm, Label* slow) { |
6590 // r2 is object type of r0. | 6590 // r2 is object type of r0. |
6591 // Ensure that no non-strings have the symbol bit set. | 6591 // Ensure that no non-strings have the symbol bit set. |
6592 ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE); | 6592 ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE); |
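The ASSERT at 6592 is what makes this negative check sound; the reasoning, spelled out under the instance-type layout the assert references:

    // String instance types all lie below kNotStringTag; every non-string
    // type has that bit set. If kNotStringTag + kIsSymbolMask exceeds
    // LAST_TYPE, no valid non-string type value can also carry the
    // kIsSymbolMask bit, so testing the symbol bit alone can never
    // misclassify a non-string as a symbol.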
(...skipping 356 matching lines...)
6949 if (mode_ == OVERWRITE_RIGHT) { | 6949 if (mode_ == OVERWRITE_RIGHT) { |
6950 __ mov(r5, Operand(r0)); // Overwrite this heap number. | 6950 __ mov(r5, Operand(r0)); // Overwrite this heap number. |
6951 } | 6951 } |
6952 if (use_fp_registers) { | 6952 if (use_fp_registers) { |
6953 CpuFeatures::Scope scope(VFP3); | 6953 CpuFeatures::Scope scope(VFP3); |
6954 // Load the double from tagged HeapNumber r0 to d7. | 6954 // Load the double from tagged HeapNumber r0 to d7. |
6955 __ sub(r7, r0, Operand(kHeapObjectTag)); | 6955 __ sub(r7, r0, Operand(kHeapObjectTag)); |
6956 __ vldr(d7, r7, HeapNumber::kValueOffset); | 6956 __ vldr(d7, r7, HeapNumber::kValueOffset); |
6957 } else { | 6957 } else { |
6958 // Calling convention says that second double is in r2 and r3. | 6958 // Calling convention says that second double is in r2 and r3. |
6959 __ ldrd(r2, r3, FieldMemOperand(r0, HeapNumber::kValueOffset)); | 6959 __ Ldrd(r2, r3, FieldMemOperand(r0, HeapNumber::kValueOffset)); |
6960 } | 6960 } |
6961 __ jmp(&finished_loading_r0); | 6961 __ jmp(&finished_loading_r0); |
6962 __ bind(&r0_is_smi); | 6962 __ bind(&r0_is_smi); |
6963 if (mode_ == OVERWRITE_RIGHT) { | 6963 if (mode_ == OVERWRITE_RIGHT) { |
6964 // We can't overwrite a Smi so get address of new heap number into r5. | 6964 // We can't overwrite a Smi so get address of new heap number into r5. |
6965 __ AllocateHeapNumber(r5, r6, r7, &slow); | 6965 __ AllocateHeapNumber(r5, r6, r7, &slow); |
6966 } | 6966 } |
6967 | 6967 |
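For context on the OVERWRITE_RIGHT branches above (a summary of the stub's modes, not quoted source): the generic binary-op stub can be told it owns one of its operand heap numbers and may reuse it for the result.

    // mode_ == OVERWRITE_RIGHT: r0's HeapNumber is dead after the operation,
    // so its storage is recycled as the result object (r5) and this path
    // allocates nothing. A smi operand owns no heap storage, hence the
    // r0_is_smi branch must still call AllocateHeapNumber, bailing out to
    // &slow if allocation fails. OVERWRITE_LEFT below is symmetric for r1.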
6968 if (use_fp_registers) { | 6968 if (use_fp_registers) { |
6969 CpuFeatures::Scope scope(VFP3); | 6969 CpuFeatures::Scope scope(VFP3); |
(...skipping 31 matching lines...)
7001 if (mode_ == OVERWRITE_LEFT) { | 7001 if (mode_ == OVERWRITE_LEFT) { |
7002 __ mov(r5, Operand(r1)); // Overwrite this heap number. | 7002 __ mov(r5, Operand(r1)); // Overwrite this heap number. |
7003 } | 7003 } |
7004 if (use_fp_registers) { | 7004 if (use_fp_registers) { |
7005 CpuFeatures::Scope scope(VFP3); | 7005 CpuFeatures::Scope scope(VFP3); |
7006 // Load the double from tagged HeapNumber r1 to d6. | 7006 // Load the double from tagged HeapNumber r1 to d6. |
7007 __ sub(r7, r1, Operand(kHeapObjectTag)); | 7007 __ sub(r7, r1, Operand(kHeapObjectTag)); |
7008 __ vldr(d6, r7, HeapNumber::kValueOffset); | 7008 __ vldr(d6, r7, HeapNumber::kValueOffset); |
7009 } else { | 7009 } else { |
7010 // Calling convention says that first double is in r0 and r1. | 7010 // Calling convention says that first double is in r0 and r1. |
7011 __ ldrd(r0, r1, FieldMemOperand(r1, HeapNumber::kValueOffset)); | 7011 __ Ldrd(r0, r1, FieldMemOperand(r1, HeapNumber::kValueOffset)); |
7012 } | 7012 } |
7013 __ jmp(&finished_loading_r1); | 7013 __ jmp(&finished_loading_r1); |
7014 __ bind(&r1_is_smi); | 7014 __ bind(&r1_is_smi); |
7015 if (mode_ == OVERWRITE_LEFT) { | 7015 if (mode_ == OVERWRITE_LEFT) { |
7016 // We can't overwrite a Smi so get address of new heap number into r5. | 7016 // We can't overwrite a Smi so get address of new heap number into r5. |
7017 __ AllocateHeapNumber(r5, r6, r7, &slow); | 7017 __ AllocateHeapNumber(r5, r6, r7, &slow); |
7018 } | 7018 } |
7019 | 7019 |
7020 if (use_fp_registers) { | 7020 if (use_fp_registers) { |
7021 CpuFeatures::Scope scope(VFP3); | 7021 CpuFeatures::Scope scope(VFP3); |
(...skipping 50 matching lines...)
7072 __ CallCFunction(ExternalReference::double_fp_operation(op_), 4); | 7072 __ CallCFunction(ExternalReference::double_fp_operation(op_), 4); |
7073 // Store answer in the overwritable heap number. | 7073 // Store answer in the overwritable heap number. |
7074 #if !defined(USE_ARM_EABI) | 7074 #if !defined(USE_ARM_EABI) |
7075 // Double returned in fp coprocessor registers 0 and 1, encoded as register | 7075 // Double returned in fp coprocessor registers 0 and 1, encoded as register |
7076 // cr8. Offsets must be divisible by 4 for the coprocessor, so we need to | 7076 // cr8. Offsets must be divisible by 4 for the coprocessor, so we need to |
7077 // subtract the tag from r5. | 7077 // subtract the tag from r5. |
7078 __ sub(r4, r5, Operand(kHeapObjectTag)); | 7078 __ sub(r4, r5, Operand(kHeapObjectTag)); |
7079 __ stc(p1, cr8, MemOperand(r4, HeapNumber::kValueOffset)); | 7079 __ stc(p1, cr8, MemOperand(r4, HeapNumber::kValueOffset)); |
7080 #else | 7080 #else |
7081 // Double returned in registers 0 and 1. | 7081 // Double returned in registers 0 and 1. |
7082 __ strd(r0, r1, FieldMemOperand(r5, HeapNumber::kValueOffset)); | 7082 __ Strd(r0, r1, FieldMemOperand(r5, HeapNumber::kValueOffset)); |
7083 #endif | 7083 #endif |
7084 __ mov(r0, Operand(r5)); | 7084 __ mov(r0, Operand(r5)); |
7085 // And we are done. | 7085 // And we are done. |
7086 __ pop(pc); | 7086 __ pop(pc); |
7087 } | 7087 } |
7088 } | 7088 } |
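Because the two return conventions above are easy to conflate, a brief restatement (the EABI rule is standard; the cr8 detail is taken from the comment in the hunk, not verified independently):

    // Non-EABI (old APCS/FPA) builds: the C function returns the double in a
    // floating-point coprocessor register, stored back with stc; stc scales
    // its immediate offset by 4, so r5 must be untagged into r4 first.
    // EABI soft-float builds: the 64-bit result arrives in the core pair
    // r0:r1, and a single Strd through FieldMemOperand (which subtracts the
    // tag itself) writes it into the overwritable heap number.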
7089 | 7089 |
7090 | 7090 |
7091 if (lhs.is(r0)) { | 7091 if (lhs.is(r0)) { |
7092 __ b(&slow); | 7092 __ b(&slow); |
(...skipping 2939 matching lines...)
10032 __ bind(&string_add_runtime); | 10032 __ bind(&string_add_runtime); |
10033 __ TailCallRuntime(Runtime::kStringAdd, 2, 1); | 10033 __ TailCallRuntime(Runtime::kStringAdd, 2, 1); |
10034 } | 10034 } |
10035 | 10035 |
10036 | 10036 |
10037 #undef __ | 10037 #undef __ |
10038 | 10038 |
10039 } } // namespace v8::internal | 10039 } } // namespace v8::internal |
10040 | 10040 |
10041 #endif // V8_TARGET_ARCH_ARM | 10041 #endif // V8_TARGET_ARCH_ARM |