OLD | NEW |
1 // Copyright 2006-2009 the V8 project authors. All rights reserved. | 1 // Copyright 2006-2009 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 4317 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
4328 frame->EmitPush(result.reg()); | 4328 frame->EmitPush(result.reg()); |
4329 break; | 4329 break; |
4330 } | 4330 } |
4331 | 4331 |
4332 default: | 4332 default: |
4333 UNREACHABLE(); | 4333 UNREACHABLE(); |
4334 } | 4334 } |
4335 } | 4335 } |
4336 | 4336 |
4337 | 4337 |
| 4338 static void AllocateHeapNumber( |
| 4339 MacroAssembler* masm, |
| 4340 Label* need_gc, // Jump here if young space is full. |
| 4341 Register result_reg, // The tagged address of the new heap number. |
| 4342 Register allocation_top_addr_reg, // A scratch register. |
| 4343 Register scratch2) { // Another scratch register. |
| 4344 ExternalReference allocation_top = |
| 4345 ExternalReference::new_space_allocation_top_address(); |
| 4346 ExternalReference allocation_limit = |
| 4347 ExternalReference::new_space_allocation_limit_address(); |
| 4348 |
 | 4349 // allocation_top_addr_reg := the address of the allocation top variable. |
| 4350 __ mov(allocation_top_addr_reg, Operand(allocation_top)); |
| 4351 // result_reg := the old allocation top. |
| 4352 __ ldr(result_reg, MemOperand(allocation_top_addr_reg)); |
| 4353 // scratch2 := the address of the allocation limit. |
| 4354 __ mov(scratch2, Operand(allocation_limit)); |
| 4355 // scratch2 := the allocation limit. |
| 4356 __ ldr(scratch2, MemOperand(scratch2)); |
| 4357 // result_reg := the new allocation top. |
| 4358 __ add(result_reg, result_reg, Operand(HeapNumber::kSize)); |
 | 4359 // Compare new allocation top and limit. |
| 4360 __ cmp(result_reg, Operand(scratch2)); |
| 4361 // Branch if out of space in young generation. |
| 4362 __ b(hi, need_gc); |
| 4363 // Store new allocation top. |
| 4364 __ str(result_reg, MemOperand(allocation_top_addr_reg)); // store new top |
| 4365 // Tag and adjust back to start of new object. |
| 4366 __ sub(result_reg, result_reg, Operand(HeapNumber::kSize - kHeapObjectTag)); |
| 4367 // Get heap number map into scratch2. |
| 4368 __ mov(scratch2, Operand(Factory::heap_number_map())); |
| 4369 // Store heap number map in new object. |
| 4370 __ str(scratch2, FieldMemOperand(result_reg, HeapObject::kMapOffset)); |
| 4371 } |
| 4372 |
| 4373 |
| 4374 // We fall into this code if the operands were Smis, but the result was |
4375 // not (e.g. overflow). We branch into this code (to the not_smi label) if |
| 4376 // the operands were not both Smi. |
4338 static void HandleBinaryOpSlowCases(MacroAssembler* masm, | 4377 static void HandleBinaryOpSlowCases(MacroAssembler* masm, |
4339 Label* not_smi, | 4378 Label* not_smi, |
4340 const Builtins::JavaScript& builtin, | 4379 const Builtins::JavaScript& builtin, |
4341 Token::Value operation, | 4380 Token::Value operation, |
4342 int swi_number, | 4381 int swi_number, |
4343 OverwriteMode mode) { | 4382 OverwriteMode mode) { |
4344 Label slow; | 4383 Label slow; |
4345 if (mode == NO_OVERWRITE) { | |
4346 __ bind(not_smi); | |
4347 } | |
4348 __ bind(&slow); | 4384 __ bind(&slow); |
4349 __ push(r1); | 4385 __ push(r1); |
4350 __ push(r0); | 4386 __ push(r0); |
4351 __ mov(r0, Operand(1)); // Set number of arguments. | 4387 __ mov(r0, Operand(1)); // Set number of arguments. |
4352 __ InvokeBuiltin(builtin, JUMP_JS); // Tail call. | 4388 __ InvokeBuiltin(builtin, JUMP_JS); // Tail call. |
4353 | 4389 |
4354 // Could it be a double-double op? If we already have a place to put | 4390 __ bind(not_smi); |
4355 // the answer then we can do the op and skip the builtin and runtime call. | 4391 __ tst(r0, Operand(kSmiTagMask)); |
4356 if (mode != NO_OVERWRITE) { | 4392 __ b(eq, &slow); // We can't handle a Smi-double combination yet. |
4357 __ bind(not_smi); | 4393 __ tst(r1, Operand(kSmiTagMask)); |
4358 __ tst(r0, Operand(kSmiTagMask)); | 4394 __ b(eq, &slow); // We can't handle a Smi-double combination yet. |
4359 __ b(eq, &slow); // We can't handle a Smi-double combination yet. | 4395 // Get map of r0 into r2. |
4360 __ tst(r1, Operand(kSmiTagMask)); | 4396 __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset)); |
4361 __ b(eq, &slow); // We can't handle a Smi-double combination yet. | 4397 // Get type of r0 into r3. |
4362 // Get map of r0 into r2. | 4398 __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceTypeOffset)); |
4363 __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset)); | 4399 __ cmp(r3, Operand(HEAP_NUMBER_TYPE)); |
4364 // Get type of r0 into r3. | 4400 __ b(ne, &slow); |
4365 __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceTypeOffset)); | 4401 // Get type of r1 into r3. |
4366 __ cmp(r3, Operand(HEAP_NUMBER_TYPE)); | 4402 __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset)); |
4367 __ b(ne, &slow); | 4403 // Check they are both the same map (heap number map). |
4368 // Get type of r1 into r3. | 4404 __ cmp(r2, r3); |
4369 __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset)); | 4405 __ b(ne, &slow); |
4370 // Check they are both the same map (heap number map). | 4406 // Both are doubles. |
4371 __ cmp(r2, r3); | 4407 // Calling convention says that second double is in r2 and r3. |
4372 __ b(ne, &slow); | 4408 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kValueOffset)); |
4373 // Both are doubles. | 4409 __ ldr(r3, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize)); |
4374 // Calling convention says that second double is in r2 and r3. | 4410 |
4375 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kValueOffset)); | 4411 if (mode == NO_OVERWRITE) { |
4376 __ ldr(r3, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize)); | 4412 // Get address of new heap number into r5. |
| 4413 AllocateHeapNumber(masm, &slow, r5, r6, r7); |
4377 __ push(lr); | 4414 __ push(lr); |
4378 if (mode == OVERWRITE_LEFT) { | 4415 __ push(r5); |
4379 __ push(r1); | 4416 } else if (mode == OVERWRITE_LEFT) { |
4380 } else { | 4417 __ push(lr); |
4381 __ push(r0); | 4418 __ push(r1); |
4382 } | 4419 } else { |
4383 // Calling convention says that first double is in r0 and r1. | 4420 ASSERT(mode == OVERWRITE_RIGHT); |
4384 __ ldr(r0, FieldMemOperand(r1, HeapNumber::kValueOffset)); | 4421 __ push(lr); |
4385 __ ldr(r1, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize)); | 4422 __ push(r0); |
4386 // Call C routine that may not cause GC or other trouble. | 4423 } |
4387 __ mov(r5, Operand(ExternalReference::double_fp_operation(operation))); | 4424 // Calling convention says that first double is in r0 and r1. |
| 4425 __ ldr(r0, FieldMemOperand(r1, HeapNumber::kValueOffset)); |
| 4426 __ ldr(r1, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize)); |
| 4427 // Call C routine that may not cause GC or other trouble. |
| 4428 __ mov(r5, Operand(ExternalReference::double_fp_operation(operation))); |
4388 #if !defined(__arm__) | 4429 #if !defined(__arm__) |
4389 // Notify the simulator that we are calling an add routine in C. | 4430 // Notify the simulator that we are calling an add routine in C. |
4390 __ swi(swi_number); | 4431 __ swi(swi_number); |
4391 #else | 4432 #else |
4392 // Actually call the add routine written in C. | 4433 // Actually call the add routine written in C. |
4393 __ Call(r5); | 4434 __ Call(r5); |
4394 #endif | 4435 #endif |
4395 // Store answer in the overwritable heap number. | 4436 // Store answer in the overwritable heap number. |
4396 __ pop(r4); | 4437 __ pop(r4); |
4397 #if !defined(__ARM_EABI__) && defined(__arm__) | 4438 #if !defined(__ARM_EABI__) && defined(__arm__) |
4398 // Double returned in fp coprocessor register 0 and 1, encoded as register | 4439 // Double returned in fp coprocessor register 0 and 1, encoded as register |
4399 // cr8. Offsets must be divisible by 4 for coprocessor so we need to | 4440 // cr8. Offsets must be divisible by 4 for coprocessor so we need to |
4400 // subtract the tag from r4. | 4441 // subtract the tag from r4. |
4401 __ sub(r5, r4, Operand(kHeapObjectTag)); | 4442 __ sub(r5, r4, Operand(kHeapObjectTag)); |
4402 __ stc(p1, cr8, MemOperand(r5, HeapNumber::kValueOffset)); | 4443 __ stc(p1, cr8, MemOperand(r5, HeapNumber::kValueOffset)); |
4403 #else | 4444 #else |
4404 // Double returned in fp coprocessor register 0 and 1. | 4445 // Double returned in fp coprocessor register 0 and 1. |
4405 __ str(r0, FieldMemOperand(r4, HeapNumber::kValueOffset)); | 4446 __ str(r0, FieldMemOperand(r4, HeapNumber::kValueOffset)); |
4406 __ str(r1, FieldMemOperand(r4, HeapNumber::kValueOffset + kPointerSize)); | 4447 __ str(r1, FieldMemOperand(r4, HeapNumber::kValueOffset + kPointerSize)); |
4407 #endif | 4448 #endif |
4408 __ mov(r0, Operand(r4)); | 4449 __ mov(r0, Operand(r4)); |
4409 // And we are done. | 4450 // And we are done. |
4410 __ pop(pc); | 4451 __ pop(pc); |
4411 } | |
4412 } | 4452 } |
4413 | 4453 |
4414 | 4454 |
4415 void GenericBinaryOpStub::Generate(MacroAssembler* masm) { | 4455 void GenericBinaryOpStub::Generate(MacroAssembler* masm) { |
4416 // r1 : x | 4456 // r1 : x |
4417 // r0 : y | 4457 // r0 : y |
4418 // result : r0 | 4458 // result : r0 |
4419 | 4459 |
4420 // All ops need to know whether we are dealing with two Smis. Set up r2 to | 4460 // All ops need to know whether we are dealing with two Smis. Set up r2 to |
4421 // tell us that. | 4461 // tell us that. |
(...skipping 722 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
5144 __ mov(r2, Operand(0)); | 5184 __ mov(r2, Operand(0)); |
5145 __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION); | 5185 __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION); |
5146 __ Jump(Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline)), | 5186 __ Jump(Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline)), |
5147 RelocInfo::CODE_TARGET); | 5187 RelocInfo::CODE_TARGET); |
5148 } | 5188 } |
5149 | 5189 |
5150 | 5190 |
5151 #undef __ | 5191 #undef __ |
5152 | 5192 |
5153 } } // namespace v8::internal | 5193 } } // namespace v8::internal |
OLD | NEW |