Chromium Code Reviews

Diff: src/arm/lithium-codegen-arm.cc

Issue 11037023: Use movw/movt instead of constant pool on ARMv7 (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: More fixes and nit fixes Created 8 years, 2 months ago
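
The change under review replaces constant-pool loads with movw/movt pairs when materializing 32-bit constants on ARMv7. As a rough standalone illustration (not V8 code), the sketch below shows how a 32-bit value splits into the two 16-bit immediates that movw (write the low half, clear the high half) and movt (write the high half, keep the low half) encode:

#include <cstdint>
#include <cstdio>

// Sketch: split a 32-bit constant into the immediates of a movw/movt pair,
// the two-instruction sequence ARMv7 can use instead of an ldr from the
// constant pool.
int main() {
  uint32_t value = 0xDEADBEEF;      // example constant to materialize
  uint16_t low  = value & 0xFFFFu;  // movw rd, #0xBEEF
  uint16_t high = value >> 16;      // movt rd, #0xDEAD
  printf("movw rd, #0x%04X\n", low);
  printf("movt rd, #0x%04X\n", high);
  return 0;
}

Unlike an ldr from the constant pool, this sequence has a fixed two-instruction length, which is what the predictable-code-size scopes added in this patch rely on.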
(Unified view of the patch: unchanged context lines are prefixed with a space, lines added by this patch with '+'.)
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 2453 matching lines...)
   Register map = temp;
   __ ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
   {
     // Block constant pool emission to ensure the positions of instructions are
     // as expected by the patcher. See InstanceofStub::Generate().
     Assembler::BlockConstPoolScope block_const_pool(masm());
     __ bind(deferred->map_check());  // Label for calculating code patching.
     // We use Factory::the_hole_value() on purpose instead of loading from the
     // root array to force relocation to be able to later patch with
     // the cached map.
+    ScopedPredictableCodeSize predictable(masm_);
     Handle<JSGlobalPropertyCell> cell =
         factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
     __ mov(ip, Operand(Handle<Object>(cell)));
     __ ldr(ip, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
     __ cmp(map, Operand(ip));
     __ b(ne, &cache_miss);
     // We use Factory::the_hole_value() on purpose instead of loading from the
     // root array to force relocation to be able to later patch
     // with true or false.
     __ mov(result, Operand(factory()->the_hole_value()));
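
The added ScopedPredictableCodeSize lets the patcher rely on fixed instruction offsets within this blocked region. A minimal sketch of the RAII pattern involved, using a hypothetical assembler type (the real V8 Assembler API may differ):

// Hypothetical assembler with a flag that forces fixed-length code sequences.
struct SketchAssembler {
  bool predictable_code_size = false;
};

// RAII scope: force predictable code size, restore the old setting on exit.
class ScopedPredictableCodeSizeSketch {
 public:
  explicit ScopedPredictableCodeSizeSketch(SketchAssembler* masm)
      : masm_(masm), old_value_(masm->predictable_code_size) {
    masm_->predictable_code_size = true;  // e.g. always emit movw/movt pairs
  }
  ~ScopedPredictableCodeSizeSketch() {
    masm_->predictable_code_size = old_value_;
  }
 private:
  SketchAssembler* masm_;
  bool old_value_;
};

While such a flag is set, materializing an arbitrary constant must always take the same number of instructions, so offsets computed at code-generation time stay valid when the code is later patched.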
(...skipping 41 matching lines...)
 
   PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
 
   // Get the temp register reserved by the instruction. This needs to be r4,
   // as its slot among the pushed safepoint registers is used to communicate
   // the offset to the location of the map check.
   Register temp = ToRegister(instr->temp());
   ASSERT(temp.is(r4));
   __ LoadHeapObject(InstanceofStub::right(), instr->function());
   static const int kAdditionalDelta = 5;
+  // Make sure that code size is predictable, since we use specific constant
+  // offsets in the code to find embedded values.
+  ScopedPredictableCodeSize predictable(masm_);
   int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
   Label before_push_delta;
   __ bind(&before_push_delta);
   __ BlockConstPoolFor(kAdditionalDelta);
   __ mov(temp, Operand(delta * kPointerSize));
   // The mov above can generate one or two instructions. The delta was computed
   // for two instructions, so we need to pad here in case of one instruction.
   if (masm_->InstructionsGeneratedSince(&before_push_delta) != 2) {
     ASSERT_EQ(1, masm_->InstructionsGeneratedSince(&before_push_delta));
     __ nop();
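
The padding exists because a mov of an immediate is one instruction when the value fits ARM's modified-immediate encoding (an 8-bit value rotated right by an even amount) and two instructions (movw/movt) otherwise. A standalone checker for that encoding, as a sketch:

#include <cstdint>
#include <cstdio>

// True if v fits an ARM modified immediate: an 8-bit value rotated right by
// an even amount. Such constants need only one mov; others need movw/movt.
static bool FitsSingleMov(uint32_t v) {
  for (int rot = 0; rot < 32; rot += 2) {
    // Undo a rotate-right by `rot`: rotate left by `rot`.
    uint32_t undone = (rot == 0) ? v : (v << rot) | (v >> (32 - rot));
    if (undone <= 0xFFu) return true;
  }
  return false;
}

int main() {
  printf("%d\n", FitsSingleMov(0xFF0));       // 1: a single mov suffices
  printf("%d\n", FitsSingleMov(0x12345678));  // 0: needs movw/movt
  return 0;
}

Since delta was computed assuming the two-instruction form, the nop keeps the sequence length, and therefore the offset pushed in temp, correct either way.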
(...skipping 2999 matching lines...)
   LEnvironment* env = instr->environment();
   // There is no LLazyBailout instruction for stack-checks. We have to
   // prepare for lazy deoptimization explicitly here.
   if (instr->hydrogen()->is_function_entry()) {
     // Perform stack overflow check.
     Label done;
     __ LoadRoot(ip, Heap::kStackLimitRootIndex);
     __ cmp(sp, Operand(ip));
     __ b(hs, &done);
     StackCheckStub stub;
+    ScopedPredictableCodeSize predictable_code_size(masm_);
     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
     EnsureSpaceForLazyDeopt();
     __ bind(&done);
     RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
     safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
   } else {
     ASSERT(instr->hydrogen()->is_backwards_branch());
     // Perform stack overflow check if this goto needs it before jumping.
     DeferredStackCheck* deferred_stack_check =
         new(zone()) DeferredStackCheck(this, instr);
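
The generated guard compares sp against the stack limit with an unsigned "higher or same" branch. In plain C++ terms (a sketch, not V8 code):

#include <cstdint>
#include <cstdio>

// b(hs, &done) is taken when sp >= limit as an unsigned comparison: there is
// still stack headroom, so the StackCheckStub call is skipped.
static bool HasStackHeadroom(uintptr_t sp, uintptr_t stack_limit) {
  return sp >= stack_limit;
}

int main() {
  printf("%d\n", HasStackHeadroom(0x7fff0000u, 0x7ffe0000u));  // 1: headroom left
  return 0;
}

The new ScopedPredictableCodeSize around CallCode presumably keeps the stub call sequence a fixed size, so the lazy-deopt machinery that follows can find it at a known offset.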
(...skipping 119 matching lines...)
   __ sub(scratch, result, Operand(index, LSL, kPointerSizeLog2 - kSmiTagSize));
   __ ldr(result, FieldMemOperand(scratch,
                                  FixedArray::kHeaderSize - kPointerSize));
   __ bind(&done);
 }
 
 
 #undef __
 
 } }  // namespace v8::internal