| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 112 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 123 #ifdef _MSC_VER | 123 #ifdef _MSC_VER |
| 124 void LCodeGen::MakeSureStackPagesMapped(int offset) { | 124 void LCodeGen::MakeSureStackPagesMapped(int offset) { |
| 125 const int kPageSize = 4 * KB; | 125 const int kPageSize = 4 * KB; |
| 126 for (offset -= kPageSize; offset > 0; offset -= kPageSize) { | 126 for (offset -= kPageSize; offset > 0; offset -= kPageSize) { |
| 127 __ mov(Operand(esp, offset), eax); | 127 __ mov(Operand(esp, offset), eax); |
| 128 } | 128 } |
| 129 } | 129 } |
| 130 #endif | 130 #endif |
| 131 | 131 |
| 132 | 132 |
| 133 void LCodeGen::SaveCallerDoubles() { |
| 134 ASSERT(info()->saves_caller_doubles()); |
| 135 ASSERT(NeedsEagerFrame()); |
| 136 Comment(";;; Save clobbered callee double registers"); |
| 137 CpuFeatureScope scope(masm(), SSE2); |
| 138 int count = 0; |
| 139 BitVector* doubles = chunk()->allocated_double_registers(); |
| 140 BitVector::Iterator save_iterator(doubles); |
| 141 while (!save_iterator.Done()) { |
| 142 __ movsd(MemOperand(esp, count * kDoubleSize), |
| 143 XMMRegister::FromAllocationIndex(save_iterator.Current())); |
| 144 save_iterator.Advance(); |
| 145 count++; |
| 146 } |
| 147 } |
| 148 |
| 149 |
| 150 void LCodeGen::RestoreCallerDoubles() { |
| 151 ASSERT(info()->saves_caller_doubles()); |
| 152 ASSERT(NeedsEagerFrame()); |
| 153 Comment(";;; Restore clobbered callee double registers"); |
| 154 CpuFeatureScope scope(masm(), SSE2); |
| 155 BitVector* doubles = chunk()->allocated_double_registers(); |
| 156 BitVector::Iterator save_iterator(doubles); |
| 157 int count = 0; |
| 158 while (!save_iterator.Done()) { |
| 159 __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()), |
| 160 MemOperand(esp, count * kDoubleSize)); |
| 161 save_iterator.Advance(); |
| 162 count++; |
| 163 } |
| 164 } |
| 165 |
| 166 |
| 133 bool LCodeGen::GeneratePrologue() { | 167 bool LCodeGen::GeneratePrologue() { |
| 134 ASSERT(is_generating()); | 168 ASSERT(is_generating()); |
| 135 | 169 |
| 136 if (info()->IsOptimizing()) { | 170 if (info()->IsOptimizing()) { |
| 137 ProfileEntryHookStub::MaybeCallEntryHook(masm_); | 171 ProfileEntryHookStub::MaybeCallEntryHook(masm_); |
| 138 | 172 |
| 139 #ifdef DEBUG | 173 #ifdef DEBUG |
| 140 if (strlen(FLAG_stop_at) > 0 && | 174 if (strlen(FLAG_stop_at) > 0 && |
| 141 info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) { | 175 info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) { |
| 142 __ int3(); | 176 __ int3(); |
| (...skipping 94 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 237 int offset = JavaScriptFrameConstants::kDynamicAlignmentStateOffset; | 271 int offset = JavaScriptFrameConstants::kDynamicAlignmentStateOffset; |
| 238 if (dynamic_frame_alignment_) { | 272 if (dynamic_frame_alignment_) { |
| 239 __ mov(Operand(ebp, offset), edx); | 273 __ mov(Operand(ebp, offset), edx); |
| 240 } else { | 274 } else { |
| 241 __ mov(Operand(ebp, offset), Immediate(kNoAlignmentPadding)); | 275 __ mov(Operand(ebp, offset), Immediate(kNoAlignmentPadding)); |
| 242 } | 276 } |
| 243 } | 277 } |
| 244 } | 278 } |
| 245 | 279 |
| 246 if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) { | 280 if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) { |
| 247 Comment(";;; Save clobbered callee double registers"); | 281 SaveCallerDoubles(); |
| 248 CpuFeatureScope scope(masm(), SSE2); | |
| 249 int count = 0; | |
| 250 BitVector* doubles = chunk()->allocated_double_registers(); | |
| 251 BitVector::Iterator save_iterator(doubles); | |
| 252 while (!save_iterator.Done()) { | |
| 253 __ movsd(MemOperand(esp, count * kDoubleSize), | |
| 254 XMMRegister::FromAllocationIndex(save_iterator.Current())); | |
| 255 save_iterator.Advance(); | |
| 256 count++; | |
| 257 } | |
| 258 } | 282 } |
| 259 } | 283 } |
| 260 | 284 |
| 261 // Possibly allocate a local context. | 285 // Possibly allocate a local context. |
| 262 int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; | 286 int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; |
| 263 if (heap_slots > 0) { | 287 if (heap_slots > 0) { |
| 264 Comment(";;; Allocate local context"); | 288 Comment(";;; Allocate local context"); |
| 265 // Argument to NewContext is the function, which is still in edi. | 289 // Argument to NewContext is the function, which is still in edi. |
| 266 __ push(edi); | 290 __ push(edi); |
| 267 if (heap_slots <= FastNewContextStub::kMaximumSlots) { | 291 if (heap_slots <= FastNewContextStub::kMaximumSlots) { |
| (...skipping 124 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 392 __ bind(&jump_table_[i].label); | 416 __ bind(&jump_table_[i].label); |
| 393 Address entry = jump_table_[i].address; | 417 Address entry = jump_table_[i].address; |
| 394 Deoptimizer::BailoutType type = jump_table_[i].bailout_type; | 418 Deoptimizer::BailoutType type = jump_table_[i].bailout_type; |
| 395 int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type); | 419 int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type); |
| 396 if (id == Deoptimizer::kNotDeoptimizationEntry) { | 420 if (id == Deoptimizer::kNotDeoptimizationEntry) { |
| 397 Comment(";;; jump table entry %d.", i); | 421 Comment(";;; jump table entry %d.", i); |
| 398 } else { | 422 } else { |
| 399 Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id); | 423 Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id); |
| 400 } | 424 } |
| 401 if (jump_table_[i].needs_frame) { | 425 if (jump_table_[i].needs_frame) { |
| 426 ASSERT(!info()->saves_caller_doubles()); |
| 402 __ push(Immediate(ExternalReference::ForDeoptEntry(entry))); | 427 __ push(Immediate(ExternalReference::ForDeoptEntry(entry))); |
| 403 if (needs_frame.is_bound()) { | 428 if (needs_frame.is_bound()) { |
| 404 __ jmp(&needs_frame); | 429 __ jmp(&needs_frame); |
| 405 } else { | 430 } else { |
| 406 __ bind(&needs_frame); | 431 __ bind(&needs_frame); |
| 407 __ push(MemOperand(ebp, StandardFrameConstants::kContextOffset)); | 432 __ push(MemOperand(ebp, StandardFrameConstants::kContextOffset)); |
| 408 // This variant of deopt can only be used with stubs. Since we don't | 433 // This variant of deopt can only be used with stubs. Since we don't |
| 409 // have a function pointer to install in the stack frame that we're | 434 // have a function pointer to install in the stack frame that we're |
| 410 // building, install a special marker there instead. | 435 // building, install a special marker there instead. |
| 411 ASSERT(info()->IsStub()); | 436 ASSERT(info()->IsStub()); |
| 412 __ push(Immediate(Smi::FromInt(StackFrame::STUB))); | 437 __ push(Immediate(Smi::FromInt(StackFrame::STUB))); |
| 413 // Push a PC inside the function so that the deopt code can find where | 438 // Push a PC inside the function so that the deopt code can find where |
| 414 // the deopt comes from. It doesn't have to be the precise return | 439 // the deopt comes from. It doesn't have to be the precise return |
| 415 // address of a "calling" LAZY deopt, it only has to be somewhere | 440 // address of a "calling" LAZY deopt, it only has to be somewhere |
| 416 // inside the code body. | 441 // inside the code body. |
| 417 Label push_approx_pc; | 442 Label push_approx_pc; |
| 418 __ call(&push_approx_pc); | 443 __ call(&push_approx_pc); |
| 419 __ bind(&push_approx_pc); | 444 __ bind(&push_approx_pc); |
| 420 // Push the continuation which was stashed where the ebp should | 445 // Push the continuation which was stashed where the ebp should |
| 421 // be. Replace it with the saved ebp. | 446 // be. Replace it with the saved ebp. |
| 422 __ push(MemOperand(esp, 3 * kPointerSize)); | 447 __ push(MemOperand(esp, 3 * kPointerSize)); |
| 423 __ mov(MemOperand(esp, 4 * kPointerSize), ebp); | 448 __ mov(MemOperand(esp, 4 * kPointerSize), ebp); |
| 424 __ lea(ebp, MemOperand(esp, 4 * kPointerSize)); | 449 __ lea(ebp, MemOperand(esp, 4 * kPointerSize)); |
| 425 __ ret(0); // Call the continuation without clobbering registers. | 450 __ ret(0); // Call the continuation without clobbering registers. |
| 426 } | 451 } |
| 427 } else { | 452 } else { |
| 453 if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) { |
| 454 RestoreCallerDoubles(); |
| 455 } |
| 428 __ call(entry, RelocInfo::RUNTIME_ENTRY); | 456 __ call(entry, RelocInfo::RUNTIME_ENTRY); |
| 429 } | 457 } |
| 430 } | 458 } |
| 431 return !is_aborted(); | 459 return !is_aborted(); |
| 432 } | 460 } |
| 433 | 461 |
| 434 | 462 |
| 435 bool LCodeGen::GenerateDeferredCode() { | 463 bool LCodeGen::GenerateDeferredCode() { |
| 436 ASSERT(is_generating()); | 464 ASSERT(is_generating()); |
| 437 if (deferred_.length() > 0) { | 465 if (deferred_.length() > 0) { |
| (...skipping 2611 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 3049 if (FLAG_trace && info()->IsOptimizing()) { | 3077 if (FLAG_trace && info()->IsOptimizing()) { |
| 3050 // Preserve the return value on the stack and rely on the runtime call | 3078 // Preserve the return value on the stack and rely on the runtime call |
| 3051 // to return the value in the same register. We're leaving the code | 3079 // to return the value in the same register. We're leaving the code |
| 3052 // managed by the register allocator and tearing down the frame, it's | 3080 // managed by the register allocator and tearing down the frame, it's |
| 3053 // safe to write to the context register. | 3081 // safe to write to the context register. |
| 3054 __ push(eax); | 3082 __ push(eax); |
| 3055 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); | 3083 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); |
| 3056 __ CallRuntime(Runtime::kTraceExit, 1); | 3084 __ CallRuntime(Runtime::kTraceExit, 1); |
| 3057 } | 3085 } |
| 3058 if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) { | 3086 if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) { |
| 3059 ASSERT(NeedsEagerFrame()); | 3087 RestoreCallerDoubles(); |
| 3060 CpuFeatureScope scope(masm(), SSE2); | |
| 3061 BitVector* doubles = chunk()->allocated_double_registers(); | |
| 3062 BitVector::Iterator save_iterator(doubles); | |
| 3063 int count = 0; | |
| 3064 while (!save_iterator.Done()) { | |
| 3065 __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()), | |
| 3066 MemOperand(esp, count * kDoubleSize)); | |
| 3067 save_iterator.Advance(); | |
| 3068 count++; | |
| 3069 } | |
| 3070 } | 3088 } |
| 3071 if (dynamic_frame_alignment_) { | 3089 if (dynamic_frame_alignment_) { |
| 3072 // Fetch the state of the dynamic frame alignment. | 3090 // Fetch the state of the dynamic frame alignment. |
| 3073 __ mov(edx, Operand(ebp, | 3091 __ mov(edx, Operand(ebp, |
| 3074 JavaScriptFrameConstants::kDynamicAlignmentStateOffset)); | 3092 JavaScriptFrameConstants::kDynamicAlignmentStateOffset)); |
| 3075 } | 3093 } |
| 3076 int no_frame_start = -1; | 3094 int no_frame_start = -1; |
| 3077 if (NeedsEagerFrame()) { | 3095 if (NeedsEagerFrame()) { |
| 3078 __ mov(esp, ebp); | 3096 __ mov(esp, ebp); |
| 3079 __ pop(ebp); | 3097 __ pop(ebp); |
| (...skipping 3314 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 6394 FixedArray::kHeaderSize - kPointerSize)); | 6412 FixedArray::kHeaderSize - kPointerSize)); |
| 6395 __ bind(&done); | 6413 __ bind(&done); |
| 6396 } | 6414 } |
| 6397 | 6415 |
| 6398 | 6416 |
| 6399 #undef __ | 6417 #undef __ |
| 6400 | 6418 |
| 6401 } } // namespace v8::internal | 6419 } } // namespace v8::internal |
| 6402 | 6420 |
| 6403 #endif // V8_TARGET_ARCH_IA32 | 6421 #endif // V8_TARGET_ARCH_IA32 |
| OLD | NEW |