Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(1)

Side by Side Diff: src/ia32/lithium-codegen-ia32.cc

Issue 148593004: A64: Synchronize with r18084. (Closed) Base URL: https://v8.googlecode.com/svn/branches/experimental/a64
Patch Set: Created 6 years, 10 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « src/ia32/lithium-codegen-ia32.h ('k') | src/ia32/lithium-ia32.h » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 112 matching lines...) Expand 10 before | Expand all | Expand 10 after
#ifdef _MSC_VER
// Touches one word in every stack page spanned by [0, offset) below esp,
// from the page nearest esp outward. NOTE(review): presumably this is the
// Windows stack-probe requirement — guard pages must be committed one page
// at a time before a large esp adjustment — confirm against MSVC docs.
void LCodeGen::MakeSureStackPagesMapped(int offset) {
  const int kPageSize = 4 * KB;
  // Start one page below the full offset; the page at esp itself is
  // already mapped, so probing begins at esp + (offset - kPageSize).
  for (offset -= kPageSize; offset > 0; offset -= kPageSize) {
    // Any write suffices to commit the page; eax's value is irrelevant.
    __ mov(Operand(esp, offset), eax);
  }
}
#endif
131 131
132 132
133 void LCodeGen::SaveCallerDoubles() {
134 ASSERT(info()->saves_caller_doubles());
135 ASSERT(NeedsEagerFrame());
136 Comment(";;; Save clobbered callee double registers");
137 CpuFeatureScope scope(masm(), SSE2);
138 int count = 0;
139 BitVector* doubles = chunk()->allocated_double_registers();
140 BitVector::Iterator save_iterator(doubles);
141 while (!save_iterator.Done()) {
142 __ movsd(MemOperand(esp, count * kDoubleSize),
143 XMMRegister::FromAllocationIndex(save_iterator.Current()));
144 save_iterator.Advance();
145 count++;
146 }
147 }
148
149
150 void LCodeGen::RestoreCallerDoubles() {
151 ASSERT(info()->saves_caller_doubles());
152 ASSERT(NeedsEagerFrame());
153 Comment(";;; Restore clobbered callee double registers");
154 CpuFeatureScope scope(masm(), SSE2);
155 BitVector* doubles = chunk()->allocated_double_registers();
156 BitVector::Iterator save_iterator(doubles);
157 int count = 0;
158 while (!save_iterator.Done()) {
159 __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
160 MemOperand(esp, count * kDoubleSize));
161 save_iterator.Advance();
162 count++;
163 }
164 }
165
166
133 bool LCodeGen::GeneratePrologue() { 167 bool LCodeGen::GeneratePrologue() {
134 ASSERT(is_generating()); 168 ASSERT(is_generating());
135 169
136 if (info()->IsOptimizing()) { 170 if (info()->IsOptimizing()) {
137 ProfileEntryHookStub::MaybeCallEntryHook(masm_); 171 ProfileEntryHookStub::MaybeCallEntryHook(masm_);
138 172
139 #ifdef DEBUG 173 #ifdef DEBUG
140 if (strlen(FLAG_stop_at) > 0 && 174 if (strlen(FLAG_stop_at) > 0 &&
141 info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) { 175 info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
142 __ int3(); 176 __ int3();
(...skipping 94 matching lines...) Expand 10 before | Expand all | Expand 10 after
237 int offset = JavaScriptFrameConstants::kDynamicAlignmentStateOffset; 271 int offset = JavaScriptFrameConstants::kDynamicAlignmentStateOffset;
238 if (dynamic_frame_alignment_) { 272 if (dynamic_frame_alignment_) {
239 __ mov(Operand(ebp, offset), edx); 273 __ mov(Operand(ebp, offset), edx);
240 } else { 274 } else {
241 __ mov(Operand(ebp, offset), Immediate(kNoAlignmentPadding)); 275 __ mov(Operand(ebp, offset), Immediate(kNoAlignmentPadding));
242 } 276 }
243 } 277 }
244 } 278 }
245 279
246 if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) { 280 if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
247 Comment(";;; Save clobbered callee double registers"); 281 SaveCallerDoubles();
248 CpuFeatureScope scope(masm(), SSE2);
249 int count = 0;
250 BitVector* doubles = chunk()->allocated_double_registers();
251 BitVector::Iterator save_iterator(doubles);
252 while (!save_iterator.Done()) {
253 __ movsd(MemOperand(esp, count * kDoubleSize),
254 XMMRegister::FromAllocationIndex(save_iterator.Current()));
255 save_iterator.Advance();
256 count++;
257 }
258 } 282 }
259 } 283 }
260 284
261 // Possibly allocate a local context. 285 // Possibly allocate a local context.
262 int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; 286 int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
263 if (heap_slots > 0) { 287 if (heap_slots > 0) {
264 Comment(";;; Allocate local context"); 288 Comment(";;; Allocate local context");
265 // Argument to NewContext is the function, which is still in edi. 289 // Argument to NewContext is the function, which is still in edi.
266 __ push(edi); 290 __ push(edi);
267 if (heap_slots <= FastNewContextStub::kMaximumSlots) { 291 if (heap_slots <= FastNewContextStub::kMaximumSlots) {
(...skipping 124 matching lines...) Expand 10 before | Expand all | Expand 10 after
392 __ bind(&jump_table_[i].label); 416 __ bind(&jump_table_[i].label);
393 Address entry = jump_table_[i].address; 417 Address entry = jump_table_[i].address;
394 Deoptimizer::BailoutType type = jump_table_[i].bailout_type; 418 Deoptimizer::BailoutType type = jump_table_[i].bailout_type;
395 int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type); 419 int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
396 if (id == Deoptimizer::kNotDeoptimizationEntry) { 420 if (id == Deoptimizer::kNotDeoptimizationEntry) {
397 Comment(";;; jump table entry %d.", i); 421 Comment(";;; jump table entry %d.", i);
398 } else { 422 } else {
399 Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id); 423 Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
400 } 424 }
401 if (jump_table_[i].needs_frame) { 425 if (jump_table_[i].needs_frame) {
426 ASSERT(!info()->saves_caller_doubles());
402 __ push(Immediate(ExternalReference::ForDeoptEntry(entry))); 427 __ push(Immediate(ExternalReference::ForDeoptEntry(entry)));
403 if (needs_frame.is_bound()) { 428 if (needs_frame.is_bound()) {
404 __ jmp(&needs_frame); 429 __ jmp(&needs_frame);
405 } else { 430 } else {
406 __ bind(&needs_frame); 431 __ bind(&needs_frame);
407 __ push(MemOperand(ebp, StandardFrameConstants::kContextOffset)); 432 __ push(MemOperand(ebp, StandardFrameConstants::kContextOffset));
408 // This variant of deopt can only be used with stubs. Since we don't 433 // This variant of deopt can only be used with stubs. Since we don't
409 // have a function pointer to install in the stack frame that we're 434 // have a function pointer to install in the stack frame that we're
410 // building, install a special marker there instead. 435 // building, install a special marker there instead.
411 ASSERT(info()->IsStub()); 436 ASSERT(info()->IsStub());
412 __ push(Immediate(Smi::FromInt(StackFrame::STUB))); 437 __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
413 // Push a PC inside the function so that the deopt code can find where 438 // Push a PC inside the function so that the deopt code can find where
414 // the deopt comes from. It doesn't have to be the precise return 439 // the deopt comes from. It doesn't have to be the precise return
415 // address of a "calling" LAZY deopt, it only has to be somewhere 440 // address of a "calling" LAZY deopt, it only has to be somewhere
416 // inside the code body. 441 // inside the code body.
417 Label push_approx_pc; 442 Label push_approx_pc;
418 __ call(&push_approx_pc); 443 __ call(&push_approx_pc);
419 __ bind(&push_approx_pc); 444 __ bind(&push_approx_pc);
420 // Push the continuation which was stashed where the ebp should 445 // Push the continuation which was stashed where the ebp should
421 // be. Replace it with the saved ebp. 446 // be. Replace it with the saved ebp.
422 __ push(MemOperand(esp, 3 * kPointerSize)); 447 __ push(MemOperand(esp, 3 * kPointerSize));
423 __ mov(MemOperand(esp, 4 * kPointerSize), ebp); 448 __ mov(MemOperand(esp, 4 * kPointerSize), ebp);
424 __ lea(ebp, MemOperand(esp, 4 * kPointerSize)); 449 __ lea(ebp, MemOperand(esp, 4 * kPointerSize));
425 __ ret(0); // Call the continuation without clobbering registers. 450 __ ret(0); // Call the continuation without clobbering registers.
426 } 451 }
427 } else { 452 } else {
453 if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
454 RestoreCallerDoubles();
455 }
428 __ call(entry, RelocInfo::RUNTIME_ENTRY); 456 __ call(entry, RelocInfo::RUNTIME_ENTRY);
429 } 457 }
430 } 458 }
431 return !is_aborted(); 459 return !is_aborted();
432 } 460 }
433 461
434 462
435 bool LCodeGen::GenerateDeferredCode() { 463 bool LCodeGen::GenerateDeferredCode() {
436 ASSERT(is_generating()); 464 ASSERT(is_generating());
437 if (deferred_.length() > 0) { 465 if (deferred_.length() > 0) {
(...skipping 2684 matching lines...) Expand 10 before | Expand all | Expand 10 after
3122 if (FLAG_trace && info()->IsOptimizing()) { 3150 if (FLAG_trace && info()->IsOptimizing()) {
3123 // Preserve the return value on the stack and rely on the runtime call 3151 // Preserve the return value on the stack and rely on the runtime call
3124 // to return the value in the same register. We're leaving the code 3152 // to return the value in the same register. We're leaving the code
3125 // managed by the register allocator and tearing down the frame, it's 3153 // managed by the register allocator and tearing down the frame, it's
3126 // safe to write to the context register. 3154 // safe to write to the context register.
3127 __ push(eax); 3155 __ push(eax);
3128 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); 3156 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
3129 __ CallRuntime(Runtime::kTraceExit, 1); 3157 __ CallRuntime(Runtime::kTraceExit, 1);
3130 } 3158 }
3131 if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) { 3159 if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
3132 ASSERT(NeedsEagerFrame()); 3160 RestoreCallerDoubles();
3133 CpuFeatureScope scope(masm(), SSE2);
3134 BitVector* doubles = chunk()->allocated_double_registers();
3135 BitVector::Iterator save_iterator(doubles);
3136 int count = 0;
3137 while (!save_iterator.Done()) {
3138 __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
3139 MemOperand(esp, count * kDoubleSize));
3140 save_iterator.Advance();
3141 count++;
3142 }
3143 } 3161 }
3144 if (dynamic_frame_alignment_) { 3162 if (dynamic_frame_alignment_) {
3145 // Fetch the state of the dynamic frame alignment. 3163 // Fetch the state of the dynamic frame alignment.
3146 __ mov(edx, Operand(ebp, 3164 __ mov(edx, Operand(ebp,
3147 JavaScriptFrameConstants::kDynamicAlignmentStateOffset)); 3165 JavaScriptFrameConstants::kDynamicAlignmentStateOffset));
3148 } 3166 }
3149 int no_frame_start = -1; 3167 int no_frame_start = -1;
3150 if (NeedsEagerFrame()) { 3168 if (NeedsEagerFrame()) {
3151 __ mov(esp, ebp); 3169 __ mov(esp, ebp);
3152 __ pop(ebp); 3170 __ pop(ebp);
(...skipping 1007 matching lines...) Expand 10 before | Expand all | Expand 10 after
4160 MathPowStub stub(MathPowStub::INTEGER); 4178 MathPowStub stub(MathPowStub::INTEGER);
4161 __ CallStub(&stub); 4179 __ CallStub(&stub);
4162 } else { 4180 } else {
4163 ASSERT(exponent_type.IsDouble()); 4181 ASSERT(exponent_type.IsDouble());
4164 MathPowStub stub(MathPowStub::DOUBLE); 4182 MathPowStub stub(MathPowStub::DOUBLE);
4165 __ CallStub(&stub); 4183 __ CallStub(&stub);
4166 } 4184 }
4167 } 4185 }
4168 4186
4169 4187
4170 void LCodeGen::DoRandom(LRandom* instr) {
4171 CpuFeatureScope scope(masm(), SSE2);
4172
4173 // Assert that the register size is indeed the size of each seed.
4174 static const int kSeedSize = sizeof(uint32_t);
4175 STATIC_ASSERT(kPointerSize == kSeedSize);
4176
4177 // Load native context
4178 Register global_object = ToRegister(instr->global_object());
4179 Register native_context = global_object;
4180 __ mov(native_context, FieldOperand(
4181 global_object, GlobalObject::kNativeContextOffset));
4182
4183 // Load state (FixedArray of the native context's random seeds)
4184 static const int kRandomSeedOffset =
4185 FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
4186 Register state = native_context;
4187 __ mov(state, FieldOperand(native_context, kRandomSeedOffset));
4188
4189 // Load state[0].
4190 Register state0 = ToRegister(instr->scratch());
4191 __ mov(state0, FieldOperand(state, ByteArray::kHeaderSize));
4192 // Load state[1].
4193 Register state1 = ToRegister(instr->scratch2());
4194 __ mov(state1, FieldOperand(state, ByteArray::kHeaderSize + kSeedSize));
4195
4196 // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
4197 Register scratch3 = ToRegister(instr->scratch3());
4198 __ movzx_w(scratch3, state0);
4199 __ imul(scratch3, scratch3, 18273);
4200 __ shr(state0, 16);
4201 __ add(state0, scratch3);
4202 // Save state[0].
4203 __ mov(FieldOperand(state, ByteArray::kHeaderSize), state0);
4204
4205 // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
4206 __ movzx_w(scratch3, state1);
4207 __ imul(scratch3, scratch3, 36969);
4208 __ shr(state1, 16);
4209 __ add(state1, scratch3);
4210 // Save state[1].
4211 __ mov(FieldOperand(state, ByteArray::kHeaderSize + kSeedSize), state1);
4212
4213 // Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
4214 Register random = state0;
4215 __ shl(random, 14);
4216 __ and_(state1, Immediate(0x3FFFF));
4217 __ add(random, state1);
4218
4219 // Convert 32 random bits in random to 0.(32 random bits) in a double
4220 // by computing:
4221 // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
4222 XMMRegister result = ToDoubleRegister(instr->result());
4223 XMMRegister scratch4 = double_scratch0();
4224 __ mov(scratch3, Immediate(0x49800000)); // 1.0 x 2^20 as single.
4225 __ movd(scratch4, scratch3);
4226 __ movd(result, random);
4227 __ cvtss2sd(scratch4, scratch4);
4228 __ xorps(result, scratch4);
4229 __ subsd(result, scratch4);
4230 }
4231
4232
4233 void LCodeGen::DoMathLog(LMathLog* instr) { 4188 void LCodeGen::DoMathLog(LMathLog* instr) {
4234 CpuFeatureScope scope(masm(), SSE2); 4189 CpuFeatureScope scope(masm(), SSE2);
4235 ASSERT(instr->value()->Equals(instr->result())); 4190 ASSERT(instr->value()->Equals(instr->result()));
4236 XMMRegister input_reg = ToDoubleRegister(instr->value()); 4191 XMMRegister input_reg = ToDoubleRegister(instr->value());
4237 XMMRegister xmm_scratch = double_scratch0(); 4192 XMMRegister xmm_scratch = double_scratch0();
4238 Label positive, done, zero; 4193 Label positive, done, zero;
4239 __ xorps(xmm_scratch, xmm_scratch); 4194 __ xorps(xmm_scratch, xmm_scratch);
4240 __ ucomisd(input_reg, xmm_scratch); 4195 __ ucomisd(input_reg, xmm_scratch);
4241 __ j(above, &positive, Label::kNear); 4196 __ j(above, &positive, Label::kNear);
4242 __ j(equal, &zero, Label::kNear); 4197 __ j(equal, &zero, Label::kNear);
(...skipping 2233 matching lines...) Expand 10 before | Expand all | Expand 10 after
6476 FixedArray::kHeaderSize - kPointerSize)); 6431 FixedArray::kHeaderSize - kPointerSize));
6477 __ bind(&done); 6432 __ bind(&done);
6478 } 6433 }
6479 6434
6480 6435
6481 #undef __ 6436 #undef __
6482 6437
6483 } } // namespace v8::internal 6438 } } // namespace v8::internal
6484 6439
6485 #endif // V8_TARGET_ARCH_IA32 6440 #endif // V8_TARGET_ARCH_IA32
OLDNEW
« no previous file with comments | « src/ia32/lithium-codegen-ia32.h ('k') | src/ia32/lithium-ia32.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698