| OLD | NEW |
| 1 // Copyright 2008 the V8 project authors. All rights reserved. | 1 // Copyright 2008 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 154 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 165 for (int i = 0; i < RegisterFile::kNumRegisters; i++) { | 165 for (int i = 0; i < RegisterFile::kNumRegisters; i++) { |
| 166 int count = frame_registers_.count(i); | 166 int count = frame_registers_.count(i); |
| 167 if (count < min_count && count == cgen_->allocator()->count(i)) { | 167 if (count < min_count && count == cgen_->allocator()->count(i)) { |
| 168 min_count = count; | 168 min_count = count; |
| 169 best_register_code = i; | 169 best_register_code = i; |
| 170 } | 170 } |
| 171 } | 171 } |
| 172 | 172 |
| 173 if (best_register_code != no_reg.code_) { | 173 if (best_register_code != no_reg.code_) { |
| 174 // Spill all occurrences of the register. There are min_count | 174 // Spill all occurrences of the register. There are min_count |
| 175 // occurrences, stop when we've spilled them all to avoid syncing | 175 // occurrences, stop when we have spilled them all to avoid syncing |
| 176 // elements unnecessarily. | 176 // elements unnecessarily. |
| 177 int i = 0; | 177 int i = 0; |
| 178 while (min_count > 0) { | 178 while (min_count > 0) { |
| 179 ASSERT(i < elements_.length()); | 179 ASSERT(i < elements_.length()); |
| 180 if (elements_[i].is_register() && | 180 if (elements_[i].is_register() && |
| 181 elements_[i].reg().code() == best_register_code) { | 181 elements_[i].reg().code() == best_register_code) { |
| 182 // Found an instance of the best_register being used in the frame. | 182 // Found an instance of the best_register being used in the frame. |
| 183 // Spill it. | 183 // Spill it. |
| 184 SpillElementAt(i); | 184 SpillElementAt(i); |
| 185 min_count--; | 185 min_count--; |
| 186 } else { | |
| 187 if (i > stack_pointer_) { | |
| 188 // Make sure to materialize elements on the virtual frame in | |
| 189 // memory. We rely on this to spill occurrences of the register | |
| 190 // lying above the current virtual stack pointer. | |
| 191 SyncElementAt(i); | |
| 192 } | |
| 193 } | 186 } |
| 187 i++; |
| 194 } | 188 } |
| 195 } | 189 } |
| 196 | 190 |
| 191 ASSERT(cgen_->allocator()->count(best_register_code) == 0); |
| 197 Register result = { best_register_code }; | 192 Register result = { best_register_code }; |
| 198 return result; | 193 return result; |
| 199 } | 194 } |
| 200 | 195 |
| 201 | 196 |
| 202 // Make the type of the element at a given index be MEMORY. We can only | 197 // Make the type of the element at a given index be MEMORY. We can only |
| 203 // allocate space in the actual frame for the virtual element immediately | 198 // allocate space in the actual frame for the virtual element immediately |
| 204 // above the stack pointer. | 199 // above the stack pointer. |
| 205 void VirtualFrame::SpillElementAt(int index) { | 200 void VirtualFrame::SpillElementAt(int index) { |
| 201 if (index > stack_pointer_ + 1) { |
| 202 SyncRange(stack_pointer_ + 1, index); |
| 203 } |
| 206 SyncElementAt(index); | 204 SyncElementAt(index); |
| 207 // The element is now in memory. | 205 // The element is now in memory. |
| 208 if (elements_[index].is_register()) { | 206 if (elements_[index].is_register()) { |
| 209 Unuse(elements_[index].reg()); | 207 Unuse(elements_[index].reg()); |
| 210 } | 208 } |
| 211 elements_[index] = FrameElement(); | 209 elements_[index] = FrameElement(); |
| 212 } | 210 } |
| 213 | 211 |
| 214 | 212 |
| 215 // Clear the dirty bits for all elements. | 213 // Clear the dirty bits for the range of elements in [begin, end). |
| 216 void VirtualFrame::SyncAll() { | 214 void VirtualFrame::SyncRange(int begin, int end) { |
| 217 for (int i = 0; i < elements_.length(); i++) { | 215 ASSERT(begin >= 0); |
| 216 ASSERT(end <= elements_.length()); |
| 217 for (int i = begin; i < end; i++) { |
| 218 SyncElementAt(i); | 218 SyncElementAt(i); |
| 219 } | 219 } |
| 220 } | 220 } |
| 221 | 221 |
| 222 | |
| 223 // Make the type of all elements be MEMORY. | 222 // Make the type of all elements be MEMORY. |
| 224 void VirtualFrame::SpillAll() { | 223 void VirtualFrame::SpillAll() { |
| 225 for (int i = 0; i < elements_.length(); i++) { | 224 for (int i = 0; i < elements_.length(); i++) { |
| 226 SpillElementAt(i); | 225 SpillElementAt(i); |
| 227 } | 226 } |
| 228 } | 227 } |
| 229 | 228 |
| 230 | 229 |
| 231 void VirtualFrame::PrepareForCall(int frame_arg_count) { | 230 void VirtualFrame::PrepareForCall(int frame_arg_count) { |
| 232 ASSERT(height() >= frame_arg_count); | 231 ASSERT(height() >= frame_arg_count); |
| (...skipping 13 matching lines...) Expand all Loading... |
| 246 } else { | 245 } else { |
| 247 SyncElementAt(i); | 246 SyncElementAt(i); |
| 248 } | 247 } |
| 249 } | 248 } |
| 250 | 249 |
| 251 // Forget the frame elements that will be popped by the call. | 250 // Forget the frame elements that will be popped by the call. |
| 252 Forget(frame_arg_count); | 251 Forget(frame_arg_count); |
| 253 } | 252 } |
| 254 | 253 |
| 255 | 254 |
| 256 void VirtualFrame::EnsureMergable() { | 255 bool VirtualFrame::IsMergable() { |
| 257 // We cannot merge to a frame that has constants as elements, because an | 256 // We cannot merge to a frame that has constants as elements, because an |
| 258 // arbitrary frame might not have constants in those locations. | 257 // arbitrary frame may not have the same constants at those locations. We |
| 259 // | 258 // cannot merge to a frame that has registers that are multiply referenced |
| 260 // We cannot merge to a frame that has registers as elements because we | 259 // in the frame, because an arbitrary frame might not exhibit the same |
| 261 // haven't implemented merging for such frames yet. | 260 // sharing. Thus, a frame is mergable if all elements are in memory or a |
| 262 SpillAll(); | 261 // register and no register is multiply referenced. |
| 262 for (int i = 0; i < RegisterFile::kNumRegisters; i++) { |
| 263 if (frame_registers_.count(i) > 1) { |
| 264 return false; |
| 265 } |
| 266 } |
| 267 |
| 268 for (int i = 0; i < elements_.length(); i++) { |
| 269 if (!elements_[i].is_memory() && !elements_[i].is_register()) { |
| 270 return false; |
| 271 } |
| 272 } |
| 273 |
| 274 return true; |
| 275 } |
| 276 |
| 277 |
| 278 void VirtualFrame::MakeMergable() { |
| 279 Comment cmnt(masm_, "[ Make frame mergable"); |
| 280 // Remove constants from the frame and ensure that no registers are |
| 281 // multiply referenced within the frame. Allocate elements to their |
| 282 // new locations from the top down so that the topmost elements have |
| 283 // a chance to be in registers, then fill them into memory from the |
| 284 // bottom up. (NB: Currently when spilling registers that are |
| 285 // multiply referenced, it is the lowermost occurrence that gets to |
| 286 // stay in the register.) |
| 287 FrameElement* new_elements = new FrameElement[elements_.length()]; |
| 288 FrameElement memory_element; |
| 289 for (int i = elements_.length() - 1; i >= 0; i--) { |
| 290 FrameElement element = elements_[i]; |
| 291 if (element.is_constant() || |
| 292 (element.is_register() && |
| 293 frame_registers_.count(element.reg().code()) > 1)) { |
| 294 // A simple strategy is to locate these elements in memory if they are |
| 295 // synced (avoiding a spill right now) and otherwise to prefer a |
| 296 // register for them. |
| 297 if (element.is_synced()) { |
| 298 new_elements[i] = memory_element; |
| 299 } else { |
| 300 // This code path is currently not triggered. UNIMPLEMENTED is |
| 301 // temporarily used to trap when it becomes active so we can test |
| 302 // it. |
| 303 UNIMPLEMENTED(); |
| 304 Register reg = cgen_->allocator()->AllocateWithoutSpilling(); |
| 305 if (reg.is(no_reg)) { |
| 306 new_elements[i] = memory_element; |
| 307 } else { |
| 308 FrameElement register_element(reg, FrameElement::NOT_SYNCED); |
| 309 new_elements[i] = register_element; |
| 310 } |
| 311 } |
| 312 |
| 313 // We have not moved register references, but record that we will so |
| 314 // that we do not unnecessarily spill the last reference within the |
| 315 // frame. |
| 316 if (element.is_register()) { |
| 317 Unuse(element.reg()); |
| 318 } |
| 319 } else { |
| 320 // The element is in memory or a singly-frame-referenced register. |
| 321 new_elements[i] = element; |
| 322 } |
| 323 } |
| 324 |
| 325 // Perform the moves. |
| 326 for (int i = 0; i < elements_.length(); i++) { |
| 327 FrameElement source = elements_[i]; |
| 328 FrameElement target = new_elements[i]; |
| 329 ASSERT(target.is_register() || target.is_memory()); |
| 330 if (target.is_register()) { |
| 331 if (source.is_constant()) { |
| 332 // The allocator's register reference count was incremented by |
| 333 // register allocation, so we only record the new reference in the |
| 334 // frame. The frame now owns the reference. |
| 335 frame_registers_.Use(target.reg()); |
| 336 __ Set(target.reg(), Immediate(source.handle())); |
| 337 } else if (source.is_register() && !source.reg().is(target.reg())) { |
| 338 // The frame now owns the reference. |
| 339 frame_registers_.Use(target.reg()); |
| 340 __ mov(target.reg(), source.reg()); |
| 341 } |
| 342 elements_[i] = target; |
| 343 } else { |
| 344 // The target is memory. |
| 345 SpillElementAt(i); |
| 346 } |
| 347 } |
| 348 |
| 349 delete[] new_elements; |
| 263 } | 350 } |
| 264 | 351 |
| 265 | 352 |
| 266 void VirtualFrame::MergeTo(VirtualFrame* expected) { | 353 void VirtualFrame::MergeTo(VirtualFrame* expected) { |
| 354 Comment cmnt(masm_, "[ Merge frame"); |
| 267 ASSERT(cgen_ == expected->cgen_); | 355 ASSERT(cgen_ == expected->cgen_); |
| 268 ASSERT(masm_ == expected->masm_); | 356 ASSERT(masm_ == expected->masm_); |
| 269 ASSERT(elements_.length() == expected->elements_.length()); | 357 ASSERT(elements_.length() == expected->elements_.length()); |
| 270 ASSERT(parameter_count_ == expected->parameter_count_); | 358 ASSERT(parameter_count_ == expected->parameter_count_); |
| 271 ASSERT(local_count_ == expected->local_count_); | 359 ASSERT(local_count_ == expected->local_count_); |
| 272 ASSERT(frame_pointer_ == expected->frame_pointer_); | 360 ASSERT(frame_pointer_ == expected->frame_pointer_); |
| 273 | 361 |
| 274 // Mergable frames do not have constants and they do not (currently) have | 362 // Mergable frames have all elements in locations, either memory or |
| 275 // registers. They are always fully spilled, so the only thing needed to | 363 // register. We thus have a series of to-memory and to-register moves. |
| 276 // make this frame match the expected one is to spill everything. | 364 // First perform all to-memory moves, register-to-memory moves because |
| 277 // | 365 // they can free registers and constant-to-memory moves because they do |
| 278 // TODO(): Implement a non-stupid way of merging frames. | 366 // not use registers. |
| 279 SpillAll(); | 367 for (int i = 0; i < elements_.length(); i++) { |
| 368 FrameElement source = elements_[i]; |
| 369 FrameElement target = expected->elements_[i]; |
| 370 if (target.is_memory() && !source.is_memory()) { |
| 371 ASSERT(source.is_register() || source.is_constant()); |
| 372 SpillElementAt(i); |
| 373 } |
| 374 } |
| 375 |
| 376 // Then register-to-register moves, not yet implemented. |
| 377 for (int i = 0; i < elements_.length(); i++) { |
| 378 FrameElement source = elements_[i]; |
| 379 FrameElement target = expected->elements_[i]; |
| 380 ASSERT(!source.is_register() || !target.is_register()); |
| 381 } |
| 382 |
| 383 // Finally, constant-to-register and memory-to-register. We do these from |
| 384 // the top down so we can use pop for memory-to-register moves above the |
| 385 // expected stack pointer. |
| 386 for (int i = elements_.length() - 1; i >= 0; i--) { |
| 387 FrameElement source = elements_[i]; |
| 388 FrameElement target = expected->elements_[i]; |
| 389 if (target.is_register() && !source.is_register()) { |
| 390 ASSERT(source.is_constant() || source.is_memory()); |
| 391 if (source.is_memory()) { |
| 392 ASSERT(i <= stack_pointer_); |
| 393 if (i <= expected->stack_pointer_) { |
| 394 // Elements below both stack pointers can just be moved. |
| 395 __ mov(target.reg(), Operand(ebp, fp_relative(i))); |
| 396 } else { |
| 397 // Elements below the current stack pointer but above the expected |
| 398 // one can be popped, but first we may have to adjust the stack |
| 399 // pointer downward. |
| 400 if (stack_pointer_ > i + 1) { |
| 401 #ifdef DEBUG |
| 402 // In debug builds check to ensure this is safe. |
| 403 for (int j = stack_pointer_; j > i; j--) { |
| 404 ASSERT(!elements_[j].is_memory()); |
| 405 } |
| 406 #endif |
| 407 stack_pointer_ = i + 1; |
| 408 __ add(Operand(esp), |
| 409 Immediate((stack_pointer_ - i) * kPointerSize)); |
| 410 } |
| 411 stack_pointer_--; |
| 412 __ pop(target.reg()); |
| 413 } |
| 414 Use(target.reg()); |
| 415 } else if (source.is_constant()) { |
| 416 // Not yet implemented. When done, code in common with the |
| 417 // memory-to-register just above case can be factored out. |
| 418 UNIMPLEMENTED(); |
| 419 } |
| 420 elements_[i] = target; |
| 421 } |
| 422 } |
| 280 | 423 |
| 281 ASSERT(stack_pointer_ == expected->stack_pointer_); | 424 ASSERT(stack_pointer_ == expected->stack_pointer_); |
| 282 } | 425 } |
| 283 | 426 |
| 284 | 427 |
| 285 void VirtualFrame::Enter() { | 428 void VirtualFrame::Enter() { |
| 286 // Registers live on entry: esp, ebp, esi, edi. | 429 // Registers live on entry: esp, ebp, esi, edi. |
| 287 Comment cmnt(masm_, "[ Enter JS frame"); | 430 Comment cmnt(masm_, "[ Enter JS frame"); |
| 288 EmitPush(ebp); | 431 EmitPush(ebp); |
| 289 | 432 |
| (...skipping 33 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 323 EmitPop(ebp); | 466 EmitPop(ebp); |
| 324 } | 467 } |
| 325 | 468 |
| 326 | 469 |
| 327 void VirtualFrame::AllocateStackSlots(int count) { | 470 void VirtualFrame::AllocateStackSlots(int count) { |
| 328 ASSERT(height() == 0); | 471 ASSERT(height() == 0); |
| 329 local_count_ = count; | 472 local_count_ = count; |
| 330 | 473 |
| 331 if (count > 0) { | 474 if (count > 0) { |
| 332 Comment cmnt(masm_, "[ Allocate space for locals"); | 475 Comment cmnt(masm_, "[ Allocate space for locals"); |
| 333 // The locals are constants (the undefined value), but we sync them with | 476 // The locals are initialized to a constant (the undefined value), but |
| 334 // the actual frame to allocate space for spilling them. | 477 // we sync them with the actual frame to allocate space for spilling |
| 335 SyncAll(); | 478 // them later. First sync everything above the stack pointer so we can |
| 479 // use pushes to allocate and initialize the locals. |
| 480 SyncRange(stack_pointer_ + 1, elements_.length()); |
| 336 Handle<Object> undefined = Factory::undefined_value(); | 481 Handle<Object> undefined = Factory::undefined_value(); |
| 337 FrameElement initial_value(undefined, FrameElement::SYNCED); | 482 FrameElement initial_value(undefined, FrameElement::SYNCED); |
| 338 Register tmp = cgen_->allocator()->Allocate(); | 483 Register tmp = cgen_->allocator()->Allocate(); |
| 339 __ Set(tmp, Immediate(undefined)); | 484 __ Set(tmp, Immediate(undefined)); |
| 340 for (int i = 0; i < count; i++) { | 485 for (int i = 0; i < count; i++) { |
| 341 elements_.Add(initial_value); | 486 elements_.Add(initial_value); |
| 342 stack_pointer_++; | 487 stack_pointer_++; |
| 343 __ push(tmp); | 488 __ push(tmp); |
| 344 } | 489 } |
| 345 cgen_->allocator()->Unuse(tmp); | 490 cgen_->allocator()->Unuse(tmp); |
| (...skipping 15 matching lines...) Expand all Loading... |
| 361 elements_[index] = top; | 506 elements_[index] = top; |
| 362 | 507 |
| 363 if (top.is_memory()) { | 508 if (top.is_memory()) { |
| 364 // Emit code to store memory values into the required frame slot. | 509 // Emit code to store memory values into the required frame slot. |
| 365 Register temp = cgen_->allocator()->Allocate(); | 510 Register temp = cgen_->allocator()->Allocate(); |
| 366 ASSERT(!temp.is(no_reg)); | 511 ASSERT(!temp.is(no_reg)); |
| 367 __ mov(temp, Top()); | 512 __ mov(temp, Top()); |
| 368 __ mov(Operand(ebp, fp_relative(index)), temp); | 513 __ mov(Operand(ebp, fp_relative(index)), temp); |
| 369 cgen_->allocator()->Unuse(temp); | 514 cgen_->allocator()->Unuse(temp); |
| 370 } else { | 515 } else { |
| 371 // We haven't actually written the value to memory. | 516 // We have not actually written the value to memory. |
| 372 elements_[index].clear_sync(); | 517 elements_[index].clear_sync(); |
| 373 | 518 |
| 374 if (top.is_register()) { | 519 if (top.is_register()) { |
| 375 // Establish another frame-internal reference to the register. | 520 // Establish another frame-internal reference to the register. |
| 376 Use(top.reg()); | 521 Use(top.reg()); |
| 377 } | 522 } |
| 378 } | 523 } |
| 379 } | 524 } |
| 380 | 525 |
| 381 | 526 |
| (...skipping 128 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 510 return false; | 655 return false; |
| 511 } | 656 } |
| 512 } | 657 } |
| 513 return true; | 658 return true; |
| 514 } | 659 } |
| 515 #endif | 660 #endif |
| 516 | 661 |
| 517 #undef __ | 662 #undef __ |
| 518 | 663 |
| 519 } } // namespace v8::internal | 664 } } // namespace v8::internal |
| OLD | NEW |