| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 307 matching lines...) |
| 318 } | 318 } |
| 319 __ movq(rbx, profiling_counter_, RelocInfo::EMBEDDED_OBJECT); | 319 __ movq(rbx, profiling_counter_, RelocInfo::EMBEDDED_OBJECT); |
| 320 __ movq(kScratchRegister, | 320 __ movq(kScratchRegister, |
| 321 reinterpret_cast<uint64_t>(Smi::FromInt(reset_value)), | 321 reinterpret_cast<uint64_t>(Smi::FromInt(reset_value)), |
| 322 RelocInfo::NONE); | 322 RelocInfo::NONE); |
| 323 __ movq(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset), | 323 __ movq(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset), |
| 324 kScratchRegister); | 324 kScratchRegister); |
| 325 } | 325 } |
| 326 | 326 |
| 327 | 327 |
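EmitProfilingCounterReset above writes the raw bits of Smi::FromInt(reset_value) straight into the property cell as a 64-bit immediate. Below is a minimal runnable sketch of the Smi encoding this relies on, assuming V8's standard x64 layout (the 32-bit payload in the upper half of the word, tag bit zero); SmiFromInt and SmiToInt are illustrative helpers, not V8 API:

    #include <cassert>
    #include <cstdint>

    // Assumed x64 Smi layout: value in bits 32..63, low (tag) bit zero,
    // so reinterpret_cast<uint64_t>(Smi::FromInt(v)) is just v << 32.
    uint64_t SmiFromInt(int32_t value) {
      return static_cast<uint64_t>(static_cast<uint32_t>(value)) << 32;
    }

    int32_t SmiToInt(uint64_t smi) {
      // The upper 32 bits hold the payload; reinterpreting them as
      // int32_t restores the sign.
      return static_cast<int32_t>(smi >> 32);
    }

    int main() {
      assert(SmiToInt(SmiFromInt(1000)) == 1000);
      assert(SmiToInt(SmiFromInt(-7)) == -7);
      assert((SmiFromInt(1000) & 1) == 0);  // tag bit clear: still a Smi
      return 0;
    }

Storing the reset value this way keeps the counter a valid tagged object, so the GC and the decrement path can treat the cell contents uniformly.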
| 328 void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt, | 328 void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt, |
| 329 Label* back_edge_target) { | 329 Label* back_edge_target) { |
| 330 Comment cmnt(masm_, "[ Stack check"); | 330 Comment cmnt(masm_, "[ Back edge bookkeeping"); |
| 331 Label ok; | 331 Label ok; |
| 332 | 332 |
| 333 if (FLAG_count_based_interrupts) { | 333 int weight = 1; |
| 334 int weight = 1; | 334 if (FLAG_weighted_back_edges) { |
| 335 if (FLAG_weighted_back_edges) { | 335 ASSERT(back_edge_target->is_bound()); |
| 336 ASSERT(back_edge_target->is_bound()); | 336 int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target); |
| 337 int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target); | 337 weight = Min(kMaxBackEdgeWeight, |
| 338 weight = Min(kMaxBackEdgeWeight, | 338 Max(1, distance / kBackEdgeDistanceUnit)); |
| 339 Max(1, distance / kBackEdgeDistanceUnit)); | |
| 340 } | |
| 341 EmitProfilingCounterDecrement(weight); | |
| 342 __ j(positive, &ok, Label::kNear); | |
| 343 InterruptStub stub; | |
| 344 __ CallStub(&stub); | |
| 345 } else { | |
| 346 __ CompareRoot(rsp, Heap::kStackLimitRootIndex); | |
| 347 __ j(above_equal, &ok, Label::kNear); | |
| 348 StackCheckStub stub; | |
| 349 __ CallStub(&stub); | |
| 350 } | 339 } |
| 340 EmitProfilingCounterDecrement(weight); |
| 341 __ j(positive, &ok, Label::kNear); |
| 342 InterruptStub stub; |
| 343 __ CallStub(&stub); |
| 351 | 344 |
| 352 // Record a mapping of this PC offset to the OSR id. This is used to find | 345 // Record a mapping of this PC offset to the OSR id. This is used to find |
| 353 // the AST id from the unoptimized code in order to use it as a key into | 346 // the AST id from the unoptimized code in order to use it as a key into |
| 354 // the deoptimization input data found in the optimized code. | 347 // the deoptimization input data found in the optimized code. |
| 355 RecordStackCheck(stmt->OsrEntryId()); | 348 RecordBackEdge(stmt->OsrEntryId()); |
| 356 | 349 |
| 357 // Loop stack checks can be patched to perform on-stack replacement. In | 350 // Loop stack checks can be patched to perform on-stack replacement. In |
| 358 // order to decide whether or not to perform OSR we embed the loop depth | 351 // order to decide whether or not to perform OSR we embed the loop depth |
| 359 // in a test instruction after the call so we can extract it from the OSR | 352 // in a test instruction after the call so we can extract it from the OSR |
| 360 // builtin. | 353 // builtin. |
| 361 ASSERT(loop_depth() > 0); | 354 ASSERT(loop_depth() > 0); |
| 362 __ testl(rax, Immediate(Min(loop_depth(), Code::kMaxLoopNestingMarker))); | 355 __ testl(rax, Immediate(Min(loop_depth(), Code::kMaxLoopNestingMarker))); |
| 363 | 356 |
| 364 if (FLAG_count_based_interrupts) { | 357 EmitProfilingCounterReset(); |
| 365 EmitProfilingCounterReset(); | |
| 366 } | |
| 367 | 358 |
| 368 __ bind(&ok); | 359 __ bind(&ok); |
| 369 PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS); | 360 PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS); |
| 370 // Record a mapping of the OSR id to this PC. This is used if the OSR | 361 // Record a mapping of the OSR id to this PC. This is used if the OSR |
| 371 // entry becomes the target of a bailout. We don't expect it to be, but | 362 // entry becomes the target of a bailout. We don't expect it to be, but |
| 372 // we want it to work if it is. | 363 // we want it to work if it is. |
| 373 PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS); | 364 PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS); |
| 374 } | 365 } |
| 375 | 366 |
| 376 | 367 |
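The new EmitBackEdgeBookkeeping scales the profiling-counter decrement by how much code sits between the back edge target and the branch, so heavier loop bodies reach the InterruptStub in fewer iterations. A runnable sketch of that weight formula, with kMaxBackEdgeWeight and kBackEdgeDistanceUnit as assumed placeholder values (the real constants are defined elsewhere in full-codegen, not in this diff):

    #include <algorithm>
    #include <cstdio>

    // Illustrative stand-ins for constants not shown in this diff.
    static const int kMaxBackEdgeWeight = 127;
    static const int kBackEdgeDistanceUnit = 162;

    // Mirrors the computation in EmitBackEdgeBookkeeping: the weight
    // grows with the code distance, clamped to [1, kMaxBackEdgeWeight].
    int BackEdgeWeight(int distance) {
      return std::min(kMaxBackEdgeWeight,
                      std::max(1, distance / kBackEdgeDistanceUnit));
    }

    int main() {
      const int distances[] = {8, 400, 4000, 40000};
      for (int distance : distances) {
        std::printf("distance=%5d -> weight=%d\n",
                    distance, BackEdgeWeight(distance));
      }
      return 0;
    }

The testl of rax against the clamped loop depth that follows the stub call embeds the nesting level in the instruction stream itself, which is how the OSR machinery later recovers it from the unoptimized code, as the comment at line 350 of the new file notes.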
| (...skipping 837 matching lines...) |
| 1214 } | 1205 } |
| 1215 | 1206 |
| 1216 // Generate code for the body of the loop. | 1207 // Generate code for the body of the loop. |
| 1217 Visit(stmt->body()); | 1208 Visit(stmt->body()); |
| 1218 | 1209 |
| 1219 // Generate code for going to the next element by incrementing the | 1210 // Generate code for going to the next element by incrementing the |
| 1220 // index (smi) stored on top of the stack. | 1211 // index (smi) stored on top of the stack. |
| 1221 __ bind(loop_statement.continue_label()); | 1212 __ bind(loop_statement.continue_label()); |
| 1222 __ SmiAddConstant(Operand(rsp, 0 * kPointerSize), Smi::FromInt(1)); | 1213 __ SmiAddConstant(Operand(rsp, 0 * kPointerSize), Smi::FromInt(1)); |
| 1223 | 1214 |
| 1224 EmitStackCheck(stmt, &loop); | 1215 EmitBackEdgeBookkeeping(stmt, &loop); |
| 1225 __ jmp(&loop); | 1216 __ jmp(&loop); |
| 1226 | 1217 |
| 1227 // Remove the pointers stored on the stack. | 1218 // Remove the pointers stored on the stack. |
| 1228 __ bind(loop_statement.break_label()); | 1219 __ bind(loop_statement.break_label()); |
| 1229 __ addq(rsp, Immediate(5 * kPointerSize)); | 1220 __ addq(rsp, Immediate(5 * kPointerSize)); |
| 1230 | 1221 |
| 1231 // Exit and decrement the loop depth. | 1222 // Exit and decrement the loop depth. |
| 1232 PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS); | 1223 PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS); |
| 1233 __ bind(&exit); | 1224 __ bind(&exit); |
| 1234 decrement_loop_depth(); | 1225 decrement_loop_depth(); |
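The SmiAddConstant at line 1213 of the new file can bump the for-in index in place without untagging: under the Smi layout sketched earlier, adding two tagged values yields the tagged sum, since the tag bits are zero. A short sketch, reusing the same assumed helpers (not V8 API):

    #include <cassert>
    #include <cstdint>

    // Same assumed x64 Smi layout as in the earlier sketch.
    uint64_t SmiFromInt(int32_t value) {
      return static_cast<uint64_t>(static_cast<uint32_t>(value)) << 32;
    }
    int32_t SmiToInt(uint64_t smi) {
      return static_cast<int32_t>(smi >> 32);
    }

    int main() {
      // The slot at Operand(rsp, 0) holds a tagged Smi index; adding
      // the tagged constant Smi(1) gives the tagged index + 1 directly,
      // which is what SmiAddConstant does to the memory operand.
      uint64_t index_slot = SmiFromInt(10);
      index_slot += SmiFromInt(1);  // smi add, no untag/retag round trip
      assert(SmiToInt(index_slot) == 11);
      return 0;
    }

On loop exit, the single addq of 5 * kPointerSize drops all of the loop's stack-allocated bookkeeping in one step, matching the "Remove the pointers stored on the stack" comment above.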
| (...skipping 3292 matching lines...) |
| 4527 *context_length = 0; | 4518 *context_length = 0; |
| 4528 return previous_; | 4519 return previous_; |
| 4529 } | 4520 } |
| 4530 | 4521 |
| 4531 | 4522 |
| 4532 #undef __ | 4523 #undef __ |
| 4533 | 4524 |
| 4534 } } // namespace v8::internal | 4525 } } // namespace v8::internal |
| 4535 | 4526 |
| 4536 #endif // V8_TARGET_ARCH_X64 | 4527 #endif // V8_TARGET_ARCH_X64 |