OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 311 matching lines...)
322 if (info_->ShouldSelfOptimize() && !FLAG_retry_self_opt) { | 322 if (info_->ShouldSelfOptimize() && !FLAG_retry_self_opt) { |
323 // Self-optimization is a one-off thing: if it fails, don't try again. | 323 // Self-optimization is a one-off thing: if it fails, don't try again. |
324 reset_value = Smi::kMaxValue; | 324 reset_value = Smi::kMaxValue; |
325 } | 325 } |
326 __ mov(ebx, Immediate(profiling_counter_)); | 326 __ mov(ebx, Immediate(profiling_counter_)); |
327 __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset), | 327 __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset), |
328 Immediate(Smi::FromInt(reset_value))); | 328 Immediate(Smi::FromInt(reset_value))); |
329 } | 329 } |
330 | 330 |
331 | 331 |
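Note on the reset path above: EmitProfilingCounterReset reloads the counter cell and, when self-optimization has already failed and retries are disabled, pins it at Smi::kMaxValue so the back-edge decrement can never trigger it again. A minimal standalone C++ model of that policy (the constants and the budget value below are stand-ins, not V8's declarations):

#include <cstdio>

namespace sketch {

const int kSmiMaxValue = 0x3fffffff;  // stand-in for Smi::kMaxValue
const int kDefaultBudget = 0x1800;    // hypothetical interrupt budget

struct ProfilingCounter {
  int value;
  // Mirrors EmitProfilingCounterReset: choose the reset value based on
  // whether self-optimization is still allowed to retry.
  void Reset(bool should_self_optimize, bool retry_self_opt) {
    int reset_value = kDefaultBudget;
    if (should_self_optimize && !retry_self_opt) {
      // Self-optimization is a one-off thing: if it fails, don't try again.
      reset_value = kSmiMaxValue;
    }
    value = reset_value;
  }
};

}  // namespace sketch

int main() {
  sketch::ProfilingCounter counter{0};
  counter.Reset(/*should_self_optimize=*/true, /*retry_self_opt=*/false);
  std::printf("counter after failed self-opt: %d\n", counter.value);
  return 0;
}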
332 void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt, | 332 void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt, |
333 Label* back_edge_target) { | 333 Label* back_edge_target) { |
334 Comment cmnt(masm_, "[ Stack check"); | 334 Comment cmnt(masm_, "[ Back edge bookkeeping"); |
335 Label ok; | 335 Label ok; |
336 | 336 |
337 if (FLAG_count_based_interrupts) { | 337 int weight = 1; |
338 int weight = 1; | 338 if (FLAG_weighted_back_edges) { |
339 if (FLAG_weighted_back_edges) { | 339 ASSERT(back_edge_target->is_bound()); |
340 ASSERT(back_edge_target->is_bound()); | 340 int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target); |
341 int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target); | 341 weight = Min(kMaxBackEdgeWeight, |
342 weight = Min(kMaxBackEdgeWeight, | 342 Max(1, distance / kBackEdgeDistanceUnit)); |
343 Max(1, distance / kBackEdgeDistanceUnit)); | |
344 } | |
345 EmitProfilingCounterDecrement(weight); | |
346 __ j(positive, &ok, Label::kNear); | |
347 InterruptStub stub; | |
348 __ CallStub(&stub); | |
349 } else { | |
350 // Count based interrupts happen often enough when they are enabled | |
351 // that the additional stack checks are not necessary (they would | |
352 // only check for interrupts). | |
353 ExternalReference stack_limit = | |
354 ExternalReference::address_of_stack_limit(isolate()); | |
355 __ cmp(esp, Operand::StaticVariable(stack_limit)); | |
356 __ j(above_equal, &ok, Label::kNear); | |
357 StackCheckStub stub; | |
358 __ CallStub(&stub); | |
359 } | 343 } |
| 344 EmitProfilingCounterDecrement(weight); |
| 345 __ j(positive, &ok, Label::kNear); |
| 346 InterruptStub stub; |
| 347 __ CallStub(&stub); |
360 | 348 |
361 // Record a mapping of this PC offset to the OSR id. This is used to find | 349 // Record a mapping of this PC offset to the OSR id. This is used to find |
362 // the AST id from the unoptimized code in order to use it as a key into | 350 // the AST id from the unoptimized code in order to use it as a key into |
363 // the deoptimization input data found in the optimized code. | 351 // the deoptimization input data found in the optimized code. |
364 RecordStackCheck(stmt->OsrEntryId()); | 352 RecordBackEdge(stmt->OsrEntryId()); |
365 | 353 |
366 // Loop stack checks can be patched to perform on-stack replacement. In | 354 // Loop stack checks can be patched to perform on-stack replacement. In |
367 // order to decide whether or not to perform OSR we embed the loop depth | 355 // order to decide whether or not to perform OSR we embed the loop depth |
368 // in a test instruction after the call so we can extract it from the OSR | 356 // in a test instruction after the call so we can extract it from the OSR |
369 // builtin. | 357 // builtin. |
370 ASSERT(loop_depth() > 0); | 358 ASSERT(loop_depth() > 0); |
371 __ test(eax, Immediate(Min(loop_depth(), Code::kMaxLoopNestingMarker))); | 359 __ test(eax, Immediate(Min(loop_depth(), Code::kMaxLoopNestingMarker))); |
372 | 360 |
373 if (FLAG_count_based_interrupts) { | 361 EmitProfilingCounterReset(); |
374 EmitProfilingCounterReset(); | |
375 } | |
376 | 362 |
377 __ bind(&ok); | 363 __ bind(&ok); |
378 PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS); | 364 PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS); |
379 // Record a mapping of the OSR id to this PC. This is used if the OSR | 365 // Record a mapping of the OSR id to this PC. This is used if the OSR |
380 // entry becomes the target of a bailout. We don't expect it to be, but | 366 // entry becomes the target of a bailout. We don't expect it to be, but |
381 // we want it to work if it is. | 367 // we want it to work if it is. |
382 PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS); | 368 PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS); |
383 } | 369 } |
384 | 370 |
385 | 371 |
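Note on the hunk above: the new EmitBackEdgeBookkeeping unconditionally uses the counter-based InterruptStub path and drops the explicit stack-limit comparison, since count-based interrupts fire often enough to subsume it. The weight computation scales the counter decrement by back-edge length, so large loop bodies consume the interrupt budget at roughly the same rate per byte of executed code as small ones. A self-contained sketch of just that computation (the two constants are illustrative placeholders, not V8's actual values):

#include <algorithm>
#include <cstdio>
#include <initializer_list>

const int kMaxBackEdgeWeight = 127;     // illustrative cap, not V8's value
const int kBackEdgeDistanceUnit = 100;  // illustrative bytes per weight unit

// distance: bytes of code emitted since the back edge target was bound.
int BackEdgeWeight(int distance) {
  return std::min(kMaxBackEdgeWeight,
                  std::max(1, distance / kBackEdgeDistanceUnit));
}

int main() {
  for (int distance : {0, 50, 500, 50000}) {
    std::printf("distance %6d -> weight %3d\n", distance,
                BackEdgeWeight(distance));
  }
  return 0;
}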
(...skipping 807 matching lines...)
1193 } | 1179 } |
1194 | 1180 |
1195 // Generate code for the body of the loop. | 1181 // Generate code for the body of the loop. |
1196 Visit(stmt->body()); | 1182 Visit(stmt->body()); |
1197 | 1183 |
1198 // Generate code for going to the next element by incrementing the | 1184 // Generate code for going to the next element by incrementing the |
1199 // index (smi) stored on top of the stack. | 1185 // index (smi) stored on top of the stack. |
1200 __ bind(loop_statement.continue_label()); | 1186 __ bind(loop_statement.continue_label()); |
1201 __ add(Operand(esp, 0 * kPointerSize), Immediate(Smi::FromInt(1))); | 1187 __ add(Operand(esp, 0 * kPointerSize), Immediate(Smi::FromInt(1))); |
1202 | 1188 |
1203 EmitStackCheck(stmt, &loop); | 1189 EmitBackEdgeBookkeeping(stmt, &loop); |
1204 __ jmp(&loop); | 1190 __ jmp(&loop); |
1205 | 1191 |
1206 // Remove the pointers stored on the stack. | 1192 // Remove the pointers stored on the stack. |
1207 __ bind(loop_statement.break_label()); | 1193 __ bind(loop_statement.break_label()); |
1208 __ add(esp, Immediate(5 * kPointerSize)); | 1194 __ add(esp, Immediate(5 * kPointerSize)); |
1209 | 1195 |
1210 // Exit and decrement the loop depth. | 1196 // Exit and decrement the loop depth. |
1211 PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS); | 1197 PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS); |
1212 __ bind(&exit); | 1198 __ bind(&exit); |
1213 decrement_loop_depth(); | 1199 decrement_loop_depth(); |
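For context on the for-in epilogue above: each iteration bumps the smi index at the top of the stack, and the break path drops the five for-in bookkeeping words with one add to esp. A toy C++ model of that stack discipline follows; the slot meanings are assumptions inferred from the surrounding code, not V8's documented layout:

#include <cstdio>
#include <vector>

int main() {
  std::vector<int> stack;
  // Five for-in bookkeeping slots (assumed layout, deepest first):
  stack.push_back(0);  // enumerable object (stand-in)
  stack.push_back(0);  // cache map or sentinel
  stack.push_back(0);  // enum cache array
  stack.push_back(3);  // array length
  stack.push_back(0);  // current index (top of stack)

  // continue_label: bump the index and loop while index < length, as the
  // emitted __ add(Operand(esp, 0), Immediate(Smi::FromInt(1))) does.
  while (stack.back() < stack[stack.size() - 2]) {
    std::printf("visit element %d\n", stack.back());
    ++stack.back();
  }

  // break_label: __ add(esp, Immediate(5 * kPointerSize)) pops all five.
  stack.resize(stack.size() - 5);
  return 0;
}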
(...skipping 3319 matching lines...)
4533 *stack_depth = 0; | 4519 *stack_depth = 0; |
4534 *context_length = 0; | 4520 *context_length = 0; |
4535 return previous_; | 4521 return previous_; |
4536 } | 4522 } |
4537 | 4523 |
4538 #undef __ | 4524 #undef __ |
4539 | 4525 |
4540 } } // namespace v8::internal | 4526 } } // namespace v8::internal |
4541 | 4527 |
4542 #endif // V8_TARGET_ARCH_IA32 | 4528 #endif // V8_TARGET_ARCH_IA32 |