Chromium Code Reviews

Diff: src/mips/full-codegen-mips.cc

Issue 153983002: MIPS: Fixes for patch sites if long branches are emitted. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 6 years, 10 months ago
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 66 matching lines...)
 }

 // When initially emitting this ensure that a jump is always generated to skip
 // the inlined smi code.
 void EmitJumpIfNotSmi(Register reg, Label* target) {
   ASSERT(!patch_site_.is_bound() && !info_emitted_);
   Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
   __ bind(&patch_site_);
   __ andi(at, reg, 0);
   // Always taken before patched.
-  __ Branch(target, eq, at, Operand(zero_reg));
+  __ BranchShort(target, eq, at, Operand(zero_reg));
 }

 // When initially emitting this ensure that a jump is never generated to skip
 // the inlined smi code.
 void EmitJumpIfSmi(Register reg, Label* target) {
   Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
   ASSERT(!patch_site_.is_bound() && !info_emitted_);
   __ bind(&patch_site_);
   __ andi(at, reg, 0);
   // Never taken before patched.
-  __ Branch(target, ne, at, Operand(zero_reg));
+  __ BranchShort(target, ne, at, Operand(zero_reg));
 }
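
Note: the Branch -> BranchShort change keeps the branch at the patch site a single short-branch instruction. Branch() may expand into a long-branch sequence when the target is out of short-branch range, and the inline-cache patching code that later rewrites this site expects the fixed andi-plus-short-branch shape, which appears to be what the issue title ("fixes for patch sites if long branches are emitted") refers to. The "Always taken" / "Never taken before patched" comments follow from the andi immediate: with a mask of 0, at is always zero. A standalone sketch of that arithmetic (plain C++, not V8 code; assumes kSmiTagMask == 1 and that smis carry a clear low tag bit):

// Illustration only: models `andi at, reg, mask` followed by a branch on
// (at == 0) / (at != 0), before and after the inline cache patches the mask.
#include <cassert>
#include <cstdint>

constexpr uint32_t kSmiTagMask = 1;  // assumption: 32-bit V8 smi tag bit

uint32_t Andi(uint32_t reg, uint32_t mask) { return reg & mask; }

int main() {
  const uint32_t smi = 42u << 1;            // tagged smi: low bit clear
  const uint32_t heap_object = 0x12345679;  // tagged pointer: low bit set

  // Before patching the mask is 0, so `at` is always zero: the eq branch in
  // EmitJumpIfNotSmi is always taken, the ne branch in EmitJumpIfSmi never is.
  assert(Andi(smi, 0) == 0 && Andi(heap_object, 0) == 0);

  // After the IC patches the immediate to kSmiTagMask, the same branch tests
  // the smi tag bit for real.
  assert(Andi(smi, kSmiTagMask) == 0);          // smi: falls through / jumps as intended
  assert(Andi(heap_object, kSmiTagMask) != 0);  // heap object: takes the other path
  return 0;
}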

 void EmitPatchInfo() {
   if (patch_site_.is_bound()) {
     int delta_to_patch_site = masm_->InstructionsGeneratedSince(&patch_site_);
     Register reg = Register::from_code(delta_to_patch_site / kImm16Mask);
     __ andi(zero_reg, reg, delta_to_patch_site % kImm16Mask);
 #ifdef DEBUG
     info_emitted_ = true;
 #endif
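
Note: EmitPatchInfo marks the call site by packing the instruction count back to patch_site_ into a dummy andi at zero_reg: the quotient of the delta by kImm16Mask selects the register field and the remainder becomes the 16-bit immediate; the MIPS IC-patching code reverses that split to walk back to the andi/short-branch pair above. A rough standalone sketch of the encode/decode arithmetic (plain C++, not V8 code; assumes kImm16Mask == 0xffff as in the MIPS constants):

// Illustration only: how the delta to the patch site round-trips through the
// register-code and immediate fields of the dummy `andi zero_reg, reg, imm`.
#include <cassert>
#include <cstdint>

constexpr uint32_t kImm16Mask = 0xffff;  // assumption: MIPS 16-bit immediate mask

struct AndiMarker {
  uint32_t rs_code;    // mirrors Register::from_code(delta / kImm16Mask)
  uint32_t immediate;  // mirrors the andi immediate, delta % kImm16Mask
};

AndiMarker EncodeDelta(uint32_t delta_to_patch_site) {
  return {delta_to_patch_site / kImm16Mask, delta_to_patch_site % kImm16Mask};
}

uint32_t DecodeDelta(const AndiMarker& m) {
  return m.rs_code * kImm16Mask + m.immediate;
}

int main() {
  // The decoded delta is only useful if the instructions at the patch site
  // itself have the expected shape (andi plus one short branch); hence the
  // BranchShort change in EmitJumpIfNotSmi/EmitJumpIfSmi.
  for (uint32_t delta = 0; delta < 1000; ++delta) {
    assert(DecodeDelta(EncodeDelta(delta)) == delta);
  }
  return 0;
}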
(...skipping 2231 matching lines...)
   CallIC(stub.GetCode(isolate()), NOT_CONTEXTUAL,
          expr->BinaryOperationFeedbackId());
   patch_site.EmitPatchInfo();
   __ jmp(&done);

   __ bind(&smi_case);
   // Smi case. This code works the same way as the smi-smi case in the type
   // recording binary operation stub, see
   switch (op) {
     case Token::SAR:
-      __ Branch(&stub_call);
       __ GetLeastBitsFromSmi(scratch1, right, 5);
       __ srav(right, left, scratch1);
       __ And(v0, right, Operand(~kSmiTagMask));
       break;
     case Token::SHL: {
-      __ Branch(&stub_call);
       __ SmiUntag(scratch1, left);
       __ GetLeastBitsFromSmi(scratch2, right, 5);
       __ sllv(scratch1, scratch1, scratch2);
       __ Addu(scratch2, scratch1, Operand(0x40000000));
       __ Branch(&stub_call, lt, scratch2, Operand(zero_reg));
       __ SmiTag(v0, scratch1);
       break;
     }
     case Token::SHR: {
-      __ Branch(&stub_call);
       __ SmiUntag(scratch1, left);
       __ GetLeastBitsFromSmi(scratch2, right, 5);
       __ srlv(scratch1, scratch1, scratch2);
       __ And(scratch2, scratch1, 0xc0000000);
       __ Branch(&stub_call, ne, scratch2, Operand(zero_reg));
       __ SmiTag(v0, scratch1);
       break;
     }
     case Token::ADD:
       __ AdduAndCheckForOverflow(v0, left, right, scratch1);
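
Note: with the unconditional __ Branch(&stub_call) instructions removed, the inlined SAR/SHL/SHR fast paths become reachable again; only the remaining conditional branches fall back to the stub, and those are plain smi-range checks: SHL adds 0x40000000 to the shifted value and bails if the sum is negative, SHR bails if either of the two top bits is set. A standalone check of that arithmetic (plain C++, not V8 code; assumes 31-bit signed smis, i.e. the range [-2^30, 2^30 - 1], and two's-complement wraparound as on MIPS):

// Illustration only: the SHL and SHR overflow tests against the smi range.
#include <cassert>
#include <cstdint>

bool InSmiRange(int64_t v) { return v >= -(1LL << 30) && v < (1LL << 30); }

// SHL case: Addu(scratch2, result, 0x40000000), then bail if scratch2 < 0.
bool ShlNeedsStub(int32_t result) {
  return static_cast<int32_t>(static_cast<uint32_t>(result) + 0x40000000u) < 0;
}

// SHR case: And(scratch2, result, 0xc0000000), then bail if scratch2 != 0.
bool ShrNeedsStub(uint32_t result) { return (result & 0xc0000000u) != 0; }

int main() {
  for (int64_t v = -(1LL << 31); v < (1LL << 31); v += 12345) {
    assert(ShlNeedsStub(static_cast<int32_t>(v)) == !InSmiRange(v));
  }
  for (uint32_t v = 0; v < 0xf0000000u; v += 54321) {
    assert(ShrNeedsStub(v) == !InSmiRange(v));
  }
  return 0;
}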
(...skipping 671 matching lines...)
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
   Label* if_false = NULL;
   Label* fall_through = NULL;
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);

   __ JumpIfSmi(v0, if_false);
   __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
   __ lbu(a1, FieldMemOperand(a1, Map::kBitFieldOffset));
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   __ And(at, a1, Operand(1 << Map::kIsUndetectable));
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(ne, at, Operand(zero_reg), if_true, if_false, fall_through);

   context()->Plug(if_true, if_false);
 }


 void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
     CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
(...skipping 1903 matching lines...)
          Assembler::target_address_at(pc_immediate_load_address)) ==
          reinterpret_cast<uint32_t>(
              isolate->builtins()->OsrAfterStackCheck()->entry()));
   return OSR_AFTER_STACK_CHECK;
 }


 } }  // namespace v8::internal


 #endif  // V8_TARGET_ARCH_MIPS