OLD | NEW |
---|---|
1 // Copyright 2010 the V8 project authors. All rights reserved. | 1 // Copyright 2010 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 6240 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
6251 | 6251 |
6252 #ifdef DEBUG | 6252 #ifdef DEBUG |
6253 bool CodeGenerator::HasValidEntryRegisters() { return true; } | 6253 bool CodeGenerator::HasValidEntryRegisters() { return true; } |
6254 #endif | 6254 #endif |
6255 | 6255 |
6256 | 6256 |
6257 #undef __ | 6257 #undef __ |
6258 #define __ ACCESS_MASM(masm) | 6258 #define __ ACCESS_MASM(masm) |
6259 | 6259 |
6260 | 6260 |
6261 // This uses versions of the sum-of-digits-to-see-if-a-number-is-divisible-by-3 | |
6262 // trick. See http://en.wikipedia.org/wiki/Divisibility_rule | |
6263 // Takes the sum of the digits base (mask + 1) repeatedly until we have a | |
6264 // number from 0 to mask. On exit the 'eq' condition flags are set if the | |
6265 // answer is exactly the mask. | |
6266 void DigitSum(MacroAssembler* masm, | |
Søren Thygesen Gjesse
2010/06/28 07:19:36
static (times 5)? And why are these functions not
| |
6267 Register lhs, | |
6268 int mask, | |
6269 int shift) { | |
6270 ASSERT(mask > 0); | |
6271 ASSERT(mask <= 0xff); // This ensures we don't need ip to use it. | |
6272 Label loop, entry; | |
6273 __ jmp(&entry); | |
6274 __ bind(&loop); | |
6275 __ and_(ip, lhs, Operand(mask)); | |
6276 __ add(lhs, ip, Operand(lhs, LSR, shift)); | |
6277 __ bind(&entry); | |
6278 __ cmp(lhs, Operand(mask)); | |
6279 __ b(gt, &loop); | |
6280 } | |
6281 | |
6282 | |
6283 void DigitSum(MacroAssembler* masm, | |
6284 Register lhs, | |
6285 Register scratch, | |
6286 int mask, | |
6287 int shift1, | |
6288 int shift2) { | |
6289 ASSERT(mask > 0); | |
6290 ASSERT(mask <= 0xff); // This ensures we don't need ip to use it. | |
6291 Label loop, entry; | |
6292 __ jmp(&entry); | |
6293 __ bind(&loop); | |
6294 __ bic(scratch, lhs, Operand(mask)); | |
6295 __ and_(ip, lhs, Operand(mask)); | |
6296 __ add(lhs, ip, Operand(lhs, LSR, shift1)); | |
6297 __ add(lhs, lhs, Operand(scratch, LSR, shift2)); | |
6298 __ bind(&entry); | |
6299 __ cmp(lhs, Operand(mask)); | |
6300 __ b(gt, &loop); | |
6301 } | |
6302 | |
6303 | |
6304 // Splits the number into two halves (bottom half has shift bits). The top | |
6305 // half is subtracted from the bottom half. If the result is negative then | |
6306 // rhs is added. | |
6307 void ModGetInRangeBySubtraction(MacroAssembler* masm, | |
6308 Register lhs, | |
6309 int shift, | |
6310 int rhs) { | |
6311 int mask = (1 << shift) - 1; | |
6312 __ and_(ip, lhs, Operand(mask)); | |
6313 __ sub(lhs, ip, Operand(lhs, LSR, shift), SetCC); | |
6314 __ add(lhs, lhs, Operand(rhs), LeaveCC, mi); | |
6315 } | |
6316 | |
6317 | |
6318 void ModReduce(MacroAssembler* masm, | |
6319 Register lhs, | |
6320 int max, | |
6321 int denominator) { | |
6322 int limit = denominator; | |
6323 while (limit * 2 <= max) limit *= 2; | |
6324 while (limit >= denominator) { | |
6325 __ cmp(lhs, Operand(limit)); | |
6326 __ sub(lhs, lhs, Operand(limit), LeaveCC, ge); | |
6327 limit >>= 1; | |
6328 } | |
6329 } | |
6330 | |
6331 | |
6332 void ModAnswer(MacroAssembler* masm, | |
6333 Register result, | |
6334 Register shift_distance, | |
6335 Register mask_bits, | |
6336 Register sum_of_digits) { | |
6337 __ add(result, mask_bits, Operand(sum_of_digits, LSL, shift_distance)); | |
6338 __ Ret(); | |
6339 } | |
6340 | |
6261 Handle<String> Reference::GetName() { | 6341 Handle<String> Reference::GetName() { |
6262 ASSERT(type_ == NAMED); | 6342 ASSERT(type_ == NAMED); |
6263 Property* property = expression_->AsProperty(); | 6343 Property* property = expression_->AsProperty(); |
6264 if (property == NULL) { | 6344 if (property == NULL) { |
6265 // Global variable reference treated as a named property reference. | 6345 // Global variable reference treated as a named property reference. |
6266 VariableProxy* proxy = expression_->AsVariableProxy(); | 6346 VariableProxy* proxy = expression_->AsVariableProxy(); |
6267 ASSERT(proxy->AsVariable() != NULL); | 6347 ASSERT(proxy->AsVariable() != NULL); |
6268 ASSERT(proxy->AsVariable()->is_global()); | 6348 ASSERT(proxy->AsVariable()->is_global()); |
6269 return proxy->name(); | 6349 return proxy->name(); |
6270 } else { | 6350 } else { |
(...skipping 343 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
6614 static const uint32_t exponent_word_for_1 = | 6694 static const uint32_t exponent_word_for_1 = |
6615 HeapNumber::kExponentBias << HeapNumber::kExponentShift; | 6695 HeapNumber::kExponentBias << HeapNumber::kExponentShift; |
6616 __ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, eq); | 6696 __ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, eq); |
6617 // 1, 0 and -1 all have 0 for the second word. | 6697 // 1, 0 and -1 all have 0 for the second word. |
6618 __ mov(mantissa, Operand(0)); | 6698 __ mov(mantissa, Operand(0)); |
6619 __ Ret(); | 6699 __ Ret(); |
6620 | 6700 |
6621 __ bind(&not_special); | 6701 __ bind(&not_special); |
6622 // Count leading zeros. Uses mantissa for a scratch register on pre-ARM5. | 6702 // Count leading zeros. Uses mantissa for a scratch register on pre-ARM5. |
6623 // Gets the wrong answer for 0, but we already checked for that case above. | 6703 // Gets the wrong answer for 0, but we already checked for that case above. |
6624 __ CountLeadingZeros(source_, mantissa, zeros_); | 6704 __ CountLeadingZeros(zeros_, source_, mantissa); |
6625 // Compute exponent and or it into the exponent register. | 6705 // Compute exponent and or it into the exponent register. |
6626 // We use mantissa as a scratch register here. Use a fudge factor to | 6706 // We use mantissa as a scratch register here. Use a fudge factor to |
6627 // divide the constant 31 + HeapNumber::kExponentBias, 0x41d, into two parts | 6707 // divide the constant 31 + HeapNumber::kExponentBias, 0x41d, into two parts |
6628 // that fit in the ARM's constant field. | 6708 // that fit in the ARM's constant field. |
6629 int fudge = 0x400; | 6709 int fudge = 0x400; |
6630 __ rsb(mantissa, zeros_, Operand(31 + HeapNumber::kExponentBias - fudge)); | 6710 __ rsb(mantissa, zeros_, Operand(31 + HeapNumber::kExponentBias - fudge)); |
6631 __ add(mantissa, mantissa, Operand(fudge)); | 6711 __ add(mantissa, mantissa, Operand(fudge)); |
6632 __ orr(exponent, | 6712 __ orr(exponent, |
6633 exponent, | 6713 exponent, |
6634 Operand(mantissa, LSL, HeapNumber::kExponentShift)); | 6714 Operand(mantissa, LSL, HeapNumber::kExponentShift)); |
(...skipping 708 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
7343 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | 7423 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
7344 | 7424 |
7345 // Smi-smi case (overflow). | 7425 // Smi-smi case (overflow). |
7346 // Since both are Smis there is no heap number to overwrite, so allocate. | 7426 // Since both are Smis there is no heap number to overwrite, so allocate. |
7347 // The new heap number is in r5. r3 and r7 are scratch. | 7427 // The new heap number is in r5. r3 and r7 are scratch. |
7348 __ AllocateHeapNumber( | 7428 __ AllocateHeapNumber( |
7349 r5, r3, r7, heap_number_map, lhs.is(r0) ? &slow_reverse : &slow); | 7429 r5, r3, r7, heap_number_map, lhs.is(r0) ? &slow_reverse : &slow); |
7350 | 7430 |
7351 // If we have floating point hardware, inline ADD, SUB, MUL, and DIV, | 7431 // If we have floating point hardware, inline ADD, SUB, MUL, and DIV, |
7352 // using registers d7 and d6 for the double values. | 7432 // using registers d7 and d6 for the double values. |
7353 if (use_fp_registers) { | 7433 if (CpuFeatures::IsSupported(VFP3)) { |
7354 CpuFeatures::Scope scope(VFP3); | 7434 CpuFeatures::Scope scope(VFP3); |
7355 __ mov(r7, Operand(rhs, ASR, kSmiTagSize)); | 7435 __ mov(r7, Operand(rhs, ASR, kSmiTagSize)); |
7356 __ vmov(s15, r7); | 7436 __ vmov(s15, r7); |
7357 __ vcvt_f64_s32(d7, s15); | 7437 __ vcvt_f64_s32(d7, s15); |
7358 __ mov(r7, Operand(lhs, ASR, kSmiTagSize)); | 7438 __ mov(r7, Operand(lhs, ASR, kSmiTagSize)); |
7359 __ vmov(s13, r7); | 7439 __ vmov(s13, r7); |
7360 __ vcvt_f64_s32(d6, s13); | 7440 __ vcvt_f64_s32(d6, s13); |
7441 if (!use_fp_registers) { | |
7442 __ vmov(r2, r3, d7); | |
7443 __ vmov(r0, r1, d6); | |
7444 } | |
7361 } else { | 7445 } else { |
7362 // Write Smi from rhs to r3 and r2 in double format. r3 is scratch. | 7446 // Write Smi from rhs to r3 and r2 in double format. r9 is scratch. |
7363 __ mov(r7, Operand(rhs)); | 7447 __ mov(r7, Operand(rhs)); |
7364 ConvertToDoubleStub stub1(r3, r2, r7, r9); | 7448 ConvertToDoubleStub stub1(r3, r2, r7, r9); |
7365 __ push(lr); | 7449 __ push(lr); |
7366 __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET); | 7450 __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET); |
7367 // Write Smi from lhs to r1 and r0 in double format. r9 is scratch. | 7451 // Write Smi from lhs to r1 and r0 in double format. r9 is scratch. |
7368 __ mov(r7, Operand(lhs)); | 7452 __ mov(r7, Operand(lhs)); |
7369 ConvertToDoubleStub stub2(r1, r0, r7, r9); | 7453 ConvertToDoubleStub stub2(r1, r0, r7, r9); |
7370 __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET); | 7454 __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET); |
7371 __ pop(lr); | 7455 __ pop(lr); |
7372 } | 7456 } |
(...skipping 54 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
7427 // Calling convention says that second double is in r2 and r3. | 7511 // Calling convention says that second double is in r2 and r3. |
7428 __ Ldrd(r2, r3, FieldMemOperand(r0, HeapNumber::kValueOffset)); | 7512 __ Ldrd(r2, r3, FieldMemOperand(r0, HeapNumber::kValueOffset)); |
7429 } | 7513 } |
7430 __ jmp(&finished_loading_r0); | 7514 __ jmp(&finished_loading_r0); |
7431 __ bind(&r0_is_smi); | 7515 __ bind(&r0_is_smi); |
7432 if (mode_ == OVERWRITE_RIGHT) { | 7516 if (mode_ == OVERWRITE_RIGHT) { |
7433 // We can't overwrite a Smi so get address of new heap number into r5. | 7517 // We can't overwrite a Smi so get address of new heap number into r5. |
7434 __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow); | 7518 __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow); |
7435 } | 7519 } |
7436 | 7520 |
7437 if (use_fp_registers) { | 7521 if (CpuFeatures::IsSupported(VFP3)) { |
7438 CpuFeatures::Scope scope(VFP3); | 7522 CpuFeatures::Scope scope(VFP3); |
7439 // Convert smi in r0 to double in d7. | 7523 // Convert smi in r0 to double in d7. |
7440 __ mov(r7, Operand(r0, ASR, kSmiTagSize)); | 7524 __ mov(r7, Operand(r0, ASR, kSmiTagSize)); |
7441 __ vmov(s15, r7); | 7525 __ vmov(s15, r7); |
7442 __ vcvt_f64_s32(d7, s15); | 7526 __ vcvt_f64_s32(d7, s15); |
7527 if (!use_fp_registers) { | |
7528 __ vmov(r2, r3, d7); | |
7529 } | |
7443 } else { | 7530 } else { |
7444 // Write Smi from r0 to r3 and r2 in double format. | 7531 // Write Smi from r0 to r3 and r2 in double format. |
7445 __ mov(r7, Operand(r0)); | 7532 __ mov(r7, Operand(r0)); |
7446 ConvertToDoubleStub stub3(r3, r2, r7, r4); | 7533 ConvertToDoubleStub stub3(r3, r2, r7, r4); |
7447 __ push(lr); | 7534 __ push(lr); |
7448 __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET); | 7535 __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET); |
7449 __ pop(lr); | 7536 __ pop(lr); |
7450 } | 7537 } |
7451 | 7538 |
7452 // HEAP_NUMBERS stub is slower than GENERIC on a pair of smis. | 7539 // HEAP_NUMBERS stub is slower than GENERIC on a pair of smis. |
(...skipping 30 matching lines...) Expand all Loading... | |
7483 // Calling convention says that first double is in r0 and r1. | 7570 // Calling convention says that first double is in r0 and r1. |
7484 __ Ldrd(r0, r1, FieldMemOperand(r1, HeapNumber::kValueOffset)); | 7571 __ Ldrd(r0, r1, FieldMemOperand(r1, HeapNumber::kValueOffset)); |
7485 } | 7572 } |
7486 __ jmp(&finished_loading_r1); | 7573 __ jmp(&finished_loading_r1); |
7487 __ bind(&r1_is_smi); | 7574 __ bind(&r1_is_smi); |
7488 if (mode_ == OVERWRITE_LEFT) { | 7575 if (mode_ == OVERWRITE_LEFT) { |
7489 // We can't overwrite a Smi so get address of new heap number into r5. | 7576 // We can't overwrite a Smi so get address of new heap number into r5. |
7490 __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow); | 7577 __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow); |
7491 } | 7578 } |
7492 | 7579 |
7493 if (use_fp_registers) { | 7580 if (CpuFeatures::IsSupported(VFP3)) { |
7494 CpuFeatures::Scope scope(VFP3); | 7581 CpuFeatures::Scope scope(VFP3); |
7495 // Convert smi in r1 to double in d6. | 7582 // Convert smi in r1 to double in d6. |
7496 __ mov(r7, Operand(r1, ASR, kSmiTagSize)); | 7583 __ mov(r7, Operand(r1, ASR, kSmiTagSize)); |
7497 __ vmov(s13, r7); | 7584 __ vmov(s13, r7); |
7498 __ vcvt_f64_s32(d6, s13); | 7585 __ vcvt_f64_s32(d6, s13); |
7586 if (!use_fp_registers) { | |
7587 __ vmov(r0, r1, d6); | |
7588 } | |
7499 } else { | 7589 } else { |
7500 // Write Smi from r1 to r1 and r0 in double format. | 7590 // Write Smi from r1 to r1 and r0 in double format. |
7501 __ mov(r7, Operand(r1)); | 7591 __ mov(r7, Operand(r1)); |
7502 ConvertToDoubleStub stub4(r1, r0, r7, r9); | 7592 ConvertToDoubleStub stub4(r1, r0, r7, r9); |
7503 __ push(lr); | 7593 __ push(lr); |
7504 __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET); | 7594 __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET); |
7505 __ pop(lr); | 7595 __ pop(lr); |
7506 } | 7596 } |
7507 | 7597 |
7508 __ bind(&finished_loading_r1); | 7598 __ bind(&finished_loading_r1); |
(...skipping 426 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
7935 *required_shift = 2; | 8025 *required_shift = 2; |
7936 break; | 8026 break; |
7937 default: | 8027 default: |
7938 ASSERT(!IsPowerOf2(known_int)); // That would be very inefficient. | 8028 ASSERT(!IsPowerOf2(known_int)); // That would be very inefficient. |
7939 __ mul(result, source, known_int_register); | 8029 __ mul(result, source, known_int_register); |
7940 *required_shift = 0; | 8030 *required_shift = 0; |
7941 } | 8031 } |
7942 } | 8032 } |
7943 | 8033 |
7944 | 8034 |
// See comment for class.
// Computes a modulus where the right hand side is odd_number_ scaled by a
// power of two (presumably rhs == odd_number_ << shift_distance_ with
// odd_number_ in 3..25 -- TODO confirm against the class declaration, which
// is not visible here).  Dispatches on odd_number_ to a per-divisor digit-sum
// reduction, then ModAnswer shifts the result back and restores the low bits
// from mask_bits_ before returning.
void IntegerModStub::Generate(MacroAssembler* masm) {
  // Strip the power-of-two part of the left hand side.
  __ mov(lhs_, Operand(lhs_, LSR, shift_distance_));
  // Turn the odd number (low bit always 1) into a branch-table offset:
  // clear bit 0 and double, giving (odd_number_ - 1) * 2.
  __ bic(odd_number_, odd_number_, Operand(1));
  __ mov(odd_number_, Operand(odd_number_, LSL, 1));
  // We now have (odd_number_ - 1) * 2 in the register.
  // Build a switch out of branches instead of data because it avoids
  // having to teach the assembler about intra-code-object pointers
  // that are not in relative branch instructions.
  Label mod3, mod5, mod7, mod9, mod11, mod13, mod15, mod17, mod19;
  Label mod21, mod23, mod25;
  // Constant-pool emission must be blocked so the branch table stays
  // contiguous and the pc-relative arithmetic below lands on the right slot.
  { Assembler::BlockConstPoolScope block_const_pool(masm);
    __ add(pc, pc, Operand(odd_number_));
    // When you read pc it is always 8 ahead, but when you write it you always
    // write the actual value.  So we put in two nops to take up the slack.
    __ nop();
    __ nop();
    // Branch table: one unconditional branch per supported odd divisor.
    // odd_number_ == 3 maps to the first entry, 5 to the second, etc.
    __ b(&mod3);
    __ b(&mod5);
    __ b(&mod7);
    __ b(&mod9);
    __ b(&mod11);
    __ b(&mod13);
    __ b(&mod15);
    __ b(&mod17);
    __ b(&mod19);
    __ b(&mod21);
    __ b(&mod23);
    __ b(&mod25);
  }
  // Each case below: fold lhs_ with a digit sum (base mask + 1 divisible by
  // the divisor), bring the residue into [0, divisor), then ModAnswer
  // shifts it back up and returns.  The digit sum can leave exactly 'mask'
  // (== 0 mod divisor) for divisors one below a power of two, hence the
  // conditional subtractions on eq.
  __ bind(&mod3);
  DigitSum(masm, lhs_, 3, 2);
  __ sub(lhs_, lhs_, Operand(3), LeaveCC, eq);
  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);

  __ bind(&mod5);
  DigitSum(masm, lhs_, 0xf, 4);
  ModGetInRangeBySubtraction(masm, lhs_, 2, 5);
  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);

  __ bind(&mod7);
  DigitSum(masm, lhs_, 7, 3);
  __ sub(lhs_, lhs_, Operand(7), LeaveCC, eq);
  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);

  __ bind(&mod9);
  DigitSum(masm, lhs_, 0x3f, 6);
  ModGetInRangeBySubtraction(masm, lhs_, 3, 9);
  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);

  __ bind(&mod11);
  DigitSum(masm, lhs_, r5, 0x3f, 6, 3);
  ModReduce(masm, lhs_, 0x3f, 11);
  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);

  __ bind(&mod13);
  DigitSum(masm, lhs_, r5, 0xff, 8, 5);
  ModReduce(masm, lhs_, 0xff, 13);
  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);

  __ bind(&mod15);
  DigitSum(masm, lhs_, 0xf, 4);
  __ sub(lhs_, lhs_, Operand(15), LeaveCC, eq);
  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);

  __ bind(&mod17);
  DigitSum(masm, lhs_, 0xff, 8);
  ModGetInRangeBySubtraction(masm, lhs_, 4, 17);
  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);

  __ bind(&mod19);
  DigitSum(masm, lhs_, r5, 0xff, 8, 5);
  ModReduce(masm, lhs_, 0xff, 19);
  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);

  __ bind(&mod21);
  DigitSum(masm, lhs_, 0x3f, 6);
  ModReduce(masm, lhs_, 0x3f, 21);
  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);

  __ bind(&mod23);
  DigitSum(masm, lhs_, r5, 0xff, 8, 7);
  ModReduce(masm, lhs_, 0xff, 23);
  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);

  __ bind(&mod25);
  DigitSum(masm, lhs_, r5, 0x7f, 7, 6);
  ModReduce(masm, lhs_, 0x7f, 25);
  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
}
8125 | |
8126 | |
7945 const char* GenericBinaryOpStub::GetName() { | 8127 const char* GenericBinaryOpStub::GetName() { |
7946 if (name_ != NULL) return name_; | 8128 if (name_ != NULL) return name_; |
7947 const int len = 100; | 8129 const int len = 100; |
7948 name_ = Bootstrapper::AllocateAutoDeletedArray(len); | 8130 name_ = Bootstrapper::AllocateAutoDeletedArray(len); |
7949 if (name_ == NULL) return "OOM"; | 8131 if (name_ == NULL) return "OOM"; |
7950 const char* op_name = Token::Name(op_); | 8132 const char* op_name = Token::Name(op_); |
7951 const char* overwrite_name; | 8133 const char* overwrite_name; |
7952 switch (mode_) { | 8134 switch (mode_) { |
7953 case NO_OVERWRITE: overwrite_name = "Alloc"; break; | 8135 case NO_OVERWRITE: overwrite_name = "Alloc"; break; |
7954 case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break; | 8136 case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break; |
(...skipping 107 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
8062 __ bind(&slow); | 8244 __ bind(&slow); |
8063 } | 8245 } |
8064 HandleBinaryOpSlowCases(masm, &not_smi, lhs, rhs, Builtins::MUL); | 8246 HandleBinaryOpSlowCases(masm, &not_smi, lhs, rhs, Builtins::MUL); |
8065 break; | 8247 break; |
8066 } | 8248 } |
8067 | 8249 |
8068 case Token::DIV: | 8250 case Token::DIV: |
8069 case Token::MOD: { | 8251 case Token::MOD: { |
8070 Label not_smi; | 8252 Label not_smi; |
8071 if (ShouldGenerateSmiCode() && specialized_on_rhs_) { | 8253 if (ShouldGenerateSmiCode() && specialized_on_rhs_) { |
8072 Label smi_is_unsuitable; | 8254 Label lhs_is_unsuitable; |
8073 __ BranchOnNotSmi(lhs, &not_smi); | 8255 __ BranchOnNotSmi(lhs, &not_smi); |
8074 if (IsPowerOf2(constant_rhs_)) { | 8256 if (IsPowerOf2(constant_rhs_)) { |
8075 if (op_ == Token::MOD) { | 8257 if (op_ == Token::MOD) { |
8076 __ and_(rhs, | 8258 __ and_(rhs, |
8077 lhs, | 8259 lhs, |
8078 Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)), | 8260 Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)), |
8079 SetCC); | 8261 SetCC); |
8080 // We now have the answer, but if the input was negative we also | 8262 // We now have the answer, but if the input was negative we also |
8081 // have the sign bit. Our work is done if the result is | 8263 // have the sign bit. Our work is done if the result is |
8082 // positive or zero: | 8264 // positive or zero: |
8083 if (!rhs.is(r0)) { | 8265 if (!rhs.is(r0)) { |
8084 __ mov(r0, rhs, LeaveCC, pl); | 8266 __ mov(r0, rhs, LeaveCC, pl); |
8085 } | 8267 } |
8086 __ Ret(pl); | 8268 __ Ret(pl); |
8087 // A mod of a negative left hand side must return a negative number. | 8269 // A mod of a negative left hand side must return a negative number. |
8088 // Unfortunately if the answer is 0 then we must return -0. And we | 8270 // Unfortunately if the answer is 0 then we must return -0. And we |
8089 // already optimistically trashed rhs so we may need to restore it. | 8271 // already optimistically trashed rhs so we may need to restore it. |
8090 __ eor(rhs, rhs, Operand(0x80000000u), SetCC); | 8272 __ eor(rhs, rhs, Operand(0x80000000u), SetCC); |
8091 // Next two instructions are conditional on the answer being -0. | 8273 // Next two instructions are conditional on the answer being -0. |
8092 __ mov(rhs, Operand(Smi::FromInt(constant_rhs_)), LeaveCC, eq); | 8274 __ mov(rhs, Operand(Smi::FromInt(constant_rhs_)), LeaveCC, eq); |
8093 __ b(eq, &smi_is_unsuitable); | 8275 __ b(eq, &lhs_is_unsuitable); |
8094 // We need to subtract the dividend. Eg. -3 % 4 == -3. | 8276 // We need to subtract the dividend. Eg. -3 % 4 == -3. |
8095 __ sub(result, rhs, Operand(Smi::FromInt(constant_rhs_))); | 8277 __ sub(result, rhs, Operand(Smi::FromInt(constant_rhs_))); |
8096 } else { | 8278 } else { |
8097 ASSERT(op_ == Token::DIV); | 8279 ASSERT(op_ == Token::DIV); |
8098 __ tst(lhs, | 8280 __ tst(lhs, |
8099 Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1))); | 8281 Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1))); |
8100 __ b(ne, &smi_is_unsuitable); // Go slow on negative or remainder. | 8282 __ b(ne, &lhs_is_unsuitable); // Go slow on negative or remainder. |
8101 int shift = 0; | 8283 int shift = 0; |
8102 int d = constant_rhs_; | 8284 int d = constant_rhs_; |
8103 while ((d & 1) == 0) { | 8285 while ((d & 1) == 0) { |
8104 d >>= 1; | 8286 d >>= 1; |
8105 shift++; | 8287 shift++; |
8106 } | 8288 } |
8107 __ mov(r0, Operand(lhs, LSR, shift)); | 8289 __ mov(r0, Operand(lhs, LSR, shift)); |
8108 __ bic(r0, r0, Operand(kSmiTagMask)); | 8290 __ bic(r0, r0, Operand(kSmiTagMask)); |
8109 } | 8291 } |
8110 } else { | 8292 } else { |
8111 // Not a power of 2. | 8293 // Not a power of 2. |
8112 __ tst(lhs, Operand(0x80000000u)); | 8294 __ tst(lhs, Operand(0x80000000u)); |
8113 __ b(ne, &smi_is_unsuitable); | 8295 __ b(ne, &lhs_is_unsuitable); |
8114 // Find a fixed point reciprocal of the divisor so we can divide by | 8296 // Find a fixed point reciprocal of the divisor so we can divide by |
8115 // multiplying. | 8297 // multiplying. |
8116 double divisor = 1.0 / constant_rhs_; | 8298 double divisor = 1.0 / constant_rhs_; |
8117 int shift = 32; | 8299 int shift = 32; |
8118 double scale = 4294967296.0; // 1 << 32. | 8300 double scale = 4294967296.0; // 1 << 32. |
8119 uint32_t mul; | 8301 uint32_t mul; |
8120 // Maximise the precision of the fixed point reciprocal. | 8302 // Maximise the precision of the fixed point reciprocal. |
8121 while (true) { | 8303 while (true) { |
8122 mul = static_cast<uint32_t>(scale * divisor); | 8304 mul = static_cast<uint32_t>(scale * divisor); |
8123 if (mul >= 0x7fffffff) break; | 8305 if (mul >= 0x7fffffff) break; |
(...skipping 14 matching lines...) Expand all Loading... | |
8138 MultiplyByKnownInt2(masm, | 8320 MultiplyByKnownInt2(masm, |
8139 scratch, | 8321 scratch, |
8140 scratch2, | 8322 scratch2, |
8141 rhs, | 8323 rhs, |
8142 constant_rhs_, | 8324 constant_rhs_, |
8143 &required_scratch_shift); | 8325 &required_scratch_shift); |
8144 // scratch << required_scratch_shift is now the Smi tagged rhs * | 8326 // scratch << required_scratch_shift is now the Smi tagged rhs * |
8145 // (lhs / rhs) where / indicates integer division. | 8327 // (lhs / rhs) where / indicates integer division. |
8146 if (op_ == Token::DIV) { | 8328 if (op_ == Token::DIV) { |
8147 __ cmp(lhs, Operand(scratch, LSL, required_scratch_shift)); | 8329 __ cmp(lhs, Operand(scratch, LSL, required_scratch_shift)); |
8148 __ b(ne, &smi_is_unsuitable); // There was a remainder. | 8330 __ b(ne, &lhs_is_unsuitable); // There was a remainder. |
8149 __ mov(result, Operand(scratch2, LSL, kSmiTagSize)); | 8331 __ mov(result, Operand(scratch2, LSL, kSmiTagSize)); |
8150 } else { | 8332 } else { |
8151 ASSERT(op_ == Token::MOD); | 8333 ASSERT(op_ == Token::MOD); |
8152 __ sub(result, lhs, Operand(scratch, LSL, required_scratch_shift)); | 8334 __ sub(result, lhs, Operand(scratch, LSL, required_scratch_shift)); |
8153 } | 8335 } |
8154 } | 8336 } |
8155 __ Ret(); | 8337 __ Ret(); |
8156 __ bind(&smi_is_unsuitable); | 8338 __ bind(&lhs_is_unsuitable); |
8157 } else if (op_ == Token::MOD && | 8339 } else if (op_ == Token::MOD && |
8158 runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS && | 8340 runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS && |
8159 runtime_operands_type_ != BinaryOpIC::STRINGS) { | 8341 runtime_operands_type_ != BinaryOpIC::STRINGS) { |
8160 // Do generate a bit of smi code for modulus even though the default for | 8342 // Do generate a bit of smi code for modulus even though the default for |
8161 // modulus is not to do it, but as the ARM processor has no coprocessor | 8343 // modulus is not to do it, but as the ARM processor has no coprocessor |
8162 // support for modulus checking for smis makes sense. | 8344 // support for modulus checking for smis makes sense. We can handle |
8345 // 1 to 25 times any power of 2. This covers over half the numbers from | |
8346 // 1 to 100 including all of the first 25. (Actually the constants < 10 | |
8347 // are handled above by reciprocal multiplication. We only get here for | |
8348 // those cases if the right hand side is not a constant or for cases | |
8349 // like 192 which is 3*2^6 and ends up in the 3 case in the integer mod | |
8350 // stub.) | |
8163 Label slow; | 8351 Label slow; |
8352 Label not_power_of_2; | |
8164 ASSERT(!ShouldGenerateSmiCode()); | 8353 ASSERT(!ShouldGenerateSmiCode()); |
8165 ASSERT(kSmiTag == 0); // Adjust code below. | 8354 ASSERT(kSmiTag == 0); // Adjust code below. |
8166 // Check for two positive smis. | 8355 // Check for two positive smis. |
8167 __ orr(smi_test_reg, lhs, Operand(rhs)); | 8356 __ orr(smi_test_reg, lhs, Operand(rhs)); |
8168 __ tst(smi_test_reg, Operand(0x80000000u | kSmiTagMask)); | 8357 __ tst(smi_test_reg, Operand(0x80000000u | kSmiTagMask)); |
8169 __ b(ne, &slow); | 8358 __ b(ne, &slow); |
8170 // Check that rhs is a power of two and not zero. | 8359 // Check that rhs is a power of two and not zero. |
8360 Register mask_bits = r3; | |
8171 __ sub(scratch, rhs, Operand(1), SetCC); | 8361 __ sub(scratch, rhs, Operand(1), SetCC); |
8172 __ b(mi, &slow); | 8362 __ b(mi, &slow); |
8173 __ tst(rhs, scratch); | 8363 __ and_(mask_bits, rhs, Operand(scratch), SetCC); |
8174 __ b(ne, &slow); | 8364 __ b(ne, &not_power_of_2); |
8175 // Calculate power of two modulus. | 8365 // Calculate power of two modulus. |
8176 __ and_(result, lhs, Operand(scratch)); | 8366 __ and_(result, lhs, Operand(scratch)); |
8177 __ Ret(); | 8367 __ Ret(); |
8368 | |
8369 __ bind(&not_power_of_2); | |
8370 __ eor(scratch, scratch, Operand(mask_bits)); | |
8371 // At least two bits are set in the modulus. The high one(s) are in | |
8372 // mask_bits and the low one is scratch + 1. | |
8373 __ and_(mask_bits, scratch, Operand(lhs)); | |
8374 Register shift_distance = scratch; | |
8375 scratch = no_reg; | |
8376 | |
8377 // The rhs consists of a power of 2 multiplied by some odd number. | |
8378 // The power-of-2 part we handle by putting the corresponding bits | |
8379 // from the lhs in the mask_bits register, and the power in the | |
8380 // shift_distance register. Shift distance is never 0 due to Smi | |
8381 // tagging. | |
8382 __ CountLeadingZeros(r4, shift_distance, shift_distance); | |
8383 __ rsb(shift_distance, r4, Operand(32)); | |
8384 | |
8385 // Now we need to find out what the odd number is. The last bit is | |
8386 // always 1. | |
8387 Register odd_number = r4; | |
8388 __ mov(odd_number, Operand(rhs, LSR, shift_distance)); | |
8389 __ cmp(odd_number, Operand(25)); | |
8390 __ b(gt, &slow); | |
8391 | |
8392 IntegerModStub stub( | |
8393 result, shift_distance, odd_number, mask_bits, lhs, r5); | |
8394 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); // Tail call. | |
8395 | |
8178 __ bind(&slow); | 8396 __ bind(&slow); |
8179 } | 8397 } |
8180 HandleBinaryOpSlowCases( | 8398 HandleBinaryOpSlowCases( |
8181 masm, | 8399 masm, |
8183 &not_smi, | 8401 &not_smi, |
8183 lhs, | 8401 lhs, |
8184 rhs, | 8402 rhs, |
8185 op_ == Token::MOD ? Builtins::MOD : Builtins::DIV); | 8403 op_ == Token::MOD ? Builtins::MOD : Builtins::DIV); |
8186 break; | 8404 break; |
8187 } | 8405 } |
(...skipping 2517 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
10705 __ bind(&string_add_runtime); | 10923 __ bind(&string_add_runtime); |
10706 __ TailCallRuntime(Runtime::kStringAdd, 2, 1); | 10924 __ TailCallRuntime(Runtime::kStringAdd, 2, 1); |
10707 } | 10925 } |
10708 | 10926 |
10709 | 10927 |
10710 #undef __ | 10928 #undef __ |
10711 | 10929 |
10712 } } // namespace v8::internal | 10930 } } // namespace v8::internal |
10713 | 10931 |
10714 #endif // V8_TARGET_ARCH_ARM | 10932 #endif // V8_TARGET_ARCH_ARM |
OLD | NEW |