| OLD | NEW |
| 1 // Copyright 2010 the V8 project authors. All rights reserved. | 1 // Copyright 2010 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 12 matching lines...) |
| 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 27 | 27 |
| 28 #include "v8.h" | 28 #include "v8.h" |
| 29 | 29 |
| 30 #if defined(V8_TARGET_ARCH_ARM) | 30 #if defined(V8_TARGET_ARCH_ARM) |
| 31 | 31 |
| 32 #include "bootstrapper.h" | 32 #include "bootstrapper.h" |
| 33 #include "code-stubs-arm.h" |
| 33 #include "codegen-inl.h" | 34 #include "codegen-inl.h" |
| 34 #include "compiler.h" | 35 #include "compiler.h" |
| 35 #include "debug.h" | 36 #include "debug.h" |
| 36 #include "ic-inl.h" | 37 #include "ic-inl.h" |
| 37 #include "jsregexp.h" | 38 #include "jsregexp.h" |
| 38 #include "jump-target-light-inl.h" | 39 #include "jump-target-light-inl.h" |
| 39 #include "parser.h" | 40 #include "parser.h" |
| 40 #include "regexp-macro-assembler.h" | 41 #include "regexp-macro-assembler.h" |
| 41 #include "regexp-stack.h" | 42 #include "regexp-stack.h" |
| 42 #include "register-allocator-inl.h" | 43 #include "register-allocator-inl.h" |
| 43 #include "runtime.h" | 44 #include "runtime.h" |
| 44 #include "scopes.h" | 45 #include "scopes.h" |
| 45 #include "virtual-frame-inl.h" | 46 #include "virtual-frame-inl.h" |
| 46 #include "virtual-frame-arm-inl.h" | 47 #include "virtual-frame-arm-inl.h" |
| 47 | 48 |
| 48 namespace v8 { | 49 namespace v8 { |
| 49 namespace internal { | 50 namespace internal { |
| 50 | 51 |
| 51 | 52 |
| 52 static void EmitIdenticalObjectComparison(MacroAssembler* masm, | |
| 53 Label* slow, | |
| 54 Condition cc, | |
| 55 bool never_nan_nan); | |
| 56 static void EmitSmiNonsmiComparison(MacroAssembler* masm, | |
| 57 Register lhs, | |
| 58 Register rhs, | |
| 59 Label* lhs_not_nan, | |
| 60 Label* slow, | |
| 61 bool strict); | |
| 62 static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc); | |
| 63 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, | |
| 64 Register lhs, | |
| 65 Register rhs); | |
| 66 static void MultiplyByKnownInt(MacroAssembler* masm, | |
| 67 Register source, | |
| 68 Register destination, | |
| 69 int known_int); | |
| 70 static bool IsEasyToMultiplyBy(int x); | |
| 71 | |
| 72 | |
| 73 #define __ ACCESS_MASM(masm_) | 53 #define __ ACCESS_MASM(masm_) |
| 74 | 54 |
| 75 // ------------------------------------------------------------------------- | 55 // ------------------------------------------------------------------------- |
| 76 // Platform-specific DeferredCode functions. | 56 // Platform-specific DeferredCode functions. |
| 77 | 57 |
| 78 void DeferredCode::SaveRegisters() { | 58 void DeferredCode::SaveRegisters() { |
| 79 // On ARM you either have a completely spilled frame or you | 59 // On ARM you either have a completely spilled frame or you |
| 80 // handle it yourself, but at the moment there's no automation | 60 // handle it yourself, but at the moment there's no automation |
| 81 // of registers and deferred code. | 61 // of registers and deferred code. |
| 82 } | 62 } |
| (...skipping 959 matching lines...) |
| 1042 x >>= 4; | 1022 x >>= 4; |
| 1043 } | 1023 } |
| 1044 while ((x & 1) == 0) { | 1024 while ((x & 1) == 0) { |
| 1045 bit_posn++; | 1025 bit_posn++; |
| 1046 x >>= 1; | 1026 x >>= 1; |
| 1047 } | 1027 } |
| 1048 return bit_posn; | 1028 return bit_posn; |
| 1049 } | 1029 } |
| 1050 | 1030 |
| 1051 | 1031 |
| 1032 // Can we multiply by x with at most two shifts and an add? |
| 1033 // This answers yes to all integers from 2 to 10. |
| 1034 static bool IsEasyToMultiplyBy(int x) { |
| 1035 if (x < 2) return false; // Avoid special cases. |
| 1036 if (x > (Smi::kMaxValue + 1) >> 2) return false; // Almost always overflows. |
| 1037 if (IsPowerOf2(x)) return true; // Simple shift. |
| 1038 if (PopCountLessThanEqual2(x)) return true; // Shift and add and shift. |
| 1039 if (IsPowerOf2(x + 1)) return true; // Patterns like 11111. |
| 1040 return false; |
| 1041 } |
| 1042 |
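The three accepted shapes are a plain power of two, a value with at most two set bits, and a value of the form 2^n - 1. A minimal host-side sketch of the same predicate, with illustrative stand-ins for V8's helpers and without the patch's Smi::kMaxValue overflow guard:

    #include <stdio.h>

    // Illustrative stand-ins for IsPowerOf2 and PopCountLessThanEqual2.
    static bool is_power_of_2(int x) { return x > 0 && (x & (x - 1)) == 0; }
    static bool popcount_le_2(int x) {
      x &= x - 1;                  // clear the lowest set bit
      return (x & (x - 1)) == 0;   // at most one bit remains
    }

    int main() {
      for (int x = 2; x <= 16; x++) {
        bool easy = is_power_of_2(x) || popcount_le_2(x) || is_power_of_2(x + 1);
        printf("%2d: %s\n", x, easy ? "easy" : "hard");
      }
      return 0;  // 2 through 10 all print "easy"; 11, 13 and 14 print "hard".
    }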
| 1043 |
| 1044 // Can multiply by anything that IsEasyToMultiplyBy returns true for. |
| 1045 // Source and destination may be the same register. This routine does |
| 1046 // not set carry and overflow the way a mul instruction would. |
| 1047 static void InlineMultiplyByKnownInt(MacroAssembler* masm, |
| 1048 Register source, |
| 1049 Register destination, |
| 1050 int known_int) { |
| 1051 if (IsPowerOf2(known_int)) { |
| 1052 masm->mov(destination, Operand(source, LSL, BitPosition(known_int))); |
| 1053 } else if (PopCountLessThanEqual2(known_int)) { |
| 1054 int first_bit = BitPosition(known_int); |
| 1055 int second_bit = BitPosition(known_int ^ (1 << first_bit)); |
| 1056 masm->add(destination, source, |
| 1057 Operand(source, LSL, second_bit - first_bit)); |
| 1058 if (first_bit != 0) { |
| 1059 masm->mov(destination, Operand(destination, LSL, first_bit)); |
| 1060 } |
| 1061 } else { |
| 1062 ASSERT(IsPowerOf2(known_int + 1)); // Patterns like 1111. |
| 1063 int the_bit = BitPosition(known_int + 1); |
| 1064 masm->rsb(destination, source, Operand(source, LSL, the_bit)); |
| 1065 } |
| 1066 } |
| 1067 |
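The three instruction shapes emitted above match simple integer identities: x * 2^k = x << k; for set bits a < b, x * (2^a + 2^b) = (x + (x << (b - a))) << a; and x * (2^n - 1) = (x << n) - x, which is what the rsb (reverse subtract) computes. A host-side model of the same case split, assuming the multiplier has already passed IsEasyToMultiplyBy:

    #include <assert.h>

    // Host-side model of InlineMultiplyByKnownInt's three cases.
    static int multiply_by_known_int(int source, int known_int) {
      if ((known_int & (known_int - 1)) == 0) {        // power of 2
        int k = 0;
        while ((known_int >> k) != 1) k++;
        return source << k;                            // mov dst, src, LSL #k
      }
      int first = 0;
      while (((known_int >> first) & 1) == 0) first++;
      int rest = known_int ^ (1 << first);
      if ((rest & (rest - 1)) == 0) {                  // exactly two bits set
        int second = 0;
        while ((rest >> second) != 1) second++;
        // add dst, src, src LSL #(second - first); then mov dst, dst, LSL #first
        return (source + (source << (second - first))) << first;
      }
      // Remaining case: known_int + 1 is a power of 2 (patterns like 1111).
      int n = 0;
      while (((known_int + 1) >> n) != 1) n++;
      return (source << n) - source;                   // rsb dst, src, src, LSL #n
    }

    int main() {
      for (int k = 2; k <= 10; k++) {
        assert(multiply_by_known_int(7, k) == 7 * k);
      }
      return 0;
    }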
| 1068 |
| 1052 void CodeGenerator::SmiOperation(Token::Value op, | 1069 void CodeGenerator::SmiOperation(Token::Value op, |
| 1053 Handle<Object> value, | 1070 Handle<Object> value, |
| 1054 bool reversed, | 1071 bool reversed, |
| 1055 OverwriteMode mode) { | 1072 OverwriteMode mode) { |
| 1056 int int_value = Smi::cast(*value)->value(); | 1073 int int_value = Smi::cast(*value)->value(); |
| 1057 | 1074 |
| 1058 bool both_sides_are_smi = frame_->KnownSmiAt(0); | 1075 bool both_sides_are_smi = frame_->KnownSmiAt(0); |
| 1059 | 1076 |
| 1060 bool something_to_inline; | 1077 bool something_to_inline; |
| 1061 switch (op) { | 1078 switch (op) { |
| (...skipping 290 matching lines...) |
| 1352 while ((mask & max_smi_that_wont_overflow) == 0) { | 1369 while ((mask & max_smi_that_wont_overflow) == 0) { |
| 1353 mask |= mask >> 1; | 1370 mask |= mask >> 1; |
| 1354 } | 1371 } |
| 1355 mask |= kSmiTagMask; | 1372 mask |= kSmiTagMask; |
| 1356 // This does a single mask that checks for a too high value in a | 1373 // This does a single mask that checks for a too high value in a |
| 1357 // conservative way and for a non-Smi. It also filters out negative | 1374 // conservative way and for a non-Smi. It also filters out negative |
| 1358 // numbers, unfortunately, but since this code is inline we prefer | 1375 // numbers, unfortunately, but since this code is inline we prefer |
| 1359 // brevity to comprehensiveness. | 1376 // brevity to comprehensiveness. |
| 1360 __ tst(tos, Operand(mask)); | 1377 __ tst(tos, Operand(mask)); |
| 1361 deferred->Branch(ne); | 1378 deferred->Branch(ne); |
| 1362 MultiplyByKnownInt(masm_, tos, tos, int_value); | 1379 InlineMultiplyByKnownInt(masm_, tos, tos, int_value); |
| 1363 deferred->BindExit(); | 1380 deferred->BindExit(); |
| 1364 frame_->EmitPush(tos); | 1381 frame_->EmitPush(tos); |
| 1365 break; | 1382 break; |
| 1366 } | 1383 } |
| 1367 | 1384 |
| 1368 default: | 1385 default: |
| 1369 UNREACHABLE(); | 1386 UNREACHABLE(); |
| 1370 break; | 1387 break; |
| 1371 } | 1388 } |
| 1372 } | 1389 } |
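The single tst above folds two rejections into one mask: the low bit (kSmiTagMask) catches non-Smis, and the contiguous high bits conservatively catch values too large to multiply by int_value without overflow; negative values are caught by the sign bit. A rough host-side model, assuming the mask starts at the sign bit and max_smi_that_wont_overflow is Smi::kMaxValue / int_value, both set in the elided lines:

    #include <assert.h>
    #include <stdint.h>

    // Conservative "safe to inline-multiply" filter, modeled on the MUL case.
    static bool safe_smi_multiply(uint32_t tagged, int int_value) {
      uint32_t max_ok = ((1u << 30) - 1) / int_value;  // assumed Smi::kMaxValue / n
      uint32_t mask = 0x80000000u;                     // assumed initial mask
      while ((mask & max_ok) == 0) mask |= mask >> 1;
      mask |= 1u;                                      // kSmiTagMask: reject non-Smis
      return (tagged & mask) == 0;  // also rejects every negative value
    }

    int main() {
      // A tagged Smi is the value shifted left by one.
      assert(safe_smi_multiply(3u << 1, 100));          // small value: inlined
      assert(!safe_smi_multiply(0x40000000u, 100));     // too large: deferred
      assert(!safe_smi_multiply((3u << 1) | 1u, 100));  // not a Smi: deferred
      return 0;
    }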
| (...skipping 5676 matching lines...) |
| 7049 set_unloaded(); | 7066 set_unloaded(); |
| 7050 break; | 7067 break; |
| 7051 } | 7068 } |
| 7052 | 7069 |
| 7053 default: | 7070 default: |
| 7054 UNREACHABLE(); | 7071 UNREACHABLE(); |
| 7055 } | 7072 } |
| 7056 } | 7073 } |
| 7057 | 7074 |
| 7058 | 7075 |
| 7059 void FastNewClosureStub::Generate(MacroAssembler* masm) { | |
| 7060 // Create a new closure from the given function info in new | |
| 7061 // space. Set the context to the current context in cp. | |
| 7062 Label gc; | |
| 7063 | |
| 7064 // Pop the function info from the stack. | |
| 7065 __ pop(r3); | |
| 7066 | |
| 7067 // Attempt to allocate new JSFunction in new space. | |
| 7068 __ AllocateInNewSpace(JSFunction::kSize, | |
| 7069 r0, | |
| 7070 r1, | |
| 7071 r2, | |
| 7072 &gc, | |
| 7073 TAG_OBJECT); | |
| 7074 | |
| 7075 // Compute the function map in the current global context and set that | |
| 7076 // as the map of the allocated object. | |
| 7077 __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX))); | |
| 7078 __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset)); | |
| 7079 __ ldr(r2, MemOperand(r2, Context::SlotOffset(Context::FUNCTION_MAP_INDEX))); | |
| 7080 __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset)); | |
| 7081 | |
| 7082 // Initialize the rest of the function. We don't have to update the | |
| 7083 // write barrier because the allocated object is in new space. | |
| 7084 __ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex); | |
| 7085 __ LoadRoot(r2, Heap::kTheHoleValueRootIndex); | |
| 7086 __ str(r1, FieldMemOperand(r0, JSObject::kPropertiesOffset)); | |
| 7087 __ str(r1, FieldMemOperand(r0, JSObject::kElementsOffset)); | |
| 7088 __ str(r2, FieldMemOperand(r0, JSFunction::kPrototypeOrInitialMapOffset)); | |
| 7089 __ str(r3, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset)); | |
| 7090 __ str(cp, FieldMemOperand(r0, JSFunction::kContextOffset)); | |
| 7091 __ str(r1, FieldMemOperand(r0, JSFunction::kLiteralsOffset)); | |
| 7092 | |
| 7093 // Initialize the code pointer in the function to be the one | |
| 7094 // found in the shared function info object. | |
| 7095 __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset)); | |
| 7096 __ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag)); | |
| 7097 __ str(r3, FieldMemOperand(r0, JSFunction::kCodeEntryOffset)); | |
| 7098 | |
| 7099 // Return result. The argument function info has been popped already. | |
| 7100 __ Ret(); | |
| 7101 | |
| 7102 // Create a new closure through the slower runtime call. | |
| 7103 __ bind(&gc); | |
| 7104 __ Push(cp, r3); | |
| 7105 __ TailCallRuntime(Runtime::kNewClosure, 2, 1); | |
| 7106 } | |
| 7107 | |
| 7108 | |
| 7109 void FastNewContextStub::Generate(MacroAssembler* masm) { | |
| 7110 // Try to allocate the context in new space. | |
| 7111 Label gc; | |
| 7112 int length = slots_ + Context::MIN_CONTEXT_SLOTS; | |
| 7113 | |
| 7114 // Attempt to allocate the context in new space. | |
| 7115 __ AllocateInNewSpace(FixedArray::SizeFor(length), | |
| 7116 r0, | |
| 7117 r1, | |
| 7118 r2, | |
| 7119 &gc, | |
| 7120 TAG_OBJECT); | |
| 7121 | |
| 7122 // Load the function from the stack. | |
| 7123 __ ldr(r3, MemOperand(sp, 0)); | |
| 7124 | |
| 7125 // Setup the object header. | |
| 7126 __ LoadRoot(r2, Heap::kContextMapRootIndex); | |
| 7127 __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset)); | |
| 7128 __ mov(r2, Operand(Smi::FromInt(length))); | |
| 7129 __ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset)); | |
| 7130 | |
| 7131 // Setup the fixed slots. | |
| 7132 __ mov(r1, Operand(Smi::FromInt(0))); | |
| 7133 __ str(r3, MemOperand(r0, Context::SlotOffset(Context::CLOSURE_INDEX))); | |
| 7134 __ str(r0, MemOperand(r0, Context::SlotOffset(Context::FCONTEXT_INDEX))); | |
| 7135 __ str(r1, MemOperand(r0, Context::SlotOffset(Context::PREVIOUS_INDEX))); | |
| 7136 __ str(r1, MemOperand(r0, Context::SlotOffset(Context::EXTENSION_INDEX))); | |
| 7137 | |
| 7138 // Copy the global object from the surrounding context. | |
| 7139 __ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX))); | |
| 7140 __ str(r1, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_INDEX))); | |
| 7141 | |
| 7142 // Initialize the rest of the slots to undefined. | |
| 7143 __ LoadRoot(r1, Heap::kUndefinedValueRootIndex); | |
| 7144 for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) { | |
| 7145 __ str(r1, MemOperand(r0, Context::SlotOffset(i))); | |
| 7146 } | |
| 7147 | |
| 7148 // Remove the on-stack argument and return. | |
| 7149 __ mov(cp, r0); | |
| 7150 __ pop(); | |
| 7151 __ Ret(); | |
| 7152 | |
| 7153 // Need to collect. Call into runtime system. | |
| 7154 __ bind(&gc); | |
| 7155 __ TailCallRuntime(Runtime::kNewContext, 1, 1); | |
| 7156 } | |
| 7157 | |
| 7158 | |
| 7159 void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) { | |
| 7160 // Stack layout on entry: | |
| 7161 // | |
| 7162 // [sp]: constant elements. | |
| 7163 // [sp + kPointerSize]: literal index. | |
| 7164 // [sp + (2 * kPointerSize)]: literals array. | |
| 7165 | |
| 7166 // All sizes here are multiples of kPointerSize. | |
| 7167 int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0; | |
| 7168 int size = JSArray::kSize + elements_size; | |
| 7169 | |
| 7170 // Load boilerplate object into r3 and check if we need to create a | |
| 7171 // boilerplate. | |
| 7172 Label slow_case; | |
| 7173 __ ldr(r3, MemOperand(sp, 2 * kPointerSize)); | |
| 7174 __ ldr(r0, MemOperand(sp, 1 * kPointerSize)); | |
| 7175 __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | |
| 7176 __ ldr(r3, MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize)); | |
| 7177 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); | |
| 7178 __ cmp(r3, ip); | |
| 7179 __ b(eq, &slow_case); | |
| 7180 | |
| 7181 if (FLAG_debug_code) { | |
| 7182 const char* message; | |
| 7183 Heap::RootListIndex expected_map_index; | |
| 7184 if (mode_ == CLONE_ELEMENTS) { | |
| 7185 message = "Expected (writable) fixed array"; | |
| 7186 expected_map_index = Heap::kFixedArrayMapRootIndex; | |
| 7187 } else { | |
| 7188 ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS); | |
| 7189 message = "Expected copy-on-write fixed array"; | |
| 7190 expected_map_index = Heap::kFixedCOWArrayMapRootIndex; | |
| 7191 } | |
| 7192 __ push(r3); | |
| 7193 __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset)); | |
| 7194 __ ldr(r3, FieldMemOperand(r3, HeapObject::kMapOffset)); | |
| 7195 __ LoadRoot(ip, expected_map_index); | |
| 7196 __ cmp(r3, ip); | |
| 7197 __ Assert(eq, message); | |
| 7198 __ pop(r3); | |
| 7199 } | |
| 7200 | |
| 7201 // Allocate both the JS array and the elements array in one big | |
| 7202 // allocation. This avoids multiple limit checks. | |
| 7203 __ AllocateInNewSpace(size, | |
| 7204 r0, | |
| 7205 r1, | |
| 7206 r2, | |
| 7207 &slow_case, | |
| 7208 TAG_OBJECT); | |
| 7209 | |
| 7210 // Copy the JS array part. | |
| 7211 for (int i = 0; i < JSArray::kSize; i += kPointerSize) { | |
| 7212 if ((i != JSArray::kElementsOffset) || (length_ == 0)) { | |
| 7213 __ ldr(r1, FieldMemOperand(r3, i)); | |
| 7214 __ str(r1, FieldMemOperand(r0, i)); | |
| 7215 } | |
| 7216 } | |
| 7217 | |
| 7218 if (length_ > 0) { | |
| 7219 // Get hold of the elements array of the boilerplate and setup the | |
| 7220 // elements pointer in the resulting object. | |
| 7221 __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset)); | |
| 7222 __ add(r2, r0, Operand(JSArray::kSize)); | |
| 7223 __ str(r2, FieldMemOperand(r0, JSArray::kElementsOffset)); | |
| 7224 | |
| 7225 // Copy the elements array. | |
| 7226 __ CopyFields(r2, r3, r1.bit(), elements_size / kPointerSize); | |
| 7227 } | |
| 7228 | |
| 7229 // Return and remove the on-stack parameters. | |
| 7230 __ add(sp, sp, Operand(3 * kPointerSize)); | |
| 7231 __ Ret(); | |
| 7232 | |
| 7233 __ bind(&slow_case); | |
| 7234 __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1); | |
| 7235 } | |
| 7236 | |
| 7237 | |
| 7238 // Takes a Smi and converts it to an IEEE 64-bit floating point value in two | |
| 7239 // registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and | |
| 7240 // 52 fraction bits (20 in the first word, 32 in the second). Zeros is a | |
| 7241 // scratch register. Destroys the source register. No GC occurs during this | |
| 7242 // stub so you don't have to set up the frame. | |
| 7243 class ConvertToDoubleStub : public CodeStub { | |
| 7244 public: | |
| 7245 ConvertToDoubleStub(Register result_reg_1, | |
| 7246 Register result_reg_2, | |
| 7247 Register source_reg, | |
| 7248 Register scratch_reg) | |
| 7249 : result1_(result_reg_1), | |
| 7250 result2_(result_reg_2), | |
| 7251 source_(source_reg), | |
| 7252 zeros_(scratch_reg) { } | |
| 7253 | |
| 7254 private: | |
| 7255 Register result1_; | |
| 7256 Register result2_; | |
| 7257 Register source_; | |
| 7258 Register zeros_; | |
| 7259 | |
| 7260 // Minor key encoding in 16 bits. | |
| 7261 class ModeBits: public BitField<OverwriteMode, 0, 2> {}; | |
| 7262 class OpBits: public BitField<Token::Value, 2, 14> {}; | |
| 7263 | |
| 7264 Major MajorKey() { return ConvertToDouble; } | |
| 7265 int MinorKey() { | |
| 7266 // Encode the parameters in a unique 16 bit value. | |
| 7267 return result1_.code() + | |
| 7268 (result2_.code() << 4) + | |
| 7269 (source_.code() << 8) + | |
| 7270 (zeros_.code() << 12); | |
| 7271 } | |
| 7272 | |
| 7273 void Generate(MacroAssembler* masm); | |
| 7274 | |
| 7275 const char* GetName() { return "ConvertToDoubleStub"; } | |
| 7276 | |
| 7277 #ifdef DEBUG | |
| 7278 void Print() { PrintF("ConvertToDoubleStub\n"); } | |
| 7279 #endif | |
| 7280 }; | |
| 7281 | |
| 7282 | |
| 7283 void ConvertToDoubleStub::Generate(MacroAssembler* masm) { | |
| 7284 #ifndef BIG_ENDIAN_FLOATING_POINT | |
| 7285 Register exponent = result1_; | |
| 7286 Register mantissa = result2_; | |
| 7287 #else | |
| 7288 Register exponent = result2_; | |
| 7289 Register mantissa = result1_; | |
| 7290 #endif | |
| 7291 Label not_special; | |
| 7292 // Convert from Smi to integer. | |
| 7293 __ mov(source_, Operand(source_, ASR, kSmiTagSize)); | |
| 7294 // Move sign bit from source to destination. This works because the sign bit | |
| 7295 // in the exponent word of the double has the same position and polarity as | |
| 7296 // the 2's complement sign bit in a Smi. | |
| 7297 STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u); | |
| 7298 __ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC); | |
| 7299 // Subtract from 0 if source was negative. | |
| 7300 __ rsb(source_, source_, Operand(0), LeaveCC, ne); | |
| 7301 | |
| 7302 // We have -1, 0 or 1, which we treat specially. Register source_ contains | |
| 7303 // absolute value: it is either equal to 1 (special case of -1 and 1), | |
| 7304 // greater than 1 (not a special case) or less than 1 (special case of 0). | |
| 7305 __ cmp(source_, Operand(1)); | |
| 7306 __ b(gt, ¬_special); | |
| 7307 | |
| 7308 // For 1 or -1 we need to or in the 0 exponent (biased to 1023). | |
| 7309 static const uint32_t exponent_word_for_1 = | |
| 7310 HeapNumber::kExponentBias << HeapNumber::kExponentShift; | |
| 7311 __ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, eq); | |
| 7312 // 1, 0 and -1 all have 0 for the second word. | |
| 7313 __ mov(mantissa, Operand(0)); | |
| 7314 __ Ret(); | |
| 7315 | |
| 7316 __ bind(¬_special); | |
| 7317 // Count leading zeros. Uses mantissa for a scratch register on pre-ARM5. | |
| 7318 // Gets the wrong answer for 0, but we already checked for that case above. | |
| 7319 __ CountLeadingZeros(zeros_, source_, mantissa); | |
| 7320 // Compute exponent and or it into the exponent register. | |
| 7321 // We use mantissa as a scratch register here. Use a fudge factor to | |
| 7322 // divide the constant 31 + HeapNumber::kExponentBias, 0x41d, into two parts | |
| 7323 // that fit in the ARM's constant field. | |
| 7324 int fudge = 0x400; | |
| 7325 __ rsb(mantissa, zeros_, Operand(31 + HeapNumber::kExponentBias - fudge)); | |
| 7326 __ add(mantissa, mantissa, Operand(fudge)); | |
| 7327 __ orr(exponent, | |
| 7328 exponent, | |
| 7329 Operand(mantissa, LSL, HeapNumber::kExponentShift)); | |
| 7330 // Shift up the source chopping the top bit off. | |
| 7331 __ add(zeros_, zeros_, Operand(1)); | |
| 7332 // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0. | |
| 7333 __ mov(source_, Operand(source_, LSL, zeros_)); | |
| 7334 // Compute lower part of fraction (last 12 bits). | |
| 7335 __ mov(mantissa, Operand(source_, LSL, HeapNumber::kMantissaBitsInTopWord)); | |
| 7336 // And the top (top 20 bits). | |
| 7337 __ orr(exponent, | |
| 7338 exponent, | |
| 7339 Operand(source_, LSR, 32 - HeapNumber::kMantissaBitsInTopWord)); | |
| 7340 __ Ret(); | |
| 7341 } | |
| 7342 | |
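The split the stub's comment describes can be sanity-checked against the host compiler: the exponent word holds the sign, the 11 biased exponent bits, and the top 20 mantissa bits; the other word holds the remaining 32 mantissa bits. A standalone check, not part of the patch:

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    // Host-side check of the two-word encoding the stub produces.
    static void double_words(int value, uint32_t* hi, uint32_t* lo) {
      double d = (double)value;
      uint64_t bits;
      memcpy(&bits, &d, sizeof(bits));
      *hi = (uint32_t)(bits >> 32);  // sign + 11 exponent bits + 20 mantissa bits
      *lo = (uint32_t)bits;          // remaining 32 mantissa bits
    }

    int main() {
      uint32_t hi, lo;
      double_words(1, &hi, &lo);
      assert(hi == 0x3FF00000u && lo == 0);  // exponent bias 1023, empty mantissa
      double_words(-1, &hi, &lo);
      assert(hi == 0xBFF00000u && lo == 0);  // same, with the sign bit set
      double_words(5, &hi, &lo);             // 5 = 1.25 * 2^2
      assert(hi == 0x40140000u && lo == 0);
      return 0;
    }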
| 7343 | |
| 7344 // See comment for class. | |
| 7345 void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) { | |
| 7346 Label max_negative_int; | |
| 7347 // the_int_ has the answer which is a signed int32 but not a Smi. | |
| 7348 // We test for the special value that has a different exponent. This test | |
| 7349 // has the neat side effect of setting the flags according to the sign. | |
| 7350 STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u); | |
| 7351 __ cmp(the_int_, Operand(0x80000000u)); | |
| 7352 __ b(eq, &max_negative_int); | |
| 7353 // Set up the correct exponent in scratch_. All non-Smi int32s have the same one. | |
| 7354 // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). | |
| 7355 uint32_t non_smi_exponent = | |
| 7356 (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift; | |
| 7357 __ mov(scratch_, Operand(non_smi_exponent)); | |
| 7358 // Set the sign bit in scratch_ if the value was negative. | |
| 7359 __ orr(scratch_, scratch_, Operand(HeapNumber::kSignMask), LeaveCC, cs); | |
| 7360 // Subtract from 0 if the value was negative. | |
| 7361 __ rsb(the_int_, the_int_, Operand(0), LeaveCC, cs); | |
| 7362 // We should be masking the implicit first digit of the mantissa away here, | |
| 7363 // but it just ends up combining harmlessly with the last digit of the | |
| 7364 // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get | |
| 7365 // the most significant 1 to hit the last bit of the 12 bit sign and exponent. | |
| 7366 ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0); | |
| 7367 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2; | |
| 7368 __ orr(scratch_, scratch_, Operand(the_int_, LSR, shift_distance)); | |
| 7369 __ str(scratch_, FieldMemOperand(the_heap_number_, | |
| 7370 HeapNumber::kExponentOffset)); | |
| 7371 __ mov(scratch_, Operand(the_int_, LSL, 32 - shift_distance)); | |
| 7372 __ str(scratch_, FieldMemOperand(the_heap_number_, | |
| 7373 HeapNumber::kMantissaOffset)); | |
| 7374 __ Ret(); | |
| 7375 | |
| 7376 __ bind(&max_negative_int); | |
| 7377 // The max negative int32 is stored as a positive number in the mantissa of | |
| 7378 // a double because it uses a sign bit instead of using two's complement. | |
| 7379 // The actual mantissa bits stored are all 0 because the implicit most | |
| 7380 // significant 1 bit is not stored. | |
| 7381 non_smi_exponent += 1 << HeapNumber::kExponentShift; | |
| 7382 __ mov(ip, Operand(HeapNumber::kSignMask | non_smi_exponent)); | |
| 7383 __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset)); | |
| 7384 __ mov(ip, Operand(0)); | |
| 7385 __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset)); | |
| 7386 __ Ret(); | |
| 7387 } | |
| 7388 | |
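The max_negative_int path stores -2^31 as the sign bit plus biased exponent 1023 + 31 with an all-zero stored mantissa, since the implicit leading 1 carries the whole value. A quick host-side check of that bit pattern:

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    int main() {
      double d = -2147483648.0;  // -2^31, the non-Smi special case
      uint64_t bits;
      memcpy(&bits, &d, sizeof(bits));
      // Sign bit set, biased exponent 1023 + 31 = 0x41E, stored mantissa zero.
      assert((uint32_t)(bits >> 32) == 0xC1E00000u);
      assert((uint32_t)bits == 0);
      return 0;
    }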
| 7389 | |
| 7390 // Handle the case where the lhs and rhs are the same object. | |
| 7391 // Equality is almost reflexive (everything but NaN), so this is a test | |
| 7392 // for "identity and not NaN". | |
| 7393 static void EmitIdenticalObjectComparison(MacroAssembler* masm, | |
| 7394 Label* slow, | |
| 7395 Condition cc, | |
| 7396 bool never_nan_nan) { | |
| 7397 Label not_identical; | |
| 7398 Label heap_number, return_equal; | |
| 7399 __ cmp(r0, r1); | |
| 7400 __ b(ne, ¬_identical); | |
| 7401 | |
| 7402 // The two objects are identical. If we know that one of them isn't NaN then | |
| 7403 // we now know they test equal. | |
| 7404 if (cc != eq || !never_nan_nan) { | |
| 7405 // Test for NaN. Sadly, we can't just compare to Factory::nan_value(), | |
| 7406 // so we do the second best thing - test it ourselves. | |
| 7407 // They are both equal and they are not both Smis so both of them are not | |
| 7408 // Smis. If it's not a heap number, then return equal. | |
| 7409 if (cc == lt || cc == gt) { | |
| 7410 __ CompareObjectType(r0, r4, r4, FIRST_JS_OBJECT_TYPE); | |
| 7411 __ b(ge, slow); | |
| 7412 } else { | |
| 7413 __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE); | |
| 7414 __ b(eq, &heap_number); | |
| 7415 // Comparing JS objects with <=, >= is complicated. | |
| 7416 if (cc != eq) { | |
| 7417 __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE)); | |
| 7418 __ b(ge, slow); | |
| 7419 // Normally here we fall through to return_equal, but undefined is | |
| 7420 // special: (undefined == undefined) == true, but | |
| 7421 // (undefined <= undefined) == false! See ECMAScript 11.8.5. | |
| 7422 if (cc == le || cc == ge) { | |
| 7423 __ cmp(r4, Operand(ODDBALL_TYPE)); | |
| 7424 __ b(ne, &return_equal); | |
| 7425 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); | |
| 7426 __ cmp(r0, r2); | |
| 7427 __ b(ne, &return_equal); | |
| 7428 if (cc == le) { | |
| 7429 // undefined <= undefined should fail. | |
| 7430 __ mov(r0, Operand(GREATER)); | |
| 7431 } else { | |
| 7432 // undefined >= undefined should fail. | |
| 7433 __ mov(r0, Operand(LESS)); | |
| 7434 } | |
| 7435 __ Ret(); | |
| 7436 } | |
| 7437 } | |
| 7438 } | |
| 7439 } | |
| 7440 | |
| 7441 __ bind(&return_equal); | |
| 7442 if (cc == lt) { | |
| 7443 __ mov(r0, Operand(GREATER)); // Things aren't less than themselves. | |
| 7444 } else if (cc == gt) { | |
| 7445 __ mov(r0, Operand(LESS)); // Things aren't greater than themselves. | |
| 7446 } else { | |
| 7447 __ mov(r0, Operand(EQUAL)); // Things are <=, >=, ==, === themselves. | |
| 7448 } | |
| 7449 __ Ret(); | |
| 7450 | |
| 7451 if (cc != eq || !never_nan_nan) { | |
| 7452 // For less and greater we don't have to check for NaN since the result of | |
| 7453 // x < x is false regardless. For the others here is some code to check | |
| 7454 // for NaN. | |
| 7455 if (cc != lt && cc != gt) { | |
| 7456 __ bind(&heap_number); | |
| 7457 // It is a heap number, so return non-equal if it's NaN and equal if it's | |
| 7458 // not NaN. | |
| 7459 | |
| 7460 // The representation of NaN values has all exponent bits (52..62) set, | |
| 7461 // and not all mantissa bits (0..51) clear. | |
| 7462 // Read top bits of double representation (second word of value). | |
| 7463 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); | |
| 7464 // Test that exponent bits are all set. | |
| 7465 __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits); | |
| 7466 // NaNs have all-one exponents so they sign extend to -1. | |
| 7467 __ cmp(r3, Operand(-1)); | |
| 7468 __ b(ne, &return_equal); | |
| 7469 | |
| 7470 // Shift out flag and all exponent bits, retaining only mantissa. | |
| 7471 __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord)); | |
| 7472 // Or with all low-bits of mantissa. | |
| 7473 __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); | |
| 7474 __ orr(r0, r3, Operand(r2), SetCC); | |
| 7475 // For equal we already have the right value in r0: Return zero (equal) | |
| 7476 // if all bits in mantissa are zero (it's an Infinity) and non-zero if | |
| 7477 // not (it's a NaN). For <= and >= we need to load r0 with the failing | |
| 7478 // value if it's a NaN. | |
| 7479 if (cc != eq) { | |
| 7480 // All-zero means Infinity means equal. | |
| 7481 __ Ret(eq); | |
| 7482 if (cc == le) { | |
| 7483 __ mov(r0, Operand(GREATER)); // NaN <= NaN should fail. | |
| 7484 } else { | |
| 7485 __ mov(r0, Operand(LESS)); // NaN >= NaN should fail. | |
| 7486 } | |
| 7487 } | |
| 7488 __ Ret(); | |
| 7489 } | |
| 7490 // No fall through here. | |
| 7491 } | |
| 7492 | |
| 7493 __ bind(¬_identical); | |
| 7494 } | |
| 7495 | |
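The Sbfx/cmp pair works because a NaN's 11 exponent bits are all ones, so the sign-extended field equals -1; the value is then a NaN only if some mantissa bit is also set (an all-zero mantissa is an infinity). The same test written portably:

    #include <assert.h>
    #include <math.h>
    #include <stdint.h>
    #include <string.h>

    // The NaN test used above: all exponent bits set, non-zero mantissa.
    static bool is_nan_bits(double d) {
      uint64_t bits;
      memcpy(&bits, &d, sizeof(bits));
      bool exponent_all_ones = ((bits >> 52) & 0x7FF) == 0x7FF;
      bool mantissa_nonzero = (bits & 0xFFFFFFFFFFFFFULL) != 0;  // low 52 bits
      return exponent_all_ones && mantissa_nonzero;
    }

    int main() {
      assert(is_nan_bits(NAN));
      assert(!is_nan_bits(INFINITY));  // all-one exponent but zero mantissa
      assert(!is_nan_bits(1.0));
      return 0;
    }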
| 7496 | |
| 7497 // See comment at call site. | |
| 7498 static void EmitSmiNonsmiComparison(MacroAssembler* masm, | |
| 7499 Register lhs, | |
| 7500 Register rhs, | |
| 7501 Label* lhs_not_nan, | |
| 7502 Label* slow, | |
| 7503 bool strict) { | |
| 7504 ASSERT((lhs.is(r0) && rhs.is(r1)) || | |
| 7505 (lhs.is(r1) && rhs.is(r0))); | |
| 7506 | |
| 7507 Label rhs_is_smi; | |
| 7508 __ tst(rhs, Operand(kSmiTagMask)); | |
| 7509 __ b(eq, &rhs_is_smi); | |
| 7510 | |
| 7511 // Lhs is a Smi. Check whether the rhs is a heap number. | |
| 7512 __ CompareObjectType(rhs, r4, r4, HEAP_NUMBER_TYPE); | |
| 7513 if (strict) { | |
| 7514 // If rhs is not a number and lhs is a Smi then strict equality cannot | |
| 7515 // succeed. Return non-equal. | |
| 7516 // If rhs is r0 then there is already a non-zero value in it. | |
| 7517 if (!rhs.is(r0)) { | |
| 7518 __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne); | |
| 7519 } | |
| 7520 __ Ret(ne); | |
| 7521 } else { | |
| 7522 // Smi compared non-strictly with a non-Smi non-heap-number. Call | |
| 7523 // the runtime. | |
| 7524 __ b(ne, slow); | |
| 7525 } | |
| 7526 | |
| 7527 // Lhs is a smi, rhs is a number. | |
| 7528 if (CpuFeatures::IsSupported(VFP3)) { | |
| 7529 // Convert lhs to a double in d7. | |
| 7530 CpuFeatures::Scope scope(VFP3); | |
| 7531 __ SmiToDoubleVFPRegister(lhs, d7, r7, s15); | |
| 7532 // Load the double from rhs, tagged HeapNumber r0, to d6. | |
| 7533 __ sub(r7, rhs, Operand(kHeapObjectTag)); | |
| 7534 __ vldr(d6, r7, HeapNumber::kValueOffset); | |
| 7535 } else { | |
| 7536 __ push(lr); | |
| 7537 // Convert lhs to a double in r2, r3. | |
| 7538 __ mov(r7, Operand(lhs)); | |
| 7539 ConvertToDoubleStub stub1(r3, r2, r7, r6); | |
| 7540 __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET); | |
| 7541 // Load rhs to a double in r0, r1. | |
| 7542 __ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset)); | |
| 7543 __ pop(lr); | |
| 7544 } | |
| 7545 | |
| 7546 // We now have both loaded as doubles but we can skip the lhs nan check | |
| 7547 // since it's a smi. | |
| 7548 __ jmp(lhs_not_nan); | |
| 7549 | |
| 7550 __ bind(&rhs_is_smi); | |
| 7551 // Rhs is a smi. Check whether the non-smi lhs is a heap number. | |
| 7552 __ CompareObjectType(lhs, r4, r4, HEAP_NUMBER_TYPE); | |
| 7553 if (strict) { | |
| 7554 // If lhs is not a number and rhs is a smi then strict equality cannot | |
| 7555 // succeed. Return non-equal. | |
| 7556 // If lhs is r0 then there is already a non-zero value in it. | |
| 7557 if (!lhs.is(r0)) { | |
| 7558 __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne); | |
| 7559 } | |
| 7560 __ Ret(ne); | |
| 7561 } else { | |
| 7562 // Smi compared non-strictly with a non-smi non-heap-number. Call | |
| 7563 // the runtime. | |
| 7564 __ b(ne, slow); | |
| 7565 } | |
| 7566 | |
| 7567 // Rhs is a smi, lhs is a heap number. | |
| 7568 if (CpuFeatures::IsSupported(VFP3)) { | |
| 7569 CpuFeatures::Scope scope(VFP3); | |
| 7570 // Load the double from lhs, tagged HeapNumber r1, to d7. | |
| 7571 __ sub(r7, lhs, Operand(kHeapObjectTag)); | |
| 7572 __ vldr(d7, r7, HeapNumber::kValueOffset); | |
| 7573 // Convert rhs to a double in d6. | |
| 7574 __ SmiToDoubleVFPRegister(rhs, d6, r7, s13); | |
| 7575 } else { | |
| 7576 __ push(lr); | |
| 7577 // Load lhs to a double in r2, r3. | |
| 7578 __ Ldrd(r2, r3, FieldMemOperand(lhs, HeapNumber::kValueOffset)); | |
| 7579 // Convert rhs to a double in r0, r1. | |
| 7580 __ mov(r7, Operand(rhs)); | |
| 7581 ConvertToDoubleStub stub2(r1, r0, r7, r6); | |
| 7582 __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET); | |
| 7583 __ pop(lr); | |
| 7584 } | |
| 7585 // Fall through to both_loaded_as_doubles. | |
| 7586 } | |
| 7587 | |
| 7588 | |
| 7589 void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cc) { | |
| 7590 bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset); | |
| 7591 Register rhs_exponent = exp_first ? r0 : r1; | |
| 7592 Register lhs_exponent = exp_first ? r2 : r3; | |
| 7593 Register rhs_mantissa = exp_first ? r1 : r0; | |
| 7594 Register lhs_mantissa = exp_first ? r3 : r2; | |
| 7595 Label one_is_nan, neither_is_nan; | |
| 7596 | |
| 7597 __ Sbfx(r4, | |
| 7598 lhs_exponent, | |
| 7599 HeapNumber::kExponentShift, | |
| 7600 HeapNumber::kExponentBits); | |
| 7601 // NaNs have all-one exponents so they sign extend to -1. | |
| 7602 __ cmp(r4, Operand(-1)); | |
| 7603 __ b(ne, lhs_not_nan); | |
| 7604 __ mov(r4, | |
| 7605 Operand(lhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord), | |
| 7606 SetCC); | |
| 7607 __ b(ne, &one_is_nan); | |
| 7608 __ cmp(lhs_mantissa, Operand(0)); | |
| 7609 __ b(ne, &one_is_nan); | |
| 7610 | |
| 7611 __ bind(lhs_not_nan); | |
| 7612 __ Sbfx(r4, | |
| 7613 rhs_exponent, | |
| 7614 HeapNumber::kExponentShift, | |
| 7615 HeapNumber::kExponentBits); | |
| 7616 // NaNs have all-one exponents so they sign extend to -1. | |
| 7617 __ cmp(r4, Operand(-1)); | |
| 7618 __ b(ne, &neither_is_nan); | |
| 7619 __ mov(r4, | |
| 7620 Operand(rhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord), | |
| 7621 SetCC); | |
| 7622 __ b(ne, &one_is_nan); | |
| 7623 __ cmp(rhs_mantissa, Operand(0)); | |
| 7624 __ b(eq, &neither_is_nan); | |
| 7625 | |
| 7626 __ bind(&one_is_nan); | |
| 7627 // NaN comparisons always fail. | |
| 7628 // Load whatever we need in r0 to make the comparison fail. | |
| 7629 if (cc == lt || cc == le) { | |
| 7630 __ mov(r0, Operand(GREATER)); | |
| 7631 } else { | |
| 7632 __ mov(r0, Operand(LESS)); | |
| 7633 } | |
| 7634 __ Ret(); | |
| 7635 | |
| 7636 __ bind(&neither_is_nan); | |
| 7637 } | |
| 7638 | |
| 7639 | |
| 7640 // See comment at call site. | |
| 7641 static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) { | |
| 7642 bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset); | |
| 7643 Register rhs_exponent = exp_first ? r0 : r1; | |
| 7644 Register lhs_exponent = exp_first ? r2 : r3; | |
| 7645 Register rhs_mantissa = exp_first ? r1 : r0; | |
| 7646 Register lhs_mantissa = exp_first ? r3 : r2; | |
| 7647 | |
| 7648 // r0, r1, r2, r3 have the two doubles. Neither is a NaN. | |
| 7649 if (cc == eq) { | |
| 7650 // Doubles are not equal unless they have the same bit pattern. | |
| 7651 // Exception: 0 and -0. | |
| 7652 __ cmp(rhs_mantissa, Operand(lhs_mantissa)); | |
| 7653 __ orr(r0, rhs_mantissa, Operand(lhs_mantissa), LeaveCC, ne); | |
| 7654 // Return non-zero if the numbers are unequal. | |
| 7655 __ Ret(ne); | |
| 7656 | |
| 7657 __ sub(r0, rhs_exponent, Operand(lhs_exponent), SetCC); | |
| 7658 // If exponents are equal then return 0. | |
| 7659 __ Ret(eq); | |
| 7660 | |
| 7661 // Exponents are unequal. The only way we can return that the numbers | |
| 7662 // are equal is if one is -0 and the other is 0. We already dealt | |
| 7663 // with the case where both are -0 or both are 0. | |
| 7664 // We start by seeing if the mantissas (that are equal) or the bottom | |
| 7665 // 31 bits of the rhs exponent are non-zero. If so we return not | |
| 7666 // equal. | |
| 7667 __ orr(r4, lhs_mantissa, Operand(lhs_exponent, LSL, kSmiTagSize), SetCC); | |
| 7668 __ mov(r0, Operand(r4), LeaveCC, ne); | |
| 7669 __ Ret(ne); | |
| 7670 // Now they are equal if and only if the lhs exponent is zero in its | |
| 7671 // low 31 bits. | |
| 7672 __ mov(r0, Operand(rhs_exponent, LSL, kSmiTagSize)); | |
| 7673 __ Ret(); | |
| 7674 } else { | |
| 7675 // Call a native function to do a comparison between two non-NaNs. | |
| 7676 // Call C routine that may not cause GC or other trouble. | |
| 7677 __ push(lr); | |
| 7678 __ PrepareCallCFunction(4, r5); // Two doubles count as 4 arguments. | |
| 7679 __ CallCFunction(ExternalReference::compare_doubles(), 4); | |
| 7680 __ pop(pc); // Return. | |
| 7681 } | |
| 7682 } | |
| 7683 | |
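The eq path rests on two bit-level facts: non-NaN doubles are equal exactly when their bit patterns are, with the single exception that +0 and -0 compare equal despite differing in the sign bit. A compact host-side equivalent:

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    // Equality of non-NaN doubles: identical bits, or +0 versus -0.
    static bool doubles_equal(double a, double b) {
      uint64_t x, y;
      memcpy(&x, &a, sizeof(x));
      memcpy(&y, &b, sizeof(y));
      if (x == y) return true;        // identical bit patterns
      return ((x | y) << 1) == 0;     // only sign bits differ: +0 and -0
    }

    int main() {
      assert(doubles_equal(0.0, -0.0));
      assert(doubles_equal(1.5, 1.5));
      assert(!doubles_equal(1.5, 2.5));
      return 0;
    }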
| 7684 | |
| 7685 // See comment at call site. | |
| 7686 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, | |
| 7687 Register lhs, | |
| 7688 Register rhs) { | |
| 7689 ASSERT((lhs.is(r0) && rhs.is(r1)) || | |
| 7690 (lhs.is(r1) && rhs.is(r0))); | |
| 7691 | |
| 7692 // If either operand is a JSObject or an oddball value, then they are | |
| 7693 // not equal since their pointers are different. | |
| 7694 // There is no test for undetectability in strict equality. | |
| 7695 STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE); | |
| 7696 Label first_non_object; | |
| 7697 // Get the type of the first operand into r2 and compare it with | |
| 7698 // FIRST_JS_OBJECT_TYPE. | |
| 7699 __ CompareObjectType(rhs, r2, r2, FIRST_JS_OBJECT_TYPE); | |
| 7700 __ b(lt, &first_non_object); | |
| 7701 | |
| 7702 // Return non-zero (r0 is not zero). | |
| 7703 Label return_not_equal; | |
| 7704 __ bind(&return_not_equal); | |
| 7705 __ Ret(); | |
| 7706 | |
| 7707 __ bind(&first_non_object); | |
| 7708 // Check for oddballs: true, false, null, undefined. | |
| 7709 __ cmp(r2, Operand(ODDBALL_TYPE)); | |
| 7710 __ b(eq, &return_not_equal); | |
| 7711 | |
| 7712 __ CompareObjectType(lhs, r3, r3, FIRST_JS_OBJECT_TYPE); | |
| 7713 __ b(ge, &return_not_equal); | |
| 7714 | |
| 7715 // Check for oddballs: true, false, null, undefined. | |
| 7716 __ cmp(r3, Operand(ODDBALL_TYPE)); | |
| 7717 __ b(eq, &return_not_equal); | |
| 7718 | |
| 7719 // Now that we have the types we might as well check for symbol-symbol. | |
| 7720 // Ensure that no non-strings have the symbol bit set. | |
| 7721 STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask); | |
| 7722 STATIC_ASSERT(kSymbolTag != 0); | |
| 7723 __ and_(r2, r2, Operand(r3)); | |
| 7724 __ tst(r2, Operand(kIsSymbolMask)); | |
| 7725 __ b(ne, &return_not_equal); | |
| 7726 } | |
| 7727 | |
| 7728 | |
| 7729 // See comment at call site. | |
| 7730 static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm, | |
| 7731 Register lhs, | |
| 7732 Register rhs, | |
| 7733 Label* both_loaded_as_doubles, | |
| 7734 Label* not_heap_numbers, | |
| 7735 Label* slow) { | |
| 7736 ASSERT((lhs.is(r0) && rhs.is(r1)) || | |
| 7737 (lhs.is(r1) && rhs.is(r0))); | |
| 7738 | |
| 7739 __ CompareObjectType(rhs, r3, r2, HEAP_NUMBER_TYPE); | |
| 7740 __ b(ne, not_heap_numbers); | |
| 7741 __ ldr(r2, FieldMemOperand(lhs, HeapObject::kMapOffset)); | |
| 7742 __ cmp(r2, r3); | |
| 7743 __ b(ne, slow); // First was a heap number, second wasn't. Go slow case. | |
| 7744 | |
| 7745 // Both are heap numbers. Load them up then jump to the code we have | |
| 7746 // for that. | |
| 7747 if (CpuFeatures::IsSupported(VFP3)) { | |
| 7748 CpuFeatures::Scope scope(VFP3); | |
| 7749 __ sub(r7, rhs, Operand(kHeapObjectTag)); | |
| 7750 __ vldr(d6, r7, HeapNumber::kValueOffset); | |
| 7751 __ sub(r7, lhs, Operand(kHeapObjectTag)); | |
| 7752 __ vldr(d7, r7, HeapNumber::kValueOffset); | |
| 7753 } else { | |
| 7754 __ Ldrd(r2, r3, FieldMemOperand(lhs, HeapNumber::kValueOffset)); | |
| 7755 __ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset)); | |
| 7756 } | |
| 7757 __ jmp(both_loaded_as_doubles); | |
| 7758 } | |
| 7759 | |
| 7760 | |
| 7761 // Fast negative check for symbol-to-symbol equality. | |
| 7762 static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm, | |
| 7763 Register lhs, | |
| 7764 Register rhs, | |
| 7765 Label* possible_strings, | |
| 7766 Label* not_both_strings) { | |
| 7767 ASSERT((lhs.is(r0) && rhs.is(r1)) || | |
| 7768 (lhs.is(r1) && rhs.is(r0))); | |
| 7769 | |
| 7770 // r2 is object type of rhs. | |
| 7771 // Ensure that no non-strings have the symbol bit set. | |
| 7772 Label object_test; | |
| 7773 STATIC_ASSERT(kSymbolTag != 0); | |
| 7774 __ tst(r2, Operand(kIsNotStringMask)); | |
| 7775 __ b(ne, &object_test); | |
| 7776 __ tst(r2, Operand(kIsSymbolMask)); | |
| 7777 __ b(eq, possible_strings); | |
| 7778 __ CompareObjectType(lhs, r3, r3, FIRST_NONSTRING_TYPE); | |
| 7779 __ b(ge, not_both_strings); | |
| 7780 __ tst(r3, Operand(kIsSymbolMask)); | |
| 7781 __ b(eq, possible_strings); | |
| 7782 | |
| 7783 // Both are symbols. We already checked they weren't the same pointer | |
| 7784 // so they are not equal. | |
| 7785 __ mov(r0, Operand(NOT_EQUAL)); | |
| 7786 __ Ret(); | |
| 7787 | |
| 7788 __ bind(&object_test); | |
| 7789 __ cmp(r2, Operand(FIRST_JS_OBJECT_TYPE)); | |
| 7790 __ b(lt, not_both_strings); | |
| 7791 __ CompareObjectType(lhs, r2, r3, FIRST_JS_OBJECT_TYPE); | |
| 7792 __ b(lt, not_both_strings); | |
| 7793 // If both objects are undetectable, they are equal. Otherwise, they | |
| 7794 // are not equal, since they are different objects and an object is not | |
| 7795 // equal to undefined. | |
| 7796 __ ldr(r3, FieldMemOperand(rhs, HeapObject::kMapOffset)); | |
| 7797 __ ldrb(r2, FieldMemOperand(r2, Map::kBitFieldOffset)); | |
| 7798 __ ldrb(r3, FieldMemOperand(r3, Map::kBitFieldOffset)); | |
| 7799 __ and_(r0, r2, Operand(r3)); | |
| 7800 __ and_(r0, r0, Operand(1 << Map::kIsUndetectable)); | |
| 7801 __ eor(r0, r0, Operand(1 << Map::kIsUndetectable)); | |
| 7802 __ Ret(); | |
| 7803 } | |
| 7804 | |
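The closing and/and/eor sequence yields zero, the "equal" result, exactly when both maps have the kIsUndetectable bit set. Modeled in isolation, with an illustrative bit position:

    #include <assert.h>

    // r0 = ((bf_rhs & bf_lhs) & bit) ^ bit: zero ("equal") iff both bits set.
    static int object_test_result(int bf_rhs, int bf_lhs, int undetectable_bit) {
      int r = bf_rhs & bf_lhs;
      r &= undetectable_bit;
      return r ^ undetectable_bit;
    }

    int main() {
      const int kBit = 1 << 4;  // illustrative position for Map::kIsUndetectable
      assert(object_test_result(kBit, kBit, kBit) == 0);  // both undetectable
      assert(object_test_result(kBit, 0, kBit) != 0);     // only one is
      assert(object_test_result(0, 0, kBit) != 0);        // neither is
      return 0;
    }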
| 7805 | |
| 7806 void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm, | |
| 7807 Register object, | |
| 7808 Register result, | |
| 7809 Register scratch1, | |
| 7810 Register scratch2, | |
| 7811 Register scratch3, | |
| 7812 bool object_is_smi, | |
| 7813 Label* not_found) { | |
| 7814 // Use of registers. Register result is used as a temporary. | |
| 7815 Register number_string_cache = result; | |
| 7816 Register mask = scratch3; | |
| 7817 | |
| 7818 // Load the number string cache. | |
| 7819 __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex); | |
| 7820 | |
| 7821 // Make the hash mask from the length of the number string cache. It | |
| 7822 // contains two elements (number and string) for each cache entry. | |
| 7823 __ ldr(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset)); | |
| 7824 // Divide length by two (length is a smi). | |
| 7825 __ mov(mask, Operand(mask, ASR, kSmiTagSize + 1)); | |
| 7826 __ sub(mask, mask, Operand(1)); // Make mask. | |
| 7827 | |
| 7828 // Calculate the entry in the number string cache. The hash value in the | |
| 7829 // number string cache for smis is just the smi value, and the hash for | |
| 7830 // doubles is the xor of the upper and lower words. See | |
| 7831 // Heap::GetNumberStringCache. | |
| 7832 Label is_smi; | |
| 7833 Label load_result_from_cache; | |
| 7834 if (!object_is_smi) { | |
| 7835 __ BranchOnSmi(object, &is_smi); | |
| 7836 if (CpuFeatures::IsSupported(VFP3)) { | |
| 7837 CpuFeatures::Scope scope(VFP3); | |
| 7838 __ CheckMap(object, | |
| 7839 scratch1, | |
| 7840 Heap::kHeapNumberMapRootIndex, | |
| 7841 not_found, | |
| 7842 true); | |
| 7843 | |
| 7844 STATIC_ASSERT(8 == kDoubleSize); | |
| 7845 __ add(scratch1, | |
| 7846 object, | |
| 7847 Operand(HeapNumber::kValueOffset - kHeapObjectTag)); | |
| 7848 __ ldm(ia, scratch1, scratch1.bit() | scratch2.bit()); | |
| 7849 __ eor(scratch1, scratch1, Operand(scratch2)); | |
| 7850 __ and_(scratch1, scratch1, Operand(mask)); | |
| 7851 | |
| 7852 // Calculate address of entry in string cache: each entry consists | |
| 7853 // of two pointer sized fields. | |
| 7854 __ add(scratch1, | |
| 7855 number_string_cache, | |
| 7856 Operand(scratch1, LSL, kPointerSizeLog2 + 1)); | |
| 7857 | |
| 7858 Register probe = mask; | |
| 7859 __ ldr(probe, | |
| 7860 FieldMemOperand(scratch1, FixedArray::kHeaderSize)); | |
| 7861 __ BranchOnSmi(probe, not_found); | |
| 7862 __ sub(scratch2, object, Operand(kHeapObjectTag)); | |
| 7863 __ vldr(d0, scratch2, HeapNumber::kValueOffset); | |
| 7864 __ sub(probe, probe, Operand(kHeapObjectTag)); | |
| 7865 __ vldr(d1, probe, HeapNumber::kValueOffset); | |
| 7866 __ vcmp(d0, d1); | |
| 7867 __ vmrs(pc); | |
| 7868 __ b(ne, not_found); // The cache did not contain this value. | |
| 7869 __ b(&load_result_from_cache); | |
| 7870 } else { | |
| 7871 __ b(not_found); | |
| 7872 } | |
| 7873 } | |
| 7874 | |
| 7875 __ bind(&is_smi); | |
| 7876 Register scratch = scratch1; | |
| 7877 __ and_(scratch, mask, Operand(object, ASR, 1)); | |
| 7878 // Calculate address of entry in string cache: each entry consists | |
| 7879 // of two pointer sized fields. | |
| 7880 __ add(scratch, | |
| 7881 number_string_cache, | |
| 7882 Operand(scratch, LSL, kPointerSizeLog2 + 1)); | |
| 7883 | |
| 7884 // Check if the entry is the smi we are looking for. | |
| 7885 Register probe = mask; | |
| 7886 __ ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize)); | |
| 7887 __ cmp(object, probe); | |
| 7888 __ b(ne, not_found); | |
| 7889 | |
| 7890 // Get the result from the cache. | |
| 7891 __ bind(&load_result_from_cache); | |
| 7892 __ ldr(result, | |
| 7893 FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize)); | |
| 7894 __ IncrementCounter(&Counters::number_to_string_native, | |
| 7895 1, | |
| 7896 scratch1, | |
| 7897 scratch2); | |
| 7898 } | |
| 7899 | |
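The hash mirrors Heap::GetNumberStringCache: a smi hashes to its own value, a heap number to the xor of the two 32-bit halves of its double (the xor makes the word order irrelevant); the masked hash then selects a (number, string) pair, hence the scale by two pointer-sized fields. The hash in isolation:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    // Hash used to index the number-string cache, per the comment above.
    static uint32_t heap_number_hash(double d, uint32_t mask) {
      uint32_t words[2];
      memcpy(words, &d, sizeof(words));
      return (words[0] ^ words[1]) & mask;  // xor of upper and lower words
    }

    static uint32_t smi_hash(int32_t value, uint32_t mask) {
      return (uint32_t)value & mask;        // a smi hashes to its own value
    }

    int main() {
      // With a 64-entry cache the mask is 63, matching length/2 - 1 above.
      printf("%u %u\n", smi_hash(42, 63), heap_number_hash(2.5, 63));
      return 0;
    }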
| 7900 | |
| 7901 void NumberToStringStub::Generate(MacroAssembler* masm) { | |
| 7902 Label runtime; | |
| 7903 | |
| 7904 __ ldr(r1, MemOperand(sp, 0)); | |
| 7905 | |
| 7906 // Generate code to lookup number in the number string cache. | |
| 7907 GenerateLookupNumberStringCache(masm, r1, r0, r2, r3, r4, false, &runtime); | |
| 7908 __ add(sp, sp, Operand(1 * kPointerSize)); | |
| 7909 __ Ret(); | |
| 7910 | |
| 7911 __ bind(&runtime); | |
| 7912 // Handle number to string in the runtime system if not found in the cache. | |
| 7913 __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1); | |
| 7914 } | |
| 7915 | |
| 7916 | |
| 7917 void RecordWriteStub::Generate(MacroAssembler* masm) { | |
| 7918 __ add(offset_, object_, Operand(offset_)); | |
| 7919 __ RecordWriteHelper(object_, offset_, scratch_); | |
| 7920 __ Ret(); | |
| 7921 } | |
| 7922 | |
| 7923 | |
| 7924 // On entry lhs_ and rhs_ are the values to be compared. | |
| 7925 // On exit r0 is 0, positive or negative to indicate the result of | |
| 7926 // the comparison. | |
| 7927 void CompareStub::Generate(MacroAssembler* masm) { | |
| 7928 ASSERT((lhs_.is(r0) && rhs_.is(r1)) || | |
| 7929 (lhs_.is(r1) && rhs_.is(r0))); | |
| 7930 | |
| 7931 Label slow; // Call builtin. | |
| 7932 Label not_smis, both_loaded_as_doubles, lhs_not_nan; | |
| 7933 | |
| 7934 // NOTICE! This code is only reached after a smi-fast-case check, so | |
| 7935 // it is certain that at least one operand isn't a smi. | |
| 7936 | |
| 7937 // Handle the case where the objects are identical. Either returns the answer | |
| 7938 // or goes to slow. Only falls through if the objects were not identical. | |
| 7939 EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_); | |
| 7940 | |
| 7941 // If either is a Smi (we know that not both are), then they can only | |
| 7942 // be strictly equal if the other is a HeapNumber. | |
| 7943 STATIC_ASSERT(kSmiTag == 0); | |
| 7944 ASSERT_EQ(0, Smi::FromInt(0)); | |
| 7945 __ and_(r2, lhs_, Operand(rhs_)); | |
| 7946 __ tst(r2, Operand(kSmiTagMask)); | |
| 7947 __ b(ne, ¬_smis); | |
| 7948 // One operand is a smi. EmitSmiNonsmiComparison generates code that can: | |
| 7949 // 1) Return the answer. | |
| 7950 // 2) Go to slow. | |
| 7951 // 3) Fall through to both_loaded_as_doubles. | |
| 7952 // 4) Jump to lhs_not_nan. | |
| 7953 // In cases 3 and 4 we have found out we were dealing with a number-number | |
| 7954 // comparison. If VFP3 is supported the double values of the numbers have | |
| 7955 // been loaded into d7 and d6. Otherwise, the double values have been loaded | |
| 7956 // into r0, r1, r2, and r3. | |
| 7957 EmitSmiNonsmiComparison(masm, lhs_, rhs_, &lhs_not_nan, &slow, strict_); | |
| 7958 | |
| 7959 __ bind(&both_loaded_as_doubles); | |
| 7960 // The arguments have been converted to doubles and stored in d6 and d7, if | |
| 7961 // VFP3 is supported, or in r0, r1, r2, and r3. | |
| 7962 if (CpuFeatures::IsSupported(VFP3)) { | |
| 7963 __ bind(&lhs_not_nan); | |
| 7964 CpuFeatures::Scope scope(VFP3); | |
| 7965 Label no_nan; | |
| 7966 // ARMv7 VFP3 instructions to implement double precision comparison. | |
| 7967 __ vcmp(d7, d6); | |
| 7968 __ vmrs(pc); // Move vector status bits to normal status bits. | |
| 7969 Label nan; | |
| 7970 __ b(vs, &nan); | |
| 7971 __ mov(r0, Operand(EQUAL), LeaveCC, eq); | |
| 7972 __ mov(r0, Operand(LESS), LeaveCC, lt); | |
| 7973 __ mov(r0, Operand(GREATER), LeaveCC, gt); | |
| 7974 __ Ret(); | |
| 7975 | |
| 7976 __ bind(&nan); | |
| 7977 // If one of the sides was a NaN then the v flag is set. Load r0 with | |
| 7978 // whatever it takes to make the comparison fail, since comparisons with NaN | |
| 7979 // always fail. | |
| 7980 if (cc_ == lt || cc_ == le) { | |
| 7981 __ mov(r0, Operand(GREATER)); | |
| 7982 } else { | |
| 7983 __ mov(r0, Operand(LESS)); | |
| 7984 } | |
| 7985 __ Ret(); | |
| 7986 } else { | |
| 7987 // Checks for NaN in the doubles we have loaded. Can return the answer or | |
| 7988 // fall through if neither is a NaN. Also binds lhs_not_nan. | |
| 7989 EmitNanCheck(masm, &lhs_not_nan, cc_); | |
| 7990 // Compares two doubles in r0, r1, r2, r3 that are not NaNs. Returns the | |
| 7991 // answer. Never falls through. | |
| 7992 EmitTwoNonNanDoubleComparison(masm, cc_); | |
| 7993 } | |
| 7994 | |
| 7995 __ bind(¬_smis); | |
| 7996 // At this point we know we are dealing with two different objects, | |
| 7997 // and neither of them is a Smi. The objects are in rhs_ and lhs_. | |
| 7998 if (strict_) { | |
| 7999 // This returns non-equal for some object types, or falls through if it | |
| 8000 // was not lucky. | |
| 8001 EmitStrictTwoHeapObjectCompare(masm, lhs_, rhs_); | |
| 8002 } | |
| 8003 | |
| 8004 Label check_for_symbols; | |
| 8005 Label flat_string_check; | |
| 8006 // Check for heap-number-heap-number comparison. Can jump to slow case, | |
| 8007 // or load both doubles into r0, r1, r2, r3 and jump to the code that handles | |
| 8008 // that case. If the inputs are not doubles then jumps to check_for_symbols. | |
| 8009 // In this case r2 will contain the type of rhs_. Never falls through. | |
| 8010 EmitCheckForTwoHeapNumbers(masm, | |
| 8011 lhs_, | |
| 8012 rhs_, | |
| 8013 &both_loaded_as_doubles, | |
| 8014 &check_for_symbols, | |
| 8015 &flat_string_check); | |
| 8016 | |
| 8017 __ bind(&check_for_symbols); | |
| 8018 // In the strict case the EmitStrictTwoHeapObjectCompare already took care of | |
| 8019 // symbols. | |
| 8020 if (cc_ == eq && !strict_) { | |
| 8021 // Returns an answer for two symbols or two detectable objects. | |
| 8022 // Otherwise jumps to string case or not both strings case. | |
| 8023 // Assumes that r2 is the type of rhs_ on entry. | |
| 8024 EmitCheckForSymbolsOrObjects(masm, lhs_, rhs_, &flat_string_check, &slow); | |
| 8025 } | |
| 8026 | |
| 8027 // Check for both being sequential ASCII strings, and inline if that is the | |
| 8028 // case. | |
| 8029 __ bind(&flat_string_check); | |
| 8030 | |
| 8031 __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs_, rhs_, r2, r3, &slow); | |
| 8032 | |
| 8033 __ IncrementCounter(&Counters::string_compare_native, 1, r2, r3); | |
| 8034 StringCompareStub::GenerateCompareFlatAsciiStrings(masm, | |
| 8035 lhs_, | |
| 8036 rhs_, | |
| 8037 r2, | |
| 8038 r3, | |
| 8039 r4, | |
| 8040 r5); | |
| 8041 // Never falls through to here. | |
| 8042 | |
| 8043 __ bind(&slow); | |
| 8044 | |
| 8045 __ Push(lhs_, rhs_); | |
| 8046 // Figure out which native to call and setup the arguments. | |
| 8047 Builtins::JavaScript native; | |
| 8048 if (cc_ == eq) { | |
| 8049 native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS; | |
| 8050 } else { | |
| 8051 native = Builtins::COMPARE; | |
| 8052 int ncr; // NaN compare result | |
| 8053 if (cc_ == lt || cc_ == le) { | |
| 8054 ncr = GREATER; | |
| 8055 } else { | |
| 8056 ASSERT(cc_ == gt || cc_ == ge); // remaining cases | |
| 8057 ncr = LESS; | |
| 8058 } | |
| 8059 __ mov(r0, Operand(Smi::FromInt(ncr))); | |
| 8060 __ push(r0); | |
| 8061 } | |
| 8062 | |
| 8063 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater) | |
| 8064 // tagged as a small integer. | |
| 8065 __ InvokeBuiltin(native, JUMP_JS); | |
| 8066 } | |
| 8067 | |
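The pushed ncr value is what makes NaN comparisons fail: the COMPARE builtin hands it back when either operand is NaN, and the caller then tests the result against zero with the original condition. A host-side model of why GREATER is right for lt/le and LESS for gt/ge:

    #include <assert.h>

    enum { LESS = -1, EQUAL = 0, GREATER = 1 };  // V8's compare-result values

    // The builtin returns ncr when either operand is NaN; the caller then
    // applies the original condition to the result, compared against zero.
    static int nan_compare_result(bool cc_is_lt_or_le) {
      return cc_is_lt_or_le ? GREATER : LESS;
    }

    int main() {
      // x < y with a NaN operand: result GREATER, and GREATER < 0 is false.
      assert(!(nan_compare_result(true) < 0));
      // x >= y with a NaN operand: result LESS, and LESS >= 0 is false.
      assert(!(nan_compare_result(false) >= 0));
      return 0;
    }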
| 8068 | |
| 8069 // This stub does not handle the inlined cases (Smis, Booleans, undefined). | |
| 8070 // The stub returns zero for false, and a non-zero value for true. | |
| 8071 void ToBooleanStub::Generate(MacroAssembler* masm) { | |
| 8072 Label false_result; | |
| 8073 Label not_heap_number; | |
| 8074 Register scratch0 = VirtualFrame::scratch0(); | |
| 8075 | |
| 8076 // HeapNumber => false iff +0, -0, or NaN. | |
| 8077 __ ldr(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset)); | |
| 8078 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); | |
| 8079 __ cmp(scratch0, ip); | |
| 8080 __ b(¬_heap_number, ne); | |
| 8081 | |
| 8082 __ sub(ip, tos_, Operand(kHeapObjectTag)); | |
| 8083 __ vldr(d1, ip, HeapNumber::kValueOffset); | |
| 8084 __ vcmp(d1, 0.0); | |
| 8085 __ vmrs(pc); | |
| 8086 // "tos_" is a register, and contains a non zero value by default. | |
| 8087 // Hence we only need to overwrite "tos_" with zero to return false for | |
| 8088 // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true. | |
| 8089 __ mov(tos_, Operand(0), LeaveCC, eq); // for FP_ZERO | |
| 8090 __ mov(tos_, Operand(0), LeaveCC, vs); // for FP_NAN | |
| 8091 __ Ret(); | |
| 8092 | |
| 8093 __ bind(¬_heap_number); | |
| 8094 | |
| 8095 // Check if the value is 'null'. | |
| 8096 // 'null' => false. | |
| 8097 __ LoadRoot(ip, Heap::kNullValueRootIndex); | |
| 8098 __ cmp(tos_, ip); | |
| 8099 __ b(&false_result, eq); | |
| 8100 | |
| 8101 // It can be an undetectable object. | |
| 8102 // Undetectable => false. | |
| 8103 __ ldr(ip, FieldMemOperand(tos_, HeapObject::kMapOffset)); | |
| 8104 __ ldrb(scratch0, FieldMemOperand(ip, Map::kBitFieldOffset)); | |
| 8105 __ and_(scratch0, scratch0, Operand(1 << Map::kIsUndetectable)); | |
| 8106 __ cmp(scratch0, Operand(1 << Map::kIsUndetectable)); | |
| 8107 __ b(&false_result, eq); | |
| 8108 | |
| 8109 // JavaScript object => true. | |
| 8110 __ ldr(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset)); | |
| 8111 __ ldrb(scratch0, FieldMemOperand(scratch0, Map::kInstanceTypeOffset)); | |
| 8112 __ cmp(scratch0, Operand(FIRST_JS_OBJECT_TYPE)); | |
| 8113 // "tos_" is a register and contains a non-zero value. | |
| 8114 // Hence we implicitly return true if the greater than | |
| 8115 // condition is satisfied. | |
| 8116 __ Ret(gt); | |
| 8117 | |
| 8118 // Check for string | |
| 8119 __ ldr(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset)); | |
| 8120 __ ldrb(scratch0, FieldMemOperand(scratch0, Map::kInstanceTypeOffset)); | |
| 8121 __ cmp(scratch0, Operand(FIRST_NONSTRING_TYPE)); | |
| 8122 // "tos_" is a register and contains a non-zero value. | |
| 8123 // Hence we implicitly return true if the greater than | |
| 8124 // condition is satisfied. | |
| 8125 __ Ret(gt); | |
| 8126 | |
| 8127 // String value => false iff empty, i.e., length is zero | |
| 8128 __ ldr(tos_, FieldMemOperand(tos_, String::kLengthOffset)); | |
| 8129 // If length is zero, "tos_" contains zero ==> false. | |
| 8130 // If length is not zero, "tos_" contains a non-zero value ==> true. | |
| 8131 __ Ret(); | |
| 8132 | |
| 8133 // Return 0 in "tos_" for false . | |
| 8134 __ bind(&false_result); | |
| 8135 __ mov(tos_, Operand(0)); | |
| 8136 __ Ret(); | |
| 8137 } | |
| 8138 | |
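The heap-number branch above implements JavaScript truthiness for numbers: false exactly for +0, -0 and NaN (the eq and vs conditions after vcmp), true otherwise. The same rule, portably:

    #include <assert.h>
    #include <math.h>

    // A heap number is truthy unless it is +0, -0, or NaN.
    static bool number_to_boolean(double d) {
      return !(d == 0.0 || isnan(d));  // d == 0.0 holds for both +0 and -0
    }

    int main() {
      assert(!number_to_boolean(0.0));
      assert(!number_to_boolean(-0.0));
      assert(!number_to_boolean(NAN));
      assert(number_to_boolean(3.14));
      return 0;
    }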
| 8139 | |
| 8140 // We fall into this code if the operands were Smis, but the result was | |
| 8141 // not (e.g. overflow). We branch into this code (to the not_smi label) if | |
| 8142 // the operands were not both Smi. The operands are in r0 and r1. In order | |
| 8143 // to call the C-implemented binary fp operation routines we need to end up | |
| 8144 // with the double precision floating point operands in r0 and r1 (for the | |
| 8145 // value in r1) and r2 and r3 (for the value in r0). | |
| 8146 void GenericBinaryOpStub::HandleBinaryOpSlowCases( | |
| 8147 MacroAssembler* masm, | |
| 8148 Label* not_smi, | |
| 8149 Register lhs, | |
| 8150 Register rhs, | |
| 8151 const Builtins::JavaScript& builtin) { | |
| 8152 Label slow, slow_reverse, do_the_call; | |
| 8153 bool use_fp_registers = CpuFeatures::IsSupported(VFP3) && Token::MOD != op_; | |
| 8154 | |
| 8155 ASSERT((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0))); | |
| 8156 Register heap_number_map = r6; | |
| 8157 | |
| 8158 if (ShouldGenerateSmiCode()) { | |
| 8159 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | |
| 8160 | |
| 8161 // Smi-smi case (overflow). | |
| 8162 // Since both are Smis there is no heap number to overwrite, so allocate. | |
| 8163 // The new heap number is in r5. r3 and r7 are scratch. | |
| 8164 __ AllocateHeapNumber( | |
| 8165 r5, r3, r7, heap_number_map, lhs.is(r0) ? &slow_reverse : &slow); | |
| 8166 | |
| 8167 // If we have floating point hardware, inline ADD, SUB, MUL, and DIV, | |
| 8168 // using registers d7 and d6 for the double values. | |
| 8169 if (CpuFeatures::IsSupported(VFP3)) { | |
| 8170 CpuFeatures::Scope scope(VFP3); | |
| 8171 __ mov(r7, Operand(rhs, ASR, kSmiTagSize)); | |
| 8172 __ vmov(s15, r7); | |
| 8173 __ vcvt_f64_s32(d7, s15); | |
| 8174 __ mov(r7, Operand(lhs, ASR, kSmiTagSize)); | |
| 8175 __ vmov(s13, r7); | |
| 8176 __ vcvt_f64_s32(d6, s13); | |
| 8177 if (!use_fp_registers) { | |
| 8178 __ vmov(r2, r3, d7); | |
| 8179 __ vmov(r0, r1, d6); | |
| 8180 } | |
| 8181 } else { | |
| 8182 // Write Smi from rhs to r3 and r2 in double format. r9 is scratch. | |
| 8183 __ mov(r7, Operand(rhs)); | |
| 8184 ConvertToDoubleStub stub1(r3, r2, r7, r9); | |
| 8185 __ push(lr); | |
| 8186 __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET); | |
| 8187 // Write Smi from lhs to r1 and r0 in double format. r9 is scratch. | |
| 8188 __ mov(r7, Operand(lhs)); | |
| 8189 ConvertToDoubleStub stub2(r1, r0, r7, r9); | |
| 8190 __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET); | |
| 8191 __ pop(lr); | |
| 8192 } | |
| 8193 __ jmp(&do_the_call); // Tail call. No return. | |
| 8194 } | |
| 8195 | |
| 8196 // We branch here if at least one of r0 and r1 is not a Smi. | |
| 8197 __ bind(not_smi); | |
| 8198 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | |
| 8199 | |
| 8200 // After this point we have the left hand side in r1 and the right hand side | |
| 8201 // in r0. | |
| 8202 if (lhs.is(r0)) { | |
| 8203 __ Swap(r0, r1, ip); | |
| 8204 } | |
| 8205 | |
| 8206 // The type transition also calculates the answer. | |
| 8207 bool generate_code_to_calculate_answer = true; | |
| 8208 | |
| 8209 if (ShouldGenerateFPCode()) { | |
| 8210 if (runtime_operands_type_ == BinaryOpIC::DEFAULT) { | |
| 8211 switch (op_) { | |
| 8212 case Token::ADD: | |
| 8213 case Token::SUB: | |
| 8214 case Token::MUL: | |
| 8215 case Token::DIV: | |
| 8216 GenerateTypeTransition(masm); // Tail call. | |
| 8217 generate_code_to_calculate_answer = false; | |
| 8218 break; | |
| 8219 | |
| 8220 default: | |
| 8221 break; | |
| 8222 } | |
| 8223 } | |
| 8224 | |
| 8225 if (generate_code_to_calculate_answer) { | |
| 8226 Label r0_is_smi, r1_is_smi, finished_loading_r0, finished_loading_r1; | |
| 8227 if (mode_ == NO_OVERWRITE) { | |
| 8228 // In the case where there is no chance of an overwritable float we may | |
| 8229 // as well do the allocation immediately while r0 and r1 are untouched. | |
| 8230 __ AllocateHeapNumber(r5, r3, r7, heap_number_map, &slow); | |
| 8231 } | |
| 8232 | |
| 8233 // Move r0 to a double in r2-r3. | |
| 8234 __ tst(r0, Operand(kSmiTagMask)); | |
| 8235 __ b(eq, &r0_is_smi); // It's a Smi so don't check it's a heap number. | |
| 8236 __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); | |
| 8237 __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | |
| 8238 __ cmp(r4, heap_number_map); | |
| 8239 __ b(ne, &slow); | |
| 8240 if (mode_ == OVERWRITE_RIGHT) { | |
| 8241 __ mov(r5, Operand(r0)); // Overwrite this heap number. | |
| 8242 } | |
| 8243 if (use_fp_registers) { | |
| 8244 CpuFeatures::Scope scope(VFP3); | |
| 8245 // Load the double from tagged HeapNumber r0 to d7. | |
| 8246 __ sub(r7, r0, Operand(kHeapObjectTag)); | |
| 8247 __ vldr(d7, r7, HeapNumber::kValueOffset); | |
| 8248 } else { | |
| 8249 // Calling convention says that second double is in r2 and r3. | |
| 8250 __ Ldrd(r2, r3, FieldMemOperand(r0, HeapNumber::kValueOffset)); | |
| 8251 } | |
| 8252 __ jmp(&finished_loading_r0); | |
| 8253 __ bind(&r0_is_smi); | |
| 8254 if (mode_ == OVERWRITE_RIGHT) { | |
| 8255 // We can't overwrite a Smi so get address of new heap number into r5. | |
| 8256 __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow); | |
| 8257 } | |
| 8258 | |
| 8259 if (CpuFeatures::IsSupported(VFP3)) { | |
| 8260 CpuFeatures::Scope scope(VFP3); | |
| 8261 // Convert smi in r0 to double in d7. | |
| 8262 __ mov(r7, Operand(r0, ASR, kSmiTagSize)); | |
| 8263 __ vmov(s15, r7); | |
| 8264 __ vcvt_f64_s32(d7, s15); | |
| 8265 if (!use_fp_registers) { | |
| 8266 __ vmov(r2, r3, d7); | |
| 8267 } | |
| 8268 } else { | |
| 8269 // Write Smi from r0 to r3 and r2 in double format. | |
| 8270 __ mov(r7, Operand(r0)); | |
| 8271 ConvertToDoubleStub stub3(r3, r2, r7, r4); | |
| 8272 __ push(lr); | |
| 8273 __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET); | |
| 8274 __ pop(lr); | |
| 8275 } | |
| 8276 | |
| 8277 // HEAP_NUMBERS stub is slower than GENERIC on a pair of smis. | |
| 8278 // r0 is known to be a smi. If r1 is also a smi then switch to GENERIC. | |
| 8279 Label r1_is_not_smi; | |
| 8280 if (runtime_operands_type_ == BinaryOpIC::HEAP_NUMBERS) { | |
| 8281 __ tst(r1, Operand(kSmiTagMask)); | |
| 8282 __ b(ne, &r1_is_not_smi); | |
| 8283 GenerateTypeTransition(masm); // Tail call. | |
| 8284 } | |
| 8285 | |
| 8286 __ bind(&finished_loading_r0); | |
| 8287 | |
| 8288 // Move r1 to a double in r0-r1. | |
| 8289 __ tst(r1, Operand(kSmiTagMask)); | |
| 8290 __ b(eq, &r1_is_smi); // It's a Smi so don't check it's a heap number. | |
| 8291 __ bind(&r1_is_not_smi); | |
| 8292 __ ldr(r4, FieldMemOperand(r1, HeapNumber::kMapOffset)); | |
| 8293 __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | |
| 8294 __ cmp(r4, heap_number_map); | |
| 8295 __ b(ne, &slow); | |
| 8296 if (mode_ == OVERWRITE_LEFT) { | |
| 8297 __ mov(r5, Operand(r1)); // Overwrite this heap number. | |
| 8298 } | |
| 8299 if (use_fp_registers) { | |
| 8300 CpuFeatures::Scope scope(VFP3); | |
| 8301 // Load the double from tagged HeapNumber r1 to d6. | |
| 8302 __ sub(r7, r1, Operand(kHeapObjectTag)); | |
| 8303 __ vldr(d6, r7, HeapNumber::kValueOffset); | |
| 8304 } else { | |
| 8305 // Calling convention says that first double is in r0 and r1. | |
| 8306 __ Ldrd(r0, r1, FieldMemOperand(r1, HeapNumber::kValueOffset)); | |
| 8307 } | |
| 8308 __ jmp(&finished_loading_r1); | |
| 8309 __ bind(&r1_is_smi); | |
| 8310 if (mode_ == OVERWRITE_LEFT) { | |
| 8311 // We can't overwrite a Smi so get address of new heap number into r5. | |
| 8312 __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow); | |
| 8313 } | |
| 8314 | |
| 8315 if (CpuFeatures::IsSupported(VFP3)) { | |
| 8316 CpuFeatures::Scope scope(VFP3); | |
| 8317 // Convert smi in r1 to double in d6. | |
| 8318 __ mov(r7, Operand(r1, ASR, kSmiTagSize)); | |
| 8319 __ vmov(s13, r7); | |
| 8320 __ vcvt_f64_s32(d6, s13); | |
| 8321 if (!use_fp_registers) { | |
| 8322 __ vmov(r0, r1, d6); | |
| 8323 } | |
| 8324 } else { | |
| 8325 // Write Smi from r1 to r1 and r0 in double format. | |
| 8326 __ mov(r7, Operand(r1)); | |
| 8327 ConvertToDoubleStub stub4(r1, r0, r7, r9); | |
| 8328 __ push(lr); | |
| 8329 __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET); | |
| 8330 __ pop(lr); | |
| 8331 } | |
| 8332 | |
| 8333 __ bind(&finished_loading_r1); | |
| 8334 } | |
| 8335 | |
| 8336 if (generate_code_to_calculate_answer || do_the_call.is_linked()) { | |
| 8337 __ bind(&do_the_call); | |
| 8338 // If we are inlining the operation using VFP3 instructions for | |
| 8339 // add, subtract, multiply, or divide, the arguments are in d6 and d7. | |
| 8340 if (use_fp_registers) { | |
| 8341 CpuFeatures::Scope scope(VFP3); | |
| 8342 // ARMv7 VFP3 instructions to implement | |
| 8343 // double precision, add, subtract, multiply, divide. | |
| 8344 | |
| 8345 if (Token::MUL == op_) { | |
| 8346 __ vmul(d5, d6, d7); | |
| 8347 } else if (Token::DIV == op_) { | |
| 8348 __ vdiv(d5, d6, d7); | |
| 8349 } else if (Token::ADD == op_) { | |
| 8350 __ vadd(d5, d6, d7); | |
| 8351 } else if (Token::SUB == op_) { | |
| 8352 __ vsub(d5, d6, d7); | |
| 8353 } else { | |
| 8354 UNREACHABLE(); | |
| 8355 } | |
| 8356 __ sub(r0, r5, Operand(kHeapObjectTag)); | |
| 8357 __ vstr(d5, r0, HeapNumber::kValueOffset); | |
| 8358 __ add(r0, r0, Operand(kHeapObjectTag)); | |
| 8359 __ mov(pc, lr); | |
| 8360 } else { | |
| 8361 // If we did not inline the operation, then the arguments are in: | |
| 8362 // r0: Left value (least significant part of mantissa). | |
| 8363 // r1: Left value (sign, exponent, top of mantissa). | |
| 8364 // r2: Right value (least significant part of mantissa). | |
| 8365 // r3: Right value (sign, exponent, top of mantissa). | |
| 8366 // r5: Address of heap number for result. | |
| 8367 | |
| 8368 __ push(lr); // For later. | |
| 8369 __ PrepareCallCFunction(4, r4); // Two doubles count as 4 arguments. | |
| 8370 // Call C routine that may not cause GC or other trouble. r5 is callee | |
| 8371 // save. | |
| 8372 __ CallCFunction(ExternalReference::double_fp_operation(op_), 4); | |
| 8373 // Store answer in the overwritable heap number. | |
| 8374 #if !defined(USE_ARM_EABI) | |
| 8375 // Double returned in fp coprocessor register 0 and 1, encoded as | |
| 8376 // register cr8. Offsets must be divisible by 4 for coprocessor so we | |
| 8377 // need to subtract the tag from r5. | |
| 8378 __ sub(r4, r5, Operand(kHeapObjectTag)); | |
| 8379 __ stc(p1, cr8, MemOperand(r4, HeapNumber::kValueOffset)); | |
| 8380 #else | |
| 8381 // Double returned in registers 0 and 1. | |
| 8382 __ Strd(r0, r1, FieldMemOperand(r5, HeapNumber::kValueOffset)); | |
| 8383 #endif | |
| 8384 __ mov(r0, Operand(r5)); | |
| 8385 // And we are done. | |
| 8386 __ pop(pc); | |
| 8387 } | |
| 8388 } | |
| 8389 } | |
| 8390 | |
| 8391 if (!generate_code_to_calculate_answer && | |
| 8392 !slow_reverse.is_linked() && | |
| 8393 !slow.is_linked()) { | |
| 8394 return; | |
| 8395 } | |
| 8396 | |
| 8397 if (lhs.is(r0)) { | |
| 8398 __ b(&slow); | |
| 8399 __ bind(&slow_reverse); | |
| 8400 __ Swap(r0, r1, ip); | |
| 8401 } | |
| 8402 | |
| 8403 heap_number_map = no_reg; // Don't use this any more from here on. | |
| 8404 | |
| 8405 // We jump to here if something goes wrong (one param is not a number of any | |
| 8406 // sort or new-space allocation fails). | |
| 8407 __ bind(&slow); | |
| 8408 | |
| 8409 // Push arguments to the stack. | |
| 8410 __ Push(r1, r0); | |
| 8411 | |
| 8412 if (Token::ADD == op_) { | |
| 8413 // Test for string arguments before calling runtime. | |
| 8414 // r1 : first argument | |
| 8415 // r0 : second argument | |
| 8416 // sp[0] : second argument | |
| 8417 // sp[4] : first argument | |
| 8418 | |
| 8419 Label not_strings, not_string1, string1, string1_smi2; | |
| 8420 __ tst(r1, Operand(kSmiTagMask)); | |
| 8421 __ b(eq, &not_string1); | |
| 8422 __ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE); | |
| 8423 __ b(ge, &not_string1); | |
| 8424 | |
| 8425 // First argument is a string, test second. | |
| 8426 __ tst(r0, Operand(kSmiTagMask)); | |
| 8427 __ b(eq, &string1_smi2); | |
| 8428 __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE); | |
| 8429 __ b(ge, &string1); | |
| 8430 | |
| 8431 // First and second argument are strings. | |
| 8432 StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB); | |
| 8433 __ TailCallStub(&string_add_stub); | |
| 8434 | |
| 8435 __ bind(&string1_smi2); | |
| 8436 // First argument is a string, second is a smi. Try to lookup the number | |
| 8437 // string for the smi in the number string cache. | |
| 8438 NumberToStringStub::GenerateLookupNumberStringCache( | |
| 8439 masm, r0, r2, r4, r5, r6, true, &string1); | |
| 8440 | |
| 8441 // Replace second argument on stack and tailcall string add stub to make | |
| 8442 // the result. | |
| 8443 __ str(r2, MemOperand(sp, 0)); | |
| 8444 __ TailCallStub(&string_add_stub); | |
| 8445 | |
| 8446 // Only first argument is a string. | |
| 8447 __ bind(&string1); | |
| 8448 __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_JS); | |
| 8449 | |
| 8450 // First argument was not a string, test second. | |
| 8451 __ bind(&not_string1); | |
| 8452 __ tst(r0, Operand(kSmiTagMask)); | |
| 8453 __ b(eq, &not_strings); | |
| 8454 __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE); | |
| 8455 __ b(ge, &not_strings); | |
| 8456 | |
| 8457 // Only second argument is a string. | |
| 8458 __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_JS); | |
| 8459 | |
| 8460 __ bind(&not_strings); | |
| 8461 } | |
| 8462 | |
| 8463 __ InvokeBuiltin(builtin, JUMP_JS); // Tail call. No return. | |
| 8464 } | |
| 8465 | |
| 8466 | |
| 8467 // Tries to get a signed int32 out of a double precision floating point heap | |
| 8468 // number. Rounds towards 0. Fastest for doubles that are in the ranges | |
| 8469 // -0x7fffffff to -0x40000000 or 0x40000000 to 0x7fffffff. This corresponds | |
| 8470 // almost to the range of signed int32 values that are not Smis. Jumps to the | |
| 8471 // label 'slow' if the double isn't in the range -0x80000000.0 to 0x80000000.0 | |
| 8472 // (excluding the endpoints). | |
| 8473 static void GetInt32(MacroAssembler* masm, | |
| 8474 Register source, | |
| 8475 Register dest, | |
| 8476 Register scratch, | |
| 8477 Register scratch2, | |
| 8478 Label* slow) { | |
| 8479 Label right_exponent, done; | |
| 8480 // Get exponent word. | |
| 8481 __ ldr(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset)); | |
| 8482 // Get exponent alone in scratch2. | |
| 8483 __ Ubfx(scratch2, | |
| 8484 scratch, | |
| 8485 HeapNumber::kExponentShift, | |
| 8486 HeapNumber::kExponentBits); | |
| 8487 // Load dest with zero. We use this either for the final shift or | |
| 8488 // for the answer. | |
| 8489 __ mov(dest, Operand(0)); | |
| 8490 // Check whether the exponent matches a 32 bit signed int that is not a Smi. | |
| 8491 // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). This is | |
| 8492 // the exponent that we are fastest at and also the highest exponent we can | |
| 8493 // handle here. | |
| 8494 const uint32_t non_smi_exponent = HeapNumber::kExponentBias + 30; | |
| 8495 // The non_smi_exponent, 0x41d, is too big for ARM's immediate field so we | |
| 8496 // split it up to avoid a constant pool entry. You can't do that in general | |
| 8497 // for cmp because of the overflow flag, but we know the exponent is in the | |
| 8498 // range 0-2047 so there is no overflow. | |
| 8499 int fudge_factor = 0x400; | |
| 8500 __ sub(scratch2, scratch2, Operand(fudge_factor)); | |
| 8501 __ cmp(scratch2, Operand(non_smi_exponent - fudge_factor)); | |
| 8502 // If we have a match of the int32-but-not-Smi exponent then skip some logic. | |
| 8503 __ b(eq, &right_exponent); | |
| 8504 // If the exponent is higher than that then go to slow case. This catches | |
| 8505 // numbers that don't fit in a signed int32, infinities and NaNs. | |
| 8506 __ b(gt, slow); | |
| 8507 | |
| 8508 // We know the exponent is smaller than 30 (biased). If it is less than | |
| 8509 // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e. | |
| 8510 // it rounds to zero. | |
| 8511 const uint32_t zero_exponent = HeapNumber::kExponentBias + 0; | |
| 8512 __ sub(scratch2, scratch2, Operand(zero_exponent - fudge_factor), SetCC); | |
| 8513 // Dest already has a Smi zero. | |
| 8514 __ b(lt, &done); | |
| 8515 if (!CpuFeatures::IsSupported(VFP3)) { | |
| 8516 // We have an exponent between 0 and 30 in scratch2. Subtract from 30 to | |
| 8517 // get how much to shift down. | |
| 8518 __ rsb(dest, scratch2, Operand(30)); | |
| 8519 } | |
| 8520 __ bind(&right_exponent); | |
| 8521 if (CpuFeatures::IsSupported(VFP3)) { | |
| 8522 CpuFeatures::Scope scope(VFP3); | |
| 8523 // ARMv7 VFP3 instructions implementing double precision to integer | |
| 8524 // conversion using round to zero. | |
| 8525 __ ldr(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset)); | |
| 8526 __ vmov(d7, scratch2, scratch); | |
| 8527 __ vcvt_s32_f64(s15, d7); | |
| 8528 __ vmov(dest, s15); | |
| 8529 } else { | |
| 8530 // Get the top bits of the mantissa. | |
| 8531 __ and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask)); | |
| 8532 // Put back the implicit 1. | |
| 8533 __ orr(scratch2, scratch2, Operand(1 << HeapNumber::kExponentShift)); | |
| 8534 // Shift up the mantissa bits to take up the space the exponent used to | |
| 8535 // take. We just orred in the implicit bit so that took care of one and | |
| 8536 // we want to leave the sign bit 0 so we subtract 2 bits from the shift | |
| 8537 // distance. | |
| 8538 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2; | |
| 8539 __ mov(scratch2, Operand(scratch2, LSL, shift_distance)); | |
| 8540 // Put sign in zero flag. | |
| 8541 __ tst(scratch, Operand(HeapNumber::kSignMask)); | |
| 8542 // Get the second half of the double. For some exponents we don't | |
| 8543 // actually need this because the bits get shifted out again, but | |
| 8544 // it's probably slower to test than just to do it. | |
| 8545 __ ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset)); | |
| 8546 // Shift down 22 bits to get the last 10 bits. | |
| 8547 __ orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance)); | |
| 8548 // Move down according to the exponent. | |
| 8549 __ mov(dest, Operand(scratch, LSR, dest)); | |
| 8550 // Fix sign if sign bit was set. | |
| 8551 __ rsb(dest, dest, Operand(0), LeaveCC, ne); | |
| 8552 } | |
| 8553 __ bind(&done); | |
| 8554 } | |
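| // The non-VFP path above is equivalent to this C-level sketch | |
| // (illustrative; assumes the unbiased exponent e is in [0, 30], the | |
| // range the fast path accepts, with larger exponents sent to 'slow'): | |
| // | |
| //   int32_t DoubleWordsToInt32(uint32_t hi, uint32_t lo) { | |
| //     int e = ((hi >> 20) & 0x7ff) - 1023;        // Unbiased exponent. | |
| //     if (e < 0) return 0;                        // Magnitude below 1.0. | |
| //     uint32_t m = (hi & 0xfffff) | 0x100000;     // Restore implicit 1. | |
| //     uint32_t bits = (m << 10) | (lo >> 22);     // Top 31 mantissa bits. | |
| //     int32_t result = bits >> (30 - e);          // Truncate towards zero. | |
| //     return (hi & 0x80000000) ? -result : result; | |
| //   } | |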
| 8555 | |
| 8556 // For bitwise ops where the inputs are not both Smis, we try here to determine | |
| 8557 // whether both inputs are either Smis or at least heap numbers that can be | |
| 8558 // represented by a 32 bit signed value. We truncate towards zero as required | |
| 8559 // by the ES spec. If this is the case we do the bitwise op and see if the | |
| 8560 // result is a Smi. If so, great, otherwise we try to find a heap number to | |
| 8561 // write the answer into (either by allocating or by overwriting). | |
| 8562 // On entry the operands are in lhs and rhs. On exit the answer is in r0. | |
| 8563 void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm, | |
| 8564 Register lhs, | |
| 8565 Register rhs) { | |
| 8566 Label slow, result_not_a_smi; | |
| 8567 Label rhs_is_smi, lhs_is_smi; | |
| 8568 Label done_checking_rhs, done_checking_lhs; | |
| 8569 | |
| 8570 Register heap_number_map = r6; | |
| 8571 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | |
| 8572 | |
| 8573 __ tst(lhs, Operand(kSmiTagMask)); | |
| 8574 __ b(eq, &lhs_is_smi); // It's a Smi so don't check it's a heap number. | |
| 8575 __ ldr(r4, FieldMemOperand(lhs, HeapNumber::kMapOffset)); | |
| 8576 __ cmp(r4, heap_number_map); | |
| 8577 __ b(ne, &slow); | |
| 8578 GetInt32(masm, lhs, r3, r5, r4, &slow); | |
| 8579 __ jmp(&done_checking_lhs); | |
| 8580 __ bind(&lhs_is_smi); | |
| 8581 __ mov(r3, Operand(lhs, ASR, 1)); | |
| 8582 __ bind(&done_checking_lhs); | |
| 8583 | |
| 8584 __ tst(rhs, Operand(kSmiTagMask)); | |
| 8585 __ b(eq, &rhs_is_smi); // It's a Smi so don't check it's a heap number. | |
| 8586 __ ldr(r4, FieldMemOperand(rhs, HeapNumber::kMapOffset)); | |
| 8587 __ cmp(r4, heap_number_map); | |
| 8588 __ b(ne, &slow); | |
| 8589 GetInt32(masm, rhs, r2, r5, r4, &slow); | |
| 8590 __ jmp(&done_checking_rhs); | |
| 8591 __ bind(&rhs_is_smi); | |
| 8592 __ mov(r2, Operand(rhs, ASR, 1)); | |
| 8593 __ bind(&done_checking_rhs); | |
| 8594 | |
| 8595 ASSERT(((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0)))); | |
| 8596 | |
| 8597 // r0 and r1: Original operands (Smi or heap numbers). | |
| 8598 // r2 and r3: Signed int32 operands. | |
| 8599 switch (op_) { | |
| 8600 case Token::BIT_OR: __ orr(r2, r2, Operand(r3)); break; | |
| 8601 case Token::BIT_XOR: __ eor(r2, r2, Operand(r3)); break; | |
| 8602 case Token::BIT_AND: __ and_(r2, r2, Operand(r3)); break; | |
| 8603 case Token::SAR: | |
| 8604 // Use only the 5 least significant bits of the shift count. | |
| 8605 __ and_(r2, r2, Operand(0x1f)); | |
| 8606 __ mov(r2, Operand(r3, ASR, r2)); | |
| 8607 break; | |
| 8608 case Token::SHR: | |
| 8609 // Use only the 5 least significant bits of the shift count. | |
| 8610 __ and_(r2, r2, Operand(0x1f)); | |
| 8611 __ mov(r2, Operand(r3, LSR, r2), SetCC); | |
| 8612 // SHR is special because it is required to produce a positive answer. | |
| 8613 // The code below for writing into heap numbers isn't capable of writing | |
| 8614 // the register as an unsigned int so we go to slow case if we hit this | |
| 8615 // case. | |
| 8616 if (CpuFeatures::IsSupported(VFP3)) { | |
| 8617 __ b(mi, &result_not_a_smi); | |
| 8618 } else { | |
| 8619 __ b(mi, &slow); | |
| 8620 } | |
| 8621 break; | |
| 8622 case Token::SHL: | |
| 8623 // Use only the 5 least significant bits of the shift count. | |
| 8624 __ and_(r2, r2, Operand(0x1f)); | |
| 8625 __ mov(r2, Operand(r3, LSL, r2)); | |
| 8626 break; | |
| 8627 default: UNREACHABLE(); | |
| 8628 } | |
| 8629 // Check that the *signed* result fits in a Smi. | |
| 8630 __ add(r3, r2, Operand(0x40000000), SetCC); | |
| 8631 __ b(mi, &result_not_a_smi); | |
| 8632 __ mov(r0, Operand(r2, LSL, kSmiTagSize)); | |
| 8633 __ Ret(); | |
| 8634 | |
| 8635 Label have_to_allocate, got_a_heap_number; | |
| 8636 __ bind(&result_not_a_smi); | |
| 8637 switch (mode_) { | |
| 8638 case OVERWRITE_RIGHT: { | |
| 8639 __ tst(rhs, Operand(kSmiTagMask)); | |
| 8640 __ b(eq, &have_to_allocate); | |
| 8641 __ mov(r5, Operand(rhs)); | |
| 8642 break; | |
| 8643 } | |
| 8644 case OVERWRITE_LEFT: { | |
| 8645 __ tst(lhs, Operand(kSmiTagMask)); | |
| 8646 __ b(eq, &have_to_allocate); | |
| 8647 __ mov(r5, Operand(lhs)); | |
| 8648 break; | |
| 8649 } | |
| 8650 case NO_OVERWRITE: { | |
| 8651 // Get a new heap number in r5. r4 and r7 are scratch. | |
| 8652 __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow); | |
| break; | |
| 8653 } | |
| 8654 default: break; | |
| 8655 } | |
| 8656 __ bind(&got_a_heap_number); | |
| 8657 // r2: Answer as signed int32. | |
| 8658 // r5: Heap number to write answer into. | |
| 8659 | |
| 8660 // Nothing can go wrong now, so move the heap number to r0, which is the | |
| 8661 // result. | |
| 8662 __ mov(r0, Operand(r5)); | |
| 8663 | |
| 8664 if (CpuFeatures::IsSupported(VFP3)) { | |
| 8665 // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. | |
| 8666 CpuFeatures::Scope scope(VFP3); | |
| 8667 __ vmov(s0, r2); | |
| 8668 if (op_ == Token::SHR) { | |
| 8669 __ vcvt_f64_u32(d0, s0); | |
| 8670 } else { | |
| 8671 __ vcvt_f64_s32(d0, s0); | |
| 8672 } | |
| 8673 __ sub(r3, r0, Operand(kHeapObjectTag)); | |
| 8674 __ vstr(d0, r3, HeapNumber::kValueOffset); | |
| 8675 __ Ret(); | |
| 8676 } else { | |
| 8677 // Tail call that writes the int32 in r2 to the heap number in r0, using | |
| 8678 // r3 as scratch. r0 is preserved and returned. | |
| 8679 WriteInt32ToHeapNumberStub stub(r2, r0, r3); | |
| 8680 __ TailCallStub(&stub); | |
| 8681 } | |
| 8682 | |
| 8683 if (mode_ != NO_OVERWRITE) { | |
| 8684 __ bind(&have_to_allocate); | |
| 8685 // Get a new heap number in r5. r4 and r7 are scratch. | |
| 8686 __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow); | |
| 8687 __ jmp(&got_a_heap_number); | |
| 8688 } | |
| 8689 | |
| 8690 // If all else failed then we go to the runtime system. | |
| 8691 __ bind(&slow); | |
| 8692 __ Push(lhs, rhs); // Restore stack. | |
| 8693 switch (op_) { | |
| 8694 case Token::BIT_OR: | |
| 8695 __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS); | |
| 8696 break; | |
| 8697 case Token::BIT_AND: | |
| 8698 __ InvokeBuiltin(Builtins::BIT_AND, JUMP_JS); | |
| 8699 break; | |
| 8700 case Token::BIT_XOR: | |
| 8701 __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS); | |
| 8702 break; | |
| 8703 case Token::SAR: | |
| 8704 __ InvokeBuiltin(Builtins::SAR, JUMP_JS); | |
| 8705 break; | |
| 8706 case Token::SHR: | |
| 8707 __ InvokeBuiltin(Builtins::SHR, JUMP_JS); | |
| 8708 break; | |
| 8709 case Token::SHL: | |
| 8710 __ InvokeBuiltin(Builtins::SHL, JUMP_JS); | |
| 8711 break; | |
| 8712 default: | |
| 8713 UNREACHABLE(); | |
| 8714 } | |
| 8715 } | |
| 8716 | |
| 8717 | |
| 8718 // Can we multiply by x with at most two shifts and an add? | |
| 8719 // This answers yes to all integers from 2 to 10. | |
| 8720 static bool IsEasyToMultiplyBy(int x) { | |
| 8721 if (x < 2) return false; // Avoid special cases. | |
| 8722 if (x > (Smi::kMaxValue + 1) >> 2) return false; // Almost always overflows. | |
| 8723 if (IsPowerOf2(x)) return true; // Simple shift. | |
| 8724 if (PopCountLessThanEqual2(x)) return true; // Shift and add and shift. | |
| 8725 if (IsPowerOf2(x + 1)) return true; // Patterns like 11111. | |
| 8726 return false; | |
| 8727 } | |
| 8728 | |
| 8729 | |
| 8730 // Can multiply by anything that IsEasyToMultiplyBy returns true for. | |
| 8731 // Source and destination may be the same register. This routine does | |
| 8732 // not set carry and overflow the way a mul instruction would. | |
| 8733 static void MultiplyByKnownInt(MacroAssembler* masm, | |
| 8734 Register source, | |
| 8735 Register destination, | |
| 8736 int known_int) { | |
| 8737 if (IsPowerOf2(known_int)) { | |
| 8738 __ mov(destination, Operand(source, LSL, BitPosition(known_int))); | |
| 8739 } else if (PopCountLessThanEqual2(known_int)) { | |
| 8740 int first_bit = BitPosition(known_int); | |
| 8741 int second_bit = BitPosition(known_int ^ (1 << first_bit)); | |
| 8742 __ add(destination, source, Operand(source, LSL, second_bit - first_bit)); | |
| 8743 if (first_bit != 0) { | |
| 8744 __ mov(destination, Operand(destination, LSL, first_bit)); | |
| 8745 } | |
| 8746 } else { | |
| 8747 ASSERT(IsPowerOf2(known_int + 1)); // Patterns like 1111. | |
| 8748 int the_bit = BitPosition(known_int + 1); | |
| 8749 __ rsb(destination, source, Operand(source, LSL, the_bit)); | |
| 8750 } | |
| 8751 } | |
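| // E.g. for known_int == 10 (two set bits) this emits, in effect: | |
| //   add dest, source, source, LSL #2   ; dest = source * 5 | |
| //   mov dest, dest, LSL #1             ; dest = source * 10 | |
| // and for known_int == 7 (a 2^n - 1 pattern) a single rsb: | |
| //   rsb dest, source, source, LSL #3   ; dest = source * 8 - source | |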
| 8752 | |
| 8753 | |
| 8754 // This function (as opposed to MultiplyByKnownInt) takes the known int in | |
| 8755 // a register for the cases where it doesn't know a good trick, and may deliver | |
| 8756 // a result that needs shifting. | |
| 8757 static void MultiplyByKnownInt2( | |
| 8758 MacroAssembler* masm, | |
| 8759 Register result, | |
| 8760 Register source, | |
| 8761 Register known_int_register, // Smi tagged. | |
| 8762 int known_int, | |
| 8763 int* required_shift) { // Including Smi tag shift | |
| 8764 switch (known_int) { | |
| 8765 case 3: | |
| 8766 __ add(result, source, Operand(source, LSL, 1)); | |
| 8767 *required_shift = 1; | |
| 8768 break; | |
| 8769 case 5: | |
| 8770 __ add(result, source, Operand(source, LSL, 2)); | |
| 8771 *required_shift = 1; | |
| 8772 break; | |
| 8773 case 6: | |
| 8774 __ add(result, source, Operand(source, LSL, 1)); | |
| 8775 *required_shift = 2; | |
| 8776 break; | |
| 8777 case 7: | |
| 8778 __ rsb(result, source, Operand(source, LSL, 3)); | |
| 8779 *required_shift = 1; | |
| 8780 break; | |
| 8781 case 9: | |
| 8782 __ add(result, source, Operand(source, LSL, 3)); | |
| 8783 *required_shift = 1; | |
| 8784 break; | |
| 8785 case 10: | |
| 8786 __ add(result, source, Operand(source, LSL, 2)); | |
| 8787 *required_shift = 2; | |
| 8788 break; | |
| 8789 default: | |
| 8790 ASSERT(!IsPowerOf2(known_int)); // That would be very inefficient. | |
| 8791 __ mul(result, source, known_int_register); | |
| 8792 *required_shift = 0; | |
| 8793 } | |
| 8794 } | |
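| // Usage note: the caller folds *required_shift into a later operand | |
| // shift. E.g. for known_int == 6 the code above computes | |
| // result = source * 3 and sets *required_shift = 2, so | |
| // result << 2 == source * 12, i.e. source * 6 with the extra factor of 2 | |
| // that the Smi tag in known_int_register carries. | |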
| 8795 | |
| 8796 | |
| 8797 // This uses versions of the sum-of-digits-to-see-if-a-number-is-divisible-by-3 | |
| 8798 // trick. See http://en.wikipedia.org/wiki/Divisibility_rule | |
| 8799 // Takes the sum of the digits base (mask + 1) repeatedly until we have a | |
| 8800 // number from 0 to mask. On exit the 'eq' condition flags are set if the | |
| 8801 // answer is exactly the mask. | |
| 8802 void IntegerModStub::DigitSum(MacroAssembler* masm, | |
| 8803 Register lhs, | |
| 8804 int mask, | |
| 8805 int shift, | |
| 8806 Label* entry) { | |
| 8807 ASSERT(mask > 0); | |
| 8808 ASSERT(mask <= 0xff); // Ensures Operand(mask) fits in an immediate, so ip stays free. | |
| 8809 Label loop; | |
| 8810 __ bind(&loop); | |
| 8811 __ and_(ip, lhs, Operand(mask)); | |
| 8812 __ add(lhs, ip, Operand(lhs, LSR, shift)); | |
| 8813 __ bind(entry); | |
| 8814 __ cmp(lhs, Operand(mask)); | |
| 8815 __ b(gt, &loop); | |
| 8816 } | |
| 8817 | |
| 8818 | |
| 8819 void IntegerModStub::DigitSum(MacroAssembler* masm, | |
| 8820 Register lhs, | |
| 8821 Register scratch, | |
| 8822 int mask, | |
| 8823 int shift1, | |
| 8824 int shift2, | |
| 8825 Label* entry) { | |
| 8826 ASSERT(mask > 0); | |
| 8827 ASSERT(mask <= 0xff); // Ensures Operand(mask) fits in an immediate, so ip stays free. | |
| 8828 Label loop; | |
| 8829 __ bind(&loop); | |
| 8830 __ bic(scratch, lhs, Operand(mask)); | |
| 8831 __ and_(ip, lhs, Operand(mask)); | |
| 8832 __ add(lhs, ip, Operand(lhs, LSR, shift1)); | |
| 8833 __ add(lhs, lhs, Operand(scratch, LSR, shift2)); | |
| 8834 __ bind(entry); | |
| 8835 __ cmp(lhs, Operand(mask)); | |
| 8836 __ b(gt, &loop); | |
| 8837 } | |
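| // The first overload implements this reduction, shown as a C sketch | |
| // (illustrative). With mask == (1 << shift) - 1 we have | |
| // 1 << shift == 1 (mod mask), so summing base-(mask + 1) digits | |
| // preserves the value modulo mask: | |
| // | |
| //   uint32_t DigitSum(uint32_t n, uint32_t mask, int shift) { | |
| //     while (n > mask) n = (n & mask) + (n >> shift); | |
| //     return n;  // In [0, mask]; n == mask means input divisible by mask. | |
| //   } | |
| // | |
| // The second overload does the same for multiples that are almost but | |
| // not quite all ones, folding the stray high bits back in with shift2. | |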
| 8838 | |
| 8839 | |
| 8840 // Splits the number into two halves (bottom half has shift bits). The top | |
| 8841 // half is subtracted from the bottom half. If the result is negative then | |
| 8842 // rhs is added. | |
| 8843 void IntegerModStub::ModGetInRangeBySubtraction(MacroAssembler* masm, | |
| 8844 Register lhs, | |
| 8845 int shift, | |
| 8846 int rhs) { | |
| 8847 int mask = (1 << shift) - 1; | |
| 8848 __ and_(ip, lhs, Operand(mask)); | |
| 8849 __ sub(lhs, ip, Operand(lhs, LSR, shift), SetCC); | |
| 8850 __ add(lhs, lhs, Operand(rhs), LeaveCC, mi); | |
| 8851 } | |
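| // E.g. with shift == 4 and rhs == 17: since 16 == -1 (mod 17), for | |
| // 0 <= n <= 255 we get (n & 15) - (n >> 4) == n (mod 17), a value in | |
| // [-15, 15], so one conditional add of 17 brings it into [0, 16]. | |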
| 8852 | |
| 8853 | |
| 8854 void IntegerModStub::ModReduce(MacroAssembler* masm, | |
| 8855 Register lhs, | |
| 8856 int max, | |
| 8857 int denominator) { | |
| 8858 int limit = denominator; | |
| 8859 while (limit * 2 <= max) limit *= 2; | |
| 8860 while (limit >= denominator) { | |
| 8861 __ cmp(lhs, Operand(limit)); | |
| 8862 __ sub(lhs, lhs, Operand(limit), LeaveCC, ge); | |
| 8863 limit >>= 1; | |
| 8864 } | |
| 8865 } | |
| 8866 | |
| 8867 | |
| 8868 void IntegerModStub::ModAnswer(MacroAssembler* masm, | |
| 8869 Register result, | |
| 8870 Register shift_distance, | |
| 8871 Register mask_bits, | |
| 8872 Register sum_of_digits) { | |
| 8873 __ add(result, mask_bits, Operand(sum_of_digits, LSL, shift_distance)); | |
| 8874 __ Ret(); | |
| 8875 } | |
| 8876 | |
| 8877 | |
| 8878 // See comment for class. | |
| 8879 void IntegerModStub::Generate(MacroAssembler* masm) { | |
| 8880 __ mov(lhs_, Operand(lhs_, LSR, shift_distance_)); | |
| 8881 __ bic(odd_number_, odd_number_, Operand(1)); | |
| 8882 __ mov(odd_number_, Operand(odd_number_, LSL, 1)); | |
| 8883 // We now have (odd_number_ - 1) * 2 in the register. | |
| 8884 // Build a switch out of branches instead of data because it avoids | |
| 8885 // having to teach the assembler about intra-code-object pointers | |
| 8886 // that are not in relative branch instructions. | |
| 8887 Label mod3, mod5, mod7, mod9, mod11, mod13, mod15, mod17, mod19; | |
| 8888 Label mod21, mod23, mod25; | |
| 8889 { Assembler::BlockConstPoolScope block_const_pool(masm); | |
| 8890 __ add(pc, pc, Operand(odd_number_)); | |
| 8891 // When you read pc it is always 8 ahead, but when you write it you always | |
| 8892 // write the actual value. So we put in two nops to take up the slack. | |
| 8893 __ nop(); | |
| 8894 __ nop(); | |
| 8895 __ b(&mod3); | |
| 8896 __ b(&mod5); | |
| 8897 __ b(&mod7); | |
| 8898 __ b(&mod9); | |
| 8899 __ b(&mod11); | |
| 8900 __ b(&mod13); | |
| 8901 __ b(&mod15); | |
| 8902 __ b(&mod17); | |
| 8903 __ b(&mod19); | |
| 8904 __ b(&mod21); | |
| 8905 __ b(&mod23); | |
| 8906 __ b(&mod25); | |
| 8907 } | |
| 8908 | |
| 8909 // For each denominator we find a multiple that is almost only ones | |
| 8910 // when expressed in binary. Then we do the sum-of-digits trick for | |
| 8911 // that number. If the multiple is not 1 then we have to do a little | |
| 8912 // more work afterwards to get the answer into the 0 to denominator-1 | |
| 8913 // range. | |
| 8914 DigitSum(masm, lhs_, 3, 2, &mod3); // 3 = b11. | |
| 8915 __ sub(lhs_, lhs_, Operand(3), LeaveCC, eq); | |
| 8916 ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); | |
| 8917 | |
| 8918 DigitSum(masm, lhs_, 0xf, 4, &mod5); // 5 * 3 = b1111. | |
| 8919 ModGetInRangeBySubtraction(masm, lhs_, 2, 5); | |
| 8920 ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); | |
| 8921 | |
| 8922 DigitSum(masm, lhs_, 7, 3, &mod7); // 7 = b111. | |
| 8923 __ sub(lhs_, lhs_, Operand(7), LeaveCC, eq); | |
| 8924 ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); | |
| 8925 | |
| 8926 DigitSum(masm, lhs_, 0x3f, 6, &mod9); // 7 * 9 = b111111. | |
| 8927 ModGetInRangeBySubtraction(masm, lhs_, 3, 9); | |
| 8928 ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); | |
| 8929 | |
| 8930 DigitSum(masm, lhs_, r5, 0x3f, 6, 3, &mod11); // 5 * 11 = b110111. | |
| 8931 ModReduce(masm, lhs_, 0x3f, 11); | |
| 8932 ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); | |
| 8933 | |
| 8934 DigitSum(masm, lhs_, r5, 0xff, 8, 5, &mod13); // 19 * 13 = b11110111. | |
| 8935 ModReduce(masm, lhs_, 0xff, 13); | |
| 8936 ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); | |
| 8937 | |
| 8938 DigitSum(masm, lhs_, 0xf, 4, &mod15); // 15 = b1111. | |
| 8939 __ sub(lhs_, lhs_, Operand(15), LeaveCC, eq); | |
| 8940 ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); | |
| 8941 | |
| 8942 DigitSum(masm, lhs_, 0xff, 8, &mod17); // 15 * 17 = b11111111. | |
| 8943 ModGetInRangeBySubtraction(masm, lhs_, 4, 17); | |
| 8944 ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); | |
| 8945 | |
| 8946 DigitSum(masm, lhs_, r5, 0xff, 8, 5, &mod19); // 13 * 19 = b11110111. | |
| 8947 ModReduce(masm, lhs_, 0xff, 19); | |
| 8948 ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); | |
| 8949 | |
| 8950 DigitSum(masm, lhs_, 0x3f, 6, &mod21); // 3 * 21 = b111111. | |
| 8951 ModReduce(masm, lhs_, 0x3f, 21); | |
| 8952 ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); | |
| 8953 | |
| 8954 DigitSum(masm, lhs_, r5, 0xff, 8, 7, &mod23); // 11 * 23 = b11111101. | |
| 8955 ModReduce(masm, lhs_, 0xff, 23); | |
| 8956 ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); | |
| 8957 | |
| 8958 DigitSum(masm, lhs_, r5, 0x7f, 7, 6, &mod25); // 5 * 25 = b1111101. | |
| 8959 ModReduce(masm, lhs_, 0x7f, 25); | |
| 8960 ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); | |
| 8961 } | |
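| // Putting the pieces together (illustrative): for rhs == odd << shift, | |
| // with q = lhs >> shift and r = lhs & ((1 << shift) - 1), | |
| // | |
| //   lhs % rhs == ((q % odd) << shift) + r | |
| // | |
| // which is exactly what ModAnswer assembles from sum_of_digits, | |
| // shift_distance and mask_bits. | |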
| 8962 | |
| 8963 | |
| 8964 const char* GenericBinaryOpStub::GetName() { | 7076 const char* GenericBinaryOpStub::GetName() { |
| 8965 if (name_ != NULL) return name_; | 7077 if (name_ != NULL) return name_; |
| 8966 const int len = 100; | 7078 const int len = 100; |
| 8967 name_ = Bootstrapper::AllocateAutoDeletedArray(len); | 7079 name_ = Bootstrapper::AllocateAutoDeletedArray(len); |
| 8968 if (name_ == NULL) return "OOM"; | 7080 if (name_ == NULL) return "OOM"; |
| 8969 const char* op_name = Token::Name(op_); | 7081 const char* op_name = Token::Name(op_); |
| 8970 const char* overwrite_name; | 7082 const char* overwrite_name; |
| 8971 switch (mode_) { | 7083 switch (mode_) { |
| 8972 case NO_OVERWRITE: overwrite_name = "Alloc"; break; | 7084 case NO_OVERWRITE: overwrite_name = "Alloc"; break; |
| 8973 case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break; | 7085 case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break; |
| 8974 case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break; | 7086 case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break; |
| 8975 default: overwrite_name = "UnknownOverwrite"; break; | 7087 default: overwrite_name = "UnknownOverwrite"; break; |
| 8976 } | 7088 } |
| 8977 | 7089 |
| 8978 OS::SNPrintF(Vector<char>(name_, len), | 7090 OS::SNPrintF(Vector<char>(name_, len), |
| 8979 "GenericBinaryOpStub_%s_%s%s_%s", | 7091 "GenericBinaryOpStub_%s_%s%s_%s", |
| 8980 op_name, | 7092 op_name, |
| 8981 overwrite_name, | 7093 overwrite_name, |
| 8982 specialized_on_rhs_ ? "_ConstantRhs" : "", | 7094 specialized_on_rhs_ ? "_ConstantRhs" : "", |
| 8983 BinaryOpIC::GetName(runtime_operands_type_)); | 7095 BinaryOpIC::GetName(runtime_operands_type_)); |
| 8984 return name_; | 7096 return name_; |
| 8985 } | 7097 } |
| 8986 | 7098 |
| 8987 | 7099 |
| 8988 | |
| 8989 void GenericBinaryOpStub::Generate(MacroAssembler* masm) { | |
| 8990 // lhs_ : x | |
| 8991 // rhs_ : y | |
| 8992 // r0 : result | |
| 8993 | |
| 8994 Register result = r0; | |
| 8995 Register lhs = lhs_; | |
| 8996 Register rhs = rhs_; | |
| 8997 | |
| 8998 // This code can't cope with other register allocations yet. | |
| 8999 ASSERT(result.is(r0) && | |
| 9000 ((lhs.is(r0) && rhs.is(r1)) || | |
| 9001 (lhs.is(r1) && rhs.is(r0)))); | |
| 9002 | |
| 9003 Register smi_test_reg = VirtualFrame::scratch0(); | |
| 9004 Register scratch = VirtualFrame::scratch1(); | |
| 9005 | |
| 9006 // All ops need to know whether we are dealing with two Smis. Set up | |
| 9007 // smi_test_reg to tell us that. | |
| 9008 if (ShouldGenerateSmiCode()) { | |
| 9009 __ orr(smi_test_reg, lhs, Operand(rhs)); | |
| 9010 } | |
| 9011 | |
| 9012 switch (op_) { | |
| 9013 case Token::ADD: { | |
| 9014 Label not_smi; | |
| 9015 // Fast path. | |
| 9016 if (ShouldGenerateSmiCode()) { | |
| 9017 STATIC_ASSERT(kSmiTag == 0); // Adjust code below. | |
| 9018 __ tst(smi_test_reg, Operand(kSmiTagMask)); | |
| 9019 __ b(ne, &not_smi); | |
| 9020 __ add(r0, r1, Operand(r0), SetCC); // Add y optimistically. | |
| 9021 // Return if no overflow. | |
| 9022 __ Ret(vc); | |
| 9023 __ sub(r0, r0, Operand(r1)); // Revert optimistic add. | |
| 9024 } | |
| 9025 HandleBinaryOpSlowCases(masm, &not_smi, lhs, rhs, Builtins::ADD); | |
| 9026 break; | |
| 9027 } | |
| 9028 | |
| 9029 case Token::SUB: { | |
| 9030 Label not_smi; | |
| 9031 // Fast path. | |
| 9032 if (ShouldGenerateSmiCode()) { | |
| 9033 STATIC_ASSERT(kSmiTag == 0); // Adjust code below. | |
| 9034 __ tst(smi_test_reg, Operand(kSmiTagMask)); | |
| 9035 __ b(ne, &not_smi); | |
| 9036 if (lhs.is(r1)) { | |
| 9037 __ sub(r0, r1, Operand(r0), SetCC); // Subtract y optimistically. | |
| 9038 // Return if no overflow. | |
| 9039 __ Ret(vc); | |
| 9040 __ sub(r0, r1, Operand(r0)); // Revert optimistic subtract. | |
| 9041 } else { | |
| 9042 __ sub(r0, r0, Operand(r1), SetCC); // Subtract y optimistically. | |
| 9043 // Return if no overflow. | |
| 9044 __ Ret(vc); | |
| 9045 __ add(r0, r0, Operand(r1)); // Revert optimistic subtract. | |
| 9046 } | |
| 9047 } | |
| 9048 HandleBinaryOpSlowCases(masm, &not_smi, lhs, rhs, Builtins::SUB); | |
| 9049 break; | |
| 9050 } | |
| 9051 | |
| 9052 case Token::MUL: { | |
| 9053 Label not_smi, slow; | |
| 9054 if (ShouldGenerateSmiCode()) { | |
| 9055 STATIC_ASSERT(kSmiTag == 0); // adjust code below | |
| 9056 __ tst(smi_test_reg, Operand(kSmiTagMask)); | |
| 9057 Register scratch2 = smi_test_reg; | |
| 9058 smi_test_reg = no_reg; | |
| 9059 __ b(ne, &not_smi); | |
| 9060 // Remove tag from one operand (but keep sign), so that result is Smi. | |
| 9061 __ mov(ip, Operand(rhs, ASR, kSmiTagSize)); | |
| 9062 // Do multiplication | |
| 9063 // scratch = lower 32 bits of ip * lhs. | |
| 9064 __ smull(scratch, scratch2, lhs, ip); | |
| 9065 // Go slow on overflow (smull does not set the overflow flag). | |
| 9066 __ mov(ip, Operand(scratch, ASR, 31)); | |
| 9067 // No overflow if higher 33 bits are identical. | |
| 9068 __ cmp(ip, Operand(scratch2)); | |
| 9069 __ b(ne, &slow); | |
| 9070 // Go slow on zero result to handle -0. | |
| 9071 __ tst(scratch, Operand(scratch)); | |
| 9072 __ mov(result, Operand(scratch), LeaveCC, ne); | |
| 9073 __ Ret(ne); | |
| 9074 // We need -0 if we were multiplying a negative number with 0 to get 0. | |
| 9075 // We know one of them was zero. | |
| 9076 __ add(scratch2, rhs, Operand(lhs), SetCC); | |
| 9077 __ mov(result, Operand(Smi::FromInt(0)), LeaveCC, pl); | |
| 9078 __ Ret(pl); // Return Smi 0 if the non-zero one was positive. | |
| 9079 // Slow case. We fall through here if we multiplied a negative number | |
| 9080 // with 0, because that would mean we should produce -0. | |
| 9081 __ bind(&slow); | |
| 9082 } | |
| 9083 HandleBinaryOpSlowCases(masm, &not_smi, lhs, rhs, Builtins::MUL); | |
| 9084 break; | |
| 9085 } | |
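| // The overflow check in the MUL case above, in C terms (illustrative; | |
| // lhs_smi and rhs_untagged are stand-in names): | |
| // | |
| //   int64_t product = (int64_t)lhs_smi * (int64_t)rhs_untagged; | |
| //   int32_t lo = (int32_t)product; | |
| //   int32_t hi = (int32_t)(product >> 32); | |
| //   bool fits = (hi == (lo >> 31));  // High 33 bits identical => in range. | |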
| 9086 | |
| 9087 case Token::DIV: | |
| 9088 case Token::MOD: { | |
| 9089 Label not_smi; | |
| 9090 if (ShouldGenerateSmiCode() && specialized_on_rhs_) { | |
| 9091 Label lhs_is_unsuitable; | |
| 9092 __ BranchOnNotSmi(lhs, &not_smi); | |
| 9093 if (IsPowerOf2(constant_rhs_)) { | |
| 9094 if (op_ == Token::MOD) { | |
| 9095 __ and_(rhs, | |
| 9096 lhs, | |
| 9097 Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)), | |
| 9098 SetCC); | |
| 9099 // We now have the answer, but if the input was negative we also | |
| 9100 // have the sign bit. Our work is done if the result is | |
| 9101 // positive or zero: | |
| 9102 if (!rhs.is(r0)) { | |
| 9103 __ mov(r0, rhs, LeaveCC, pl); | |
| 9104 } | |
| 9105 __ Ret(pl); | |
| 9106 // A mod of a negative left hand side must return a negative number. | |
| 9107 // Unfortunately if the answer is 0 then we must return -0. And we | |
| 9108 // already optimistically trashed rhs so we may need to restore it. | |
| 9109 __ eor(rhs, rhs, Operand(0x80000000u), SetCC); | |
| 9110 // Next two instructions are conditional on the answer being -0. | |
| 9111 __ mov(rhs, Operand(Smi::FromInt(constant_rhs_)), LeaveCC, eq); | |
| 9112 __ b(eq, &lhs_is_unsuitable); | |
| 9113 // We need to subtract the dividend. Eg. -3 % 4 == -3. | |
| 9114 __ sub(result, rhs, Operand(Smi::FromInt(constant_rhs_))); | |
| 9115 } else { | |
| 9116 ASSERT(op_ == Token::DIV); | |
| 9117 __ tst(lhs, | |
| 9118 Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1))); | |
| 9119 __ b(ne, &lhs_is_unsuitable); // Go slow on negative or remainder. | |
| 9120 int shift = 0; | |
| 9121 int d = constant_rhs_; | |
| 9122 while ((d & 1) == 0) { | |
| 9123 d >>= 1; | |
| 9124 shift++; | |
| 9125 } | |
| 9126 __ mov(r0, Operand(lhs, LSR, shift)); | |
| 9127 __ bic(r0, r0, Operand(kSmiTagMask)); | |
| 9128 } | |
| 9129 } else { | |
| 9130 // Not a power of 2. | |
| 9131 __ tst(lhs, Operand(0x80000000u)); | |
| 9132 __ b(ne, &lhs_is_unsuitable); | |
| 9133 // Find a fixed point reciprocal of the divisor so we can divide by | |
| 9134 // multiplying. | |
| 9135 double divisor = 1.0 / constant_rhs_; | |
| 9136 int shift = 32; | |
| 9137 double scale = 4294967296.0; // 1 << 32. | |
| 9138 uint32_t mul; | |
| 9139 // Maximise the precision of the fixed point reciprocal. | |
| 9140 while (true) { | |
| 9141 mul = static_cast<uint32_t>(scale * divisor); | |
| 9142 if (mul >= 0x7fffffff) break; | |
| 9143 scale *= 2.0; | |
| 9144 shift++; | |
| 9145 } | |
| 9146 mul++; | |
| 9147 Register scratch2 = smi_test_reg; | |
| 9148 smi_test_reg = no_reg; | |
| 9149 __ mov(scratch2, Operand(mul)); | |
| 9150 __ umull(scratch, scratch2, scratch2, lhs); | |
| 9151 __ mov(scratch2, Operand(scratch2, LSR, shift - 31)); | |
| 9152 // scratch2 is lhs / rhs. scratch2 is not Smi tagged. | |
| 9153 // rhs is still the known rhs. rhs is Smi tagged. | |
| 9154 // lhs is still the unknown lhs. lhs is Smi tagged. | |
| 9155 int required_scratch_shift = 0; // Including the Smi tag shift of 1. | |
| 9156 // scratch = scratch2 * rhs. | |
| 9157 MultiplyByKnownInt2(masm, | |
| 9158 scratch, | |
| 9159 scratch2, | |
| 9160 rhs, | |
| 9161 constant_rhs_, | |
| 9162 &required_scratch_shift); | |
| 9163 // scratch << required_scratch_shift is now the Smi tagged rhs * | |
| 9164 // (lhs / rhs) where / indicates integer division. | |
| 9165 if (op_ == Token::DIV) { | |
| 9166 __ cmp(lhs, Operand(scratch, LSL, required_scratch_shift)); | |
| 9167 __ b(ne, &lhs_is_unsuitable); // There was a remainder. | |
| 9168 __ mov(result, Operand(scratch2, LSL, kSmiTagSize)); | |
| 9169 } else { | |
| 9170 ASSERT(op_ == Token::MOD); | |
| 9171 __ sub(result, lhs, Operand(scratch, LSL, required_scratch_shift)); | |
| 9172 } | |
| 9173 } | |
| 9174 __ Ret(); | |
| 9175 __ bind(&lhs_is_unsuitable); | |
| 9176 } else if (op_ == Token::MOD && | |
| 9177 runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS && | |
| 9178 runtime_operands_type_ != BinaryOpIC::STRINGS) { | |
| 9179 // Do generate a bit of smi code for modulus even though the default for | |
| 9180 // modulus is not to do it; since the ARM processor has no hardware | |
| 9181 // support for modulus, checking for smis makes sense. We can handle | |
| 9182 // 1 to 25 times any power of 2. This covers over half the numbers from | |
| 9183 // 1 to 100 including all of the first 25. (Actually the constants < 10 | |
| 9184 // are handled above by reciprocal multiplication. We only get here for | |
| 9185 // those cases if the right hand side is not a constant or for cases | |
| 9186 // like 192 which is 3*2^6 and ends up in the 3 case in the integer mod | |
| 9187 // stub.) | |
| 9188 Label slow; | |
| 9189 Label not_power_of_2; | |
| 9190 ASSERT(!ShouldGenerateSmiCode()); | |
| 9191 STATIC_ASSERT(kSmiTag == 0); // Adjust code below. | |
| 9192 // Check for two positive smis. | |
| 9193 __ orr(smi_test_reg, lhs, Operand(rhs)); | |
| 9194 __ tst(smi_test_reg, Operand(0x80000000u | kSmiTagMask)); | |
| 9195 __ b(ne, &slow); | |
| 9196 // Check that rhs is a power of two and not zero. | |
| 9197 Register mask_bits = r3; | |
| 9198 __ sub(scratch, rhs, Operand(1), SetCC); | |
| 9199 __ b(mi, &slow); | |
| 9200 __ and_(mask_bits, rhs, Operand(scratch), SetCC); | |
| 9201 __ b(ne, &not_power_of_2); | |
| 9202 // Calculate power of two modulus. | |
| 9203 __ and_(result, lhs, Operand(scratch)); | |
| 9204 __ Ret(); | |
| 9205 | |
| 9206 __ bind(&not_power_of_2); | |
| 9207 __ eor(scratch, scratch, Operand(mask_bits)); | |
| 9208 // At least two bits are set in the modulus. The high one(s) are in | |
| 9209 // mask_bits and the low one is scratch + 1. | |
| 9210 __ and_(mask_bits, scratch, Operand(lhs)); | |
| 9211 Register shift_distance = scratch; | |
| 9212 scratch = no_reg; | |
| 9213 | |
| 9214 // The rhs consists of a power of 2 multiplied by some odd number. | |
| 9215 // The power-of-2 part we handle by putting the corresponding bits | |
| 9216 // from the lhs in the mask_bits register, and the power in the | |
| 9217 // shift_distance register. Shift distance is never 0 due to Smi | |
| 9218 // tagging. | |
| 9219 __ CountLeadingZeros(r4, shift_distance, shift_distance); | |
| 9220 __ rsb(shift_distance, r4, Operand(32)); | |
| 9221 | |
| 9222 // Now we need to find out what the odd number is. The last bit is | |
| 9223 // always 1. | |
| 9224 Register odd_number = r4; | |
| 9225 __ mov(odd_number, Operand(rhs, LSR, shift_distance)); | |
| 9226 __ cmp(odd_number, Operand(25)); | |
| 9227 __ b(gt, &slow); | |
| 9228 | |
| 9229 IntegerModStub stub( | |
| 9230 result, shift_distance, odd_number, mask_bits, lhs, r5); | |
| 9231 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); // Tail call. | |
| 9232 | |
| 9233 __ bind(&slow); | |
| 9234 } | |
| 9235 HandleBinaryOpSlowCases( | |
| 9236 masm, | |
| 9237 &not_smi, | |
| 9238 lhs, | |
| 9239 rhs, | |
| 9240 op_ == Token::MOD ? Builtins::MOD : Builtins::DIV); | |
| 9241 break; | |
| 9242 } | |
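| // The fixed-point reciprocal division above, in C terms (illustrative; | |
| // lhs_value is the untagged left operand): mul is scaled up until | |
| // mul ~= (1 << shift) / constant_rhs uses the full 32 bits, and then | |
| // | |
| //   uint32_t quotient = (uint32_t)(((uint64_t)mul * lhs_value) >> shift); | |
| // | |
| // For DIV, multiplying back via MultiplyByKnownInt2 and comparing with | |
| // lhs catches both rounding error in mul and genuine remainders, | |
| // sending them to the slow path. | |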
| 9243 | |
| 9244 case Token::BIT_OR: | |
| 9245 case Token::BIT_AND: | |
| 9246 case Token::BIT_XOR: | |
| 9247 case Token::SAR: | |
| 9248 case Token::SHR: | |
| 9249 case Token::SHL: { | |
| 9250 Label slow; | |
| 9251 STATIC_ASSERT(kSmiTag == 0); // adjust code below | |
| 9252 __ tst(smi_test_reg, Operand(kSmiTagMask)); | |
| 9253 __ b(ne, &slow); | |
| 9254 Register scratch2 = smi_test_reg; | |
| 9255 smi_test_reg = no_reg; | |
| 9256 switch (op_) { | |
| 9257 case Token::BIT_OR: __ orr(result, rhs, Operand(lhs)); break; | |
| 9258 case Token::BIT_AND: __ and_(result, rhs, Operand(lhs)); break; | |
| 9259 case Token::BIT_XOR: __ eor(result, rhs, Operand(lhs)); break; | |
| 9260 case Token::SAR: | |
| 9261 // Remove tags from right operand. | |
| 9262 __ GetLeastBitsFromSmi(scratch2, rhs, 5); | |
| 9263 __ mov(result, Operand(lhs, ASR, scratch2)); | |
| 9264 // Smi tag result. | |
| 9265 __ bic(result, result, Operand(kSmiTagMask)); | |
| 9266 break; | |
| 9267 case Token::SHR: | |
| 9268 // Remove tags from operands. We can't do this on a 31 bit number | |
| 9269 // because then the 0s get shifted into bit 30 instead of bit 31. | |
| 9270 __ mov(scratch, Operand(lhs, ASR, kSmiTagSize)); // x | |
| 9271 __ GetLeastBitsFromSmi(scratch2, rhs, 5); | |
| 9272 __ mov(scratch, Operand(scratch, LSR, scratch2)); | |
| 9273 // Unsigned shift is not allowed to produce a negative number, so | |
| 9274 // check the sign bit and the sign bit after Smi tagging. | |
| 9275 __ tst(scratch, Operand(0xc0000000)); | |
| 9276 __ b(ne, &slow); | |
| 9277 // Smi tag result. | |
| 9278 __ mov(result, Operand(scratch, LSL, kSmiTagSize)); | |
| 9279 break; | |
| 9280 case Token::SHL: | |
| 9281 // Remove tags from operands. | |
| 9282 __ mov(scratch, Operand(lhs, ASR, kSmiTagSize)); // x | |
| 9283 __ GetLeastBitsFromSmi(scratch2, rhs, 5); | |
| 9284 __ mov(scratch, Operand(scratch, LSL, scratch2)); | |
| 9285 // Check that the signed result fits in a Smi. | |
| 9286 __ add(scratch2, scratch, Operand(0x40000000), SetCC); | |
| 9287 __ b(mi, &slow); | |
| 9288 __ mov(result, Operand(scratch, LSL, kSmiTagSize)); | |
| 9289 break; | |
| 9290 default: UNREACHABLE(); | |
| 9291 } | |
| 9292 __ Ret(); | |
| 9293 __ bind(&slow); | |
| 9294 HandleNonSmiBitwiseOp(masm, lhs, rhs); | |
| 9295 break; | |
| 9296 } | |
| 9297 | |
| 9298 default: UNREACHABLE(); | |
| 9299 } | |
| 9300 // This code should be unreachable. | |
| 9301 __ stop("Unreachable"); | |
| 9302 | |
| 9303 // Generate an unreachable reference to the DEFAULT stub so that it can be | |
| 9304 // found at the end of this stub when clearing ICs at GC. | |
| 9305 // TODO(kaznacheev): Check performance impact and get rid of this. | |
| 9306 if (runtime_operands_type_ != BinaryOpIC::DEFAULT) { | |
| 9307 GenericBinaryOpStub uninit(MinorKey(), BinaryOpIC::DEFAULT); | |
| 9308 __ CallStub(&uninit); | |
| 9309 } | |
| 9310 } | |
| 9311 | |
| 9312 | |
| 9313 void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { | |
| 9314 Label get_result; | |
| 9315 | |
| 9316 __ Push(r1, r0); | |
| 9317 | |
| 9318 __ mov(r2, Operand(Smi::FromInt(MinorKey()))); | |
| 9319 __ mov(r1, Operand(Smi::FromInt(op_))); | |
| 9320 __ mov(r0, Operand(Smi::FromInt(runtime_operands_type_))); | |
| 9321 __ Push(r2, r1, r0); | |
| 9322 | |
| 9323 __ TailCallExternalReference( | |
| 9324 ExternalReference(IC_Utility(IC::kBinaryOp_Patch)), | |
| 9325 5, | |
| 9326 1); | |
| 9327 } | |
| 9328 | |
| 9329 | |
| 9330 Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) { | |
| 9331 GenericBinaryOpStub stub(key, type_info); | |
| 9332 return stub.GetCode(); | |
| 9333 } | |
| 9334 | |
| 9335 | |
| 9336 void TranscendentalCacheStub::Generate(MacroAssembler* masm) { | |
| 9337 // Argument is a number and is on the stack and in r0. | |
| 9338 Label runtime_call; | |
| 9339 Label input_not_smi; | |
| 9340 Label loaded; | |
| 9341 | |
| 9342 if (CpuFeatures::IsSupported(VFP3)) { | |
| 9343 // Load argument and check if it is a smi. | |
| 9344 __ BranchOnNotSmi(r0, &input_not_smi); | |
| 9345 | |
| 9346 CpuFeatures::Scope scope(VFP3); | |
| 9347 // Input is a smi. Convert to double and load the low and high words | |
| 9348 // of the double into r2, r3. | |
| 9349 __ IntegerToDoubleConversionWithVFP3(r0, r3, r2); | |
| 9350 __ b(&loaded); | |
| 9351 | |
| 9352 __ bind(&input_not_smi); | |
| 9353 // Check if input is a HeapNumber. | |
| 9354 __ CheckMap(r0, | |
| 9355 r1, | |
| 9356 Heap::kHeapNumberMapRootIndex, | |
| 9357 &runtime_call, | |
| 9358 true); | |
| 9359 // Input is a HeapNumber. Load it to a double register and store the | |
| 9360 // low and high words into r2, r3. | |
| 9361 __ Ldrd(r2, r3, FieldMemOperand(r0, HeapNumber::kValueOffset)); | |
| 9362 | |
| 9363 __ bind(&loaded); | |
| 9364 // r2 = low 32 bits of double value | |
| 9365 // r3 = high 32 bits of double value | |
| 9366 // Compute hash (the shifts are arithmetic): | |
| 9367 // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1); | |
| 9368 __ eor(r1, r2, Operand(r3)); | |
| 9369 __ eor(r1, r1, Operand(r1, ASR, 16)); | |
| 9370 __ eor(r1, r1, Operand(r1, ASR, 8)); | |
| 9371 ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize)); | |
| 9372 __ And(r1, r1, Operand(TranscendentalCache::kCacheSize - 1)); | |
| 9373 | |
| 9374 // r2 = low 32 bits of double value. | |
| 9375 // r3 = high 32 bits of double value. | |
| 9376 // r1 = TranscendentalCache::hash(double value). | |
| 9377 __ mov(r0, | |
| 9378 Operand(ExternalReference::transcendental_cache_array_address())); | |
| 9379 // r0 points to cache array. | |
| 9380 __ ldr(r0, MemOperand(r0, type_ * sizeof(TranscendentalCache::caches_[0]))); | |
| 9381 // r0 points to the cache for the type type_. | |
| 9382 // If NULL, the cache hasn't been initialized yet, so go through runtime. | |
| 9383 __ cmp(r0, Operand(0)); | |
| 9384 __ b(eq, &runtime_call); | |
| 9385 | |
| 9386 #ifdef DEBUG | |
| 9387 // Check that the layout of cache elements match expectations. | |
| 9388 { TranscendentalCache::Element test_elem[2]; | |
| 9389 char* elem_start = reinterpret_cast<char*>(&test_elem[0]); | |
| 9390 char* elem2_start = reinterpret_cast<char*>(&test_elem[1]); | |
| 9391 char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0])); | |
| 9392 char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1])); | |
| 9393 char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output)); | |
| 9394 CHECK_EQ(12, elem2_start - elem_start); // Two uint32_t's and a pointer. | |
| 9395 CHECK_EQ(0, elem_in0 - elem_start); | |
| 9396 CHECK_EQ(kIntSize, elem_in1 - elem_start); | |
| 9397 CHECK_EQ(2 * kIntSize, elem_out - elem_start); | |
| 9398 } | |
| 9399 #endif | |
| 9400 | |
| 9401 // Find the address of the r1'st entry in the cache, i.e., &r0[r1*12]. | |
| 9402 __ add(r1, r1, Operand(r1, LSL, 1)); | |
| 9403 __ add(r0, r0, Operand(r1, LSL, 2)); | |
| 9404 // Check if cache matches: Double value is stored in uint32_t[2] array. | |
| 9405 __ ldm(ia, r0, r4.bit()| r5.bit() | r6.bit()); | |
| 9406 __ cmp(r2, r4); | |
| 9407 __ b(ne, &runtime_call); | |
| 9408 __ cmp(r3, r5); | |
| 9409 __ b(ne, &runtime_call); | |
| 9410 // Cache hit. Load result, pop argument and return. | |
| 9411 __ mov(r0, Operand(r6)); | |
| 9412 __ pop(); | |
| 9413 __ Ret(); | |
| 9414 } | |
| 9415 | |
| 9416 __ bind(&runtime_call); | |
| 9417 __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1); | |
| 9418 } | |
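| // The cache probe above, as a C sketch (illustrative; cache is a | |
| // stand-in for the per-type Element array, whose 12-byte layout the | |
| // DEBUG block checks): | |
| // | |
| //   int32_t h = (int32_t)(lo ^ hi);  // Arithmetic shifts, as noted. | |
| //   h ^= h >> 16; | |
| //   h ^= h >> 8; | |
| //   h &= TranscendentalCache::kCacheSize - 1; | |
| //   Element* e = &cache[h]; | |
| //   if (e->in[0] == lo && e->in[1] == hi) return e->output;  // Hit. | |
| //   // Otherwise fall through to the runtime call. | |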
| 9419 | |
| 9420 | |
| 9421 Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() { | |
| 9422 switch (type_) { | |
| 9423 // Add more cases when necessary. | |
| 9424 case TranscendentalCache::SIN: return Runtime::kMath_sin; | |
| 9425 case TranscendentalCache::COS: return Runtime::kMath_cos; | |
| 9426 default: | |
| 9427 UNIMPLEMENTED(); | |
| 9428 return Runtime::kAbort; | |
| 9429 } | |
| 9430 } | |
| 9431 | |
| 9432 | |
| 9433 void StackCheckStub::Generate(MacroAssembler* masm) { | |
| 9434 // Do tail-call to runtime routine. Runtime routines expect at least one | |
| 9435 // argument, so give it a Smi. | |
| 9436 __ mov(r0, Operand(Smi::FromInt(0))); | |
| 9437 __ push(r0); | |
| 9438 __ TailCallRuntime(Runtime::kStackGuard, 1, 1); | |
| 9439 | |
| 9440 __ StubReturn(1); | |
| 9441 } | |
| 9442 | |
| 9443 | |
| 9444 void GenericUnaryOpStub::Generate(MacroAssembler* masm) { | |
| 9445 Label slow, done; | |
| 9446 | |
| 9447 Register heap_number_map = r6; | |
| 9448 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | |
| 9449 | |
| 9450 if (op_ == Token::SUB) { | |
| 9451 // Check whether the value is a smi. | |
| 9452 Label try_float; | |
| 9453 __ tst(r0, Operand(kSmiTagMask)); | |
| 9454 __ b(ne, &try_float); | |
| 9455 | |
| 9456 // Go to the slow case if the value of the expression is zero, | |
| 9457 // to make sure that we distinguish between 0 and -0. | |
| 9458 if (negative_zero_ == kStrictNegativeZero) { | |
| 9459 // If we have to check for zero, then we can check for the max negative | |
| 9460 // smi while we are at it. | |
| 9461 __ bic(ip, r0, Operand(0x80000000), SetCC); | |
| 9462 __ b(eq, &slow); | |
| 9463 __ rsb(r0, r0, Operand(0)); | |
| 9464 __ StubReturn(1); | |
| 9465 } else { | |
| 9466 // The value of the expression is a smi and 0 is OK for -0. Try | |
| 9467 // optimistic subtraction '0 - value'. | |
| 9468 __ rsb(r0, r0, Operand(0), SetCC); | |
| 9469 __ StubReturn(1, vc); | |
| 9470 // We don't have to reverse the optimistic neg since the only case | |
| 9471 // where we fall through is the minimum negative Smi, which is the case | |
| 9472 // where the neg leaves the register unchanged. | |
| 9473 __ jmp(&slow); // Go slow on max negative Smi. | |
| 9474 } | |
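| // [Editor's sketch] Why a single bic catches both special cases: with a | |
| // one-bit smi tag, smi 0 is encoded as 0x00000000 and the minimum smi as | |
| // 0x80000000, so clearing bit 31 yields zero exactly for those two values | |
| // (negating 0 needs -0, and negating the minimum smi overflows): | |
| //   bool NegationNeedsSlowPath(uint32_t tagged) { | |
| //     return (tagged & 0x7FFFFFFF) == 0; | |
| //   } | |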
| 9475 | |
| 9476 __ bind(&try_float); | |
| 9477 __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset)); | |
| 9478 __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | |
| 9479 __ cmp(r1, heap_number_map); | |
| 9480 __ b(ne, &slow); | |
| 9481 // r0 is a heap number. Get a new heap number in r1. | |
| 9482 if (overwrite_ == UNARY_OVERWRITE) { | |
| 9483 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); | |
| 9484 __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign. | |
| 9485 __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); | |
| 9486 } else { | |
| 9487 __ AllocateHeapNumber(r1, r2, r3, r6, &slow); | |
| 9488 __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); | |
| 9489 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); | |
| 9490 __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset)); | |
| 9491 __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign. | |
| 9492 __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset)); | |
| 9493 __ mov(r0, Operand(r1)); | |
| 9494 } | |
| 9495 } else if (op_ == Token::BIT_NOT) { | |
| 9496 // Check if the operand is a heap number. | |
| 9497 __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset)); | |
| 9498 __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | |
| 9499 __ cmp(r1, heap_number_map); | |
| 9500 __ b(ne, &slow); | |
| 9501 | |
| 9502 // Convert the heap number in r0 to an untagged integer in r1. | |
| 9503 GetInt32(masm, r0, r1, r2, r3, &slow); | |
| 9504 | |
| 9505 // Do the bitwise operation (mvn, i.e. bitwise NOT) and check if the | |
| 9506 // result fits in a smi. | |
| 9507 Label try_float; | |
| 9508 __ mvn(r1, Operand(r1)); | |
| 9509 __ add(r2, r1, Operand(0x40000000), SetCC); | |
| 9510 __ b(mi, &try_float); | |
| 9511 __ mov(r0, Operand(r1, LSL, kSmiTagSize)); | |
| 9512 __ b(&done); | |
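| // [Editor's sketch] The add-0x40000000 trick above: a 32-bit value fits | |
| // in a smi iff it lies in [-2^30, 2^30 - 1]; adding 2^30 maps that range | |
| // onto [0, 2^31 - 1], so the N flag ("mi") is set exactly when the value | |
| // does not fit: | |
| //   bool FitsInSmi(int32_t v) { | |
| //     return static_cast<uint32_t>(v) + 0x40000000u < 0x80000000u; | |
| //   } | |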
| 9513 | |
| 9514 __ bind(&try_float); | |
| 9515 if (overwrite_ != UNARY_OVERWRITE) { | |
| 9516 // Allocate a fresh heap number, but don't overwrite r0 until | |
| 9517 // we're sure we can do it without going through the slow case | |
| 9518 // that needs the value in r0. | |
| 9519 __ AllocateHeapNumber(r2, r3, r4, r6, &slow); | |
| 9520 __ mov(r0, Operand(r2)); | |
| 9521 } | |
| 9522 | |
| 9523 if (CpuFeatures::IsSupported(VFP3)) { | |
| 9524 // Convert the int32 in r1 to the heap number in r0. r2 is corrupted. | |
| 9525 CpuFeatures::Scope scope(VFP3); | |
| 9526 __ vmov(s0, r1); | |
| 9527 __ vcvt_f64_s32(d0, s0); | |
| 9528 __ sub(r2, r0, Operand(kHeapObjectTag)); | |
| 9529 __ vstr(d0, r2, HeapNumber::kValueOffset); | |
| 9530 } else { | |
| 9531 // WriteInt32ToHeapNumberStub does not trigger GC, so we do not | |
| 9532 // have to set up a frame. | |
| 9533 WriteInt32ToHeapNumberStub stub(r1, r0, r2); | |
| 9534 __ push(lr); | |
| 9535 __ Call(stub.GetCode(), RelocInfo::CODE_TARGET); | |
| 9536 __ pop(lr); | |
| 9537 } | |
| 9538 } else { | |
| 9539 UNIMPLEMENTED(); | |
| 9540 } | |
| 9541 | |
| 9542 __ bind(&done); | |
| 9543 __ StubReturn(1); | |
| 9544 | |
| 9545 // Handle the slow case by jumping to the JavaScript builtin. | |
| 9546 __ bind(&slow); | |
| 9547 __ push(r0); | |
| 9548 switch (op_) { | |
| 9549 case Token::SUB: | |
| 9550 __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_JS); | |
| 9551 break; | |
| 9552 case Token::BIT_NOT: | |
| 9553 __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_JS); | |
| 9554 break; | |
| 9555 default: | |
| 9556 UNREACHABLE(); | |
| 9557 } | |
| 9558 } | |
| 9559 | |
| 9560 | |
| 9561 void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) { | |
| 9562 // r0 holds the exception. | |
| 9563 | |
| 9564 // Adjust this code if not the case. | |
| 9565 STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize); | |
| 9566 | |
| 9567 // Drop the sp to the top of the handler. | |
| 9568 __ mov(r3, Operand(ExternalReference(Top::k_handler_address))); | |
| 9569 __ ldr(sp, MemOperand(r3)); | |
| 9570 | |
| 9571 // Restore the next handler and frame pointer, discard handler state. | |
| 9572 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); | |
| 9573 __ pop(r2); | |
| 9574 __ str(r2, MemOperand(r3)); | |
| 9575 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize); | |
| 9576 __ ldm(ia_w, sp, r3.bit() | fp.bit()); // r3: discarded state. | |
| 9577 | |
| 9578 // Before returning we restore the context from the frame pointer if | |
| 9579 // not NULL. The frame pointer is NULL in the exception handler of a | |
| 9580 // JS entry frame. | |
| 9581 __ cmp(fp, Operand(0)); | |
| 9582 // Set cp to NULL if fp is NULL. | |
| 9583 __ mov(cp, Operand(0), LeaveCC, eq); | |
| 9584 // Restore cp otherwise. | |
| 9585 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne); | |
| 9586 #ifdef DEBUG | |
| 9587 if (FLAG_debug_code) { | |
| 9588 __ mov(lr, Operand(pc)); | |
| 9589 } | |
| 9590 #endif | |
| 9591 STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize); | |
| 9592 __ pop(pc); | |
| 9593 } | |
| 9594 | |
| 9595 | |
| 9596 void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm, | |
| 9597 UncatchableExceptionType type) { | |
| 9598 // Adjust this code if not the case. | |
| 9599 STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize); | |
| 9600 | |
| 9601 // Drop sp to the top stack handler. | |
| 9602 __ mov(r3, Operand(ExternalReference(Top::k_handler_address))); | |
| 9603 __ ldr(sp, MemOperand(r3)); | |
| 9604 | |
| 9605 // Unwind the handlers until the ENTRY handler is found. | |
| 9606 Label loop, done; | |
| 9607 __ bind(&loop); | |
| 9608 // Load the type of the current stack handler. | |
| 9609 const int kStateOffset = StackHandlerConstants::kStateOffset; | |
| 9610 __ ldr(r2, MemOperand(sp, kStateOffset)); | |
| 9611 __ cmp(r2, Operand(StackHandler::ENTRY)); | |
| 9612 __ b(eq, &done); | |
| 9613 // Fetch the next handler in the list. | |
| 9614 const int kNextOffset = StackHandlerConstants::kNextOffset; | |
| 9615 __ ldr(sp, MemOperand(sp, kNextOffset)); | |
| 9616 __ jmp(&loop); | |
| 9617 __ bind(&done); | |
| 9618 | |
| 9619 // Set the top handler address to the next handler past the current ENTRY handler. | |
| 9620 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); | |
| 9621 __ pop(r2); | |
| 9622 __ str(r2, MemOperand(r3)); | |
| 9623 | |
| 9624 if (type == OUT_OF_MEMORY) { | |
| 9625 // Set external caught exception to false. | |
| 9626 ExternalReference external_caught(Top::k_external_caught_exception_address); | |
| 9627 __ mov(r0, Operand(false)); | |
| 9628 __ mov(r2, Operand(external_caught)); | |
| 9629 __ str(r0, MemOperand(r2)); | |
| 9630 | |
| 9631 // Set pending exception and r0 to out of memory exception. | |
| 9632 Failure* out_of_memory = Failure::OutOfMemoryException(); | |
| 9633 __ mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory))); | |
| 9634 __ mov(r2, Operand(ExternalReference(Top::k_pending_exception_address))); | |
| 9635 __ str(r0, MemOperand(r2)); | |
| 9636 } | |
| 9637 | |
| 9638 // Stack layout at this point. See also StackHandlerConstants. | |
| 9639 // sp -> state (ENTRY) | |
| 9640 // fp | |
| 9641 // lr | |
| 9642 | |
| 9643 // Discard handler state (r2 is not used) and restore frame pointer. | |
| 9644 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize); | |
| 9645 __ ldm(ia_w, sp, r2.bit() | fp.bit()); // r2: discarded state. | |
| 9646 // Before returning we restore the context from the frame pointer if | |
| 9647 // not NULL. The frame pointer is NULL in the exception handler of a | |
| 9648 // JS entry frame. | |
| 9649 __ cmp(fp, Operand(0)); | |
| 9650 // Set cp to NULL if fp is NULL. | |
| 9651 __ mov(cp, Operand(0), LeaveCC, eq); | |
| 9652 // Restore cp otherwise. | |
| 9653 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne); | |
| 9654 #ifdef DEBUG | |
| 9655 if (FLAG_debug_code) { | |
| 9656 __ mov(lr, Operand(pc)); | |
| 9657 } | |
| 9658 #endif | |
| 9659 STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize); | |
| 9660 __ pop(pc); | |
| 9661 } | |
| 9662 | |
| 9663 | |
| 9664 void CEntryStub::GenerateCore(MacroAssembler* masm, | |
| 9665 Label* throw_normal_exception, | |
| 9666 Label* throw_termination_exception, | |
| 9667 Label* throw_out_of_memory_exception, | |
| 9668 bool do_gc, | |
| 9669 bool always_allocate, | |
| 9670 int frame_alignment_skew) { | |
| 9671 // r0: result parameter for PerformGC, if any | |
| 9672 // r4: number of arguments including receiver (C callee-saved) | |
| 9673 // r5: pointer to builtin function (C callee-saved) | |
| 9674 // r6: pointer to the first argument (C callee-saved) | |
| 9675 | |
| 9676 if (do_gc) { | |
| 9677 // Passing r0. | |
| 9678 __ PrepareCallCFunction(1, r1); | |
| 9679 __ CallCFunction(ExternalReference::perform_gc_function(), 1); | |
| 9680 } | |
| 9681 | |
| 9682 ExternalReference scope_depth = | |
| 9683 ExternalReference::heap_always_allocate_scope_depth(); | |
| 9684 if (always_allocate) { | |
| 9685 __ mov(r0, Operand(scope_depth)); | |
| 9686 __ ldr(r1, MemOperand(r0)); | |
| 9687 __ add(r1, r1, Operand(1)); | |
| 9688 __ str(r1, MemOperand(r0)); | |
| 9689 } | |
| 9690 | |
| 9691 // Call C built-in. | |
| 9692 // r0 = argc, r1 = argv | |
| 9693 __ mov(r0, Operand(r4)); | |
| 9694 __ mov(r1, Operand(r6)); | |
| 9695 | |
| 9696 int frame_alignment = MacroAssembler::ActivationFrameAlignment(); | |
| 9697 int frame_alignment_mask = frame_alignment - 1; | |
| 9698 #if defined(V8_HOST_ARCH_ARM) | |
| 9699 if (FLAG_debug_code) { | |
| 9700 if (frame_alignment > kPointerSize) { | |
| 9701 Label alignment_as_expected; | |
| 9702 ASSERT(IsPowerOf2(frame_alignment)); | |
| 9703 __ sub(r2, sp, Operand(frame_alignment_skew)); | |
| 9704 __ tst(r2, Operand(frame_alignment_mask)); | |
| 9705 __ b(eq, &alignment_as_expected); | |
| 9706 // Don't use Check here, as it will call Runtime_Abort re-entering here. | |
| 9707 __ stop("Unexpected alignment"); | |
| 9708 __ bind(&alignment_as_expected); | |
| 9709 } | |
| 9710 } | |
| 9711 #endif | |
| 9712 | |
| 9713 // Just before the call (jump) below, lr is pushed, so the actual | |
| 9714 // alignment adds one word to the current skew. | |
| 9715 int alignment_before_call = | |
| 9716 (frame_alignment_skew + kPointerSize) & frame_alignment_mask; | |
| 9717 if (alignment_before_call > 0) { | |
| 9718 // Push until the alignment before the call is met. | |
| 9719 __ mov(r2, Operand(0)); | |
| 9720 for (int i = alignment_before_call; | |
| 9721 (i & frame_alignment_mask) != 0; | |
| 9722 i += kPointerSize) { | |
| 9723 __ push(r2); | |
| 9724 } | |
| 9725 } | |
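| // [Editor's sketch] Host-side view of the padding just emitted, where | |
| // alignment is a power of two and the +kPointerSize accounts for the | |
| // push of lr below: | |
| //   int misalignment = (skew + kPointerSize) & (alignment - 1); | |
| //   int padding_words = misalignment == 0 | |
| //       ? 0 : (alignment - misalignment) / kPointerSize; | |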
| 9726 | |
| 9727 // TODO(1242173): To let the GC traverse the return address of the exit | |
| 9728 // frames, we need to know where the return address is. Right now, | |
| 9729 // we push it on the stack to be able to find it again, but we never | |
| 9730 // restore from it in case of changes, which makes it impossible to | |
| 9731 // support moving the C entry code stub. This should be fixed, but currently | |
| 9732 // this is OK because the CEntryStub gets generated so early in the V8 boot | |
| 9733 // sequence that it never moves. | |
| 9734 masm->add(lr, pc, Operand(4)); // Compute return address: (pc + 8) + 4 | |
| 9735 masm->push(lr); | |
| 9736 masm->Jump(r5); | |
| 9737 | |
| 9738 // Restore sp back to before aligning the stack. | |
| 9739 if (alignment_before_call > 0) { | |
| 9740 __ add(sp, sp, Operand(alignment_before_call)); | |
| 9741 } | |
| 9742 | |
| 9743 if (always_allocate) { | |
| 9744 // It's okay to clobber r2 and r3 here. Don't mess with r0 and r1 | |
| 9745 // though (they contain the result). | |
| 9746 __ mov(r2, Operand(scope_depth)); | |
| 9747 __ ldr(r3, MemOperand(r2)); | |
| 9748 __ sub(r3, r3, Operand(1)); | |
| 9749 __ str(r3, MemOperand(r2)); | |
| 9750 } | |
| 9751 | |
| 9752 // Check for a failure result. | |
| 9753 Label failure_returned; | |
| 9754 STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0); | |
| 9755 // Lower 2 bits of r2 are 0 iff r0 has failure tag. | |
| 9756 __ add(r2, r0, Operand(1)); | |
| 9757 __ tst(r2, Operand(kFailureTagMask)); | |
| 9758 __ b(eq, &failure_returned); | |
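| // [Editor's sketch] The tag trick spelled out: failure objects carry | |
| // kFailureTag in their low two bits, and the STATIC_ASSERT above | |
| // guarantees that adding 1 clears exactly those bits: | |
| //   bool HasFailureTag(uint32_t result) { | |
| //     return ((result + 1) & kFailureTagMask) == 0; | |
| //   } | |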
| 9759 | |
| 9760 // Exit C frame and return. | |
| 9761 // r0:r1: result | |
| 9762 // sp: stack pointer | |
| 9763 // fp: frame pointer | |
| 9764 __ LeaveExitFrame(mode_); | |
| 9765 | |
| 9766 // Check whether we should retry or throw an exception. | |
| 9767 Label retry; | |
| 9768 __ bind(&failure_returned); | |
| 9769 STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0); | |
| 9770 __ tst(r0, Operand(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize)); | |
| 9771 __ b(eq, &retry); | |
| 9772 | |
| 9773 // Special handling of out of memory exceptions. | |
| 9774 Failure* out_of_memory = Failure::OutOfMemoryException(); | |
| 9775 __ cmp(r0, Operand(reinterpret_cast<int32_t>(out_of_memory))); | |
| 9776 __ b(eq, throw_out_of_memory_exception); | |
| 9777 | |
| 9778 // Retrieve the pending exception and clear the variable. | |
| 9779 __ mov(ip, Operand(ExternalReference::the_hole_value_location())); | |
| 9780 __ ldr(r3, MemOperand(ip)); | |
| 9781 __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address))); | |
| 9782 __ ldr(r0, MemOperand(ip)); | |
| 9783 __ str(r3, MemOperand(ip)); | |
| 9784 | |
| 9785 // Special handling of termination exceptions which are uncatchable | |
| 9786 // by JavaScript code. | |
| 9787 __ cmp(r0, Operand(Factory::termination_exception())); | |
| 9788 __ b(eq, throw_termination_exception); | |
| 9789 | |
| 9790 // Handle normal exception. | |
| 9791 __ jmp(throw_normal_exception); | |
| 9792 | |
| 9793 __ bind(&retry); // Pass the last failure (r0) as the parameter (r0) when retrying. | |
| 9794 } | |
| 9795 | |
| 9796 | |
| 9797 void CEntryStub::Generate(MacroAssembler* masm) { | |
| 9798 // Called from JavaScript; parameters are on stack as if calling JS function | |
| 9799 // r0: number of arguments including receiver | |
| 9800 // r1: pointer to builtin function | |
| 9801 // fp: frame pointer (restored after C call) | |
| 9802 // sp: stack pointer (restored as callee's sp after C call) | |
| 9803 // cp: current context (C callee-saved) | |
| 9804 | |
| 9805 // Result returned in r0 or r0+r1 by default. | |
| 9806 | |
| 9807 // NOTE: Invocations of builtins may return failure objects | |
| 9808 // instead of a proper result. The builtin entry handles | |
| 9809 // this by performing a garbage collection and retrying the | |
| 9810 // builtin once. | |
| 9811 | |
| 9812 // Enter the exit frame that transitions from JavaScript to C++. | |
| 9813 __ EnterExitFrame(mode_); | |
| 9814 | |
| 9815 // r4: number of arguments (C callee-saved) | |
| 9816 // r5: pointer to builtin function (C callee-saved) | |
| 9817 // r6: pointer to first argument (C callee-saved) | |
| 9818 | |
| 9819 Label throw_normal_exception; | |
| 9820 Label throw_termination_exception; | |
| 9821 Label throw_out_of_memory_exception; | |
| 9822 | |
| 9823 // Call into the runtime system. | |
| 9824 GenerateCore(masm, | |
| 9825 &throw_normal_exception, | |
| 9826 &throw_termination_exception, | |
| 9827 &throw_out_of_memory_exception, | |
| 9828 false, | |
| 9829 false, | |
| 9830 -kPointerSize); | |
| 9831 | |
| 9832 // Do space-specific GC and retry runtime call. | |
| 9833 GenerateCore(masm, | |
| 9834 &throw_normal_exception, | |
| 9835 &throw_termination_exception, | |
| 9836 &throw_out_of_memory_exception, | |
| 9837 true, | |
| 9838 false, | |
| 9839 0); | |
| 9840 | |
| 9841 // Do full GC and retry runtime call one final time. | |
| 9842 Failure* failure = Failure::InternalError(); | |
| 9843 __ mov(r0, Operand(reinterpret_cast<int32_t>(failure))); | |
| 9844 GenerateCore(masm, | |
| 9845 &throw_normal_exception, | |
| 9846 &throw_termination_exception, | |
| 9847 &throw_out_of_memory_exception, | |
| 9848 true, | |
| 9849 true, | |
| 9850 kPointerSize); | |
| 9851 | |
| 9852 __ bind(&throw_out_of_memory_exception); | |
| 9853 GenerateThrowUncatchable(masm, OUT_OF_MEMORY); | |
| 9854 | |
| 9855 __ bind(&throw_termination_exception); | |
| 9856 GenerateThrowUncatchable(masm, TERMINATION); | |
| 9857 | |
| 9858 __ bind(&throw_normal_exception); | |
| 9859 GenerateThrowTOS(masm); | |
| 9860 } | |
| 9861 | |
| 9862 | |
| 9863 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { | |
| 9864 // r0: code entry | |
| 9865 // r1: function | |
| 9866 // r2: receiver | |
| 9867 // r3: argc | |
| 9868 // [sp+0]: argv | |
| 9869 | |
| 9870 Label invoke, exit; | |
| 9871 | |
| 9872 // Called from C, so do not pop argc and args on exit (preserve sp) | |
| 9873 // No need to save register-passed args | |
| 9874 // Save callee-saved registers (incl. cp and fp), sp, and lr | |
| 9875 __ stm(db_w, sp, kCalleeSaved | lr.bit()); | |
| 9876 | |
| 9877 // Get address of argv, see stm above. | |
| 9878 // r0: code entry | |
| 9879 // r1: function | |
| 9880 // r2: receiver | |
| 9881 // r3: argc | |
| 9882 __ ldr(r4, MemOperand(sp, (kNumCalleeSaved + 1) * kPointerSize)); // argv | |
| 9883 | |
| 9884 // Push a frame with special values set up to mark it as an entry frame. | |
| 9885 // r0: code entry | |
| 9886 // r1: function | |
| 9887 // r2: receiver | |
| 9888 // r3: argc | |
| 9889 // r4: argv | |
| 9890 __ mov(r8, Operand(-1)); // Push a bad frame pointer to fail if it is used. | |
| 9891 int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY; | |
| 9892 __ mov(r7, Operand(Smi::FromInt(marker))); | |
| 9893 __ mov(r6, Operand(Smi::FromInt(marker))); | |
| 9894 __ mov(r5, Operand(ExternalReference(Top::k_c_entry_fp_address))); | |
| 9895 __ ldr(r5, MemOperand(r5)); | |
| 9896 __ Push(r8, r7, r6, r5); | |
| 9897 | |
| 9898 // Set up the frame pointer for the frame to be pushed. | |
| 9899 __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset)); | |
| 9900 | |
| 9901 // Call a faked try-block that does the invoke. | |
| 9902 __ bl(&invoke); | |
| 9903 | |
| 9904 // Caught exception: Store result (exception) in the pending | |
| 9905 // exception field in the JSEnv and return a failure sentinel. | |
| 9906 // Coming in here the fp will be invalid because the PushTryHandler below | |
| 9907 // sets it to 0 to signal the existence of the JSEntry frame. | |
| 9908 __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address))); | |
| 9909 __ str(r0, MemOperand(ip)); | |
| 9910 __ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception()))); | |
| 9911 __ b(&exit); | |
| 9912 | |
| 9913 // Invoke: Link this frame into the handler chain. | |
| 9914 __ bind(&invoke); | |
| 9915 // Must preserve r0-r4; r5-r7 are available. | |
| 9916 __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER); | |
| 9917 // If an exception not caught by another handler occurs, this handler | |
| 9918 // returns control to the code after the bl(&invoke) above, which | |
| 9919 // restores all kCalleeSaved registers (including cp and fp) to their | |
| 9920 // saved values before returning a failure to C. | |
| 9921 | |
| 9922 // Clear any pending exceptions. | |
| 9923 __ mov(ip, Operand(ExternalReference::the_hole_value_location())); | |
| 9924 __ ldr(r5, MemOperand(ip)); | |
| 9925 __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address))); | |
| 9926 __ str(r5, MemOperand(ip)); | |
| 9927 | |
| 9928 // Invoke the function by calling through JS entry trampoline builtin. | |
| 9929 // Notice that we cannot store a reference to the trampoline code directly in | |
| 9930 // this stub, because runtime stubs are not traversed when doing GC. | |
| 9931 | |
| 9932 // Registers expected by Builtins::JSEntryTrampoline: | |
| 9933 // r0: code entry | |
| 9934 // r1: function | |
| 9935 // r2: receiver | |
| 9936 // r3: argc | |
| 9937 // r4: argv | |
| 9938 if (is_construct) { | |
| 9939 ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline); | |
| 9940 __ mov(ip, Operand(construct_entry)); | |
| 9941 } else { | |
| 9942 ExternalReference entry(Builtins::JSEntryTrampoline); | |
| 9943 __ mov(ip, Operand(entry)); | |
| 9944 } | |
| 9945 __ ldr(ip, MemOperand(ip)); // Dereference the address to get the code entry. | |
| 9946 | |
| 9947 // Branch and link to JSEntryTrampoline. We don't use the double underscore | |
| 9948 // macro for the add instruction because we don't want the coverage tool | |
| 9949 // inserting instructions here after we read the pc. | |
| 9950 __ mov(lr, Operand(pc)); | |
| 9951 masm->add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag)); | |
| 9952 | |
| 9953 // Unlink this frame from the handler chain. When reading the | |
| 9954 // address of the next handler, there is no need to use the address | |
| 9955 // displacement since the current stack pointer (sp) points directly | |
| 9956 // to the stack handler. | |
| 9957 __ ldr(r3, MemOperand(sp, StackHandlerConstants::kNextOffset)); | |
| 9958 __ mov(ip, Operand(ExternalReference(Top::k_handler_address))); | |
| 9959 __ str(r3, MemOperand(ip)); | |
| 9960 // No need to restore registers | |
| 9961 __ add(sp, sp, Operand(StackHandlerConstants::kSize)); | |
| 9962 | |
| 9963 | |
| 9964 __ bind(&exit); // r0 holds result | |
| 9965 // Restore the top frame descriptors from the stack. | |
| 9966 __ pop(r3); | |
| 9967 __ mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address))); | |
| 9968 __ str(r3, MemOperand(ip)); | |
| 9969 | |
| 9970 // Reset the stack to the callee saved registers. | |
| 9971 __ add(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset)); | |
| 9972 | |
| 9973 // Restore callee-saved registers and return. | |
| 9974 #ifdef DEBUG | |
| 9975 if (FLAG_debug_code) { | |
| 9976 __ mov(lr, Operand(pc)); | |
| 9977 } | |
| 9978 #endif | |
| 9979 __ ldm(ia_w, sp, kCalleeSaved | pc.bit()); | |
| 9980 } | |
| 9981 | |
| 9982 | |
| 9983 // This stub performs an instanceof, calling the builtin function if | |
| 9984 // necessary. Uses r0 for the object and r1 for the function that it may | |
| 9985 // be an instance of (both are fetched from the stack). | |
| 9986 void InstanceofStub::Generate(MacroAssembler* masm) { | |
| 9987 // Get the object - slow case for smis (we may need to throw an exception | |
| 9988 // depending on the rhs). | |
| 9989 Label slow, loop, is_instance, is_not_instance; | |
| 9990 __ ldr(r0, MemOperand(sp, 1 * kPointerSize)); | |
| 9991 __ BranchOnSmi(r0, &slow); | |
| 9992 | |
| 9993 // Check that the left hand side is a JS object and put its map in r3. | |
| 9994 __ CompareObjectType(r0, r3, r2, FIRST_JS_OBJECT_TYPE); | |
| 9995 __ b(lt, &slow); | |
| 9996 __ cmp(r2, Operand(LAST_JS_OBJECT_TYPE)); | |
| 9997 __ b(gt, &slow); | |
| 9998 | |
| 9999 // Get the prototype of the function (r4 is result, r2 is scratch). | |
| 10000 __ ldr(r1, MemOperand(sp, 0)); | |
| 10001 // r1 is function, r3 is map. | |
| 10002 | |
| 10003 // Look up the function and the map in the instanceof cache. | |
| 10004 Label miss; | |
| 10005 __ LoadRoot(ip, Heap::kInstanceofCacheFunctionRootIndex); | |
| 10006 __ cmp(r1, ip); | |
| 10007 __ b(ne, &miss); | |
| 10008 __ LoadRoot(ip, Heap::kInstanceofCacheMapRootIndex); | |
| 10009 __ cmp(r3, ip); | |
| 10010 __ b(ne, &miss); | |
| 10011 __ LoadRoot(r0, Heap::kInstanceofCacheAnswerRootIndex); | |
| 10012 __ pop(); | |
| 10013 __ pop(); | |
| 10014 __ mov(pc, Operand(lr)); | |
| 10015 | |
| 10016 __ bind(&miss); | |
| 10017 __ TryGetFunctionPrototype(r1, r4, r2, &slow); | |
| 10018 | |
| 10019 // Check that the function prototype is a JS object. | |
| 10020 __ BranchOnSmi(r4, &slow); | |
| 10021 __ CompareObjectType(r4, r5, r5, FIRST_JS_OBJECT_TYPE); | |
| 10022 __ b(lt, &slow); | |
| 10023 __ cmp(r5, Operand(LAST_JS_OBJECT_TYPE)); | |
| 10024 __ b(gt, &slow); | |
| 10025 | |
| 10026 __ StoreRoot(r1, Heap::kInstanceofCacheFunctionRootIndex); | |
| 10027 __ StoreRoot(r3, Heap::kInstanceofCacheMapRootIndex); | |
| 10028 | |
| 10029 // Register mapping: r3 is object map and r4 is function prototype. | |
| 10030 // Get prototype of object into r2. | |
| 10031 __ ldr(r2, FieldMemOperand(r3, Map::kPrototypeOffset)); | |
| 10032 | |
| 10033 // Loop through the prototype chain looking for the function prototype. | |
| 10034 __ bind(&loop); | |
| 10035 __ cmp(r2, Operand(r4)); | |
| 10036 __ b(eq, &is_instance); | |
| 10037 __ LoadRoot(ip, Heap::kNullValueRootIndex); | |
| 10038 __ cmp(r2, ip); | |
| 10039 __ b(eq, &is_not_instance); | |
| 10040 __ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset)); | |
| 10041 __ ldr(r2, FieldMemOperand(r2, Map::kPrototypeOffset)); | |
| 10042 __ jmp(&loop); | |
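| // [Editor's sketch] The loop above, as C++ pseudocode (the accessors are | |
| // hypothetical stand-ins for the raw field loads): | |
| //   for (Object* p = map->prototype(); p != null; p = p->map()->prototype()) | |
| //     if (p == function_prototype) goto is_instance;      // Returns Smi 0. | |
| //   goto is_not_instance;                                 // Returns Smi 1. | |
| // Note the inverted encoding: Smi 0 means "is an instance" here. | |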
| 10043 | |
| 10044 __ bind(&is_instance); | |
| 10045 __ mov(r0, Operand(Smi::FromInt(0))); | |
| 10046 __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex); | |
| 10047 __ pop(); | |
| 10048 __ pop(); | |
| 10049 __ mov(pc, Operand(lr)); // Return. | |
| 10050 | |
| 10051 __ bind(&is_not_instance); | |
| 10052 __ mov(r0, Operand(Smi::FromInt(1))); | |
| 10053 __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex); | |
| 10054 __ pop(); | |
| 10055 __ pop(); | |
| 10056 __ mov(pc, Operand(lr)); // Return. | |
| 10057 | |
| 10058 // Slow-case. Tail call builtin. | |
| 10059 __ bind(&slow); | |
| 10060 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_JS); | |
| 10061 } | |
| 10062 | |
| 10063 | |
| 10064 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { | |
| 10065 // The displacement is the offset of the last parameter (if any) | |
| 10066 // relative to the frame pointer. | |
| 10067 static const int kDisplacement = | |
| 10068 StandardFrameConstants::kCallerSPOffset - kPointerSize; | |
| 10069 | |
| 10070 // Check that the key is a smi. | |
| 10071 Label slow; | |
| 10072 __ BranchOnNotSmi(r1, &slow); | |
| 10073 | |
| 10074 // Check if the calling frame is an arguments adaptor frame. | |
| 10075 Label adaptor; | |
| 10076 __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); | |
| 10077 __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset)); | |
| 10078 __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); | |
| 10079 __ b(eq, &adaptor); | |
| 10080 | |
| 10081 // Check index against formal parameters count limit passed in | |
| 10082 // through register r0. Use unsigned comparison to get negative | |
| 10083 // check for free. | |
| 10084 __ cmp(r1, r0); | |
| 10085 __ b(cs, &slow); | |
| 10086 | |
| 10087 // Read the argument from the stack and return it. | |
| 10088 __ sub(r3, r0, r1); | |
| 10089 __ add(r3, fp, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize)); | |
| 10090 __ ldr(r0, MemOperand(r3, kDisplacement)); | |
| 10091 __ Jump(lr); | |
| 10092 | |
| 10093 // Arguments adaptor case: Check index against actual arguments | |
| 10094 // limit found in the arguments adaptor frame. Use unsigned | |
| 10095 // comparison to get negative check for free. | |
| 10096 __ bind(&adaptor); | |
| 10097 __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset)); | |
| 10098 __ cmp(r1, r0); | |
| 10099 __ b(cs, &slow); | |
| 10100 | |
| 10101 // Read the argument from the adaptor frame and return it. | |
| 10102 __ sub(r3, r0, r1); | |
| 10103 __ add(r3, r2, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize)); | |
| 10104 __ ldr(r0, MemOperand(r3, kDisplacement)); | |
| 10105 __ Jump(lr); | |
| 10106 | |
| 10107 // Slow-case: Handle non-smi or out-of-bounds access to arguments | |
| 10108 // by calling the runtime system. | |
| 10109 __ bind(&slow); | |
| 10110 __ push(r1); | |
| 10111 __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1); | |
| 10112 } | |
| 10113 | |
| 10114 | |
| 10115 void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) { | |
| 10116 // sp[0] : number of parameters | |
| 10117 // sp[4] : receiver displacement | |
| 10118 // sp[8] : function | |
| 10119 | |
| 10120 // Check if the calling frame is an arguments adaptor frame. | |
| 10121 Label adaptor_frame, try_allocate, runtime; | |
| 10122 __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); | |
| 10123 __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset)); | |
| 10124 __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); | |
| 10125 __ b(eq, &adaptor_frame); | |
| 10126 | |
| 10127 // Get the length from the frame. | |
| 10128 __ ldr(r1, MemOperand(sp, 0)); | |
| 10129 __ b(&try_allocate); | |
| 10130 | |
| 10131 // Patch the arguments.length and the parameters pointer. | |
| 10132 __ bind(&adaptor_frame); | |
| 10133 __ ldr(r1, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset)); | |
| 10134 __ str(r1, MemOperand(sp, 0)); | |
| 10135 __ add(r3, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize)); | |
| 10136 __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset)); | |
| 10137 __ str(r3, MemOperand(sp, 1 * kPointerSize)); | |
| 10138 | |
| 10139 // Try the new space allocation. Start out with computing the size | |
| 10140 // of the arguments object and the elements array in words. | |
| 10141 Label add_arguments_object; | |
| 10142 __ bind(&try_allocate); | |
| 10143 __ cmp(r1, Operand(0)); | |
| 10144 __ b(eq, &add_arguments_object); | |
| 10145 __ mov(r1, Operand(r1, LSR, kSmiTagSize)); | |
| 10146 __ add(r1, r1, Operand(FixedArray::kHeaderSize / kPointerSize)); | |
| 10147 __ bind(&add_arguments_object); | |
| 10148 __ add(r1, r1, Operand(Heap::kArgumentsObjectSize / kPointerSize)); | |
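| // [Editor's sketch] The size just computed, in words: | |
| //   int size_in_words = Heap::kArgumentsObjectSize / kPointerSize; | |
| //   if (argc > 0) {  // The elements array is only needed for argc > 0. | |
| //     size_in_words += argc + FixedArray::kHeaderSize / kPointerSize; | |
| //   } | |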
| 10149 | |
| 10150 // Do the allocation of both objects in one go. | |
| 10151 __ AllocateInNewSpace( | |
| 10152 r1, | |
| 10153 r0, | |
| 10154 r2, | |
| 10155 r3, | |
| 10156 &runtime, | |
| 10157 static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS)); | |
| 10158 | |
| 10159 // Get the arguments boilerplate from the current (global) context. | |
| 10160 int offset = Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX); | |
| 10161 __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX))); | |
| 10162 __ ldr(r4, FieldMemOperand(r4, GlobalObject::kGlobalContextOffset)); | |
| 10163 __ ldr(r4, MemOperand(r4, offset)); | |
| 10164 | |
| 10165 // Copy the JS object part. | |
| 10166 __ CopyFields(r0, r4, r3.bit(), JSObject::kHeaderSize / kPointerSize); | |
| 10167 | |
| 10168 // Set up the callee in-object property. | |
| 10169 STATIC_ASSERT(Heap::arguments_callee_index == 0); | |
| 10170 __ ldr(r3, MemOperand(sp, 2 * kPointerSize)); | |
| 10171 __ str(r3, FieldMemOperand(r0, JSObject::kHeaderSize)); | |
| 10172 | |
| 10173 // Get the length (smi tagged) and set that as an in-object property too. | |
| 10174 STATIC_ASSERT(Heap::arguments_length_index == 1); | |
| 10175 __ ldr(r1, MemOperand(sp, 0 * kPointerSize)); | |
| 10176 __ str(r1, FieldMemOperand(r0, JSObject::kHeaderSize + kPointerSize)); | |
| 10177 | |
| 10178 // If there are no actual arguments, we're done. | |
| 10179 Label done; | |
| 10180 __ cmp(r1, Operand(0)); | |
| 10181 __ b(eq, &done); | |
| 10182 | |
| 10183 // Get the parameters pointer from the stack. | |
| 10184 __ ldr(r2, MemOperand(sp, 1 * kPointerSize)); | |
| 10185 | |
| 10186 // Set up the elements pointer in the allocated arguments object and | |
| 10187 // initialize the header in the elements fixed array. | |
| 10188 __ add(r4, r0, Operand(Heap::kArgumentsObjectSize)); | |
| 10189 __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset)); | |
| 10190 __ LoadRoot(r3, Heap::kFixedArrayMapRootIndex); | |
| 10191 __ str(r3, FieldMemOperand(r4, FixedArray::kMapOffset)); | |
| 10192 __ str(r1, FieldMemOperand(r4, FixedArray::kLengthOffset)); | |
| 10193 __ mov(r1, Operand(r1, LSR, kSmiTagSize)); // Untag the length for the loop. | |
| 10194 | |
| 10195 // Copy the fixed array slots. | |
| 10196 Label loop; | |
| 10197 // Set up r4 to point to the first array slot. | |
| 10198 __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | |
| 10199 __ bind(&loop); | |
| 10200 // Pre-decrement r2 with kPointerSize on each iteration. | |
| 10201 // Pre-decrement in order to skip receiver. | |
| 10202 __ ldr(r3, MemOperand(r2, kPointerSize, NegPreIndex)); | |
| 10203 // Post-increment r4 with kPointerSize on each iteration. | |
| 10204 __ str(r3, MemOperand(r4, kPointerSize, PostIndex)); | |
| 10205 __ sub(r1, r1, Operand(1)); | |
| 10206 __ cmp(r1, Operand(0)); | |
| 10207 __ b(ne, &loop); | |
| 10208 | |
| 10209 // Return and remove the on-stack parameters. | |
| 10210 __ bind(&done); | |
| 10211 __ add(sp, sp, Operand(3 * kPointerSize)); | |
| 10212 __ Ret(); | |
| 10213 | |
| 10214 // Do the runtime call to allocate the arguments object. | |
| 10215 __ bind(&runtime); | |
| 10216 __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1); | |
| 10217 } | |
| 10218 | |
| 10219 | |
| 10220 void RegExpExecStub::Generate(MacroAssembler* masm) { | |
| 10221 // Just jump directly to the runtime if native RegExp is not selected at | |
| 10222 // compile time, or if the RegExp entry in generated code has been turned | |
| 10223 // off by the runtime flag. | |
| 10224 #ifdef V8_INTERPRETED_REGEXP | |
| 10225 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); | |
| 10226 #else // V8_INTERPRETED_REGEXP | |
| 10227 if (!FLAG_regexp_entry_native) { | |
| 10228 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); | |
| 10229 return; | |
| 10230 } | |
| 10231 | |
| 10232 // Stack frame on entry. | |
| 10233 // sp[0]: last_match_info (expected JSArray) | |
| 10234 // sp[4]: previous index | |
| 10235 // sp[8]: subject string | |
| 10236 // sp[12]: JSRegExp object | |
| 10237 | |
| 10238 static const int kLastMatchInfoOffset = 0 * kPointerSize; | |
| 10239 static const int kPreviousIndexOffset = 1 * kPointerSize; | |
| 10240 static const int kSubjectOffset = 2 * kPointerSize; | |
| 10241 static const int kJSRegExpOffset = 3 * kPointerSize; | |
| 10242 | |
| 10243 Label runtime, invoke_regexp; | |
| 10244 | |
| 10245 // Allocation of registers for this function. These are in callee-saved | |
| 10246 // registers and will be preserved by the call to the native RegExp code, as | |
| 10247 // this code is called using the normal C calling convention. When calling | |
| 10248 // directly from generated code the native RegExp code will not do a GC, so | |
| 10249 // the contents of these registers are safe to use after the call. | |
| 10250 Register subject = r4; | |
| 10251 Register regexp_data = r5; | |
| 10252 Register last_match_info_elements = r6; | |
| 10253 | |
| 10254 // Ensure that a RegExp stack is allocated. | |
| 10255 ExternalReference address_of_regexp_stack_memory_address = | |
| 10256 ExternalReference::address_of_regexp_stack_memory_address(); | |
| 10257 ExternalReference address_of_regexp_stack_memory_size = | |
| 10258 ExternalReference::address_of_regexp_stack_memory_size(); | |
| 10259 __ mov(r0, Operand(address_of_regexp_stack_memory_size)); | |
| 10260 __ ldr(r0, MemOperand(r0, 0)); | |
| 10261 __ tst(r0, Operand(r0)); | |
| 10262 __ b(eq, &runtime); | |
| 10263 | |
| 10264 // Check that the first argument is a JSRegExp object. | |
| 10265 __ ldr(r0, MemOperand(sp, kJSRegExpOffset)); | |
| 10266 STATIC_ASSERT(kSmiTag == 0); | |
| 10267 __ tst(r0, Operand(kSmiTagMask)); | |
| 10268 __ b(eq, &runtime); | |
| 10269 __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE); | |
| 10270 __ b(ne, &runtime); | |
| 10271 | |
| 10272 // Check that the RegExp has been compiled (data contains a fixed array). | |
| 10273 __ ldr(regexp_data, FieldMemOperand(r0, JSRegExp::kDataOffset)); | |
| 10274 if (FLAG_debug_code) { | |
| 10275 __ tst(regexp_data, Operand(kSmiTagMask)); | |
| 10276 __ Check(nz, "Unexpected type for RegExp data, FixedArray expected"); | |
| 10277 __ CompareObjectType(regexp_data, r0, r0, FIXED_ARRAY_TYPE); | |
| 10278 __ Check(eq, "Unexpected type for RegExp data, FixedArray expected"); | |
| 10279 } | |
| 10280 | |
| 10281 // regexp_data: RegExp data (FixedArray) | |
| 10282 // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP. | |
| 10283 __ ldr(r0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset)); | |
| 10284 __ cmp(r0, Operand(Smi::FromInt(JSRegExp::IRREGEXP))); | |
| 10285 __ b(ne, &runtime); | |
| 10286 | |
| 10287 // regexp_data: RegExp data (FixedArray) | |
| 10288 // Check that the number of captures fits in the static offsets vector buffer. | |
| 10289 __ ldr(r2, | |
| 10290 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset)); | |
| 10291 // Calculate the number of capture registers: (number_of_captures + 1) * 2. | |
| 10292 // This uses the assumption that smis are 2 * their untagged value. | |
| 10293 STATIC_ASSERT(kSmiTag == 0); | |
| 10294 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); | |
| 10295 __ add(r2, r2, Operand(2)); // r2 was a smi. | |
| 10296 // Check that the static offsets vector buffer is large enough. | |
| 10297 __ cmp(r2, Operand(OffsetsVector::kStaticOffsetsVectorSize)); | |
| 10298 __ b(hi, &runtime); | |
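| // [Editor's sketch] Why adding 2 to the smi works: a smi is 2 * its | |
| // untagged value, so | |
| //   registers = (captures + 1) * 2 = 2 * captures + 2 = smi(captures) + 2. | |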
| 10299 | |
| 10300 // r2: Number of capture registers | |
| 10301 // regexp_data: RegExp data (FixedArray) | |
| 10302 // Check that the second argument is a string. | |
| 10303 __ ldr(subject, MemOperand(sp, kSubjectOffset)); | |
| 10304 __ tst(subject, Operand(kSmiTagMask)); | |
| 10305 __ b(eq, &runtime); | |
| 10306 Condition is_string = masm->IsObjectStringType(subject, r0); | |
| 10307 __ b(NegateCondition(is_string), &runtime); | |
| 10308 // Get the length of the string to r3. | |
| 10309 __ ldr(r3, FieldMemOperand(subject, String::kLengthOffset)); | |
| 10310 | |
| 10311 // r2: Number of capture registers | |
| 10312 // r3: Length of subject string as a smi | |
| 10313 // subject: Subject string | |
| 10314 // regexp_data: RegExp data (FixedArray) | |
| 10315 // Check that the third argument is a positive smi less than the subject | |
| 10316 // string length. A negative value will be greater (unsigned comparison). | |
| 10317 __ ldr(r0, MemOperand(sp, kPreviousIndexOffset)); | |
| 10318 __ tst(r0, Operand(kSmiTagMask)); | |
| 10319 __ b(ne, &runtime); | |
| 10320 __ cmp(r3, Operand(r0)); | |
| 10321 __ b(ls, &runtime); | |
| 10322 | |
| 10323 // r2: Number of capture registers | |
| 10324 // subject: Subject string | |
| 10325 // regexp_data: RegExp data (FixedArray) | |
| 10326 // Check that the fourth object is a JSArray object. | |
| 10327 __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset)); | |
| 10328 __ tst(r0, Operand(kSmiTagMask)); | |
| 10329 __ b(eq, &runtime); | |
| 10330 __ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE); | |
| 10331 __ b(ne, &runtime); | |
| 10332 // Check that the JSArray is in fast case. | |
| 10333 __ ldr(last_match_info_elements, | |
| 10334 FieldMemOperand(r0, JSArray::kElementsOffset)); | |
| 10335 __ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset)); | |
| 10336 __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex); | |
| 10337 __ cmp(r0, ip); | |
| 10338 __ b(ne, &runtime); | |
| 10339 // Check that the last match info has space for the capture registers and the | |
| 10340 // additional information. | |
| 10341 __ ldr(r0, | |
| 10342 FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset)); | |
| 10343 __ add(r2, r2, Operand(RegExpImpl::kLastMatchOverhead)); | |
| 10344 __ cmp(r2, Operand(r0, ASR, kSmiTagSize)); | |
| 10345 __ b(gt, &runtime); | |
| 10346 | |
| 10347 // subject: Subject string | |
| 10348 // regexp_data: RegExp data (FixedArray) | |
| 10349 // Check the representation and encoding of the subject string. | |
| 10350 Label seq_string; | |
| 10351 __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset)); | |
| 10352 __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset)); | |
| 10353 // First check for flat string. | |
| 10354 __ tst(r0, Operand(kIsNotStringMask | kStringRepresentationMask)); | |
| 10355 STATIC_ASSERT((kStringTag | kSeqStringTag) == 0); | |
| 10356 __ b(eq, &seq_string); | |
| 10357 | |
| 10358 // subject: Subject string | |
| 10359 // regexp_data: RegExp data (FixedArray) | |
| 10360 // Check for flat cons string. | |
| 10361 // A flat cons string is a cons string where the second part is the empty | |
| 10362 // string. In that case the subject string is just the first part of the cons | |
| 10363 // string. Also in this case the first part of the cons string is known to be | |
| 10364 // a sequential string or an external string. | |
| 10365 STATIC_ASSERT(kExternalStringTag != 0); | |
| 10366 STATIC_ASSERT((kConsStringTag & kExternalStringTag) == 0); | |
| 10367 __ tst(r0, Operand(kIsNotStringMask | kExternalStringTag)); | |
| 10368 __ b(ne, &runtime); | |
| 10369 __ ldr(r0, FieldMemOperand(subject, ConsString::kSecondOffset)); | |
| 10370 __ LoadRoot(r1, Heap::kEmptyStringRootIndex); | |
| 10371 __ cmp(r0, r1); | |
| 10372 __ b(ne, &runtime); | |
| 10373 __ ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset)); | |
| 10374 __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset)); | |
| 10375 __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset)); | |
| 10376 // Is first part a flat string? | |
| 10377 STATIC_ASSERT(kSeqStringTag == 0); | |
| 10378 __ tst(r0, Operand(kStringRepresentationMask)); | |
| 10379 __ b(nz, &runtime); | |
| 10380 | |
| 10381 __ bind(&seq_string); | |
| 10382 // subject: Subject string | |
| 10383 // regexp_data: RegExp data (FixedArray) | |
| 10384 // r0: Instance type of subject string | |
| 10385 STATIC_ASSERT(4 == kAsciiStringTag); | |
| 10386 STATIC_ASSERT(kTwoByteStringTag == 0); | |
| 10387 // Find the code object based on the assumptions above. | |
| 10388 __ and_(r0, r0, Operand(kStringEncodingMask)); | |
| 10389 __ mov(r3, Operand(r0, ASR, 2), SetCC); | |
| 10390 __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset), ne); | |
| 10391 __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq); | |
| 10392 | |
| 10393 // Check that the irregexp code has been generated for the actual string | |
| 10394 // encoding. If it has, the field contains a code object; otherwise it | |
| 10395 // contains the hole. | |
| 10396 __ CompareObjectType(r7, r0, r0, CODE_TYPE); | |
| 10397 __ b(ne, &runtime); | |
| 10398 | |
| 10399 // r3: encoding of subject string (1 if ascii, 0 if two_byte); | |
| 10400 // r7: code | |
| 10401 // subject: Subject string | |
| 10402 // regexp_data: RegExp data (FixedArray) | |
| 10403 // Load the used arguments before starting to push arguments for the call | |
| 10404 // to native RegExp code, to avoid handling a changing stack height. | |
| 10405 __ ldr(r1, MemOperand(sp, kPreviousIndexOffset)); | |
| 10406 __ mov(r1, Operand(r1, ASR, kSmiTagSize)); | |
| 10407 | |
| 10408 // r1: previous index | |
| 10409 // r3: encoding of subject string (1 if ascii, 0 if two_byte); | |
| 10410 // r7: code | |
| 10411 // subject: Subject string | |
| 10412 // regexp_data: RegExp data (FixedArray) | |
| 10413 // All checks done. Now push arguments for native regexp code. | |
| 10414 __ IncrementCounter(&Counters::regexp_entry_native, 1, r0, r2); | |
| 10415 | |
| 10416 static const int kRegExpExecuteArguments = 7; | |
| 10417 __ push(lr); | |
| 10418 __ PrepareCallCFunction(kRegExpExecuteArguments, r0); | |
| 10419 | |
| 10420 // Argument 7 (sp[8]): Indicate that this is a direct call from JavaScript. | |
| 10421 __ mov(r0, Operand(1)); | |
| 10422 __ str(r0, MemOperand(sp, 2 * kPointerSize)); | |
| 10423 | |
| 10424 // Argument 6 (sp[4]): Start (high end) of backtracking stack memory area. | |
| 10425 __ mov(r0, Operand(address_of_regexp_stack_memory_address)); | |
| 10426 __ ldr(r0, MemOperand(r0, 0)); | |
| 10427 __ mov(r2, Operand(address_of_regexp_stack_memory_size)); | |
| 10428 __ ldr(r2, MemOperand(r2, 0)); | |
| 10429 __ add(r0, r0, Operand(r2)); | |
| 10430 __ str(r0, MemOperand(sp, 1 * kPointerSize)); | |
| 10431 | |
| 10432 // Argument 5 (sp[0]): static offsets vector buffer. | |
| 10433 __ mov(r0, Operand(ExternalReference::address_of_static_offsets_vector())); | |
| 10434 __ str(r0, MemOperand(sp, 0 * kPointerSize)); | |
| 10435 | |
| 10436 // For arguments 4 and 3, get the string length, calculate the start of the | |
| 10437 // string data, and calculate the index shift (0 for ASCII, 1 for two-byte). | |
| 10438 __ ldr(r0, FieldMemOperand(subject, String::kLengthOffset)); | |
| 10439 __ mov(r0, Operand(r0, ASR, kSmiTagSize)); | |
| 10440 STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize); | |
| 10441 __ add(r9, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); | |
| 10442 __ eor(r3, r3, Operand(1)); | |
| 10443 // Argument 4 (r3): End of string data | |
| 10444 // Argument 3 (r2): Start of string data | |
| 10445 __ add(r2, r9, Operand(r1, LSL, r3)); | |
| 10446 __ add(r3, r9, Operand(r0, LSL, r3)); | |
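| // [Editor's sketch] After the eor above, r3 holds the index shift: 0 for | |
| // ASCII (one byte per character), 1 for two-byte. With r9 pointing at the | |
| // first character, the two arguments are: | |
| //   start (r2) = r9 + (previous_index << shift); | |
| //   end   (r3) = r9 + (length << shift); | |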
| 10447 | |
| 10448 // Argument 2 (r1): Previous index. | |
| 10449 // Already there | |
| 10450 | |
| 10451 // Argument 1 (r0): Subject string. | |
| 10452 __ mov(r0, subject); | |
| 10453 | |
| 10454 // Locate the code entry and call it. | |
| 10455 __ add(r7, r7, Operand(Code::kHeaderSize - kHeapObjectTag)); | |
| 10456 __ CallCFunction(r7, kRegExpExecuteArguments); | |
| 10457 __ pop(lr); | |
| 10458 | |
| 10459 // r0: result | |
| 10460 // subject: subject string (callee saved) | |
| 10461 // regexp_data: RegExp data (callee saved) | |
| 10462 // last_match_info_elements: Last match info elements (callee saved) | |
| 10463 | |
| 10464 // Check the result. | |
| 10465 Label success; | |
| 10466 __ cmp(r0, Operand(NativeRegExpMacroAssembler::SUCCESS)); | |
| 10467 __ b(eq, &success); | |
| 10468 Label failure; | |
| 10469 __ cmp(r0, Operand(NativeRegExpMacroAssembler::FAILURE)); | |
| 10470 __ b(eq, &failure); | |
| 10471 __ cmp(r0, Operand(NativeRegExpMacroAssembler::EXCEPTION)); | |
| 10472 // If it is not EXCEPTION, it can only be RETRY. Handle that in the runtime system. | |
| 10473 __ b(ne, &runtime); | |
| 10474 // The result must now be an exception. If there is no pending exception | |
| 10475 // already, a stack overflow (on the backtrack stack) was detected in | |
| 10476 // RegExp code, but the exception has not been created yet. Handle that in the runtime system. | |
| 10477 // TODO(592): Rerunning the RegExp to get the stack overflow exception. | |
| 10478 __ mov(r0, Operand(ExternalReference::the_hole_value_location())); | |
| 10479 __ ldr(r0, MemOperand(r0, 0)); | |
| 10480 __ mov(r1, Operand(ExternalReference(Top::k_pending_exception_address))); | |
| 10481 __ ldr(r1, MemOperand(r1, 0)); | |
| 10482 __ cmp(r0, r1); | |
| 10483 __ b(eq, &runtime); | |
| 10484 __ bind(&failure); | |
| 10485 // For failure and exception return null. | |
| 10486 __ mov(r0, Operand(Factory::null_value())); | |
| 10487 __ add(sp, sp, Operand(4 * kPointerSize)); | |
| 10488 __ Ret(); | |
| 10489 | |
| 10490 // Process the result from the native regexp code. | |
| 10491 __ bind(&success); | |
| 10492 __ ldr(r1, | |
| 10493 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset)); | |
| 10494 // Calculate number of capture registers (number_of_captures + 1) * 2. | |
| 10495 STATIC_ASSERT(kSmiTag == 0); | |
| 10496 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); | |
| 10497 __ add(r1, r1, Operand(2)); // r1 was a smi. | |
| 10498 | |
| 10499 // r1: number of capture registers | |
| 10500 // r4: subject string | |
| 10501 // Store the capture count. | |
| 10502 __ mov(r2, Operand(r1, LSL, kSmiTagSize + kSmiShiftSize)); // To smi. | |
| 10503 __ str(r2, FieldMemOperand(last_match_info_elements, | |
| 10504 RegExpImpl::kLastCaptureCountOffset)); | |
| 10505 // Store last subject and last input. | |
| 10506 __ mov(r3, last_match_info_elements); // Moved up to reduce latency. | |
| 10507 __ str(subject, | |
| 10508 FieldMemOperand(last_match_info_elements, | |
| 10509 RegExpImpl::kLastSubjectOffset)); | |
| 10510 __ RecordWrite(r3, Operand(RegExpImpl::kLastSubjectOffset), r2, r7); | |
| 10511 __ str(subject, | |
| 10512 FieldMemOperand(last_match_info_elements, | |
| 10513 RegExpImpl::kLastInputOffset)); | |
| 10514 __ mov(r3, last_match_info_elements); | |
| 10515 __ RecordWrite(r3, Operand(RegExpImpl::kLastInputOffset), r2, r7); | |
| 10516 | |
| 10517 // Get the static offsets vector filled by the native regexp code. | |
| 10518 ExternalReference address_of_static_offsets_vector = | |
| 10519 ExternalReference::address_of_static_offsets_vector(); | |
| 10520 __ mov(r2, Operand(address_of_static_offsets_vector)); | |
| 10521 | |
| 10522 // r1: number of capture registers | |
| 10523 // r2: offsets vector | |
| 10524 Label next_capture, done; | |
| 10525 // Capture register counter starts from number of capture registers and | |
| 10526 // counts down until wrapping after zero. | |
| 10527 __ add(r0, | |
| 10528 last_match_info_elements, | |
| 10529 Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag)); | |
| 10530 __ bind(&next_capture); | |
| 10531 __ sub(r1, r1, Operand(1), SetCC); | |
| 10532 __ b(mi, &done); | |
| 10533 // Read the value from the static offsets vector buffer. | |
| 10534 __ ldr(r3, MemOperand(r2, kPointerSize, PostIndex)); | |
| 10535 // Store the smi value in the last match info. | |
| 10536 __ mov(r3, Operand(r3, LSL, kSmiTagSize)); | |
| 10537 __ str(r3, MemOperand(r0, kPointerSize, PostIndex)); | |
| 10538 __ jmp(&next_capture); | |
| 10539 __ bind(&done); | |
| 10540 | |
| 10541 // Return last match info. | |
| 10542 __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset)); | |
| 10543 __ add(sp, sp, Operand(4 * kPointerSize)); | |
| 10544 __ Ret(); | |
| 10545 | |
| 10546 // Do the runtime call to execute the regexp. | |
| 10547 __ bind(&runtime); | |
| 10548 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); | |
| 10549 #endif // V8_INTERPRETED_REGEXP | |
| 10550 } | |
| 10551 | |
| 10552 | |
| 10553 void CallFunctionStub::Generate(MacroAssembler* masm) { | |
| 10554 Label slow; | |
| 10555 | |
| 10556 // If the receiver might be a value (string, number, or boolean), check for | |
| 10557 // this and box it if it is. | |
| 10558 if (ReceiverMightBeValue()) { | |
| 10559 // Get the receiver from the stack. | |
| 10560 // function, receiver [, arguments] | |
| 10561 Label receiver_is_value, receiver_is_js_object; | |
| 10562 __ ldr(r1, MemOperand(sp, argc_ * kPointerSize)); | |
| 10563 | |
| 10564 // Check if receiver is a smi (which is a number value). | |
| 10565 __ BranchOnSmi(r1, &receiver_is_value); | |
| 10566 | |
| 10567 // Check if the receiver is a valid JS object. | |
| 10568 __ CompareObjectType(r1, r2, r2, FIRST_JS_OBJECT_TYPE); | |
| 10569 __ b(ge, &receiver_is_js_object); | |
| 10570 | |
| 10571 // Call the runtime to box the value. | |
| 10572 __ bind(&receiver_is_value); | |
| 10573 __ EnterInternalFrame(); | |
| 10574 __ push(r1); | |
| 10575 __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS); | |
| 10576 __ LeaveInternalFrame(); | |
| 10577 __ str(r0, MemOperand(sp, argc_ * kPointerSize)); | |
| 10578 | |
| 10579 __ bind(&receiver_is_js_object); | |
| 10580 } | |
| 10581 | |
| 10582 // Get the function to call from the stack. | |
| 10583 // function, receiver [, arguments] | |
| 10584 __ ldr(r1, MemOperand(sp, (argc_ + 1) * kPointerSize)); | |
| 10585 | |
| 10586 // Check that the function is really a JavaScript function. | |
| 10587 // r1: pushed function (to be verified) | |
| 10588 __ BranchOnSmi(r1, &slow); | |
| 10589 // Get the map of the function object. | |
| 10590 __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE); | |
| 10591 __ b(ne, &slow); | |
| 10592 | |
| 10593 // Fast-case: Invoke the function now. | |
| 10594 // r1: pushed function | |
| 10595 ParameterCount actual(argc_); | |
| 10596 __ InvokeFunction(r1, actual, JUMP_FUNCTION); | |
| 10597 | |
| 10598 // Slow-case: Non-function called. | |
| 10599 __ bind(&slow); | |
| 10600 // CALL_NON_FUNCTION expects the non-function callee as receiver (instead | |
| 10601 // of the original receiver from the call site). | |
| 10602 __ str(r1, MemOperand(sp, argc_ * kPointerSize)); | |
| 10603 __ mov(r0, Operand(argc_)); // Set up the number of arguments. | |
| 10604 __ mov(r2, Operand(0)); | |
| 10605 __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION); | |
| 10606 __ Jump(Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline)), | |
| 10607 RelocInfo::CODE_TARGET); | |
| 10608 } | |
| 10609 | |
| 10610 | |
| 10611 // Unfortunately you have to run without snapshots to see most of these | |
| 10612 // names in the profile since most compare stubs end up in the snapshot. | |
| 10613 const char* CompareStub::GetName() { | |
| 10614 ASSERT((lhs_.is(r0) && rhs_.is(r1)) || | |
| 10615 (lhs_.is(r1) && rhs_.is(r0))); | |
| 10616 | |
| 10617 if (name_ != NULL) return name_; | |
| 10618 const int kMaxNameLength = 100; | |
| 10619 name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength); | |
| 10620 if (name_ == NULL) return "OOM"; | |
| 10621 | |
| 10622 const char* cc_name; | |
| 10623 switch (cc_) { | |
| 10624 case lt: cc_name = "LT"; break; | |
| 10625 case gt: cc_name = "GT"; break; | |
| 10626 case le: cc_name = "LE"; break; | |
| 10627 case ge: cc_name = "GE"; break; | |
| 10628 case eq: cc_name = "EQ"; break; | |
| 10629 case ne: cc_name = "NE"; break; | |
| 10630 default: cc_name = "UnknownCondition"; break; | |
| 10631 } | |
| 10632 | |
| 10633 const char* lhs_name = lhs_.is(r0) ? "_r0" : "_r1"; | |
| 10634 const char* rhs_name = rhs_.is(r0) ? "_r0" : "_r1"; | |
| 10635 | |
| 10636 const char* strict_name = ""; | |
| 10637 if (strict_ && (cc_ == eq || cc_ == ne)) { | |
| 10638 strict_name = "_STRICT"; | |
| 10639 } | |
| 10640 | |
| 10641 const char* never_nan_nan_name = ""; | |
| 10642 if (never_nan_nan_ && (cc_ == eq || cc_ == ne)) { | |
| 10643 never_nan_nan_name = "_NO_NAN"; | |
| 10644 } | |
| 10645 | |
| 10646 const char* include_number_compare_name = ""; | |
| 10647 if (!include_number_compare_) { | |
| 10648 include_number_compare_name = "_NO_NUMBER"; | |
| 10649 } | |
| 10650 | |
| 10651 OS::SNPrintF(Vector<char>(name_, kMaxNameLength), | |
| 10652 "CompareStub_%s%s%s%s%s%s", | |
| 10653 cc_name, | |
| 10654 lhs_name, | |
| 10655 rhs_name, | |
| 10656 strict_name, | |
| 10657 never_nan_nan_name, | |
| 10658 include_number_compare_name); | |
| 10659 return name_; | |
| 10660 } | |
| 10661 | |
| 10662 | |
| 10663 int CompareStub::MinorKey() { | |
| 10664 // Encode the parameters in a unique 16-bit value. To avoid duplicate | |
| 10665 // stubs, the never-NaN-NaN condition is only taken into account if the | |
| 10666 // condition is equals. | |
| 10667 ASSERT((static_cast<unsigned>(cc_) >> 28) < (1 << 12)); | |
| 10668 ASSERT((lhs_.is(r0) && rhs_.is(r1)) || | |
| 10669 (lhs_.is(r1) && rhs_.is(r0))); | |
| 10670 return ConditionField::encode(static_cast<unsigned>(cc_) >> 28) | |
| 10671 | RegisterField::encode(lhs_.is(r0)) | |
| 10672 | StrictField::encode(strict_) | |
| 10673 | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false) | |
| 10674 | IncludeNumberCompareField::encode(include_number_compare_); | |
| 10675 } | |
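| // [Editor's sketch] How the key above is packed: each BitField::encode() | |
| // shifts its value into its own disjoint bit range (the exact positions | |
| // come from the BitField definitions in the stub's header), so OR-ing the | |
| // encoded fields yields a key that is unique per stub variant. | |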
| 10676 | |
| 10677 | |
| 10678 // StringCharCodeAtGenerator | |
| 10679 | |
| 10680 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) { | |
| 10681 Label flat_string; | |
| 10682 Label ascii_string; | |
| 10683 Label got_char_code; | |
| 10684 | |
| 10685 // If the receiver is a smi trigger the non-string case. | |
| 10686 __ BranchOnSmi(object_, receiver_not_string_); | |
| 10687 | |
| 10688 // Fetch the instance type of the receiver into result register. | |
| 10689 __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); | |
| 10690 __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); | |
| 10691 // If the receiver is not a string trigger the non-string case. | |
| 10692 __ tst(result_, Operand(kIsNotStringMask)); | |
| 10693 __ b(ne, receiver_not_string_); | |
| 10694 | |
| 10695 // If the index is non-smi trigger the non-smi case. | |
| 10696 __ BranchOnNotSmi(index_, &index_not_smi_); | |
| 10697 | |
| 10698 // Put smi-tagged index into scratch register. | |
| 10699 __ mov(scratch_, index_); | |
| 10700 __ bind(&got_smi_index_); | |
| 10701 | |
| 10702 // Check for index out of range. | |
| 10703 __ ldr(ip, FieldMemOperand(object_, String::kLengthOffset)); | |
| 10704 __ cmp(ip, Operand(scratch_)); | |
| 10705 __ b(ls, index_out_of_range_); | |
| 10706 | |
| 10707 // We need special handling for non-flat strings. | |
| 10708 STATIC_ASSERT(kSeqStringTag == 0); | |
| 10709 __ tst(result_, Operand(kStringRepresentationMask)); | |
| 10710 __ b(eq, &flat_string); | |
| 10711 | |
| 10712 // Handle non-flat strings. | |
| 10713 __ tst(result_, Operand(kIsConsStringMask)); | |
| 10714 __ b(eq, &call_runtime_); | |
| 10715 | |
| 10716 // ConsString. | |
| 10717 // Check whether the right hand side is the empty string (i.e. if | |
| 10718 // this is really a flat string in a cons string). If that is not | |
| 10719 // the case we would rather go to the runtime system now to flatten | |
| 10720 // the string. | |
| 10721 __ ldr(result_, FieldMemOperand(object_, ConsString::kSecondOffset)); | |
| 10722 __ LoadRoot(ip, Heap::kEmptyStringRootIndex); | |
| 10723 __ cmp(result_, Operand(ip)); | |
| 10724 __ b(ne, &call_runtime_); | |
| 10725 // Get the first of the two strings and load its instance type. | |
| 10726 __ ldr(object_, FieldMemOperand(object_, ConsString::kFirstOffset)); | |
| 10727 __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); | |
| 10728 __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); | |
| 10729 // If the first cons component is also non-flat, then go to runtime. | |
| 10730 STATIC_ASSERT(kSeqStringTag == 0); | |
| 10731 __ tst(result_, Operand(kStringRepresentationMask)); | |
| 10732 __ b(nz, &call_runtime_); | |
| 10733 | |
| 10734 // Check for 1-byte or 2-byte string. | |
| 10735 __ bind(&flat_string); | |
| 10736 STATIC_ASSERT(kAsciiStringTag != 0); | |
| 10737 __ tst(result_, Operand(kStringEncodingMask)); | |
| 10738 __ b(nz, &ascii_string); | |
| 10739 | |
| 10740 // 2-byte string. | |
| 10741 // Load the 2-byte character code into the result register. We can | |
| 10742 // add without shifting since the smi tag size is the log2 of the | |
| 10743 // number of bytes in a two-byte character. | |
| 10744 STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1 && kSmiShiftSize == 0); | |
| 10745 __ add(scratch_, object_, Operand(scratch_)); | |
| 10746 __ ldrh(result_, FieldMemOperand(scratch_, SeqTwoByteString::kHeaderSize)); | |
| 10747 __ jmp(&got_char_code); | |
| 10748 | |
| 10749 // ASCII string. | |
| 10750 // Load the byte into the result register. | |
| 10751 __ bind(&ascii_string); | |
| 10752 __ add(scratch_, object_, Operand(scratch_, LSR, kSmiTagSize)); | |
| 10753 __ ldrb(result_, FieldMemOperand(scratch_, SeqAsciiString::kHeaderSize)); | |
| 10754 | |
| 10755 __ bind(&got_char_code); | |
| 10756 __ mov(result_, Operand(result_, LSL, kSmiTagSize)); | |
| 10757 __ bind(&exit_); | |
| 10758 } | |
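In outline, the fast path above corresponds to the following C++ sketch; the struct and its fields are illustrative stand-ins for V8's map/instance-type checks, not its actual object layout:

#include <stdint.h>

struct Str {
  bool is_seq;           // Sequential (flat) representation.
  bool is_cons;          // Cons string.
  bool two_byte;         // Two-byte rather than ascii payload.
  const Str* first;      // Cons: first component.
  const Str* second;     // Cons: second component (empty if flattened).
  int length;
  const uint8_t* bytes;  // Sequential payload.
};

// Returns the character code, or -1 to signal a slow/runtime case.
int CharCodeAtFast(const Str* s, int index) {
  if (index < 0 || index >= s->length) return -1;  // Index out of range.
  if (!s->is_seq) {
    // Only a cons whose second part is empty is unwrapped; a deep cons or
    // an external string goes to the runtime instead.
    if (!s->is_cons || s->second->length != 0) return -1;
    s = s->first;
    if (!s->is_seq) return -1;
  }
  return s->two_byte ? reinterpret_cast<const uint16_t*>(s->bytes)[index]
                     : s->bytes[index];
}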
| 10759 | |
| 10760 | |
| 10761 void StringCharCodeAtGenerator::GenerateSlow( | |
| 10762 MacroAssembler* masm, const RuntimeCallHelper& call_helper) { | |
| 10763 __ Abort("Unexpected fallthrough to CharCodeAt slow case"); | |
| 10764 | |
| 10765 // Index is not a smi. | |
| 10766 __ bind(&index_not_smi_); | |
| 10767 // If index is a heap number, try converting it to an integer. | |
| 10768 __ CheckMap(index_, | |
| 10769 scratch_, | |
| 10770 Heap::kHeapNumberMapRootIndex, | |
| 10771 index_not_number_, | |
| 10772 true); | |
| 10773 call_helper.BeforeCall(masm); | |
| 10774 __ Push(object_, index_); | |
| 10775 __ push(index_); // Consumed by runtime conversion function. | |
| 10776 if (index_flags_ == STRING_INDEX_IS_NUMBER) { | |
| 10777 __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1); | |
| 10778 } else { | |
| 10779 ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX); | |
| 10780 // NumberToSmi discards numbers that are not exact integers. | |
| 10781 __ CallRuntime(Runtime::kNumberToSmi, 1); | |
| 10782 } | |
| 10783 // Save the conversion result before the pop instructions below | |
| 10784 // have a chance to overwrite it. | |
| 10785 __ Move(scratch_, r0); | |
| 10786 __ pop(index_); | |
| 10787 __ pop(object_); | |
| 10788 // Reload the instance type. | |
| 10789 __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); | |
| 10790 __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); | |
| 10791 call_helper.AfterCall(masm); | |
| 10792 // If index is still not a smi, it must be out of range. | |
| 10793 __ BranchOnNotSmi(scratch_, index_out_of_range_); | |
| 10794 // Otherwise, return to the fast path. | |
| 10795 __ jmp(&got_smi_index_); | |
| 10796 | |
| 10797 // Call runtime. We get here when the receiver is a string and the | |
| 10798 // index is a number, but the code for getting the actual character | |
| 10799 // is too complex (e.g., when the string needs to be flattened). | |
| 10800 __ bind(&call_runtime_); | |
| 10801 call_helper.BeforeCall(masm); | |
| 10802 __ Push(object_, index_); | |
| 10803 __ CallRuntime(Runtime::kStringCharCodeAt, 2); | |
| 10804 __ Move(result_, r0); | |
| 10805 call_helper.AfterCall(masm); | |
| 10806 __ jmp(&exit_); | |
| 10807 | |
| 10808 __ Abort("Unexpected fallthrough from CharCodeAt slow case"); | |
| 10809 } | |
| 10810 | |
| 10811 | |
| 10812 // ------------------------------------------------------------------------- | |
| 10813 // StringCharFromCodeGenerator | |
| 10814 | |
| 10815 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) { | |
| 10816 // Fast case of Heap::LookupSingleCharacterStringFromCode. | |
| 10817 STATIC_ASSERT(kSmiTag == 0); | |
| 10818 STATIC_ASSERT(kSmiShiftSize == 0); | |
| 10819 ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1)); | |
| 10820 __ tst(code_, | |
| 10821 Operand(kSmiTagMask | | |
| 10822 ((~String::kMaxAsciiCharCode) << kSmiTagSize))); | |
| 10823 __ b(nz, &slow_case_); | |
| 10824 | |
| 10825 __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex); | |
| 10826 // At this point the code register contains a smi-tagged ascii char code. | |
| 10827 STATIC_ASSERT(kSmiTag == 0); | |
| 10828 __ add(result_, result_, Operand(code_, LSL, kPointerSizeLog2 - kSmiTagSize)); | |
| 10829 __ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize)); | |
| 10830 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); | |
| 10831 __ cmp(result_, Operand(ip)); | |
| 10832 __ b(eq, &slow_case_); | |
| 10833 __ bind(&exit_); | |
| 10834 } | |
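In C++ terms the fast case reduces to a range check plus an array probe; the cache array and sentinel below are illustrative stand-ins for the root-list entries used by the stub:

#include <stdint.h>

struct Object;                                      // Opaque heap value.
extern Object* const kUndefined;                    // Assumed sentinel.
extern Object* single_character_string_cache[128];  // kMaxAsciiCharCode + 1.

// Returns the cached one-character string, or nullptr for the slow case
// (non-smi code, code > kMaxAsciiCharCode, or a hole in the cache).
Object* CharFromCodeFast(uint32_t code) {
  if (code > 127) return nullptr;  // String::kMaxAsciiCharCode assumed 127.
  Object* result = single_character_string_cache[code];
  return result == kUndefined ? nullptr : result;
}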
| 10835 | |
| 10836 | |
| 10837 void StringCharFromCodeGenerator::GenerateSlow( | |
| 10838 MacroAssembler* masm, const RuntimeCallHelper& call_helper) { | |
| 10839 __ Abort("Unexpected fallthrough to CharFromCode slow case"); | |
| 10840 | |
| 10841 __ bind(&slow_case_); | |
| 10842 call_helper.BeforeCall(masm); | |
| 10843 __ push(code_); | |
| 10844 __ CallRuntime(Runtime::kCharFromCode, 1); | |
| 10845 __ Move(result_, r0); | |
| 10846 call_helper.AfterCall(masm); | |
| 10847 __ jmp(&exit_); | |
| 10848 | |
| 10849 __ Abort("Unexpected fallthrough from CharFromCode slow case"); | |
| 10850 } | |
| 10851 | |
| 10852 | |
| 10853 // ------------------------------------------------------------------------- | |
| 10854 // StringCharAtGenerator | |
| 10855 | |
| 10856 void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) { | |
| 10857 char_code_at_generator_.GenerateFast(masm); | |
| 10858 char_from_code_generator_.GenerateFast(masm); | |
| 10859 } | |
| 10860 | |
| 10861 | |
| 10862 void StringCharAtGenerator::GenerateSlow( | |
| 10863 MacroAssembler* masm, const RuntimeCallHelper& call_helper) { | |
| 10864 char_code_at_generator_.GenerateSlow(masm, call_helper); | |
| 10865 char_from_code_generator_.GenerateSlow(masm, call_helper); | |
| 10866 } | |
| 10867 | |
| 10868 | |
| 10869 void StringHelper::GenerateCopyCharacters(MacroAssembler* masm, | |
| 10870 Register dest, | |
| 10871 Register src, | |
| 10872 Register count, | |
| 10873 Register scratch, | |
| 10874 bool ascii) { | |
| 10875 Label loop; | |
| 10876 Label done; | |
| 10877 // This loop just copies one character at a time, as it is only used for very | |
| 10878 // short strings. | |
| 10879 if (!ascii) { | |
| 10880 __ add(count, count, Operand(count), SetCC); | |
| 10881 } else { | |
| 10882 __ cmp(count, Operand(0)); | |
| 10883 } | |
| 10884 __ b(eq, &done); | |
| 10885 | |
| 10886 __ bind(&loop); | |
| 10887 __ ldrb(scratch, MemOperand(src, 1, PostIndex)); | |
| 10888 // Perform the subtraction between the load and the dependent store to | |
| 10889 // give the load time to complete. | |
| 10890 __ sub(count, count, Operand(1), SetCC); | |
| 10891 __ strb(scratch, MemOperand(dest, 1, PostIndex)); | |
| 10892 // Branch back unless this was the last iteration. | |
| 10893 __ b(gt, &loop); | |
| 10894 | |
| 10895 __ bind(&done); | |
| 10896 } | |
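The equivalent C++ is a plain byte loop; as in the stub, a two-byte copy simply doubles the byte count up front (the add of count to itself above):

#include <stddef.h>
#include <stdint.h>

void CopyCharacters(uint8_t* dest, const uint8_t* src,
                    size_t count, bool ascii) {
  size_t bytes = ascii ? count : count * 2;  // Two bytes per two-byte char.
  while (bytes-- > 0) {
    *dest++ = *src++;  // One byte per iteration; fine for short strings.
  }
}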
| 10897 | |
| 10898 | |
| 10899 enum CopyCharactersFlags { | |
| 10900 COPY_ASCII = 1, | |
| 10901 DEST_ALWAYS_ALIGNED = 2 | |
| 10902 }; | |
| 10903 | |
| 10904 | |
| 10905 void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm, | |
| 10906 Register dest, | |
| 10907 Register src, | |
| 10908 Register count, | |
| 10909 Register scratch1, | |
| 10910 Register scratch2, | |
| 10911 Register scratch3, | |
| 10912 Register scratch4, | |
| 10913 Register scratch5, | |
| 10914 int flags) { | |
| 10915 bool ascii = (flags & COPY_ASCII) != 0; | |
| 10916 bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0; | |
| 10917 | |
| 10918 if (dest_always_aligned && FLAG_debug_code) { | |
| 10919 // Check that destination is actually word aligned if the flag says | |
| 10920 // that it is. | |
| 10921 __ tst(dest, Operand(kPointerAlignmentMask)); | |
| 10922 __ Check(eq, "Destination of copy not aligned."); | |
| 10923 } | |
| 10924 | |
| 10925 const int kReadAlignment = 4; | |
| 10926 const int kReadAlignmentMask = kReadAlignment - 1; | |
| 10927 // Ensure that reading an entire aligned word containing the last character | |
| 10928 // of a string will not read outside the allocated area (because we pad up | |
| 10929 // to kObjectAlignment). | |
| 10930 STATIC_ASSERT(kObjectAlignment >= kReadAlignment); | |
| 10931 // Assumes word reads and writes are little endian. | |
| 10932 // Nothing to do for zero characters. | |
| 10933 Label done; | |
| 10934 if (!ascii) { | |
| 10935 __ add(count, count, Operand(count), SetCC); | |
| 10936 } else { | |
| 10937 __ cmp(count, Operand(0)); | |
| 10938 } | |
| 10939 __ b(eq, &done); | |
| 10940 | |
| 10941 // Assume that you cannot read (or write) unaligned. | |
| 10942 Label byte_loop; | |
| 10943 // Must copy at least eight bytes, otherwise just do it one byte at a time. | |
| 10944 __ cmp(count, Operand(8)); | |
| 10945 __ add(count, dest, Operand(count)); | |
| 10946 Register limit = count; // Read until src equals this. | |
| 10947 __ b(lt, &byte_loop); | |
| 10948 | |
| 10949 if (!dest_always_aligned) { | |
| 10950 // Align dest by byte copying. Copies between zero and three bytes. | |
| 10951 __ and_(scratch4, dest, Operand(kReadAlignmentMask), SetCC); | |
| 10952 Label dest_aligned; | |
| 10953 __ b(eq, &dest_aligned); | |
| 10954 __ cmp(scratch4, Operand(2)); | |
| 10955 __ ldrb(scratch1, MemOperand(src, 1, PostIndex)); | |
| 10956 __ ldrb(scratch2, MemOperand(src, 1, PostIndex), le); | |
| 10957 __ ldrb(scratch3, MemOperand(src, 1, PostIndex), lt); | |
| 10958 __ strb(scratch1, MemOperand(dest, 1, PostIndex)); | |
| 10959 __ strb(scratch2, MemOperand(dest, 1, PostIndex), le); | |
| 10960 __ strb(scratch3, MemOperand(dest, 1, PostIndex), lt); | |
| 10961 __ bind(&dest_aligned); | |
| 10962 } | |
| 10963 | |
| 10964 Label simple_loop; | |
| 10965 | |
| 10966 __ sub(scratch4, dest, Operand(src)); | |
| 10967 __ and_(scratch4, scratch4, Operand(0x03), SetCC); | |
| 10968 __ b(eq, &simple_loop); | |
| 10969 // Shift register is number of bits in a source word that | |
| 10970 // must be combined with bits in the next source word in order | |
| 10971 // to create a destination word. | |
| 10972 | |
| 10973 // Complex loop for src/dst that are not aligned the same way. | |
| 10974 { | |
| 10975 Label loop; | |
| 10976 __ mov(scratch4, Operand(scratch4, LSL, 3)); | |
| 10977 Register left_shift = scratch4; | |
| 10978 __ and_(src, src, Operand(~3)); // Round down to load previous word. | |
| 10979 __ ldr(scratch1, MemOperand(src, 4, PostIndex)); | |
| 10980 // Store the "shift" most significant bits of scratch in the least | |
| 10981 // significant bits (i.e., shift down by (32-shift)). | |
| 10982 __ rsb(scratch2, left_shift, Operand(32)); | |
| 10983 Register right_shift = scratch2; | |
| 10984 __ mov(scratch1, Operand(scratch1, LSR, right_shift)); | |
| 10985 | |
| 10986 __ bind(&loop); | |
| 10987 __ ldr(scratch3, MemOperand(src, 4, PostIndex)); | |
| 10988 __ sub(scratch5, limit, Operand(dest)); | |
| 10989 __ orr(scratch1, scratch1, Operand(scratch3, LSL, left_shift)); | |
| 10990 __ str(scratch1, MemOperand(dest, 4, PostIndex)); | |
| 10991 __ mov(scratch1, Operand(scratch3, LSR, right_shift)); | |
| 10992 // Loop if four or more bytes left to copy. | |
| 10993 // Compare to eight, because we did the subtract before increasing dst. | |
| 10994 __ sub(scratch5, scratch5, Operand(8), SetCC); | |
| 10995 __ b(ge, &loop); | |
| 10996 } | |
| 10997 // There is now between zero and three bytes left to copy (negative that | |
| 10998 // number is in scratch5), and between one and three bytes already read into | |
| 10999 // scratch1 (eight times that number in scratch4). We may have read past | |
| 11000 // the end of the string, but because objects are aligned, we have not read | |
| 11001 // past the end of the object. | |
| 11002 // Find the minimum of remaining characters to move and preloaded characters | |
| 11003 // and write those as bytes. | |
| 11004 __ add(scratch5, scratch5, Operand(4), SetCC); | |
| 11005 __ b(eq, &done); | |
| 11006 __ cmp(scratch4, Operand(scratch5, LSL, 3), ne); | |
| 11007 // Move the minimum of bytes read and bytes left to copy to scratch5. | |
| 11008 __ mov(scratch5, Operand(scratch4, LSR, 3), LeaveCC, lt); | |
| 11009 // Between one and three characters (the value in scratch5) have already | |
| 11010 // been read into scratch1 and are ready to write. | |
| 11011 __ cmp(scratch5, Operand(2)); | |
| 11012 __ strb(scratch1, MemOperand(dest, 1, PostIndex)); | |
| 11013 __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, ge); | |
| 11014 __ strb(scratch1, MemOperand(dest, 1, PostIndex), ge); | |
| 11015 __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, gt); | |
| 11016 __ strb(scratch1, MemOperand(dest, 1, PostIndex), gt); | |
| 11017 // Copy any remaining bytes. | |
| 11018 __ b(&byte_loop); | |
| 11019 | |
| 11020 // Simple loop. | |
| 11021 // Copy words from src to dst, until less than four bytes left. | |
| 11022 // Both src and dest are word aligned. | |
| 11023 __ bind(&simple_loop); | |
| 11024 { | |
| 11025 Label loop; | |
| 11026 __ bind(&loop); | |
| 11027 __ ldr(scratch1, MemOperand(src, 4, PostIndex)); | |
| 11028 __ sub(scratch3, limit, Operand(dest)); | |
| 11029 __ str(scratch1, MemOperand(dest, 4, PostIndex)); | |
| 11030 // Compare to 8, not 4, because we do the subtraction before increasing | |
| 11031 // dest. | |
| 11032 __ cmp(scratch3, Operand(8)); | |
| 11033 __ b(ge, &loop); | |
| 11034 } | |
| 11035 | |
| 11036 // Copy bytes from src to dst until dst hits limit. | |
| 11037 __ bind(&byte_loop); | |
| 11038 __ cmp(dest, Operand(limit)); | |
| 11039 __ ldrb(scratch1, MemOperand(src, 1, PostIndex), lt); | |
| 11040 __ b(ge, &done); | |
| 11041 __ strb(scratch1, MemOperand(dest, 1, PostIndex)); | |
| 11042 __ b(&byte_loop); | |
| 11043 | |
| 11044 __ bind(&done); | |
| 11045 } | |
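The heart of the long copy is the shift-combine loop used when src and dest disagree on word alignment: each destination word is stitched together from the tails of two adjacent source words. A self-contained C++ sketch of just that loop, assuming little-endian 32-bit words, a word-aligned dest, a misaligned src (k != 0; the aligned case takes the simple word loop instead), and whole words to write:

#include <stdint.h>

void CopyShiftCombine(uint32_t* dest, const uint8_t* src, int words) {
  uint32_t k = reinterpret_cast<uintptr_t>(src) & 3u;  // src misalignment.
  uint32_t right = 8u * k;         // Bits taken from the tail of each word.
  uint32_t left = 32u - right;     // Bits supplied by the following word.
  const uint32_t* s = reinterpret_cast<const uint32_t*>(src - k);
  uint32_t carry = *s++ >> right;  // Preload the tail of the first word.
  while (words-- > 0) {
    uint32_t next = *s++;
    *dest++ = carry | (next << left);  // Stitch two source words together.
    carry = next >> right;             // Keep the leftover tail bits.
  }
}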
| 11046 | |
| 11047 | |
| 11048 void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, | |
| 11049 Register c1, | |
| 11050 Register c2, | |
| 11051 Register scratch1, | |
| 11052 Register scratch2, | |
| 11053 Register scratch3, | |
| 11054 Register scratch4, | |
| 11055 Register scratch5, | |
| 11056 Label* not_found) { | |
| 11057 // Register scratch3 is the general scratch register in this function. | |
| 11058 Register scratch = scratch3; | |
| 11059 | |
| 11060 // Make sure that both characters are not digits, as such strings have a | |
| 11061 // different hash algorithm. Don't try to look for these in the symbol table. | |
| 11062 Label not_array_index; | |
| 11063 __ sub(scratch, c1, Operand(static_cast<int>('0'))); | |
| 11064 __ cmp(scratch, Operand(static_cast<int>('9' - '0'))); | |
| 11065 __ b(hi, ¬_array_index); | |
| 11066 __ sub(scratch, c2, Operand(static_cast<int>('0'))); | |
| 11067 __ cmp(scratch, Operand(static_cast<int>('9' - '0'))); | |
| 11068 | |
| 11069 // If the check failed, combine both characters into a single halfword. | |
| 11070 // This is required by the contract of the method: code at the | |
| 11071 // not_found branch expects this combination in the c1 register. | |
| 11072 __ orr(c1, c1, Operand(c2, LSL, kBitsPerByte), LeaveCC, ls); | |
| 11073 __ b(ls, not_found); | |
| 11074 | |
| 11075 __ bind(¬_array_index); | |
| 11076 // Calculate the two character string hash. | |
| 11077 Register hash = scratch1; | |
| 11078 StringHelper::GenerateHashInit(masm, hash, c1); | |
| 11079 StringHelper::GenerateHashAddCharacter(masm, hash, c2); | |
| 11080 StringHelper::GenerateHashGetHash(masm, hash); | |
| 11081 | |
| 11082 // Collect the two characters in a register. | |
| 11083 Register chars = c1; | |
| 11084 __ orr(chars, chars, Operand(c2, LSL, kBitsPerByte)); | |
| 11085 | |
| 11086 // chars: two character string, char 1 in byte 0 and char 2 in byte 1. | |
| 11087 // hash: hash of two character string. | |
| 11088 | |
| 11089 // Load the symbol table. | |
| 11091 Register symbol_table = c2; | |
| 11092 __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex); | |
| 11093 | |
| 11094 // Load undefined value | |
| 11095 Register undefined = scratch4; | |
| 11096 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex); | |
| 11097 | |
| 11098 // Calculate capacity mask from the symbol table capacity. | |
| 11099 Register mask = scratch2; | |
| 11100 __ ldr(mask, FieldMemOperand(symbol_table, SymbolTable::kCapacityOffset)); | |
| 11101 __ mov(mask, Operand(mask, ASR, 1)); | |
| 11102 __ sub(mask, mask, Operand(1)); | |
| 11103 | |
| 11104 // Calculate untagged address of the first element of the symbol table. | |
| 11105 Register first_symbol_table_element = symbol_table; | |
| 11106 __ add(first_symbol_table_element, symbol_table, | |
| 11107 Operand(SymbolTable::kElementsStartOffset - kHeapObjectTag)); | |
| 11108 | |
| 11109 // Registers | |
| 11110 // chars: two character string, char 1 in byte 0 and char 2 in byte 1. | |
| 11111 // hash: hash of two character string | |
| 11112 // mask: capacity mask | |
| 11113 // first_symbol_table_element: address of the first element of | |
| 11114 // the symbol table | |
| 11115 // scratch: - | |
| 11116 | |
| 11117 // Perform a number of probes in the symbol table. | |
| 11118 static const int kProbes = 4; | |
| 11119 Label found_in_symbol_table; | |
| 11120 Label next_probe[kProbes]; | |
| 11121 for (int i = 0; i < kProbes; i++) { | |
| 11122 Register candidate = scratch5; // Scratch register contains candidate. | |
| 11123 | |
| 11124 // Calculate entry in symbol table. | |
| 11125 if (i > 0) { | |
| 11126 __ add(candidate, hash, Operand(SymbolTable::GetProbeOffset(i))); | |
| 11127 } else { | |
| 11128 __ mov(candidate, hash); | |
| 11129 } | |
| 11130 | |
| 11131 __ and_(candidate, candidate, Operand(mask)); | |
| 11132 | |
| 11133 // Load the entry from the symbol table. | |
| 11134 STATIC_ASSERT(SymbolTable::kEntrySize == 1); | |
| 11135 __ ldr(candidate, | |
| 11136 MemOperand(first_symbol_table_element, | |
| 11137 candidate, | |
| 11138 LSL, | |
| 11139 kPointerSizeLog2)); | |
| 11140 | |
| 11141 // If entry is undefined no string with this hash can be found. | |
| 11142 __ cmp(candidate, undefined); | |
| 11143 __ b(eq, not_found); | |
| 11144 | |
| 11145 // If length is not 2 the string is not a candidate. | |
| 11146 __ ldr(scratch, FieldMemOperand(candidate, String::kLengthOffset)); | |
| 11147 __ cmp(scratch, Operand(Smi::FromInt(2))); | |
| 11148 __ b(ne, &next_probe[i]); | |
| 11149 | |
| 11150 // Check that the candidate is a non-external ascii string. | |
| 11151 __ ldr(scratch, FieldMemOperand(candidate, HeapObject::kMapOffset)); | |
| 11152 __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); | |
| 11153 __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, | |
| 11154 &next_probe[i]); | |
| 11155 | |
| 11156 // Check if the two characters match. | |
| 11157 // Assumes that word load is little endian. | |
| 11158 __ ldrh(scratch, FieldMemOperand(candidate, SeqAsciiString::kHeaderSize)); | |
| 11159 __ cmp(chars, scratch); | |
| 11160 __ b(eq, &found_in_symbol_table); | |
| 11161 __ bind(&next_probe[i]); | |
| 11162 } | |
| 11163 | |
| 11164 // No matching 2 character string found by probing. | |
| 11165 __ jmp(not_found); | |
| 11166 | |
| 11167 // Scratch register contains result when we fall through to here. | |
| 11168 Register result = scratch; | |
| 11169 __ bind(&found_in_symbol_table); | |
| 11170 __ Move(r0, result); | |
| 11171 } | |
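The probing above is standard open addressing over a power-of-two table. A C++ sketch with hypothetical types; the quadratic probe offset mirrors what SymbolTable::GetProbeOffset is assumed to compute:

#include <stdint.h>

struct TwoCharSymbol { uint16_t chars; };  // Char 1 in byte 0, char 2 in byte 1.

static uint32_t GetProbeOffset(int n) { return (n + n * n) >> 1; }  // Assumed.

const TwoCharSymbol* ProbeSymbolTable(const TwoCharSymbol* const* table,
                                      uint32_t capacity,  // Power of two.
                                      uint32_t hash,
                                      uint16_t chars) {
  const int kProbes = 4;  // Give up after four probes, as the stub does.
  for (int i = 0; i < kProbes; i++) {
    uint32_t index = (hash + (i > 0 ? GetProbeOffset(i) : 0u)) & (capacity - 1);
    const TwoCharSymbol* candidate = table[index];
    if (candidate == nullptr) return nullptr;  // Undefined slot: definite miss.
    // (The length-is-2 and sequential-ascii checks are elided here.)
    if (candidate->chars == chars) return candidate;
  }
  return nullptr;  // Not found by probing; the caller takes not_found.
}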
| 11172 | |
| 11173 | |
| 11174 void StringHelper::GenerateHashInit(MacroAssembler* masm, | |
| 11175 Register hash, | |
| 11176 Register character) { | |
| 11177 // hash = character + (character << 10); | |
| 11178 __ add(hash, character, Operand(character, LSL, 10)); | |
| 11179 // hash ^= hash >> 6; | |
| 11180 __ eor(hash, hash, Operand(hash, ASR, 6)); | |
| 11181 } | |
| 11182 | |
| 11183 | |
| 11184 void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm, | |
| 11185 Register hash, | |
| 11186 Register character) { | |
| 11187 // hash += character; | |
| 11188 __ add(hash, hash, Operand(character)); | |
| 11189 // hash += hash << 10; | |
| 11190 __ add(hash, hash, Operand(hash, LSL, 10)); | |
| 11191 // hash ^= hash >> 6; | |
| 11192 __ eor(hash, hash, Operand(hash, ASR, 6)); | |
| 11193 } | |
| 11194 | |
| 11195 | |
| 11196 void StringHelper::GenerateHashGetHash(MacroAssembler* masm, | |
| 11197 Register hash) { | |
| 11198 // hash += hash << 3; | |
| 11199 __ add(hash, hash, Operand(hash, LSL, 3)); | |
| 11200 // hash ^= hash >> 11; | |
| 11201 __ eor(hash, hash, Operand(hash, ASR, 11)); | |
| 11202 // hash += hash << 15; | |
| 11203 __ add(hash, hash, Operand(hash, LSL, 15), SetCC); | |
| 11204 | |
| 11205 // if (hash == 0) hash = 27; | |
| 11206 __ mov(hash, Operand(27), LeaveCC, nz); | |
| 11207 } | |
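Taken together, the three helpers compute a Jenkins-style one-at-a-time running hash. Sketched as scalar C++ (with one caveat: the stub shifts with ASR, while this sketch uses unsigned shifts, so the two only agree while the hash stays below 2^31):

#include <stdint.h>

uint32_t HashInit(uint32_t c) {
  uint32_t hash = c + (c << 10);
  return hash ^ (hash >> 6);
}

uint32_t HashAddCharacter(uint32_t hash, uint32_t c) {
  hash += c;
  hash += hash << 10;
  return hash ^ (hash >> 6);
}

uint32_t HashGetHash(uint32_t hash) {
  hash += hash << 3;
  hash ^= hash >> 11;
  hash += hash << 15;
  return hash == 0 ? 27 : hash;  // Zero is reserved, as in the stub.
}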
| 11208 | |
| 11209 | |
| 11210 void SubStringStub::Generate(MacroAssembler* masm) { | |
| 11211 Label runtime; | |
| 11212 | |
| 11213 // Stack frame on entry. | |
| 11214 // lr: return address | |
| 11215 // sp[0]: to | |
| 11216 // sp[4]: from | |
| 11217 // sp[8]: string | |
| 11218 | |
| 11219 // This stub is called from the native-call %_SubString(...), so | |
| 11220 // nothing can be assumed about the arguments. It is tested that: | |
| 11221 // "string" is a sequential string, | |
| 11222 // both "from" and "to" are smis, and | |
| 11223 // 0 <= from <= to <= string.length. | |
| 11224 // If any of these assumptions fail, we call the runtime system. | |
| 11225 | |
| 11226 static const int kToOffset = 0 * kPointerSize; | |
| 11227 static const int kFromOffset = 1 * kPointerSize; | |
| 11228 static const int kStringOffset = 2 * kPointerSize; | |
| 11229 | |
| 11230 | |
| 11231 // Check bounds and smi-ness. | |
| 11232 __ ldr(r7, MemOperand(sp, kToOffset)); | |
| 11233 __ ldr(r6, MemOperand(sp, kFromOffset)); | |
| 11234 STATIC_ASSERT(kSmiTag == 0); | |
| 11235 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); | |
| 11236 // I.e., an arithmetic shift right by one removes the smi tag. | |
| 11237 __ mov(r2, Operand(r7, ASR, 1), SetCC); | |
| 11238 __ mov(r3, Operand(r6, ASR, 1), SetCC, cc); | |
| 11239 // If either r7 or r6 had the smi tag bit set, then carry is set now. | |
| 11240 __ b(cs, &runtime); // Either "from" or "to" is not a smi. | |
| 11241 __ b(mi, &runtime); // From is negative. | |
| 11242 | |
| 11243 __ sub(r2, r2, Operand(r3), SetCC); | |
| 11244 __ b(mi, &runtime); // Fail if from > to. | |
| 11245 // Special handling of sub-strings of length 1 and 2. One character strings | |
| 11246 // are handled in the runtime system (looked up in the single character | |
| 11247 // cache). Two character strings are looked up in the symbol table. | |
| 11248 __ cmp(r2, Operand(2)); | |
| 11249 __ b(lt, &runtime); | |
| 11250 | |
| 11251 // r2: length | |
| 11252 // r3: from index (untagged smi) | |
| 11253 // r6: from (smi) | |
| 11254 // r7: to (smi) | |
| 11255 | |
| 11256 // Make sure first argument is a sequential (or flat) string. | |
| 11257 __ ldr(r5, MemOperand(sp, kStringOffset)); | |
| 11258 STATIC_ASSERT(kSmiTag == 0); | |
| 11259 __ tst(r5, Operand(kSmiTagMask)); | |
| 11260 __ b(eq, &runtime); | |
| 11261 Condition is_string = masm->IsObjectStringType(r5, r1); | |
| 11262 __ b(NegateCondition(is_string), &runtime); | |
| 11263 | |
| 11264 // r1: instance type | |
| 11265 // r2: length | |
| 11266 // r3: from index (untagged smi) | |
| 11267 // r5: string | |
| 11268 // r6: from (smi) | |
| 11269 // r7: to (smi) | |
| 11270 Label seq_string; | |
| 11271 __ and_(r4, r1, Operand(kStringRepresentationMask)); | |
| 11272 STATIC_ASSERT(kSeqStringTag < kConsStringTag); | |
| 11273 STATIC_ASSERT(kConsStringTag < kExternalStringTag); | |
| 11274 __ cmp(r4, Operand(kConsStringTag)); | |
| 11275 __ b(gt, &runtime); // External strings go to runtime. | |
| 11276 __ b(lt, &seq_string); // Sequential strings are handled directly. | |
| 11277 | |
| 11278 // Cons string. Try to recurse (once) on the first substring. | |
| 11279 // (This adds a little more generality than necessary to handle flattened | |
| 11280 // cons strings, but not much). | |
| 11281 __ ldr(r5, FieldMemOperand(r5, ConsString::kFirstOffset)); | |
| 11282 __ ldr(r4, FieldMemOperand(r5, HeapObject::kMapOffset)); | |
| 11283 __ ldrb(r1, FieldMemOperand(r4, Map::kInstanceTypeOffset)); | |
| 11284 __ tst(r1, Operand(kStringRepresentationMask)); | |
| 11285 STATIC_ASSERT(kSeqStringTag == 0); | |
| 11286 __ b(ne, &runtime); // Cons and External strings go to runtime. | |
| 11287 | |
| 11288 // Definitely a sequential string. | |
| 11289 __ bind(&seq_string); | |
| 11290 | |
| 11291 // r1: instance type. | |
| 11292 // r2: length | |
| 11293 // r3: from index (untagged smi) | |
| 11294 // r5: string | |
| 11295 // r6: from (smi) | |
| 11296 // r7: to (smi) | |
| 11297 __ ldr(r4, FieldMemOperand(r5, String::kLengthOffset)); | |
| 11298 __ cmp(r4, Operand(r7)); | |
| 11299 __ b(lt, &runtime); // Fail if to > length. | |
| 11300 | |
| 11301 // r1: instance type. | |
| 11302 // r2: result string length. | |
| 11303 // r3: from index (untagged smi) | |
| 11304 // r5: string. | |
| 11305 // r6: from offset (smi) | |
| 11306 // Check for flat ascii string. | |
| 11307 Label non_ascii_flat; | |
| 11308 __ tst(r1, Operand(kStringEncodingMask)); | |
| 11309 STATIC_ASSERT(kTwoByteStringTag == 0); | |
| 11310 __ b(eq, &non_ascii_flat); | |
| 11311 | |
| 11312 Label result_longer_than_two; | |
| 11313 __ cmp(r2, Operand(2)); | |
| 11314 __ b(gt, &result_longer_than_two); | |
| 11315 | |
| 11316 // Sub string of length 2 requested. | |
| 11317 // Get the two characters forming the sub string. | |
| 11318 __ add(r5, r5, Operand(r3)); | |
| 11319 __ ldrb(r3, FieldMemOperand(r5, SeqAsciiString::kHeaderSize)); | |
| 11320 __ ldrb(r4, FieldMemOperand(r5, SeqAsciiString::kHeaderSize + 1)); | |
| 11321 | |
| 11322 // Try to lookup two character string in symbol table. | |
| 11323 Label make_two_character_string; | |
| 11324 StringHelper::GenerateTwoCharacterSymbolTableProbe( | |
| 11325 masm, r3, r4, r1, r5, r6, r7, r9, &make_two_character_string); | |
| 11326 __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4); | |
| 11327 __ add(sp, sp, Operand(3 * kPointerSize)); | |
| 11328 __ Ret(); | |
| 11329 | |
| 11330 // r2: result string length. | |
| 11331 // r3: two characters combined into halfword in little endian byte order. | |
| 11332 __ bind(&make_two_character_string); | |
| 11333 __ AllocateAsciiString(r0, r2, r4, r5, r9, &runtime); | |
| 11334 __ strh(r3, FieldMemOperand(r0, SeqAsciiString::kHeaderSize)); | |
| 11335 __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4); | |
| 11336 __ add(sp, sp, Operand(3 * kPointerSize)); | |
| 11337 __ Ret(); | |
| 11338 | |
| 11339 __ bind(&result_longer_than_two); | |
| 11340 | |
| 11341 // Allocate the result. | |
| 11342 __ AllocateAsciiString(r0, r2, r3, r4, r1, &runtime); | |
| 11343 | |
| 11344 // r0: result string. | |
| 11345 // r2: result string length. | |
| 11346 // r5: string. | |
| 11347 // r6: from offset (smi) | |
| 11348 // Locate first character of result. | |
| 11349 __ add(r1, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); | |
| 11350 // Locate 'from' character of string. | |
| 11351 __ add(r5, r5, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); | |
| 11352 __ add(r5, r5, Operand(r6, ASR, 1)); | |
| 11353 | |
| 11354 // r0: result string. | |
| 11355 // r1: first character of result string. | |
| 11356 // r2: result string length. | |
| 11357 // r5: first character of sub string to copy. | |
| 11358 STATIC_ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0); | |
| 11359 StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9, | |
| 11360 COPY_ASCII | DEST_ALWAYS_ALIGNED); | |
| 11361 __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4); | |
| 11362 __ add(sp, sp, Operand(3 * kPointerSize)); | |
| 11363 __ Ret(); | |
| 11364 | |
| 11365 __ bind(&non_ascii_flat); | |
| 11366 // r2: result string length. | |
| 11367 // r5: string. | |
| 11368 // r6: from offset (smi) | |
| 11369 // Check for flat two byte string. | |
| 11370 | |
| 11371 // Allocate the result. | |
| 11372 __ AllocateTwoByteString(r0, r2, r1, r3, r4, &runtime); | |
| 11373 | |
| 11374 // r0: result string. | |
| 11375 // r2: result string length. | |
| 11376 // r5: string. | |
| 11377 // Locate first character of result. | |
| 11378 __ add(r1, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); | |
| 11379 // Locate 'from' character of string. | |
| 11380 __ add(r5, r5, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); | |
| 11381 // As "from" is a smi, it is already twice the index, which matches the | |
| 11382 // size of a two-byte character. | |
| 11383 __ add(r5, r5, Operand(r6)); | |
| 11384 | |
| 11385 // r0: result string. | |
| 11386 // r1: first character of result. | |
| 11387 // r2: result length. | |
| 11388 // r5: first character of string to copy. | |
| 11389 STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0); | |
| 11390 StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9, | |
| 11391 DEST_ALWAYS_ALIGNED); | |
| 11392 __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4); | |
| 11393 __ add(sp, sp, Operand(3 * kPointerSize)); | |
| 11394 __ Ret(); | |
| 11395 | |
| 11396 // Just jump to runtime to create the sub string. | |
| 11397 __ bind(&runtime); | |
| 11398 __ TailCallRuntime(Runtime::kSubString, 3, 1); | |
| 11399 } | |
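Collecting the guards above: the fast path only proceeds under the conditions below, and everything else tail-calls Runtime::kSubString (a sketch; the one-level cons unwrap is omitted):

// Mirrors the checks made by SubStringStub::Generate; not a V8 API.
bool SubStringFastPathApplies(bool from_is_smi, bool to_is_smi,
                              int from, int to, int length,
                              bool is_sequential) {
  if (!from_is_smi || !to_is_smi) return false;            // Non-smi indices.
  if (from < 0 || to < from || length < to) return false;  // Bounds checks.
  if (to - from < 2) return false;  // 0/1-char results use runtime caches.
  return is_sequential;             // External strings go to the runtime.
}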
| 11400 | |
| 11401 | |
| 11402 void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm, | |
| 11403 Register left, | |
| 11404 Register right, | |
| 11405 Register scratch1, | |
| 11406 Register scratch2, | |
| 11407 Register scratch3, | |
| 11408 Register scratch4) { | |
| 11409 Label compare_lengths; | |
| 11410 // Find minimum length and length difference. | |
| 11411 __ ldr(scratch1, FieldMemOperand(left, String::kLengthOffset)); | |
| 11412 __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset)); | |
| 11413 __ sub(scratch3, scratch1, Operand(scratch2), SetCC); | |
| 11414 Register length_delta = scratch3; | |
| 11415 __ mov(scratch1, scratch2, LeaveCC, gt); | |
| 11416 Register min_length = scratch1; | |
| 11417 STATIC_ASSERT(kSmiTag == 0); | |
| 11418 __ tst(min_length, Operand(min_length)); | |
| 11419 __ b(eq, &compare_lengths); | |
| 11420 | |
| 11421 // Untag smi. | |
| 11422 __ mov(min_length, Operand(min_length, ASR, kSmiTagSize)); | |
| 11423 | |
| 11424 // Set up registers so that we only need to increment one register | |
| 11425 // in the loop. | |
| 11426 __ add(scratch2, min_length, | |
| 11427 Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); | |
| 11428 __ add(left, left, Operand(scratch2)); | |
| 11429 __ add(right, right, Operand(scratch2)); | |
| 11430 // Registers left and right point to the min_length character of the strings. | |
| 11431 __ rsb(min_length, min_length, Operand(-1)); | |
| 11432 Register index = min_length; | |
| 11433 // Index starts at -min_length. | |
| 11434 | |
| 11435 { | |
| 11436 // Compare loop. | |
| 11437 Label loop; | |
| 11438 __ bind(&loop); | |
| 11439 // Compare characters. | |
| 11440 __ add(index, index, Operand(1), SetCC); | |
| 11441 __ ldrb(scratch2, MemOperand(left, index), ne); | |
| 11442 __ ldrb(scratch4, MemOperand(right, index), ne); | |
| 11443 // Skip to compare lengths with eq condition true. | |
| 11444 __ b(eq, &compare_lengths); | |
| 11445 __ cmp(scratch2, scratch4); | |
| 11446 __ b(eq, &loop); | |
| 11447 // Fallthrough with eq condition false. | |
| 11448 } | |
| 11449 // Compare lengths - strings up to min-length are equal. | |
| 11450 __ bind(&compare_lengths); | |
| 11451 ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0)); | |
| 11452 // Use zero length_delta as result. | |
| 11453 __ mov(r0, Operand(length_delta), SetCC, eq); | |
| 11454 // Fall through to here if characters compare not-equal. | |
| 11455 __ mov(r0, Operand(Smi::FromInt(GREATER)), LeaveCC, gt); | |
| 11456 __ mov(r0, Operand(Smi::FromInt(LESS)), LeaveCC, lt); | |
| 11457 __ Ret(); | |
| 11458 } | |
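The register gymnastics implement the usual flat-string comparison; in plain C++ it reads as follows, with -1/0/1 standing in for the LESS/EQUAL/GREATER smis:

#include <stdint.h>

int CompareFlatAscii(const uint8_t* left, int left_len,
                     const uint8_t* right, int right_len) {
  int min_length = left_len < right_len ? left_len : right_len;
  for (int i = 0; i < min_length; i++) {
    if (left[i] != right[i]) return left[i] < right[i] ? -1 : 1;
  }
  int delta = left_len - right_len;  // Equal up to min_length.
  return (delta > 0) - (delta < 0);  // Sign of the length difference.
}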
| 11459 | |
| 11460 | |
| 11461 void StringCompareStub::Generate(MacroAssembler* masm) { | |
| 11462 Label runtime; | |
| 11463 | |
| 11464 // Stack frame on entry. | |
| 11465 // sp[0]: right string | |
| 11466 // sp[4]: left string | |
| 11467 __ ldr(r0, MemOperand(sp, 1 * kPointerSize)); // left | |
| 11468 __ ldr(r1, MemOperand(sp, 0 * kPointerSize)); // right | |
| 11469 | |
| 11470 Label not_same; | |
| 11471 __ cmp(r0, r1); | |
| 11472 __ b(ne, ¬_same); | |
| 11473 STATIC_ASSERT(EQUAL == 0); | |
| 11474 STATIC_ASSERT(kSmiTag == 0); | |
| 11475 __ mov(r0, Operand(Smi::FromInt(EQUAL))); | |
| 11476 __ IncrementCounter(&Counters::string_compare_native, 1, r1, r2); | |
| 11477 __ add(sp, sp, Operand(2 * kPointerSize)); | |
| 11478 __ Ret(); | |
| 11479 | |
| 11480 __ bind(¬_same); | |
| 11481 | |
| 11482 // Check that both objects are sequential ascii strings. | |
| 11483 __ JumpIfNotBothSequentialAsciiStrings(r0, r1, r2, r3, &runtime); | |
| 11484 | |
| 11485 // Compare flat ascii strings natively. Remove arguments from stack first. | |
| 11486 __ IncrementCounter(&Counters::string_compare_native, 1, r2, r3); | |
| 11487 __ add(sp, sp, Operand(2 * kPointerSize)); | |
| 11488 GenerateCompareFlatAsciiStrings(masm, r0, r1, r2, r3, r4, r5); | |
| 11489 | |
| 11490 // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater) | |
| 11491 // tagged as a small integer. | |
| 11492 __ bind(&runtime); | |
| 11493 __ TailCallRuntime(Runtime::kStringCompare, 2, 1); | |
| 11494 } | |
| 11495 | |
| 11496 | |
| 11497 void StringAddStub::Generate(MacroAssembler* masm) { | |
| 11498 Label string_add_runtime; | |
| 11499 // Stack on entry: | |
| 11500 // sp[0]: second argument. | |
| 11501 // sp[4]: first argument. | |
| 11502 | |
| 11503 // Load the two arguments. | |
| 11504 __ ldr(r0, MemOperand(sp, 1 * kPointerSize)); // First argument. | |
| 11505 __ ldr(r1, MemOperand(sp, 0 * kPointerSize)); // Second argument. | |
| 11506 | |
| 11507 // Make sure that both arguments are strings if not known in advance. | |
| 11508 if (string_check_) { | |
| 11509 STATIC_ASSERT(kSmiTag == 0); | |
| 11510 __ JumpIfEitherSmi(r0, r1, &string_add_runtime); | |
| 11511 // Load instance types. | |
| 11512 __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); | |
| 11513 __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset)); | |
| 11514 __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); | |
| 11515 __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset)); | |
| 11516 STATIC_ASSERT(kStringTag == 0); | |
| 11517 // If either is not a string, go to runtime. | |
| 11518 __ tst(r4, Operand(kIsNotStringMask)); | |
| 11519 __ tst(r5, Operand(kIsNotStringMask), eq); | |
| 11520 __ b(ne, &string_add_runtime); | |
| 11521 } | |
| 11522 | |
| 11523 // Both arguments are strings. | |
| 11524 // r0: first string | |
| 11525 // r1: second string | |
| 11526 // r4: first string instance type (if string_check_) | |
| 11527 // r5: second string instance type (if string_check_) | |
| 11528 { | |
| 11529 Label strings_not_empty; | |
| 11530 // Check if either of the strings is empty. In that case return the other. | |
| 11531 __ ldr(r2, FieldMemOperand(r0, String::kLengthOffset)); | |
| 11532 __ ldr(r3, FieldMemOperand(r1, String::kLengthOffset)); | |
| 11533 STATIC_ASSERT(kSmiTag == 0); | |
| 11534 __ cmp(r2, Operand(Smi::FromInt(0))); // Test if first string is empty. | |
| 11535 __ mov(r0, Operand(r1), LeaveCC, eq); // If first is empty, return second. | |
| 11536 STATIC_ASSERT(kSmiTag == 0); | |
| 11537 // Else test if second string is empty. | |
| 11538 __ cmp(r3, Operand(Smi::FromInt(0)), ne); | |
| 11539 __ b(ne, &strings_not_empty); // If either string was empty, return r0. | |
| 11540 | |
| 11541 __ IncrementCounter(&Counters::string_add_native, 1, r2, r3); | |
| 11542 __ add(sp, sp, Operand(2 * kPointerSize)); | |
| 11543 __ Ret(); | |
| 11544 | |
| 11545 __ bind(&strings_not_empty); | |
| 11546 } | |
| 11547 | |
| 11548 __ mov(r2, Operand(r2, ASR, kSmiTagSize)); | |
| 11549 __ mov(r3, Operand(r3, ASR, kSmiTagSize)); | |
| 11550 // Both strings are non-empty. | |
| 11551 // r0: first string | |
| 11552 // r1: second string | |
| 11553 // r2: length of first string | |
| 11554 // r3: length of second string | |
| 11555 // r4: first string instance type (if string_check_) | |
| 11556 // r5: second string instance type (if string_check_) | |
| 11557 // Look at the length of the result of adding the two strings. | |
| 11558 Label string_add_flat_result, longer_than_two; | |
| 11559 // Adding two lengths can't overflow. | |
| 11560 STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2); | |
| 11561 __ add(r6, r2, Operand(r3)); | |
| 11562 // Adding two one-character strings is handled specially: the combined | |
| 11563 // string may already exist in the symbol table. | |
| 11564 __ cmp(r6, Operand(2)); | |
| 11565 __ b(ne, &longer_than_two); | |
| 11566 | |
| 11567 // Check that both strings are non-external ascii strings. | |
| 11568 if (!string_check_) { | |
| 11569 __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); | |
| 11570 __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset)); | |
| 11571 __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); | |
| 11572 __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset)); | |
| 11573 } | |
| 11574 __ JumpIfBothInstanceTypesAreNotSequentialAscii(r4, r5, r6, r7, | |
| 11575 &string_add_runtime); | |
| 11576 | |
| 11577 // Get the two characters forming the sub string. | |
| 11578 __ ldrb(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize)); | |
| 11579 __ ldrb(r3, FieldMemOperand(r1, SeqAsciiString::kHeaderSize)); | |
| 11580 | |
| 11581 // Try to lookup two character string in symbol table. If it is not found | |
| 11582 // just allocate a new one. | |
| 11583 Label make_two_character_string; | |
| 11584 StringHelper::GenerateTwoCharacterSymbolTableProbe( | |
| 11585 masm, r2, r3, r6, r7, r4, r5, r9, &make_two_character_string); | |
| 11586 __ IncrementCounter(&Counters::string_add_native, 1, r2, r3); | |
| 11587 __ add(sp, sp, Operand(2 * kPointerSize)); | |
| 11588 __ Ret(); | |
| 11589 | |
| 11590 __ bind(&make_two_character_string); | |
| 11591 // The resulting string has length 2, and the first characters of the two | |
| 11592 // strings are combined into a single halfword in the r2 register. | |
| 11593 // So we can fill the resulting string with a single halfword store | |
| 11594 // instruction instead of two loops (which assumes that the processor | |
| 11595 // is in little-endian mode). | |
| 11596 __ mov(r6, Operand(2)); | |
| 11597 __ AllocateAsciiString(r0, r6, r4, r5, r9, &string_add_runtime); | |
| 11598 __ strh(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize)); | |
| 11599 __ IncrementCounter(&Counters::string_add_native, 1, r2, r3); | |
| 11600 __ add(sp, sp, Operand(2 * kPointerSize)); | |
| 11601 __ Ret(); | |
| 11602 | |
| 11603 __ bind(&longer_than_two); | |
| 11604 // Check if resulting string will be flat. | |
| 11605 __ cmp(r6, Operand(String::kMinNonFlatLength)); | |
| 11606 __ b(lt, &string_add_flat_result); | |
| 11607 // Handle exceptionally long strings in the runtime system. | |
| 11608 STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0); | |
| 11609 ASSERT(IsPowerOf2(String::kMaxLength + 1)); | |
| 11610 // kMaxLength + 1 is representable as a shifted literal, kMaxLength is not. | |
| 11611 __ cmp(r6, Operand(String::kMaxLength + 1)); | |
| 11612 __ b(hs, &string_add_runtime); | |
| 11613 | |
| 11614 // If result is not supposed to be flat, allocate a cons string object. | |
| 11615 // If both strings are ascii the result is an ascii cons string. | |
| 11616 if (!string_check_) { | |
| 11617 __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); | |
| 11618 __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset)); | |
| 11619 __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); | |
| 11620 __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset)); | |
| 11621 } | |
| 11622 Label non_ascii, allocated, ascii_data; | |
| 11623 STATIC_ASSERT(kTwoByteStringTag == 0); | |
| 11624 __ tst(r4, Operand(kStringEncodingMask)); | |
| 11625 __ tst(r5, Operand(kStringEncodingMask), ne); | |
| 11626 __ b(eq, &non_ascii); | |
| 11627 | |
| 11628 // Allocate an ASCII cons string. | |
| 11629 __ bind(&ascii_data); | |
| 11630 __ AllocateAsciiConsString(r7, r6, r4, r5, &string_add_runtime); | |
| 11631 __ bind(&allocated); | |
| 11632 // Fill the fields of the cons string. | |
| 11633 __ str(r0, FieldMemOperand(r7, ConsString::kFirstOffset)); | |
| 11634 __ str(r1, FieldMemOperand(r7, ConsString::kSecondOffset)); | |
| 11635 __ mov(r0, Operand(r7)); | |
| 11636 __ IncrementCounter(&Counters::string_add_native, 1, r2, r3); | |
| 11637 __ add(sp, sp, Operand(2 * kPointerSize)); | |
| 11638 __ Ret(); | |
| 11639 | |
| 11640 __ bind(&non_ascii); | |
| 11641 // At least one of the strings is two-byte. Check whether it happens | |
| 11642 // to contain only ascii characters. | |
| 11643 // r4: first instance type. | |
| 11644 // r5: second instance type. | |
| 11645 __ tst(r4, Operand(kAsciiDataHintMask)); | |
| 11646 __ tst(r5, Operand(kAsciiDataHintMask), ne); | |
| 11647 __ b(ne, &ascii_data); | |
| 11648 __ eor(r4, r4, Operand(r5)); | |
| 11649 STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0); | |
| 11650 __ and_(r4, r4, Operand(kAsciiStringTag | kAsciiDataHintTag)); | |
| 11651 __ cmp(r4, Operand(kAsciiStringTag | kAsciiDataHintTag)); | |
| 11652 __ b(eq, &ascii_data); | |
| 11653 | |
| 11654 // Allocate a two byte cons string. | |
| 11655 __ AllocateTwoByteConsString(r7, r6, r4, r5, &string_add_runtime); | |
| 11656 __ jmp(&allocated); | |
| 11657 | |
| 11658 // Handle creating a flat result. First check that both strings are | |
| 11659 // sequential and that they have the same encoding. | |
| 11660 // r0: first string | |
| 11661 // r1: second string | |
| 11662 // r2: length of first string | |
| 11663 // r3: length of second string | |
| 11664 // r4: first string instance type (if string_check_) | |
| 11665 // r5: second string instance type (if string_check_) | |
| 11666 // r6: sum of lengths. | |
| 11667 __ bind(&string_add_flat_result); | |
| 11668 if (!string_check_) { | |
| 11669 __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); | |
| 11670 __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset)); | |
| 11671 __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); | |
| 11672 __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset)); | |
| 11673 } | |
| 11674 // Check that both strings are sequential. | |
| 11675 STATIC_ASSERT(kSeqStringTag == 0); | |
| 11676 __ tst(r4, Operand(kStringRepresentationMask)); | |
| 11677 __ tst(r5, Operand(kStringRepresentationMask), eq); | |
| 11678 __ b(ne, &string_add_runtime); | |
| 11679 // Now check if both strings have the same encoding (ASCII/Two-byte). | |
| 11680 // r0: first string. | |
| 11681 // r1: second string. | |
| 11682 // r2: length of first string. | |
| 11683 // r3: length of second string. | |
| 11684 // r6: sum of lengths. | |
| 11685 Label non_ascii_string_add_flat_result; | |
| 11686 ASSERT(IsPowerOf2(kStringEncodingMask)); // Just one bit to test. | |
| 11687 __ eor(r7, r4, Operand(r5)); | |
| 11688 __ tst(r7, Operand(kStringEncodingMask)); | |
| 11689 __ b(ne, &string_add_runtime); | |
| 11690 // And see if it's ASCII or two-byte. | |
| 11691 __ tst(r4, Operand(kStringEncodingMask)); | |
| 11692 __ b(eq, &non_ascii_string_add_flat_result); | |
| 11693 | |
| 11694 // Both strings are sequential ASCII strings. We also know that they are | |
| 11695 // short (since the sum of the lengths is less than kMinNonFlatLength). | |
| 11696 // r6: length of resulting flat string | |
| 11697 __ AllocateAsciiString(r7, r6, r4, r5, r9, &string_add_runtime); | |
| 11698 // Locate first character of result. | |
| 11699 __ add(r6, r7, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); | |
| 11700 // Locate first character of first argument. | |
| 11701 __ add(r0, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); | |
| 11702 // r0: first character of first string. | |
| 11703 // r1: second string. | |
| 11704 // r2: length of first string. | |
| 11705 // r3: length of second string. | |
| 11706 // r6: first character of result. | |
| 11707 // r7: result string. | |
| 11708 StringHelper::GenerateCopyCharacters(masm, r6, r0, r2, r4, true); | |
| 11709 | |
| 11710 // Load second argument and locate first character. | |
| 11711 __ add(r1, r1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); | |
| 11712 // r1: first character of second string. | |
| 11713 // r3: length of second string. | |
| 11714 // r6: next character of result. | |
| 11715 // r7: result string. | |
| 11716 StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, true); | |
| 11717 __ mov(r0, Operand(r7)); | |
| 11718 __ IncrementCounter(&Counters::string_add_native, 1, r2, r3); | |
| 11719 __ add(sp, sp, Operand(2 * kPointerSize)); | |
| 11720 __ Ret(); | |
| 11721 | |
| 11722 __ bind(&non_ascii_string_add_flat_result); | |
| 11723 // Both strings are sequential two byte strings. | |
| 11724 // r0: first string. | |
| 11725 // r1: second string. | |
| 11726 // r2: length of first string. | |
| 11727 // r3: length of second string. | |
| 11728 // r6: sum of length of strings. | |
| 11729 __ AllocateTwoByteString(r7, r6, r4, r5, r9, &string_add_runtime); | |
| 11730 // r0: first string. | |
| 11731 // r1: second string. | |
| 11732 // r2: length of first string. | |
| 11733 // r3: length of second string. | |
| 11734 // r7: result string. | |
| 11735 | |
| 11736 // Locate first character of result. | |
| 11737 __ add(r6, r7, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); | |
| 11738 // Locate first character of first argument. | |
| 11739 __ add(r0, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); | |
| 11740 | |
| 11741 // r0: first character of first string. | |
| 11742 // r1: second string. | |
| 11743 // r2: length of first string. | |
| 11744 // r3: length of second string. | |
| 11745 // r6: first character of result. | |
| 11746 // r7: result string. | |
| 11747 StringHelper::GenerateCopyCharacters(masm, r6, r0, r2, r4, false); | |
| 11748 | |
| 11749 // Locate first character of second argument. | |
| 11750 __ add(r1, r1, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); | |
| 11751 | |
| 11752 // r1: first character of second string. | |
| 11753 // r3: length of second string. | |
| 11754 // r6: next character of result (after copy of first string). | |
| 11755 // r7: result string. | |
| 11756 StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, false); | |
| 11757 | |
| 11758 __ mov(r0, Operand(r7)); | |
| 11759 __ IncrementCounter(&Counters::string_add_native, 1, r2, r3); | |
| 11760 __ add(sp, sp, Operand(2 * kPointerSize)); | |
| 11761 __ Ret(); | |
| 11762 | |
| 11763 // Just jump to runtime to add the two strings. | |
| 11764 __ bind(&string_add_runtime); | |
| 11765 __ TailCallRuntime(Runtime::kStringAdd, 2, 1); | |
| 11766 } | |
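For orientation, the stub's whole dispatch on the two lengths can be summarized as follows (a sketch: kMinNonFlatLength and kMaxLength are passed in to stand for String::kMinNonFlatLength and String::kMaxLength):

enum AddStrategy { kReturnFirst, kReturnSecond, kTwoCharacterLookup,
                   kConsString, kFlatCopy, kRuntime };

AddStrategy ClassifyStringAdd(int first_len, int second_len,
                              int kMinNonFlatLength, int kMaxLength) {
  if (first_len == 0) return kReturnSecond;  // Empty first: return second.
  if (second_len == 0) return kReturnFirst;  // Empty second: return first.
  int sum = first_len + second_len;
  if (sum == 2) return kTwoCharacterLookup;       // Probe the symbol table.
  if (sum < kMinNonFlatLength) return kFlatCopy;  // Short: copy immediately.
  if (sum > kMaxLength) return kRuntime;          // Exceptionally long result.
  return kConsString;                             // Defer the copy via a cons.
}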
| 11767 | |
| 11768 | |
| 11769 #undef __ | 7100 #undef __ |
| 11770 | 7101 |
| 11771 } } // namespace v8::internal | 7102 } } // namespace v8::internal |
| 11772 | 7103 |
| 11773 #endif // V8_TARGET_ARCH_ARM | 7104 #endif // V8_TARGET_ARCH_ARM |