| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2011-2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| 11 // with the distribution. | 11 // with the distribution. |
| 12 // * Neither the name of Google Inc. nor the names of its | 12 // * Neither the name of Google Inc. nor the names of its |
| 13 // contributors may be used to endorse or promote products derived | 13 // contributors may be used to endorse or promote products derived |
| 14 // from this software without specific prior written permission. | 14 // from this software without specific prior written permission. |
| 15 // | 15 // |
| 16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | 16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| 17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | 17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| 18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | 18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
| 19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | 19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
| 20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | 20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
| 21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | 21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
| 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 27 | 27 |
| 28 #include <limits.h> // For LONG_MIN, LONG_MAX. | |
| 29 | |
| 30 #include "v8.h" | 28 #include "v8.h" |
| 31 | 29 |
| 32 #if defined(V8_TARGET_ARCH_ARM) | 30 #if defined(V8_TARGET_ARCH_SH4) |
| 33 | 31 |
| 34 #include "bootstrapper.h" | 32 #include "bootstrapper.h" |
| 35 #include "codegen.h" | 33 #include "codegen.h" |
| 36 #include "debug.h" | 34 #include "debug.h" |
| 37 #include "runtime.h" | 35 #include "runtime.h" |
| 38 | 36 |
| 37 #include "map-sh4.h" // Define register map |
| 38 |
| 39 namespace v8 { | 39 namespace v8 { |
| 40 namespace internal { | 40 namespace internal { |
| 41 | 41 |
| 42 #ifdef DEBUG |
| 43 #define RECORD_LINE() RecordFunctionLine(__FUNCTION__, __LINE__) |
| 44 #else |
| 45 #define RECORD_LINE() ((void)0) |
| 46 #endif |
| 47 |
| 42 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size) | 48 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size) |
| 43 : Assembler(arg_isolate, buffer, size), | 49 : Assembler(arg_isolate, buffer, size), |
| 44 generating_stub_(false), | 50 generating_stub_(false), |
| 45 allow_stub_calls_(true), | 51 allow_stub_calls_(true), |
| 46 has_frame_(false) { | 52 has_frame_(false) { |
| 47 if (isolate() != NULL) { | 53 if (isolate() != NULL) { |
| 48 code_object_ = Handle<Object>(isolate()->heap()->undefined_value(), | 54 code_object_ = Handle<Object>(isolate()->heap()->undefined_value(), |
| 49 isolate()); | 55 isolate()); |
| 50 } | 56 } |
| 51 } | 57 } |
| 52 | 58 |
| 53 | 59 |
| 54 // We always generate arm code, never thumb code, even if V8 is compiled to | 60 void MacroAssembler::Jump(Register Rd) { |
| 55 // thumb, so we require inter-working support | 61 jmp(Rd); |
| 56 #if defined(__thumb__) && !defined(USE_THUMB_INTERWORK) | |
| 57 #error "flag -mthumb-interwork missing" | |
| 58 #endif | |
| 59 | |
| 60 | |
| 61 // We do not support thumb inter-working with an arm architecture not supporting | |
| 62 // the blx instruction (below v5t). If you know what CPU you are compiling for | |
| 63 // you can use -march=armv7 or similar. | |
| 64 #if defined(USE_THUMB_INTERWORK) && !defined(CAN_USE_THUMB_INSTRUCTIONS) | |
| 65 # error "For thumb inter-working we require an architecture which supports blx" | |
| 66 #endif | |
| 67 | |
| 68 | |
| 69 // Using bx does not yield better code, so use it only when required | |
| 70 #if defined(USE_THUMB_INTERWORK) | |
| 71 #define USE_BX 1 | |
| 72 #endif | |
| 73 | |
| 74 | |
| 75 void MacroAssembler::Jump(Register target, Condition cond) { | |
| 76 #if USE_BX | |
| 77 bx(target, cond); | |
| 78 #else | |
| 79 mov(pc, Operand(target), LeaveCC, cond); | |
| 80 #endif | |
| 81 } | 62 } |
| 82 | 63 |
| 83 | 64 |
| 84 void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode, | 65 void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode) { |
| 85 Condition cond) { | 66 RECORD_LINE(); |
| 86 #if USE_BX | 67 mov(sh4_ip, Operand(target, rmode)); |
| 87 mov(ip, Operand(target, rmode)); | 68 jmp(sh4_ip); |
| 88 bx(ip, cond); | |
| 89 #else | |
| 90 mov(pc, Operand(target, rmode), LeaveCC, cond); | |
| 91 #endif | |
| 92 } | 69 } |
| 93 | 70 |
| 94 | 71 |
| 95 void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode, | 72 void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode) { |
| 96 Condition cond) { | |
| 97 ASSERT(!RelocInfo::IsCodeTarget(rmode)); | |
| 98 Jump(reinterpret_cast<intptr_t>(target), rmode, cond); | |
| 99 } | |
| 100 | |
| 101 | |
| 102 void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode, | |
| 103 Condition cond) { | |
| 104 ASSERT(RelocInfo::IsCodeTarget(rmode)); | 73 ASSERT(RelocInfo::IsCodeTarget(rmode)); |
| 105 // 'code' is always generated ARM code, never THUMB code | 74 RECORD_LINE(); |
| 106 Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond); | 75 Jump(reinterpret_cast<intptr_t>(code.location()), rmode); |
| 107 } | |
| 108 | |
| 109 | |
| 110 int MacroAssembler::CallSize(Register target, Condition cond) { | |
| 111 #ifdef USE_BLX | |
| 112 return kInstrSize; | |
| 113 #else | |
| 114 return 2 * kInstrSize; | |
| 115 #endif | |
| 116 } | |
| 117 | |
| 118 | |
| 119 void MacroAssembler::Call(Register target, Condition cond) { | |
| 120 // Block constant pool for the call instruction sequence. | |
| 121 BlockConstPoolScope block_const_pool(this); | |
| 122 Label start; | |
| 123 bind(&start); | |
| 124 #ifdef USE_BLX | |
| 125 blx(target, cond); | |
| 126 #else | |
| 127 // set lr for return at current pc + 8 | |
| 128 mov(lr, Operand(pc), LeaveCC, cond); | |
| 129 mov(pc, Operand(target), LeaveCC, cond); | |
| 130 #endif | |
| 131 ASSERT_EQ(CallSize(target, cond), SizeOfCodeGeneratedSince(&start)); | |
| 132 } | |
| 133 | |
| 134 | |
| 135 int MacroAssembler::CallSize( | |
| 136 Address target, RelocInfo::Mode rmode, Condition cond) { | |
| 137 int size = 2 * kInstrSize; | |
| 138 Instr mov_instr = cond | MOV | LeaveCC; | |
| 139 intptr_t immediate = reinterpret_cast<intptr_t>(target); | |
| 140 if (!Operand(immediate, rmode).is_single_instruction(this, mov_instr)) { | |
| 141 size += kInstrSize; | |
| 142 } | |
| 143 return size; | |
| 144 } | |
| 145 | |
| 146 | |
| 147 int MacroAssembler::CallSizeNotPredictableCodeSize( | |
| 148 Address target, RelocInfo::Mode rmode, Condition cond) { | |
| 149 int size = 2 * kInstrSize; | |
| 150 Instr mov_instr = cond | MOV | LeaveCC; | |
| 151 intptr_t immediate = reinterpret_cast<intptr_t>(target); | |
| 152 if (!Operand(immediate, rmode).is_single_instruction(NULL, mov_instr)) { | |
| 153 size += kInstrSize; | |
| 154 } | |
| 155 return size; | |
| 156 } | |
| 157 | |
| 158 | |
| 159 void MacroAssembler::Call(Address target, | |
| 160 RelocInfo::Mode rmode, | |
| 161 Condition cond, | |
| 162 TargetAddressStorageMode mode) { | |
| 163 // Block constant pool for the call instruction sequence. | |
| 164 BlockConstPoolScope block_const_pool(this); | |
| 165 Label start; | |
| 166 bind(&start); | |
| 167 | |
| 168 bool old_predictable_code_size = predictable_code_size(); | |
| 169 if (mode == NEVER_INLINE_TARGET_ADDRESS) { | |
| 170 set_predictable_code_size(true); | |
| 171 } | |
| 172 | |
| 173 #ifdef USE_BLX | |
| 174 // Call sequence on V7 or later may be : | |
| 175 // movw ip, #... @ call address low 16 | |
| 176 // movt ip, #... @ call address high 16 | |
| 177 // blx ip | |
| 178 // @ return address | |
| 179 // Or for pre-V7 or values that may be back-patched | |
| 180 // to avoid ICache flushes: | |
| 181 // ldr ip, [pc, #...] @ call address | |
| 182 // blx ip | |
| 183 // @ return address | |
| 184 | |
| 185 // Statement positions are expected to be recorded when the target | |
| 186 // address is loaded. The mov method will automatically record | |
| 187 // positions when pc is the target, since this is not the case here | |
| 188 // we have to do it explicitly. | |
| 189 positions_recorder()->WriteRecordedPositions(); | |
| 190 | |
| 191 mov(ip, Operand(reinterpret_cast<int32_t>(target), rmode)); | |
| 192 blx(ip, cond); | |
| 193 | |
| 194 #else | |
| 195 // Set lr for return at current pc + 8. | |
| 196 mov(lr, Operand(pc), LeaveCC, cond); | |
| 197 // Emit a ldr<cond> pc, [pc + offset of target in constant pool]. | |
| 198 mov(pc, Operand(reinterpret_cast<int32_t>(target), rmode), LeaveCC, cond); | |
| 199 #endif | |
| 200 ASSERT_EQ(CallSize(target, rmode, cond), SizeOfCodeGeneratedSince(&start)); | |
| 201 if (mode == NEVER_INLINE_TARGET_ADDRESS) { | |
| 202 set_predictable_code_size(old_predictable_code_size); | |
| 203 } | |
| 204 } | |
| 205 | |
| 206 | |
| 207 int MacroAssembler::CallSize(Handle<Code> code, | |
| 208 RelocInfo::Mode rmode, | |
| 209 TypeFeedbackId ast_id, | |
| 210 Condition cond) { | |
| 211 return CallSize(reinterpret_cast<Address>(code.location()), rmode, cond); | |
| 212 } | 76 } |
| 213 | 77 |
| 214 | 78 |
| 215 void MacroAssembler::Call(Handle<Code> code, | 79 void MacroAssembler::Call(Handle<Code> code, |
| 216 RelocInfo::Mode rmode, | 80 RelocInfo::Mode rmode, |
| 217 TypeFeedbackId ast_id, | 81 TypeFeedbackId ast_id) { |
| 218 Condition cond, | 82 // TODO(stm): check whether this is necessary |
| 219 TargetAddressStorageMode mode) { | 83 // Label start; |
| 220 Label start; | 84 // bind(&start); |
| 221 bind(&start); | 85 |
| 86 RECORD_LINE(); |
| 222 ASSERT(RelocInfo::IsCodeTarget(rmode)); | 87 ASSERT(RelocInfo::IsCodeTarget(rmode)); |
| 223 if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) { | 88 if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) { |
| 224 SetRecordedAstId(ast_id); | 89 SetRecordedAstId(ast_id); |
| 225 rmode = RelocInfo::CODE_TARGET_WITH_ID; | 90 rmode = RelocInfo::CODE_TARGET_WITH_ID; |
| 226 } | 91 } |
| 227 // 'code' is always generated ARM code, never THUMB code | 92 jsr(code, rmode, sh4_ip); |
| 228 Call(reinterpret_cast<Address>(code.location()), rmode, cond, mode); | 93 |
| 94 // TODO(stm): check whether this is necessary |
| 95 // ASSERT_EQ(CallSize(code, rmode, ast_id, cond), |
| 96 // SizeOfCodeGeneratedSince(&start)); |
| 229 } | 97 } |
| 230 | 98 |
| 231 | 99 |
| 232 void MacroAssembler::Ret(Condition cond) { | 100 void MacroAssembler::Ret(int drop) { |
| 233 #if USE_BX | 101 Drop(drop); |
| 234 bx(lr, cond); | 102 Ret(); |
| 235 #else | |
| 236 mov(pc, Operand(lr), LeaveCC, cond); | |
| 237 #endif | |
| 238 } | 103 } |
| 239 | 104 |
| 240 | 105 |
| 241 void MacroAssembler::Drop(int count, Condition cond) { | 106 void MacroAssembler::Move(Register dst, Handle<Object> value) { |
| 242 if (count > 0) { | 107 RECORD_LINE(); |
| 243 add(sp, sp, Operand(count * kPointerSize), LeaveCC, cond); | 108 mov(dst, Operand(value)); |
| 109 } |
| 110 |
| 111 |
| 112 void MacroAssembler::Move(Register dst, Register src) { |
| 113 if (!dst.is(src)) { |
| 114 RECORD_LINE(); |
| 115 mov(dst, src); |
| 244 } | 116 } |
| 245 } | 117 } |
| 246 | 118 |
| 247 | 119 |
| 248 void MacroAssembler::Ret(int drop, Condition cond) { | 120 void MacroAssembler::Ubfx(Register dst, Register src, int lsb, int width) { |
| 249 Drop(drop, cond); | 121 ASSERT(!dst.is(sh4_rtmp) && !src.is(sh4_rtmp)); |
| 250 Ret(cond); | 122 ASSERT(lsb >= 0 && lsb < 32); |
| 251 } | 123 ASSERT(width > 0 && width <= 32); |
| 252 | 124 ASSERT(width + lsb <= 32); |
| 253 | 125 // Extract the unsigned value from bits src[lsb..lsb+width-1] into dst |
| 254 void MacroAssembler::Swap(Register reg1, | 126 int32_t mask1 = width < 32 ? (1 << width) - 1 : -1; |
| 255 Register reg2, | 127 int32_t mask = mask1 << lsb; |
| 256 Register scratch, | 128 RECORD_LINE(); |
| 257 Condition cond) { | 129 land(dst, src, Operand(mask)); |
| 258 if (scratch.is(no_reg)) { | 130 if (lsb != 0) { |
| 259 eor(reg1, reg1, Operand(reg2), LeaveCC, cond); | 131 RECORD_LINE(); |
| 260 eor(reg2, reg2, Operand(reg1), LeaveCC, cond); | 132 lsr(dst, dst, Operand(lsb)); |
| 261 eor(reg1, reg1, Operand(reg2), LeaveCC, cond); | |
| 262 } else { | |
| 263 mov(scratch, reg1, LeaveCC, cond); | |
| 264 mov(reg1, reg2, LeaveCC, cond); | |
| 265 mov(reg2, scratch, LeaveCC, cond); | |
| 266 } | 133 } |
| 267 } | 134 } |
| 268 | 135 |
| 269 | 136 |
| 270 void MacroAssembler::Call(Label* target) { | 137 void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width) { |
| 271 bl(target); | 138 ASSERT(!dst.is(sh4_rtmp) && !src1.is(sh4_rtmp)); |
| 272 } | 139 ASSERT(lsb >= 0 && lsb < 32); |
| 273 | 140 ASSERT(width > 0 && width <= 32); |
| 274 | 141 ASSERT(width + lsb <= 32); |
| 275 void MacroAssembler::Push(Handle<Object> handle) { | 142 int32_t mask1 = width < 32 ? (1 << width) - 1 : -1; |
| 276 mov(ip, Operand(handle)); | 143 int32_t mask = mask1 << lsb; |
| 277 push(ip); | 144 land(dst, src1, Operand(mask)); |
| 278 } | 145 int shift_up = 32 - lsb - width; |
| 279 | 146 int shift_down = lsb + shift_up; |
| 280 | 147 if (shift_up != 0) { |
| 281 void MacroAssembler::Move(Register dst, Handle<Object> value) { | 148 lsl(dst, dst, Operand(shift_up)); |
| 282 mov(dst, Operand(value)); | 149 } |
| 283 } | 150 if (shift_down != 0) { |
| 284 | 151 asr(dst, dst, Operand(shift_down)); |
| 285 | |
| 286 void MacroAssembler::Move(Register dst, Register src, Condition cond) { | |
| 287 if (!dst.is(src)) { | |
| 288 mov(dst, src, LeaveCC, cond); | |
| 289 } | 152 } |
| 290 } | 153 } |
| 291 | 154 |
| 292 | |
| 293 void MacroAssembler::Move(DoubleRegister dst, DoubleRegister src) { | |
| 294 ASSERT(CpuFeatures::IsSupported(VFP2)); | |
| 295 CpuFeatures::Scope scope(VFP2); | |
| 296 if (!dst.is(src)) { | |
| 297 vmov(dst, src); | |
| 298 } | |
| 299 } | |
| 300 | |
| 301 | |
| 302 void MacroAssembler::And(Register dst, Register src1, const Operand& src2, | |
| 303 Condition cond) { | |
| 304 if (!src2.is_reg() && | |
| 305 !src2.must_output_reloc_info(this) && | |
| 306 src2.immediate() == 0) { | |
| 307 mov(dst, Operand(0, RelocInfo::NONE), LeaveCC, cond); | |
| 308 } else if (!src2.is_single_instruction(this) && | |
| 309 !src2.must_output_reloc_info(this) && | |
| 310 CpuFeatures::IsSupported(ARMv7) && | |
| 311 IsPowerOf2(src2.immediate() + 1)) { | |
| 312 ubfx(dst, src1, 0, | |
| 313 WhichPowerOf2(static_cast<uint32_t>(src2.immediate()) + 1), cond); | |
| 314 } else { | |
| 315 and_(dst, src1, src2, LeaveCC, cond); | |
| 316 } | |
| 317 } | |
| 318 | |
| 319 | |
| 320 void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width, | |
| 321 Condition cond) { | |
| 322 ASSERT(lsb < 32); | |
| 323 if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) { | |
| 324 int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1); | |
| 325 and_(dst, src1, Operand(mask), LeaveCC, cond); | |
| 326 if (lsb != 0) { | |
| 327 mov(dst, Operand(dst, LSR, lsb), LeaveCC, cond); | |
| 328 } | |
| 329 } else { | |
| 330 ubfx(dst, src1, lsb, width, cond); | |
| 331 } | |
| 332 } | |
| 333 | |
| 334 | |
| 335 void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width, | |
| 336 Condition cond) { | |
| 337 ASSERT(lsb < 32); | |
| 338 if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) { | |
| 339 int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1); | |
| 340 and_(dst, src1, Operand(mask), LeaveCC, cond); | |
| 341 int shift_up = 32 - lsb - width; | |
| 342 int shift_down = lsb + shift_up; | |
| 343 if (shift_up != 0) { | |
| 344 mov(dst, Operand(dst, LSL, shift_up), LeaveCC, cond); | |
| 345 } | |
| 346 if (shift_down != 0) { | |
| 347 mov(dst, Operand(dst, ASR, shift_down), LeaveCC, cond); | |
| 348 } | |
| 349 } else { | |
| 350 sbfx(dst, src1, lsb, width, cond); | |
| 351 } | |
| 352 } | |
| 353 | |
| 354 | 155 |
| 355 void MacroAssembler::Bfi(Register dst, | 156 void MacroAssembler::Bfi(Register dst, |
| 356 Register src, | 157 Register src, |
| 357 Register scratch, | 158 Register scratch, |
| 358 int lsb, | 159 int lsb, |
| 359 int width, | 160 int width) { |
| 360 Condition cond) { | |
| 361 ASSERT(0 <= lsb && lsb < 32); | 161 ASSERT(0 <= lsb && lsb < 32); |
| 362 ASSERT(0 <= width && width < 32); | 162 ASSERT(0 <= width && width <= 32); |
| 363 ASSERT(lsb + width < 32); | 163 ASSERT(lsb + width <= 32); |
| 364 ASSERT(!scratch.is(dst)); | 164 ASSERT(!dst.is(src) && !dst.is(scratch)); |
| 365 if (width == 0) return; | 165 if (width == 0) return; |
| 366 if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) { | 166 if (width == 32) { |
| 367 int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1); | 167 mov(dst, src); |
| 368 bic(dst, dst, Operand(mask)); | 168 return; |
| 369 and_(scratch, src, Operand((1 << width) - 1)); | |
| 370 mov(scratch, Operand(scratch, LSL, lsb)); | |
| 371 orr(dst, dst, scratch); | |
| 372 } else { | |
| 373 bfi(dst, src, lsb, width, cond); | |
| 374 } | 169 } |
| 170 int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1); |
| 171 bic(dst, dst, Operand(mask)); |
| 172 land(scratch, src, Operand((1 << width) - 1)); |
| 173 lsl(scratch, scratch, Operand(lsb)); |
| 174 orr(dst, dst, scratch); |
| 375 } | 175 } |
| 376 | 176 |
| 377 | 177 |
| 378 void MacroAssembler::Bfc(Register dst, Register src, int lsb, int width, | 178 void MacroAssembler::Bfc(Register dst, int lsb, int width) { |
| 379 Condition cond) { | 179 ASSERT(!dst.is(sh4_rtmp)); |
| 380 ASSERT(lsb < 32); | 180 ASSERT(lsb >= 0 && lsb < 32); |
| 381 if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) { | 181 ASSERT(width > 0 && width <= 32); |
| 382 int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1); | 182 ASSERT(width + lsb <= 32); |
| 383 bic(dst, src, Operand(mask)); | 183 // Clear bits [lsb..lsb+width-1] of dst |
| 384 } else { | 184 int32_t mask1 = width < 32 ? (1<<width)-1 : -1; |
| 385 Move(dst, src, cond); | 185 int32_t mask = mask1 << lsb; |
| 386 bfc(dst, lsb, width, cond); | 186 RECORD_LINE(); |
| 387 } | 187 land(dst, dst, Operand(~mask)); |
| 388 } | 188 } |
| 389 | 189 |
| 390 | 190 |
| 391 void MacroAssembler::Usat(Register dst, int satpos, const Operand& src, | 191 void MacroAssembler::Usat(Register dst, int satpos, Register src) { |
| 392 Condition cond) { | 192 ASSERT((satpos > 0) && (satpos <= 31)); |
| 393 if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) { | |
| 394 ASSERT(!dst.is(pc) && !src.rm().is(pc)); | |
| 395 ASSERT((satpos >= 0) && (satpos <= 31)); | |
| 396 | 193 |
| 397 // These asserts are required to ensure compatibility with the ARMv7 | |
| 398 // implementation. | |
| 399 ASSERT((src.shift_op() == ASR) || (src.shift_op() == LSL)); | |
| 400 ASSERT(src.rs().is(no_reg)); | |
| 401 | |
| 402 Label done; | |
| 403 int satval = (1 << satpos) - 1; | 194 int satval = (1 << satpos) - 1; |
| 404 | 195 |
| 405 if (cond != al) { | 196 if (!src.is(dst)) { |
| 406 b(NegateCondition(cond), &done); // Skip saturate if !condition. | |
| 407 } | |
| 408 if (!(src.is_reg() && dst.is(src.rm()))) { | |
| 409 mov(dst, src); | 197 mov(dst, src); |
| 410 } | 198 } |
| 411 tst(dst, Operand(~satval)); | 199 cmpge(dst, Operand(0)); |
| 412 b(eq, &done); | 200 mov(dst, Operand(0), f); // 0 if negative. |
| 413 mov(dst, Operand(0, RelocInfo::NONE), LeaveCC, mi); // 0 if negative. | 201 cmpgt(dst, Operand(satval)); |
| 414 mov(dst, Operand(satval), LeaveCC, pl); // satval if positive. | 202 mov(dst, Operand(satval), t); // satval if dst > satval. |
| 415 bind(&done); | |
| 416 } else { | |
| 417 usat(dst, satpos, src, cond); | |
| 418 } | |
| 419 } | 203 } |
| 420 | 204 |
| 421 | 205 |
| 422 void MacroAssembler::LoadRoot(Register destination, | 206 void MacroAssembler::LoadRoot(Register destination, |
| 423 Heap::RootListIndex index, | 207 Heap::RootListIndex index) { |
| 424 Condition cond) { | 208 RECORD_LINE(); |
| 425 ldr(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), cond); | 209 ldr(destination, MemOperand(roots, index << kPointerSizeLog2)); |
| 426 } | 210 } |
| 427 | 211 |
| 428 | 212 |
| 429 void MacroAssembler::StoreRoot(Register source, | 213 void MacroAssembler::StoreRoot(Register source, |
| 430 Heap::RootListIndex index, | 214 Heap::RootListIndex index) { |
| 431 Condition cond) { | 215 RECORD_LINE(); |
| 432 str(source, MemOperand(kRootRegister, index << kPointerSizeLog2), cond); | 216 str(source, MemOperand(roots, index << kPointerSizeLog2)); |
| 433 } | 217 } |
| 434 | 218 |
| 435 | 219 |
| 436 void MacroAssembler::LoadHeapObject(Register result, | 220 void MacroAssembler::LoadHeapObject(Register result, |
| 437 Handle<HeapObject> object) { | 221 Handle<HeapObject> object) { |
| 438 if (isolate()->heap()->InNewSpace(*object)) { | 222 if (isolate()->heap()->InNewSpace(*object)) { |
| 439 Handle<JSGlobalPropertyCell> cell = | 223 Handle<JSGlobalPropertyCell> cell = |
| 440 isolate()->factory()->NewJSGlobalPropertyCell(object); | 224 isolate()->factory()->NewJSGlobalPropertyCell(object); |
| 441 mov(result, Operand(cell)); | 225 mov(result, Operand(cell)); |
| 442 ldr(result, FieldMemOperand(result, JSGlobalPropertyCell::kValueOffset)); | 226 ldr(result, FieldMemOperand(result, JSGlobalPropertyCell::kValueOffset)); |
| 443 } else { | 227 } else { |
| 444 mov(result, Operand(object)); | 228 mov(result, Operand(object)); |
| 445 } | 229 } |
| 446 } | 230 } |
| 447 | 231 |
| 448 | 232 |
| 449 void MacroAssembler::InNewSpace(Register object, | 233 void MacroAssembler::InNewSpace(Register object, |
| 450 Register scratch, | 234 Register scratch, |
| 451 Condition cond, | 235 Condition cond, |
| 452 Label* branch) { | 236 Label* branch) { |
| 237 ASSERT(!object.is(sh4_ip) && !scratch.is(sh4_ip)); |
| 238 ASSERT(!object.is(sh4_rtmp) && !scratch.is(sh4_rtmp)); |
| 453 ASSERT(cond == eq || cond == ne); | 239 ASSERT(cond == eq || cond == ne); |
| 454 and_(scratch, object, Operand(ExternalReference::new_space_mask(isolate()))); | 240 RECORD_LINE(); |
| 455 cmp(scratch, Operand(ExternalReference::new_space_start(isolate()))); | 241 land(scratch, object, |
| 242 Operand(ExternalReference::new_space_mask(isolate()))); |
| 243 mov(sh4_ip, Operand(ExternalReference::new_space_start(isolate()))); |
| 244 cmpeq(scratch, sh4_ip); |
| 456 b(cond, branch); | 245 b(cond, branch); |
| 457 } | 246 } |
| 458 | 247 |
| 459 | 248 |
| 460 void MacroAssembler::RecordWriteField( | 249 void MacroAssembler::RecordWriteField( |
| 461 Register object, | 250 Register object, |
| 462 int offset, | 251 int offset, |
| 463 Register value, | 252 Register value, |
| 464 Register dst, | 253 Register dst, |
| 465 LinkRegisterStatus lr_status, | 254 LinkRegisterStatus lr_status, |
| (...skipping 47 matching lines...) | |
| 513 void MacroAssembler::RecordWrite(Register object, | 302 void MacroAssembler::RecordWrite(Register object, |
| 514 Register address, | 303 Register address, |
| 515 Register value, | 304 Register value, |
| 516 LinkRegisterStatus lr_status, | 305 LinkRegisterStatus lr_status, |
| 517 SaveFPRegsMode fp_mode, | 306 SaveFPRegsMode fp_mode, |
| 518 RememberedSetAction remembered_set_action, | 307 RememberedSetAction remembered_set_action, |
| 519 SmiCheck smi_check) { | 308 SmiCheck smi_check) { |
| 520 // The compiled code assumes that record write doesn't change the | 309 // The compiled code assumes that record write doesn't change the |
| 521 // context register, so we check that none of the clobbered | 310 // context register, so we check that none of the clobbered |
| 522 // registers are cp. | 311 // registers are cp. |
| 523 ASSERT(!address.is(cp) && !value.is(cp)); | 312 ASSERT(!object.is(cp) && !address.is(cp) && !value.is(cp)); |
| 313 ASSERT(!object.is(sh4_rtmp) && !address.is(sh4_rtmp) && !value.is(sh4_rtmp)); |
| 314 ASSERT(!object.is(sh4_ip) && !address.is(sh4_ip) && !value.is(sh4_ip)); |
| 524 | 315 |
| 525 if (emit_debug_code()) { | 316 if (emit_debug_code()) { |
| 526 ldr(ip, MemOperand(address)); | 317 ldr(ip, MemOperand(address)); |
| 527 cmp(ip, value); | 318 cmp(ip, value); |
| 528 Check(eq, "Wrong address or value passed to RecordWrite"); | 319 Check(eq, "Wrong address or value passed to RecordWrite"); |
| 529 } | 320 } |
| 530 | 321 |
| 531 Label done; | 322 Label done; |
| 532 | 323 |
| 533 if (smi_check == INLINE_SMI_CHECK) { | 324 if (smi_check == INLINE_SMI_CHECK) { |
| 534 ASSERT_EQ(0, kSmiTag); | 325 ASSERT_EQ(0, kSmiTag); |
| 535 tst(value, Operand(kSmiTagMask)); | 326 tst(value, Operand(kSmiTagMask)); |
| 536 b(eq, &done); | 327 b(eq, &done); |
| 537 } | 328 } |
| 538 | 329 |
| 539 CheckPageFlag(value, | 330 CheckPageFlag(value, |
| 540 value, // Used as scratch. | 331 value, // Used as scratch. |
| 541 MemoryChunk::kPointersToHereAreInterestingMask, | 332 MemoryChunk::kPointersToHereAreInterestingMask, |
| 542 eq, | 333 eq, |
| 543 &done); | 334 &done); |
| 544 CheckPageFlag(object, | 335 CheckPageFlag(object, |
| 545 value, // Used as scratch. | 336 value, // Used as scratch. |
| 546 MemoryChunk::kPointersFromHereAreInterestingMask, | 337 MemoryChunk::kPointersFromHereAreInterestingMask, |
| 547 eq, | 338 eq, |
| 548 &done); | 339 &done); |
| 549 | 340 |
| 550 // Record the actual write. | 341 // Record the actual write. |
| 551 if (lr_status == kLRHasNotBeenSaved) { | 342 if (lr_status == kLRHasNotBeenSaved) { |
| 552 push(lr); | 343 push(pr); |
| 553 } | 344 } |
| 554 RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode); | 345 RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode); |
| 555 CallStub(&stub); | 346 CallStub(&stub); |
| 556 if (lr_status == kLRHasNotBeenSaved) { | 347 if (lr_status == kLRHasNotBeenSaved) { |
| 557 pop(lr); | 348 pop(pr); |
| 558 } | 349 } |
| 559 | 350 |
| 560 bind(&done); | 351 bind(&done); |
| 561 | 352 |
| 562 // Clobber clobbered registers when running with the debug-code flag | 353 // Clobber clobbered registers when running with the debug-code flag |
| 563 // turned on to provoke errors. | 354 // turned on to provoke errors. |
| 564 if (emit_debug_code()) { | 355 if (emit_debug_code()) { |
| 565 mov(address, Operand(BitCast<int32_t>(kZapValue + 12))); | 356 mov(address, Operand(BitCast<int32_t>(kZapValue + 12))); |
| 566 mov(value, Operand(BitCast<int32_t>(kZapValue + 16))); | 357 mov(value, Operand(BitCast<int32_t>(kZapValue + 16))); |
| 567 } | 358 } |
| (...skipping 23 matching lines...) | |
| 591 str(scratch, MemOperand(ip)); | 382 str(scratch, MemOperand(ip)); |
| 592 // Call stub on end of buffer. | 383 // Call stub on end of buffer. |
| 593 // Check for end of buffer. | 384 // Check for end of buffer. |
| 594 tst(scratch, Operand(StoreBuffer::kStoreBufferOverflowBit)); | 385 tst(scratch, Operand(StoreBuffer::kStoreBufferOverflowBit)); |
| 595 if (and_then == kFallThroughAtEnd) { | 386 if (and_then == kFallThroughAtEnd) { |
| 596 b(eq, &done); | 387 b(eq, &done); |
| 597 } else { | 388 } else { |
| 598 ASSERT(and_then == kReturnAtEnd); | 389 ASSERT(and_then == kReturnAtEnd); |
| 599 Ret(eq); | 390 Ret(eq); |
| 600 } | 391 } |
| 601 push(lr); | 392 push(pr); |
| 602 StoreBufferOverflowStub store_buffer_overflow = | 393 StoreBufferOverflowStub store_buffer_overflow = |
| 603 StoreBufferOverflowStub(fp_mode); | 394 StoreBufferOverflowStub(fp_mode); |
| 604 CallStub(&store_buffer_overflow); | 395 CallStub(&store_buffer_overflow); |
| 605 pop(lr); | 396 pop(pr); |
| 606 bind(&done); | 397 bind(&done); |
| 607 if (and_then == kReturnAtEnd) { | 398 if (and_then == kReturnAtEnd) { |
| 608 Ret(); | 399 Ret(); |
| 609 } | 400 } |
| 610 } | 401 } |
| 611 | 402 |
| 612 | 403 |
| 613 // Push and pop all registers that can hold pointers. | |
| 614 void MacroAssembler::PushSafepointRegisters() { | |
| 615 // Safepoints expect a block of contiguous register values starting with r0: | |
| 616 ASSERT(((1 << kNumSafepointSavedRegisters) - 1) == kSafepointSavedRegisters); | |
| 617 // Safepoints expect a block of kNumSafepointRegisters values on the | |
| 618 // stack, so adjust the stack for unsaved registers. | |
| 619 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters; | |
| 620 ASSERT(num_unsaved >= 0); | |
| 621 sub(sp, sp, Operand(num_unsaved * kPointerSize)); | |
| 622 stm(db_w, sp, kSafepointSavedRegisters); | |
| 623 } | |
| 624 | |
| 625 | |
| 626 void MacroAssembler::PopSafepointRegisters() { | |
| 627 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters; | |
| 628 ldm(ia_w, sp, kSafepointSavedRegisters); | |
| 629 add(sp, sp, Operand(num_unsaved * kPointerSize)); | |
| 630 } | |
| 631 | |
| 632 | |
| 633 void MacroAssembler::PushSafepointRegistersAndDoubles() { | |
| 634 PushSafepointRegisters(); | |
| 635 sub(sp, sp, Operand(DwVfpRegister::kNumAllocatableRegisters * | |
| 636 kDoubleSize)); | |
| 637 for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; i++) { | |
| 638 vstr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize); | |
| 639 } | |
| 640 } | |
| 641 | |
| 642 | |
| 643 void MacroAssembler::PopSafepointRegistersAndDoubles() { | |
| 644 for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; i++) { | |
| 645 vldr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize); | |
| 646 } | |
| 647 add(sp, sp, Operand(DwVfpRegister::kNumAllocatableRegisters * | |
| 648 kDoubleSize)); | |
| 649 PopSafepointRegisters(); | |
| 650 } | |
| 651 | |
| 652 void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register src, | |
| 653 Register dst) { | |
| 654 str(src, SafepointRegistersAndDoublesSlot(dst)); | |
| 655 } | |
| 656 | |
| 657 | |
| 658 void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) { | |
| 659 str(src, SafepointRegisterSlot(dst)); | |
| 660 } | |
| 661 | |
| 662 | |
| 663 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) { | 404 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) { |
| 664 ldr(dst, SafepointRegisterSlot(src)); | 405 ldr(dst, SafepointRegisterSlot(src)); |
| 665 } | 406 } |
| 666 | 407 |
| 667 | 408 |
| 668 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) { | 409 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) { |
| 669 // The registers are pushed starting with the highest encoding, | 410 UNIMPLEMENTED(); |
| 670 // which means that lowest encodings are closest to the stack pointer. | 411 return 0; |
| 671 ASSERT(reg_code >= 0 && reg_code < kNumSafepointRegisters); | |
| 672 return reg_code; | |
| 673 } | 412 } |
| 674 | 413 |
| 675 | 414 |
| 676 MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) { | 415 MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) { |
| 677 return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize); | 416 return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize); |
| 678 } | 417 } |
| 679 | 418 |
| 680 | 419 |
| 681 MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) { | |
| 682 // General purpose registers are pushed last on the stack. | |
| 683 int doubles_size = DwVfpRegister::kNumAllocatableRegisters * kDoubleSize; | |
| 684 int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize; | |
| 685 return MemOperand(sp, doubles_size + register_offset); | |
| 686 } | |
| 687 | |
| 688 | |
| 689 void MacroAssembler::Ldrd(Register dst1, Register dst2, | 420 void MacroAssembler::Ldrd(Register dst1, Register dst2, |
| 690 const MemOperand& src, Condition cond) { | 421 const MemOperand& src) { |
| 691 ASSERT(src.rm().is(no_reg)); | 422 ASSERT(src.rn().is(no_reg)); |
| 692 ASSERT(!dst1.is(lr)); // r14. | |
| 693 ASSERT_EQ(0, dst1.code() % 2); | 423 ASSERT_EQ(0, dst1.code() % 2); |
| 694 ASSERT_EQ(dst1.code() + 1, dst2.code()); | 424 ASSERT_EQ(dst1.code() + 1, dst2.code()); |
| 695 | 425 ASSERT(!dst1.is(sh4_ip) && !dst2.is(sh4_ip)); |
| 696 // V8 does not use this addressing mode, so the fallback code | 426 ASSERT(!dst1.is(sh4_rtmp) && !dst2.is(sh4_rtmp)); |
| 697 // below doesn't support it yet. | |
| 698 ASSERT((src.am() != PreIndex) && (src.am() != NegPreIndex)); | |
| 699 | 427 |
| 700 // Generate two ldr instructions if ldrd is not available. | 428 // Generate two ldr instructions if ldrd is not available. |
| 701 if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) { | 429 // TODO(STM): FPU |
| 702 CpuFeatures::Scope scope(ARMv7); | 430 { |
| 703 ldrd(dst1, dst2, src, cond); | 431 MemOperand src2(src); |
| 704 } else { | 432 src2.set_offset(src2.offset() + 4); |
| 705 if ((src.am() == Offset) || (src.am() == NegOffset)) { | 433 if (dst1.is(src.rm())) { |
| 706 MemOperand src2(src); | 434 ldr(dst2, src2); |
| 707 src2.set_offset(src2.offset() + 4); | 435 ldr(dst1, src); |
| 708 if (dst1.is(src.rn())) { | 436 } else { |
| 709 ldr(dst2, src2, cond); | 437 ldr(dst1, src); |
| 710 ldr(dst1, src, cond); | 438 ldr(dst2, src2); |
| 711 } else { | |
| 712 ldr(dst1, src, cond); | |
| 713 ldr(dst2, src2, cond); | |
| 714 } | |
| 715 } else { // PostIndex or NegPostIndex. | |
| 716 ASSERT((src.am() == PostIndex) || (src.am() == NegPostIndex)); | |
| 717 if (dst1.is(src.rn())) { | |
| 718 ldr(dst2, MemOperand(src.rn(), 4, Offset), cond); | |
| 719 ldr(dst1, src, cond); | |
| 720 } else { | |
| 721 MemOperand src2(src); | |
| 722 src2.set_offset(src2.offset() - 4); | |
| 723 ldr(dst1, MemOperand(src.rn(), 4, PostIndex), cond); | |
| 724 ldr(dst2, src2, cond); | |
| 725 } | |
| 726 } | 439 } |
| 727 } | 440 } |
| 728 } | 441 } |
| 729 | 442 |
| 730 | 443 |
| 731 void MacroAssembler::Strd(Register src1, Register src2, | 444 void MacroAssembler::Strd(Register src1, Register src2, |
| 732 const MemOperand& dst, Condition cond) { | 445 const MemOperand& dst) { |
| 733 ASSERT(dst.rm().is(no_reg)); | 446 ASSERT(dst.rn().is(no_reg)); |
| 734 ASSERT(!src1.is(lr)); // r14. | |
| 735 ASSERT_EQ(0, src1.code() % 2); | 447 ASSERT_EQ(0, src1.code() % 2); |
| 736 ASSERT_EQ(src1.code() + 1, src2.code()); | 448 ASSERT_EQ(src1.code() + 1, src2.code()); |
| 737 | 449 ASSERT(!src1.is(sh4_ip) && !src2.is(sh4_ip)); |
| 738 // V8 does not use this addressing mode, so the fallback code | 450 ASSERT(!src1.is(sh4_rtmp) && !src2.is(sh4_rtmp)); |
| 739 // below doesn't support it yet. | |
| 740 ASSERT((dst.am() != PreIndex) && (dst.am() != NegPreIndex)); | |
| 741 | 451 |
| 742 // Generate two str instructions if strd is not available. | 452 // Generate two str instructions if strd is not available. |
| 743 if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) { | 453 // TODO(STM): FPU |
| 744 CpuFeatures::Scope scope(ARMv7); | 454 { |
| 745 strd(src1, src2, dst, cond); | |
| 746 } else { | |
| 747 MemOperand dst2(dst); | 455 MemOperand dst2(dst); |
| 748 if ((dst.am() == Offset) || (dst.am() == NegOffset)) { | 456 dst2.set_offset(dst2.offset() + 4); |
| 749 dst2.set_offset(dst2.offset() + 4); | 457 str(src1, dst); |
| 750 str(src1, dst, cond); | 458 str(src2, dst2); |
| 751 str(src2, dst2, cond); | |
| 752 } else { // PostIndex or NegPostIndex. | |
| 753 ASSERT((dst.am() == PostIndex) || (dst.am() == NegPostIndex)); | |
| 754 dst2.set_offset(dst2.offset() - 4); | |
| 755 str(src1, MemOperand(dst.rn(), 4, PostIndex), cond); | |
| 756 str(src2, dst2, cond); | |
| 757 } | |
| 758 } | |
| 759 } | |
| 760 | |
| 761 | |
| 762 void MacroAssembler::ClearFPSCRBits(const uint32_t bits_to_clear, | |
| 763 const Register scratch, | |
| 764 const Condition cond) { | |
| 765 vmrs(scratch, cond); | |
| 766 bic(scratch, scratch, Operand(bits_to_clear), LeaveCC, cond); | |
| 767 vmsr(scratch, cond); | |
| 768 } | |
| 769 | |
| 770 | |
| 771 void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1, | |
| 772 const DwVfpRegister src2, | |
| 773 const Condition cond) { | |
| 774 // Compare and move FPSCR flags to the normal condition flags. | |
| 775 VFPCompareAndLoadFlags(src1, src2, pc, cond); | |
| 776 } | |
| 777 | |
| 778 void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1, | |
| 779 const double src2, | |
| 780 const Condition cond) { | |
| 781 // Compare and move FPSCR flags to the normal condition flags. | |
| 782 VFPCompareAndLoadFlags(src1, src2, pc, cond); | |
| 783 } | |
| 784 | |
| 785 | |
| 786 void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1, | |
| 787 const DwVfpRegister src2, | |
| 788 const Register fpscr_flags, | |
| 789 const Condition cond) { | |
| 790 // Compare and load FPSCR. | |
| 791 vcmp(src1, src2, cond); | |
| 792 vmrs(fpscr_flags, cond); | |
| 793 } | |
| 794 | |
| 795 void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1, | |
| 796 const double src2, | |
| 797 const Register fpscr_flags, | |
| 798 const Condition cond) { | |
| 799 // Compare and load FPSCR. | |
| 800 vcmp(src1, src2, cond); | |
| 801 vmrs(fpscr_flags, cond); | |
| 802 } | |
| 803 | |
| 804 void MacroAssembler::Vmov(const DwVfpRegister dst, | |
| 805 const double imm, | |
| 806 const Register scratch, | |
| 807 const Condition cond) { | |
| 808 ASSERT(CpuFeatures::IsEnabled(VFP2)); | |
| 809 static const DoubleRepresentation minus_zero(-0.0); | |
| 810 static const DoubleRepresentation zero(0.0); | |
| 811 DoubleRepresentation value(imm); | |
| 812 // Handle special values first. | |
| 813 if (value.bits == zero.bits) { | |
| 814 vmov(dst, kDoubleRegZero, cond); | |
| 815 } else if (value.bits == minus_zero.bits) { | |
| 816 vneg(dst, kDoubleRegZero, cond); | |
| 817 } else { | |
| 818 vmov(dst, imm, scratch, cond); | |
| 819 } | 459 } |
| 820 } | 460 } |
| 821 | 461 |
| 822 | 462 |
| 823 void MacroAssembler::EnterFrame(StackFrame::Type type) { | 463 void MacroAssembler::EnterFrame(StackFrame::Type type) { |
| 824 // r0-r3: preserved | 464 // r0-r3: must be preserved |
| 825 stm(db_w, sp, cp.bit() | fp.bit() | lr.bit()); | 465 RECORD_LINE(); |
| 826 mov(ip, Operand(Smi::FromInt(type))); | 466 Push(pr, fp, cp); |
| 827 push(ip); | 467 mov(sh4_ip, Operand(Smi::FromInt(type))); |
| 828 mov(ip, Operand(CodeObject())); | 468 push(sh4_ip); |
| 829 push(ip); | 469 mov(sh4_ip, Operand(CodeObject())); |
| 470 push(sh4_ip); |
| 830 add(fp, sp, Operand(3 * kPointerSize)); // Adjust FP to point to saved FP. | 471 add(fp, sp, Operand(3 * kPointerSize)); // Adjust FP to point to saved FP. |
| 831 } | 472 } |
| 832 | 473 |
| 833 | 474 |
| 834 void MacroAssembler::LeaveFrame(StackFrame::Type type) { | 475 void MacroAssembler::LeaveFrame(StackFrame::Type type) { |
| 835 // r0: preserved | 476 // r0: preserved |
| 836 // r1: preserved | 477 // r1: preserved |
| 837 // r2: preserved | 478 // r2: preserved |
| 838 | 479 |
| 839 // Drop the execution stack down to the frame pointer and restore | 480 // Drop the execution stack down to the frame pointer and restore |
| 840 // the caller frame pointer and return address. | 481 // the caller frame pointer and return address. |
| 482 RECORD_LINE(); |
| 841 mov(sp, fp); | 483 mov(sp, fp); |
| 842 ldm(ia_w, sp, fp.bit() | lr.bit()); | 484 Pop(pr, fp); |
| 843 } | 485 } |
| 844 | 486 |
| 845 | 487 |
| 846 void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) { | 488 void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space, |
| 847 // Set up the frame structure on the stack. | 489 Register scratch) { |
| 490 // Parameters are on stack as if calling JS function |
| 491 // ARM -> ST40 mapping: ip -> scratch (defaults to sh4_ip) |
| 492 |
| 493 // r0, r1, cp: must be preserved |
| 494 // sp, fp: input/output |
| 495 // Actual clobbers: scratch (r2 by default) |
| 496 |
| 497 // Setup the frame structure on the stack |
| 848 ASSERT_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement); | 498 ASSERT_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement); |
| 849 ASSERT_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset); | 499 ASSERT_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset); |
| 850 ASSERT_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset); | 500 ASSERT_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset); |
| 851 Push(lr, fp); | 501 |
| 852 mov(fp, Operand(sp)); // Set up new frame pointer. | 502 RECORD_LINE(); |
| 853 // Reserve room for saved entry sp and code object. | 503 // Save PR and FP |
| 854 sub(sp, sp, Operand(2 * kPointerSize)); | 504 Push(pr, fp); |
| 505 // Setup a new frame pointer |
| 506 mov(fp, sp); |
| 507 |
| 508 // Reserve room for saved entry sp and code object |
| 509 sub(sp, sp, Operand(2*kPointerSize)); |
| 855 if (emit_debug_code()) { | 510 if (emit_debug_code()) { |
| 856 mov(ip, Operand(0)); | 511 mov(scratch, Operand(0)); |
| 857 str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset)); | 512 str(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset)); |
| 858 } | 513 } |
| 859 mov(ip, Operand(CodeObject())); | 514 |
| 860 str(ip, MemOperand(fp, ExitFrameConstants::kCodeOffset)); | 515 mov(scratch, Operand(CodeObject())); |
| 516 str(scratch, MemOperand(fp, ExitFrameConstants::kCodeOffset)); |
| 861 | 517 |
| 862 // Save the frame pointer and the context in top. | 518 // Save the frame pointer and the context in top. |
| 863 mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate()))); | 519 mov(scratch, Operand(ExternalReference(Isolate::kCEntryFPAddress, |
| 864 str(fp, MemOperand(ip)); | 520 isolate()))); |
| 865 mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate()))); | 521 str(fp, MemOperand(scratch)); |
| 866 str(cp, MemOperand(ip)); | 522 mov(scratch, Operand(ExternalReference(Isolate::kContextAddress, isolate()))); |
| 523 str(cp, MemOperand(scratch)); |
| 867 | 524 |
| 868 // Optionally save all double registers. | 525 // Optionally save all double registers. |
| 869 if (save_doubles) { | 526 if (save_doubles) { |
| 870 DwVfpRegister first = d0; | 527 RECORD_LINE(); |
| 871 DwVfpRegister last = | 528 UNIMPLEMENTED_BREAK(); |
| 872 DwVfpRegister::from_code(DwVfpRegister::kNumRegisters - 1); | |
| 873 vstm(db_w, sp, first, last); | |
| 874 // Note that d0 will be accessible at | |
| 875 // fp - 2 * kPointerSize - DwVfpRegister::kNumRegisters * kDoubleSize, | |
| 876 // since the sp slot and code slot were pushed after the fp. | |
| 877 } | 529 } |
| 878 | 530 |
| 879 // Reserve place for the return address and stack space and align the frame | 531 // Reserve place for the return address and stack space and align the frame |
| 880 // preparing for calling the runtime function. | 532 // preparing for calling the runtime function. |
| 881 const int frame_alignment = MacroAssembler::ActivationFrameAlignment(); | 533 const int frame_alignment = OS::ActivationFrameAlignment(); |
| 882 sub(sp, sp, Operand((stack_space + 1) * kPointerSize)); | 534 sub(sp, sp, Operand((stack_space + 1) * kPointerSize)); |
| 883 if (frame_alignment > 0) { | 535 if (frame_alignment > 0) { |
| 884 ASSERT(IsPowerOf2(frame_alignment)); | 536 ASSERT(IsPowerOf2(frame_alignment)); |
| 885 and_(sp, sp, Operand(-frame_alignment)); | 537 land(sp, sp, Operand(-frame_alignment)); |
| 886 } | 538 } |
| 887 | 539 |
| 888 // Set the exit frame sp value to point just before the return address | 540 // Set the exit frame sp value to point just before the return address |
| 889 // location. | 541 // location. |
| 890 add(ip, sp, Operand(kPointerSize)); | 542 add(scratch, sp, Operand(kPointerSize)); |
| 891 str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset)); | 543 str(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset)); |
| 544 } |
| 545 |
| 546 |
| 547 void MacroAssembler::LeaveExitFrame(bool save_doubles, |
| 548 Register argument_count) { |
| 549 ASSERT(!argument_count.is(sh4_ip)); |
| 550 ASSERT(!argument_count.is(sh4_rtmp)); |
| 551 // input: argument_count |
| 552 // r0, r1: results must be preserved |
| 553 // sp: stack pointer |
| 554 // fp: frame pointer |
| 555 |
| 556 // Actual clobbers: r3 and ip |
| 557 // r4 should be preserved: see the end of RegExpExecStub::Generate |
| 558 |
| 559 RECORD_LINE(); |
| 560 if (save_doubles) { |
| 561 RECORD_LINE(); |
| 562 UNIMPLEMENTED_BREAK(); |
| 563 } |
| 564 |
| 565 // Clear top frame. |
| 566 mov(r3, Operand(0, RelocInfo::NONE)); |
| 567 mov(sh4_ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate()))); |
| 568 str(r3, MemOperand(sh4_ip)); |
| 569 |
| 570 // Restore current context from top and clear it in debug mode. |
| 571 mov(sh4_ip, Operand(ExternalReference(Isolate::kContextAddress, isolate()))); |
| 572 ldr(cp, MemOperand(sh4_ip)); |
| 573 #ifdef DEBUG |
| 574 str(r3, MemOperand(sh4_ip)); |
| 575 #endif |
| 576 |
| 577 // Tear down the exit frame, pop the arguments, and return. |
| 578 mov(sp, fp); |
| 579 |
| 580 Pop(pr, fp); |
| 581 if (argument_count.is_valid()) { |
| 582 ASSERT(!argument_count.is(r3)); |
| 583 lsl(r3, argument_count, Operand(kPointerSizeLog2)); |
| 584 add(sp, sp, r3); |
| 585 } |
| 892 } | 586 } |
| 893 | 587 |
| 894 | 588 |
| 895 void MacroAssembler::InitializeNewString(Register string, | 589 void MacroAssembler::InitializeNewString(Register string, |
| 896 Register length, | 590 Register length, |
| 897 Heap::RootListIndex map_index, | 591 Heap::RootListIndex map_index, |
| 898 Register scratch1, | 592 Register scratch1, |
| 899 Register scratch2) { | 593 Register scratch2) { |
| 900 mov(scratch1, Operand(length, LSL, kSmiTagSize)); | 594 RECORD_LINE(); |
| 595 lsl(scratch1, length, Operand(kSmiTagSize)); |
| 901 LoadRoot(scratch2, map_index); | 596 LoadRoot(scratch2, map_index); |
| 902 str(scratch1, FieldMemOperand(string, String::kLengthOffset)); | 597 str(scratch1, FieldMemOperand(string, String::kLengthOffset)); |
| 903 mov(scratch1, Operand(String::kEmptyHashField)); | 598 mov(scratch1, Operand(String::kEmptyHashField)); |
| 904 str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset)); | 599 str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset)); |
| 905 str(scratch1, FieldMemOperand(string, String::kHashFieldOffset)); | 600 str(scratch1, FieldMemOperand(string, String::kHashFieldOffset)); |
| 906 } | 601 } |
| 907 | 602 |
| 908 | 603 |
| 909 int MacroAssembler::ActivationFrameAlignment() { | |
| 910 #if defined(V8_HOST_ARCH_ARM) | |
| 911 // Running on the real platform. Use the alignment as mandated by the local | |
| 912 // environment. | |
| 913 // Note: This will break if we ever start generating snapshots on one ARM | |
| 914 // platform for another ARM platform with a different alignment. | |
| 915 return OS::ActivationFrameAlignment(); | |
| 916 #else // defined(V8_HOST_ARCH_ARM) | |
| 917 // If we are using the simulator then we should always align to the expected | |
| 918 // alignment. As the simulator is used to generate snapshots we do not know | |
| 919 // if the target platform will need alignment, so this is controlled from a | |
| 920 // flag. | |
| 921 return FLAG_sim_stack_alignment; | |
| 922 #endif // defined(V8_HOST_ARCH_ARM) | |
| 923 } | |
| 924 | |
| 925 | |
| 926 void MacroAssembler::LeaveExitFrame(bool save_doubles, | |
| 927 Register argument_count) { | |
| 928 // Optionally restore all double registers. | |
| 929 if (save_doubles) { | |
| 930 // Calculate the stack location of the saved doubles and restore them. | |
| 931 const int offset = 2 * kPointerSize; | |
| 932 sub(r3, fp, Operand(offset + DwVfpRegister::kNumRegisters * kDoubleSize)); | |
| 933 DwVfpRegister first = d0; | |
| 934 DwVfpRegister last = | |
| 935 DwVfpRegister::from_code(DwVfpRegister::kNumRegisters - 1); | |
| 936 vldm(ia, r3, first, last); | |
| 937 } | |
| 938 | |
| 939 // Clear top frame. | |
| 940 mov(r3, Operand(0, RelocInfo::NONE)); | |
| 941 mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate()))); | |
| 942 str(r3, MemOperand(ip)); | |
| 943 | |
| 944 // Restore current context from top and clear it in debug mode. | |
| 945 mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate()))); | |
| 946 ldr(cp, MemOperand(ip)); | |
| 947 #ifdef DEBUG | |
| 948 str(r3, MemOperand(ip)); | |
| 949 #endif | |
| 950 | |
| 951 // Tear down the exit frame, pop the arguments, and return. | |
| 952 mov(sp, Operand(fp)); | |
| 953 ldm(ia_w, sp, fp.bit() | lr.bit()); | |
| 954 if (argument_count.is_valid()) { | |
| 955 add(sp, sp, Operand(argument_count, LSL, kPointerSizeLog2)); | |
| 956 } | |
| 957 } | |
| 958 | |
| 959 void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) { | |
| 960 ASSERT(CpuFeatures::IsSupported(VFP2)); | |
| 961 if (use_eabi_hardfloat()) { | |
| 962 Move(dst, d0); | |
| 963 } else { | |
| 964 vmov(dst, r0, r1); | |
| 965 } | |
| 966 } | |
| 967 | |
| 968 | |
| 969 void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) { | 604 void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) { |
| 970 // This macro takes the dst register to make the code more readable | 605 // This macro takes the dst register to make the code more readable |
| 971 // at the call sites. However, the dst register has to be r5 to | 606 // at the call sites. However, the dst register has to be r5 to |
| 972 // follow the calling convention which requires the call type to be | 607 // follow the calling convention which requires the call type to be |
| 973 // in r5. | 608 // in r5. |
| 974 ASSERT(dst.is(r5)); | 609 ASSERT(dst.is(r5)); |
| 975 if (call_kind == CALL_AS_FUNCTION) { | 610 if (call_kind == CALL_AS_FUNCTION) { |
| 976 mov(dst, Operand(Smi::FromInt(1))); | 611 mov(dst, Operand(Smi::FromInt(1))); |
| 977 } else { | 612 } else { |
| 978 mov(dst, Operand(Smi::FromInt(0))); | 613 mov(dst, Operand(Smi::FromInt(0))); |
| 979 } | 614 } |
| 980 } | 615 } |
| 981 | 616 |
| 982 | 617 |
| 983 void MacroAssembler::InvokePrologue(const ParameterCount& expected, | 618 void MacroAssembler::InvokePrologue(const ParameterCount& expected, |
| 984 const ParameterCount& actual, | 619 const ParameterCount& actual, |
| 985 Handle<Code> code_constant, | 620 Handle<Code> code_constant, |
| 986 Register code_reg, | 621 Register code_reg, |
| 987 Label* done, | 622 Label* done, |
| 988 bool* definitely_mismatches, | 623 bool* definitely_mismatches, |
| 989 InvokeFlag flag, | 624 InvokeFlag flag, |
| 990 const CallWrapper& call_wrapper, | 625 const CallWrapper& call_wrapper, |
| 991 CallKind call_kind) { | 626 CallKind call_kind) { |
| 627 ASSERT(!code_reg.is(sh4_ip)); |
| 628 ASSERT(!code_reg.is(sh4_rtmp)); |
| 992 bool definitely_matches = false; | 629 bool definitely_matches = false; |
| 993 *definitely_mismatches = false; | 630 *definitely_mismatches = false; |
| 994 Label regular_invoke; | 631 Label regular_invoke; |
| 995 | 632 |
| 996 // Check whether the expected and actual arguments count match. If not, | 633 // Check whether the expected and actual arguments count match. If not, |
| 997 // setup registers according to contract with ArgumentsAdaptorTrampoline: | 634 // setup registers according to contract with ArgumentsAdaptorTrampoline: |
| 635 // ARM -> SH4 |
| 998 // r0: actual arguments count | 636 // r0: actual arguments count |
| 999 // r1: function (passed through to callee) | 637 // r1: function (passed through to callee) |
| 1000 // r2: expected arguments count | 638 // r2: expected arguments count |
| 1001 // r3: callee code entry | 639 // r3: callee code entry |
| 1002 | 640 |
| 1003 // The code below is made a lot easier because the calling code already sets | 641 // The code below is made a lot easier because the calling code already sets |
| 1004 // up actual and expected registers according to the contract if values are | 642 // up actual and expected registers according to the contract if values are |
| 1005 // passed in registers. | 643 // passed in registers. |
| 1006 ASSERT(actual.is_immediate() || actual.reg().is(r0)); | 644 ASSERT(actual.is_immediate() || actual.reg().is(r0)); |
| 1007 ASSERT(expected.is_immediate() || expected.reg().is(r2)); | 645 ASSERT(expected.is_immediate() || expected.reg().is(r2)); |
| 1008 ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(r3)); | 646 ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(r3)); |
| 1009 | 647 |
| 648 RECORD_LINE(); |
| 1010 if (expected.is_immediate()) { | 649 if (expected.is_immediate()) { |
| 1011 ASSERT(actual.is_immediate()); | 650 ASSERT(actual.is_immediate()); |
| 1012 if (expected.immediate() == actual.immediate()) { | 651 if (expected.immediate() == actual.immediate()) { |
| 1013 definitely_matches = true; | 652 definitely_matches = true; |
| 1014 } else { | 653 } else { |
| 1015 mov(r0, Operand(actual.immediate())); | 654 mov(r0, Operand(actual.immediate())); |
| 1016 const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel; | 655 const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel; |
| 1017 if (expected.immediate() == sentinel) { | 656 if (expected.immediate() == sentinel) { |
| 1018 // Don't worry about adapting arguments for builtins that | 657 // Don't worry about adapting arguments for builtins that |
| 1019 // don't want that done. Skip adaption code by making it look | 658 // don't want that done. Skip adaption code by making it look |
| 1020 // like we have a match between expected and actual number of | 659 // like we have a match between expected and actual number of |
| 1021 // arguments. | 660 // arguments. |
| 1022 definitely_matches = true; | 661 definitely_matches = true; |
| 1023 } else { | 662 } else { |
| 1024 *definitely_mismatches = true; | 663 *definitely_mismatches = true; |
| 1025 mov(r2, Operand(expected.immediate())); | 664 mov(r2, Operand(expected.immediate())); |
| 1026 } | 665 } |
| 1027 } | 666 } |
| 1028 } else { | 667 } else { |
| 1029 if (actual.is_immediate()) { | 668 if (actual.is_immediate()) { |
| 1030 cmp(expected.reg(), Operand(actual.immediate())); | 669 cmpeq(expected.reg(), Operand(actual.immediate())); |
| 1031 b(eq, ®ular_invoke); | 670 bt(®ular_invoke); |
| 1032 mov(r0, Operand(actual.immediate())); | 671 mov(r0, Operand(actual.immediate())); |
| 1033 } else { | 672 } else { |
| 1034 cmp(expected.reg(), Operand(actual.reg())); | 673 cmpeq(expected.reg(), actual.reg()); |
| 1035 b(eq, ®ular_invoke); | 674 bt(®ular_invoke); |
| 1036 } | 675 } |
| 1037 } | 676 } |
| 1038 | 677 |
| 678 RECORD_LINE(); |
| 1039 if (!definitely_matches) { | 679 if (!definitely_matches) { |
| 1040 if (!code_constant.is_null()) { | 680 if (!code_constant.is_null()) { |
| 1041 mov(r3, Operand(code_constant)); | 681 mov(r3, Operand(code_constant)); |
| 1042 add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag)); | 682 add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag)); |
| 1043 } | 683 } |
| 1044 | 684 |
| 1045 Handle<Code> adaptor = | 685 Handle<Code> adaptor = |
| 1046 isolate()->builtins()->ArgumentsAdaptorTrampoline(); | 686 isolate()->builtins()->ArgumentsAdaptorTrampoline(); |
| 1047 if (flag == CALL_FUNCTION) { | 687 if (flag == CALL_FUNCTION) { |
| 1048 call_wrapper.BeforeCall(CallSize(adaptor)); | 688 call_wrapper.BeforeCall(2 * kInstrSize); |
| 1049 SetCallKind(r5, call_kind); | 689 SetCallKind(r5, call_kind); |
| 1050 Call(adaptor); | 690 Call(adaptor); |
| 1051 call_wrapper.AfterCall(); | 691 call_wrapper.AfterCall(); |
| 1052 if (!*definitely_mismatches) { | 692 if (!*definitely_mismatches) { |
| 1053 b(done); | 693 b(done); |
| 1054 } | 694 } |
| 1055 } else { | 695 } else { |
| 1056 SetCallKind(r5, call_kind); | 696 SetCallKind(r5, call_kind); |
| 1057 Jump(adaptor, RelocInfo::CODE_TARGET); | 697 Jump(adaptor, RelocInfo::CODE_TARGET); |
| 1058 } | 698 } |
| 1059 bind(&regular_invoke); | 699 bind(&regular_invoke); |
| 1060 } | 700 } |
| 1061 } | 701 } |
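
The prologue above only decides how the call is dispatched: straight to the target code when the argument counts already match (or the callee uses the don't-adapt sentinel), otherwise through the ArgumentsAdaptorTrampoline. A minimal C++ sketch of that decision for the case where both counts are immediates; the names are illustrative only, not part of the V8 API:

    // Sketch: the three outcomes InvokePrologue encodes when both counts are known.
    enum class Dispatch { kDirect, kThroughAdaptor };
    Dispatch ClassifyCall(int expected, int actual, int dont_adapt_sentinel) {
      if (expected == actual) return Dispatch::kDirect;               // definitely_matches
      if (expected == dont_adapt_sentinel) return Dispatch::kDirect;  // builtin opts out
      return Dispatch::kThroughAdaptor;                               // definitely_mismatches
    }
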
| 1062 | 702 |
| 1063 | 703 |
| 1064 void MacroAssembler::InvokeCode(Register code, | 704 void MacroAssembler::InvokeCode(Register code, |
| 1065 const ParameterCount& expected, | 705 const ParameterCount& expected, |
| 1066 const ParameterCount& actual, | 706 const ParameterCount& actual, |
| 1067 InvokeFlag flag, | 707 InvokeFlag flag, |
| 1068 const CallWrapper& call_wrapper, | 708 const CallWrapper& call_wrapper, |
| 1069 CallKind call_kind) { | 709 CallKind call_kind) { |
| 1070 // You can't call a function without a valid frame. | 710 // You can't call a function without a valid frame. |
| 1071 ASSERT(flag == JUMP_FUNCTION || has_frame()); | 711 ASSERT(flag == JUMP_FUNCTION || has_frame()); |
| 1072 | 712 |
| 1073 Label done; | 713 Label done; |
| 1074 bool definitely_mismatches = false; | 714 bool definitely_mismatches = false; |
| 715 // r1: must hold function pointer |
| 716 // actual: must be r0 if register |
| 717 ASSERT(actual.is_immediate() || actual.reg().is(r0)); |
| 718 ASSERT(!code.is(sh4_ip) && !code.is(sh4_rtmp) && !code.is(r5)); |
| 719 |
| 720 RECORD_LINE(); |
| 1075 InvokePrologue(expected, actual, Handle<Code>::null(), code, | 721 InvokePrologue(expected, actual, Handle<Code>::null(), code, |
| 1076 &done, &definitely_mismatches, flag, | 722 &done, &definitely_mismatches, flag, |
| 1077 call_wrapper, call_kind); | 723 call_wrapper, call_kind); |
| 1078 if (!definitely_mismatches) { | 724 if (!definitely_mismatches) { |
| 1079 if (flag == CALL_FUNCTION) { | 725 if (flag == CALL_FUNCTION) { |
| 1080 call_wrapper.BeforeCall(CallSize(code)); | 726 call_wrapper.BeforeCall(2 * kInstrSize); |
| 1081 SetCallKind(r5, call_kind); | 727 SetCallKind(r5, call_kind); |
| 1082 Call(code); | 728 jsr(code); |
| 1083 call_wrapper.AfterCall(); | 729 call_wrapper.AfterCall(); |
| 1084 } else { | 730 } else { |
| 1085 ASSERT(flag == JUMP_FUNCTION); | 731 ASSERT(flag == JUMP_FUNCTION); |
| 1086 SetCallKind(r5, call_kind); | 732 SetCallKind(r5, call_kind); |
| 1087 Jump(code); | 733 Jump(code); |
| 1088 } | 734 } |
| 1089 | 735 |
| 1090 // Continue here if InvokePrologue does handle the invocation due to | 736 // Continue here if InvokePrologue does handle the invocation due to |
| 1091 // mismatched parameter counts. | 737 // mismatched parameter counts. |
| 1092 bind(&done); | 738 bind(&done); |
| (...skipping 38 matching lines...) |
| 1131 CallKind call_kind) { | 777 CallKind call_kind) { |
| 1132 // You can't call a function without a valid frame. | 778 // You can't call a function without a valid frame. |
| 1133 ASSERT(flag == JUMP_FUNCTION || has_frame()); | 779 ASSERT(flag == JUMP_FUNCTION || has_frame()); |
| 1134 | 780 |
| 1135 // Contract with called JS functions requires that function is passed in r1. | 781 // Contract with called JS functions requires that function is passed in r1. |
| 1136 ASSERT(fun.is(r1)); | 782 ASSERT(fun.is(r1)); |
| 1137 | 783 |
| 1138 Register expected_reg = r2; | 784 Register expected_reg = r2; |
| 1139 Register code_reg = r3; | 785 Register code_reg = r3; |
| 1140 | 786 |
| 787 RECORD_LINE(); |
| 1141 ldr(code_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset)); | 788 ldr(code_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset)); |
| 1142 ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); | 789 ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); |
| 1143 ldr(expected_reg, | 790 ldr(expected_reg, |
| 1144 FieldMemOperand(code_reg, | 791 FieldMemOperand(code_reg, |
| 1145 SharedFunctionInfo::kFormalParameterCountOffset)); | 792 SharedFunctionInfo::kFormalParameterCountOffset)); |
| 1146 mov(expected_reg, Operand(expected_reg, ASR, kSmiTagSize)); | 793 asr(expected_reg, expected_reg, Operand(kSmiTagSize)); |
| 1147 ldr(code_reg, | 794 ldr(code_reg, |
| 1148 FieldMemOperand(r1, JSFunction::kCodeEntryOffset)); | 795 FieldMemOperand(r1, JSFunction::kCodeEntryOffset)); |
| 1149 | 796 |
| 1150 ParameterCount expected(expected_reg); | 797 ParameterCount expected(expected_reg); |
| 1151 InvokeCode(code_reg, expected, actual, flag, call_wrapper, call_kind); | 798 InvokeCode(code_reg, expected, actual, flag, call_wrapper, call_kind); |
| 1152 } | 799 } |
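
The formal parameter count loaded from the SharedFunctionInfo is a smi, so ARM's inline `ASR kSmiTagSize` operand becomes an explicit `asr` on SH4. In scalar terms the untagging is just an arithmetic shift (sketch, assuming 32-bit smis with kSmiTagSize == 1):

    // Sketch: untag a smi-encoded 32-bit integer (arithmetic shift right by 1).
    inline int32_t SmiUntag32(int32_t smi) { return smi >> 1; }
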
| 1153 | 800 |
| 1154 | 801 |
| 1155 void MacroAssembler::InvokeFunction(Handle<JSFunction> function, | 802 void MacroAssembler::InvokeFunction(Handle<JSFunction> function, |
| 1156 const ParameterCount& actual, | 803 const ParameterCount& actual, |
| 1157 InvokeFlag flag, | 804 InvokeFlag flag, |
| 1158 const CallWrapper& call_wrapper, | 805 const CallWrapper& call_wrapper, |
| 1159 CallKind call_kind) { | 806 CallKind call_kind) { |
| 1160 // You can't call a function without a valid frame. | 807 // You can't call a function without a valid frame. |
| 1161 ASSERT(flag == JUMP_FUNCTION || has_frame()); | 808 ASSERT(flag == JUMP_FUNCTION || has_frame()); |
| 1162 | 809 |
| 1163 // Get the function and setup the context. | 810 // Get the function and setup the context. |
| 1164 LoadHeapObject(r1, function); | 811 LoadHeapObject(r1, function); |
| 1165 ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); | 812 ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); |
| 1166 | 813 |
| 1167 ParameterCount expected(function->shared()->formal_parameter_count()); | 814 ParameterCount expected(function->shared()->formal_parameter_count()); |
| 815 // TODO(STM): only for Crankshaft? |
| 1168 // We call indirectly through the code field in the function to | 816 // We call indirectly through the code field in the function to |
| 1169 // allow recompilation to take effect without changing any of the | 817 // allow recompilation to take effect without changing any of the |
| 1170 // call sites. | 818 // call sites. |
| 1171 ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset)); | 819 ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset)); |
| 1172 InvokeCode(r3, expected, actual, flag, call_wrapper, call_kind); | 820 InvokeCode(r3, expected, actual, flag, call_wrapper, call_kind); |
| 1173 } | 821 } |
| 1174 | 822 |
| 1175 | 823 |
| 1176 void MacroAssembler::IsObjectJSObjectType(Register heap_object, | 824 void MacroAssembler::IsObjectJSObjectType(Register heap_object, |
| 1177 Register map, | 825 Register map, |
| 1178 Register scratch, | 826 Register scratch, |
| 1179 Label* fail) { | 827 Label* fail) { |
| 1180 ldr(map, FieldMemOperand(heap_object, HeapObject::kMapOffset)); | 828 ldr(map, FieldMemOperand(heap_object, HeapObject::kMapOffset)); |
| 1181 IsInstanceJSObjectType(map, scratch, fail); | 829 IsInstanceJSObjectType(map, scratch, fail); |
| 1182 } | 830 } |
| 1183 | 831 |
| 1184 | 832 |
| 1185 void MacroAssembler::IsInstanceJSObjectType(Register map, | 833 void MacroAssembler::IsInstanceJSObjectType(Register map, |
| 1186 Register scratch, | 834 Register scratch, |
| 1187 Label* fail) { | 835 Label* fail) { |
| 1188 ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset)); | 836 ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset)); |
| 1189 cmp(scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); | 837 cmpge(scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); |
| 1190 b(lt, fail); | 838 bf(fail); |
| 1191 cmp(scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE)); | 839 cmpgt(scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE)); |
| 1192 b(gt, fail); | 840 bt(fail); |
| 1193 } | 841 } |
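
Both columns implement the same inclusive range check on the instance type; only the flag handling differs, since SH4 compares set just the T bit (cmpge/bf and cmpgt/bt instead of one flags-setting cmp with lt/gt branches). The check itself, as a sketch:

    // Sketch: true when the instance type denotes a non-callable JS object.
    static bool IsNonCallableSpecObject(int type) {
      return type >= FIRST_NONCALLABLE_SPEC_OBJECT_TYPE &&
             type <= LAST_NONCALLABLE_SPEC_OBJECT_TYPE;
    }
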
| 1194 | 842 |
| 1195 | 843 |
| 1196 void MacroAssembler::IsObjectJSStringType(Register object, | 844 void MacroAssembler::IsObjectJSStringType(Register object, |
| 1197 Register scratch, | 845 Register scratch, |
| 1198 Label* fail) { | 846 Label* fail) { |
| 1199 ASSERT(kNotStringTag != 0); | 847 ASSERT(kNotStringTag != 0); |
| 1200 | 848 |
| 849 ASSERT(!object.is(sh4_ip) && !scratch.is(sh4_ip)); |
| 850 ASSERT(!object.is(sh4_rtmp) && !scratch.is(sh4_rtmp)); |
| 851 |
| 1201 ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); | 852 ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); |
| 1202 ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); | 853 ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); |
| 1203 tst(scratch, Operand(kIsNotStringMask)); | 854 tst(scratch, Operand(kIsNotStringMask)); |
| 1204 b(ne, fail); | 855 bf(fail); |
| 1205 } | 856 } |
| 1206 | 857 |
| 1207 | 858 |
| 1208 #ifdef ENABLE_DEBUGGER_SUPPORT | 859 #ifdef ENABLE_DEBUGGER_SUPPORT |
| 1209 void MacroAssembler::DebugBreak() { | 860 void MacroAssembler::DebugBreak() { |
| 1210 mov(r0, Operand(0, RelocInfo::NONE)); | 861 RECORD_LINE(); |
| 1211 mov(r1, Operand(ExternalReference(Runtime::kDebugBreak, isolate()))); | 862 UNIMPLEMENTED_BREAK(); |
| 1212 CEntryStub ces(1); | |
| 1213 ASSERT(AllowThisStubCall(&ces)); | |
| 1214 Call(ces.GetCode(), RelocInfo::DEBUG_BREAK); | |
| 1215 } | 863 } |
| 1216 #endif | 864 #endif |
| 1217 | 865 |
| 1218 | 866 |
| 1219 void MacroAssembler::PushTryHandler(StackHandler::Kind kind, | 867 void MacroAssembler::PushTryHandler(StackHandler::Kind kind, |
| 1220 int handler_index) { | 868 int handler_index) { |
| 1221 // Adjust this code if not the case. | 869 // Adjust this code if not the case. |
| 1222 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize); | 870 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize); |
| 1223 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize); | 871 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize); |
| 1224 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize); | 872 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize); |
| 1225 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize); | 873 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize); |
| 1226 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize); | 874 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize); |
| 1227 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize); | 875 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize); |
| 1228 | 876 |
| 1229 // For the JSEntry handler, we must preserve r0-r4; r5-r7 are available. | 877 // For the JSEntry handler, we must preserve r0-r4; r5-r7 are available. |
| 1230 // We will build up the handler from the bottom by pushing on the stack. | 878 // We will build up the handler from the bottom by pushing on the stack. |
| 1231 // Set up the code object (r5) and the state (r6) for pushing. | 879 // Set up the code object (r5) and the state (r6) for pushing. |
| 1232 unsigned state = | 880 unsigned state = |
| 1233 StackHandler::IndexField::encode(handler_index) | | 881 StackHandler::IndexField::encode(handler_index) | |
| 1234 StackHandler::KindField::encode(kind); | 882 StackHandler::KindField::encode(kind); |
| 1235 mov(r5, Operand(CodeObject())); | 883 mov(r5, Operand(CodeObject())); |
| 1236 mov(r6, Operand(state)); | 884 mov(r6, Operand(state)); |
| 1237 | 885 |
| 1238 // Push the frame pointer, context, state, and code object. | 886 // Push the frame pointer, context, state, and code object. |
| 1239 if (kind == StackHandler::JS_ENTRY) { | 887 if (kind == StackHandler::JS_ENTRY) { |
| 1240 mov(r7, Operand(Smi::FromInt(0))); // Indicates no context. | 888 mov(r7, Operand(Smi::FromInt(0))); // Indicates no context. |
| 1241 mov(ip, Operand(0, RelocInfo::NONE)); // NULL frame pointer. | 889 mov(sh4_ip, Operand(0, RelocInfo::NONE)); // NULL frame pointer. |
| 1242 stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() | ip.bit()); | 890 Push(sh4_ip, r7, r6, r5); |
| 1243 } else { | 891 } else { |
| 1244 stm(db_w, sp, r5.bit() | r6.bit() | cp.bit() | fp.bit()); | 892 Push(fp, cp, r6, r5); |
| 1245 } | 893 } |
| 1246 | 894 |
| 1247 // Link the current handler as the next handler. | 895 // Link the current handler as the next handler. |
| 1248 mov(r6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate()))); | 896 mov(r6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate()))); |
| 1249 ldr(r5, MemOperand(r6)); | 897 ldr(r5, MemOperand(r6)); |
| 1250 push(r5); | 898 push(r5); |
| 1251 // Set this new handler as the current one. | 899 // Set this new handler as the current one. |
| 1252 str(sp, MemOperand(r6)); | 900 str(sp, MemOperand(r6)); |
| 1253 } | 901 } |
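
The STATIC_ASSERTs pin down the handler frame that both the ARM stm and the SH4 Push/push sequence must build. Written out as a struct for reference (sketch only, field types approximate for a 32-bit target):

    // Lowest address first; 'next' is pushed last, after the other four words.
    struct StackHandlerLayout {
      uint32_t next;     // kNextOffset    == 0 * kPointerSize
      uint32_t code;     // kCodeOffset    == 1 * kPointerSize
      uint32_t state;    // kStateOffset   == 2 * kPointerSize
      uint32_t context;  // kContextOffset == 3 * kPointerSize
      uint32_t fp;       // kFPOffset      == 4 * kPointerSize
    };
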
| 1254 | 902 |
| 1255 | 903 |
| 1256 void MacroAssembler::PopTryHandler() { | 904 void MacroAssembler::PopTryHandler() { |
| 1257 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); | 905 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); |
| 906 RECORD_LINE(); |
| 1258 pop(r1); | 907 pop(r1); |
| 1259 mov(ip, Operand(ExternalReference(Isolate::kHandlerAddress, isolate()))); | 908 mov(sh4_ip, Operand(ExternalReference(Isolate::kHandlerAddress, isolate()))); |
| 1260 add(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize)); | 909 add(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize)); |
| 1261 str(r1, MemOperand(ip)); | 910 str(r1, MemOperand(sh4_ip)); |
| 1262 } | 911 } |
| 1263 | 912 |
| 1264 | 913 |
| 1265 void MacroAssembler::JumpToHandlerEntry() { | 914 void MacroAssembler::JumpToHandlerEntry() { |
| 1266 // Compute the handler entry address and jump to it. The handler table is | 915 // Compute the handler entry address and jump to it. The handler table is |
| 1267 // a fixed array of (smi-tagged) code offsets. | 916 // a fixed array of (smi-tagged) code offsets. |
| 1268 // r0 = exception, r1 = code object, r2 = state. | 917 // r0 = exception, r1 = code object, r2 = state. |
| 1269 ldr(r3, FieldMemOperand(r1, Code::kHandlerTableOffset)); // Handler table. | 918 ldr(r3, FieldMemOperand(r1, Code::kHandlerTableOffset)); // Handler table. |
| 1270 add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 919 add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
| 1271 mov(r2, Operand(r2, LSR, StackHandler::kKindWidth)); // Handler index. | 920 lsr(r2, r2, Operand(StackHandler::kKindWidth)); // Handler index. |
| 1272 ldr(r2, MemOperand(r3, r2, LSL, kPointerSizeLog2)); // Smi-tagged offset. | 921 lsl(r2, r2, Operand(kPointerSizeLog2)); |
| 922 ldr(r2, MemOperand(r3, r2)); // Smi-tagged offset. |
| 1273 add(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start. | 923 add(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start. |
| 1274 add(pc, r1, Operand(r2, ASR, kSmiTagSize)); // Jump. | 924 asr(sh4_ip, r2, Operand(kSmiTagSize)); |
| 925 add(sh4_ip, r1, sh4_ip); |
| 926 jmp(sh4_ip); // Jump. |
| 1275 } | 927 } |
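
ARM folds the shifts into operand modifiers, so the same computation takes more instructions on SH4 (lsr/lsl/asr emitted separately). What the sequence computes, as a sketch with a hypothetical helper:

    // Sketch: handler entry = code start + untagged offset from the handler table.
    // 'handler_table' points at the first element, i.e. past the FixedArray header.
    uintptr_t HandlerEntryAddress(uintptr_t code_object, const int32_t* handler_table,
                                  uint32_t state) {
      uint32_t index = state >> StackHandler::kKindWidth;   // handler index
      int32_t smi_offset = handler_table[index];             // smi-tagged code offset
      return code_object + Code::kHeaderSize - kHeapObjectTag + (smi_offset >> kSmiTagSize);
    }
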
| 1276 | 928 |
| 1277 | 929 |
| 1278 void MacroAssembler::Throw(Register value) { | 930 void MacroAssembler::Throw(Register value) { |
| 931 ASSERT(!value.is(sh4_ip)); |
| 932 ASSERT(!value.is(sh4_rtmp)); |
| 933 |
| 1279 // Adjust this code if not the case. | 934 // Adjust this code if not the case. |
| 1280 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize); | 935 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize); |
| 1281 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); | 936 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); |
| 1282 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize); | 937 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize); |
| 1283 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize); | 938 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize); |
| 1284 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize); | 939 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize); |
| 1285 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize); | 940 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize); |
| 1286 | 941 |
| 1287 // The exception is expected in r0. | 942 // The exception is expected in r0. |
| 1288 if (!value.is(r0)) { | 943 if (!value.is(r0)) { |
| 1289 mov(r0, value); | 944 mov(r0, value); |
| 1290 } | 945 } |
| 1291 // Drop the stack pointer to the top of the top handler. | 946 // Drop the stack pointer to the top of the top handler. |
| 1292 mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate()))); | 947 mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate()))); |
| 1293 ldr(sp, MemOperand(r3)); | 948 ldr(sp, MemOperand(r3)); |
| 1294 // Restore the next handler. | 949 // Restore the next handler. |
| 1295 pop(r2); | 950 pop(r2); |
| 1296 str(r2, MemOperand(r3)); | 951 str(r2, MemOperand(r3)); |
| 1297 | 952 |
| 1298 // Get the code object (r1) and state (r2). Restore the context and frame | 953 // Get the code object (r1) and state (r2). Restore the context and frame |
| 1299 // pointer. | 954 // pointer. |
| 1300 ldm(ia_w, sp, r1.bit() | r2.bit() | cp.bit() | fp.bit()); | 955 Pop(fp, cp, r2, r1); |
| 1301 | 956 |
| 1302 // If the handler is a JS frame, restore the context to the frame. | 957 // If the handler is a JS frame, restore the context to the frame. |
| 1303 // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp | 958 // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp |
| 1304 // or cp. | 959 // or cp. |
| 960 Label skip; |
| 1305 tst(cp, cp); | 961 tst(cp, cp); |
| 1306 str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne); | 962 bt(&skip); |
| 963 str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
| 964 bind(&skip); |
| 1307 | 965 |
| 1308 JumpToHandlerEntry(); | 966 JumpToHandlerEntry(); |
| 1309 } | 967 } |
| 1310 | 968 |
| 1311 | 969 |
| 1312 void MacroAssembler::ThrowUncatchable(Register value) { | 970 void MacroAssembler::ThrowUncatchable(Register value) { |
| 1313 // Adjust this code if not the case. | 971 // Adjust this code if not the case. |
| 1314 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize); | 972 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize); |
| 1315 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize); | 973 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize); |
| 1316 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize); | 974 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize); |
| 1317 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize); | 975 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize); |
| 1318 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize); | 976 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize); |
| 1319 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize); | 977 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize); |
| 1320 | 978 |
| 1321 // The exception is expected in r0. | 979 // The exception is expected in r0. |
| 1322 if (!value.is(r0)) { | 980 if (!value.is(r0)) { |
| 981 RECORD_LINE(); |
| 1323 mov(r0, value); | 982 mov(r0, value); |
| 1324 } | 983 } |
| 984 |
| 985 RECORD_LINE(); |
| 1325 // Drop the stack pointer to the top of the top stack handler. | 986 // Drop the stack pointer to the top of the top stack handler. |
| 1326 mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate()))); | 987 mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate()))); |
| 1327 ldr(sp, MemOperand(r3)); | 988 ldr(sp, MemOperand(r3)); |
| 1328 | 989 |
| 1329 // Unwind the handlers until the ENTRY handler is found. | 990 // Unwind the handlers until the ENTRY handler is found. |
| 1330 Label fetch_next, check_kind; | 991 Label fetch_next, check_kind; |
| 1331 jmp(&check_kind); | 992 jmp(&check_kind); |
| 1332 bind(&fetch_next); | 993 bind(&fetch_next); |
| 1333 ldr(sp, MemOperand(sp, StackHandlerConstants::kNextOffset)); | 994 ldr(sp, MemOperand(sp, StackHandlerConstants::kNextOffset)); |
| 1334 | 995 |
| 1335 bind(&check_kind); | 996 bind(&check_kind); |
| 1336 STATIC_ASSERT(StackHandler::JS_ENTRY == 0); | 997 STATIC_ASSERT(StackHandler::JS_ENTRY == 0); |
| 1337 ldr(r2, MemOperand(sp, StackHandlerConstants::kStateOffset)); | 998 ldr(r2, MemOperand(sp, StackHandlerConstants::kStateOffset)); |
| 1338 tst(r2, Operand(StackHandler::KindField::kMask)); | 999 tst(r2, Operand(StackHandler::KindField::kMask)); |
| 1339 b(ne, &fetch_next); | 1000 b(ne, &fetch_next); |
| 1340 | 1001 |
| 1341 // Set the top handler address to next handler past the top ENTRY handler. | 1002 // Set the top handler address to next handler past the top ENTRY handler. |
| 1342 pop(r2); | 1003 pop(r2); |
| 1343 str(r2, MemOperand(r3)); | 1004 str(r2, MemOperand(r3)); |
| 1344 // Get the code object (r1) and state (r2). Clear the context and frame | 1005 // Get the code object (r1) and state (r2). Clear the context and frame |
| 1345 // pointer (0 was saved in the handler). | 1006 // pointer (0 was saved in the handler). |
| 1346 ldm(ia_w, sp, r1.bit() | r2.bit() | cp.bit() | fp.bit()); | 1007 Pop(fp, cp, r2, r1); |
| 1347 | 1008 |
| 1348 JumpToHandlerEntry(); | 1009 JumpToHandlerEntry(); |
| 1349 } | 1010 } |
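
The fetch_next/check_kind loop walks the handler chain until it finds a JS_ENTRY handler, which encodes as 0 in the kind field. Building on the StackHandlerLayout sketch above (hypothetical pointer names, 32-bit target assumed):

    // Sketch: follow 'next' links until a JS_ENTRY handler (kind == 0) is reached.
    StackHandlerLayout* h = top_handler;  // hypothetical pointer to the current handler
    while ((h->state & StackHandler::KindField::kMask) != 0) {
      h = reinterpret_cast<StackHandlerLayout*>(h->next);
    }
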
| 1350 | 1011 |
| 1351 | 1012 |
| 1352 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, | 1013 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, |
| 1353 Register scratch, | 1014 Register scratch, |
| 1354 Label* miss) { | 1015 Label* miss) { |
| 1355 Label same_contexts; | 1016 Label same_contexts; |
| 1356 | 1017 |
| (...skipping 24 matching lines...) |
| 1381 // Read the first word and compare to the native_context_map. | 1042 // Read the first word and compare to the native_context_map. |
| 1382 ldr(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset)); | 1043 ldr(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset)); |
| 1383 LoadRoot(ip, Heap::kNativeContextMapRootIndex); | 1044 LoadRoot(ip, Heap::kNativeContextMapRootIndex); |
| 1384 cmp(holder_reg, ip); | 1045 cmp(holder_reg, ip); |
| 1385 Check(eq, "JSGlobalObject::native_context should be a native context."); | 1046 Check(eq, "JSGlobalObject::native_context should be a native context."); |
| 1386 pop(holder_reg); // Restore holder. | 1047 pop(holder_reg); // Restore holder. |
| 1387 } | 1048 } |
| 1388 | 1049 |
| 1389 // Check if both contexts are the same. | 1050 // Check if both contexts are the same. |
| 1390 ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset)); | 1051 ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset)); |
| 1391 cmp(scratch, Operand(ip)); | 1052 cmp(scratch, ip); |
| 1392 b(eq, &same_contexts); | 1053 b(eq, &same_contexts); |
| 1393 | 1054 |
| 1394 // Check the context is a native context. | 1055 // Check the context is a native context. |
| 1395 if (emit_debug_code()) { | 1056 if (emit_debug_code()) { |
| 1396 // TODO(119): avoid push(holder_reg)/pop(holder_reg) | 1057 // TODO(119): avoid push(holder_reg)/pop(holder_reg) |
| 1397 // Cannot use ip as a temporary in this verification code, because ip is | 1058 // Cannot use ip as a temporary in this verification code, because ip is |
| 1398 // clobbered as part of cmp with an object Operand. | 1059 // clobbered as part of cmp with an object Operand. |
| 1399 push(holder_reg); // Temporarily save holder on the stack. | 1060 push(holder_reg); // Temporarily save holder on the stack. |
| 1400 mov(holder_reg, ip); // Move ip to its holding place. | 1061 mov(holder_reg, ip); // Move ip to its holding place. |
| 1401 LoadRoot(ip, Heap::kNullValueRootIndex); | 1062 LoadRoot(ip, Heap::kNullValueRootIndex); |
| (...skipping 11 matching lines...) |
| 1413 } | 1074 } |
| 1414 | 1075 |
| 1415 // Check that the security token in the calling global object is | 1076 // Check that the security token in the calling global object is |
| 1416 // compatible with the security token in the receiving global | 1077 // compatible with the security token in the receiving global |
| 1417 // object. | 1078 // object. |
| 1418 int token_offset = Context::kHeaderSize + | 1079 int token_offset = Context::kHeaderSize + |
| 1419 Context::SECURITY_TOKEN_INDEX * kPointerSize; | 1080 Context::SECURITY_TOKEN_INDEX * kPointerSize; |
| 1420 | 1081 |
| 1421 ldr(scratch, FieldMemOperand(scratch, token_offset)); | 1082 ldr(scratch, FieldMemOperand(scratch, token_offset)); |
| 1422 ldr(ip, FieldMemOperand(ip, token_offset)); | 1083 ldr(ip, FieldMemOperand(ip, token_offset)); |
| 1423 cmp(scratch, Operand(ip)); | 1084 cmp(scratch, ip); |
| 1424 b(ne, miss); | 1085 b(ne, miss); |
| 1425 | 1086 |
| 1426 bind(&same_contexts); | 1087 bind(&same_contexts); |
| 1427 } | 1088 } |
| 1428 | 1089 |
| 1429 | 1090 |
| 1430 void MacroAssembler::GetNumberHash(Register t0, Register scratch) { | 1091 void MacroAssembler::GetNumberHash(Register t0, Register scratch) { |
| 1431 // First of all we assign the hash seed to scratch. | 1092 // First of all we assign the hash seed to scratch. |
| 1432 LoadRoot(scratch, Heap::kHashSeedRootIndex); | 1093 LoadRoot(scratch, Heap::kHashSeedRootIndex); |
| 1433 SmiUntag(scratch); | 1094 SmiUntag(scratch); |
| 1434 | 1095 |
| 1435 // Xor original key with a seed. | 1096 // Xor original key with a seed. |
| 1436 eor(t0, t0, Operand(scratch)); | 1097 eor(t0, t0, scratch); |
| 1437 | 1098 |
| 1438 // Compute the hash code from the untagged key. This must be kept in sync | 1099 // Compute the hash code from the untagged key. This must be kept in sync |
| 1439 // with ComputeIntegerHash in utils.h. | 1100 // with ComputeIntegerHash in utils.h. |
| 1440 // | 1101 // |
| 1441 // hash = ~hash + (hash << 15); | 1102 // hash = ~hash + (hash << 15); |
| 1442 mvn(scratch, Operand(t0)); | 1103 mvn(scratch, t0); |
| 1443 add(t0, scratch, Operand(t0, LSL, 15)); | 1104 lsl(t0, t0, Operand(15)); |
| 1105 add(t0, scratch, t0); |
| 1444 // hash = hash ^ (hash >> 12); | 1106 // hash = hash ^ (hash >> 12); |
| 1445 eor(t0, t0, Operand(t0, LSR, 12)); | 1107 lsr(scratch, t0, Operand(12)); |
| 1108 eor(t0, t0, scratch); |
| 1446 // hash = hash + (hash << 2); | 1109 // hash = hash + (hash << 2); |
| 1447 add(t0, t0, Operand(t0, LSL, 2)); | 1110 lsl(scratch, t0, Operand(2)); |
| 1111 add(t0, t0, scratch); |
| 1448 // hash = hash ^ (hash >> 4); | 1112 // hash = hash ^ (hash >> 4); |
| 1449 eor(t0, t0, Operand(t0, LSR, 4)); | 1113 lsr(scratch, t0, Operand(4)); |
| 1114 eor(t0, t0, scratch); |
| 1450 // hash = hash * 2057; | 1115 // hash = hash * 2057; |
| 1451 mov(scratch, Operand(t0, LSL, 11)); | 1116 lsl(scratch, t0, Operand(11)); |
| 1452 add(t0, t0, Operand(t0, LSL, 3)); | 1117 lsl(sh4_ip, t0, Operand(3)); |
| 1118 add(t0, t0, sh4_ip); |
| 1453 add(t0, t0, scratch); | 1119 add(t0, t0, scratch); |
| 1454 // hash = hash ^ (hash >> 16); | 1120 // hash = hash ^ (hash >> 16); |
| 1455 eor(t0, t0, Operand(t0, LSR, 16)); | 1121 lsr(scratch, t0, Operand(16)); |
| 1122 eor(t0, t0, scratch); |
| 1456 } | 1123 } |
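
On ARM each step is a single instruction thanks to shifted operands; SH4 needs a separate lsl/lsr into a scratch register first. The scalar computation both versions encode is the one spelled out in the comments (and kept in sync with ComputeIntegerHash in utils.h); as a reference sketch:

    // Reference sketch of the seeded integer hash implemented above.
    uint32_t SeededIntegerHash(uint32_t key, uint32_t seed) {
      uint32_t hash = key ^ seed;
      hash = ~hash + (hash << 15);
      hash = hash ^ (hash >> 12);
      hash = hash + (hash << 2);
      hash = hash ^ (hash >> 4);
      hash = hash * 2057;   // emitted as hash + (hash << 3) + (hash << 11)
      hash = hash ^ (hash >> 16);
      return hash;
    }
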
| 1457 | 1124 |
| 1458 | 1125 |
| 1459 void MacroAssembler::LoadFromNumberDictionary(Label* miss, | 1126 void MacroAssembler::LoadFromNumberDictionary(Label* miss, |
| 1460 Register elements, | 1127 Register elements, |
| 1461 Register key, | 1128 Register key, |
| 1462 Register result, | 1129 Register result, |
| 1463 Register t0, | 1130 Register t0, |
| 1464 Register t1, | 1131 Register t1, |
| 1465 Register t2) { | 1132 Register t2) { |
| (...skipping 16 matching lines...) |
| 1482 // | 1149 // |
| 1483 // t1 - used to hold the capacity mask of the dictionary | 1150 // t1 - used to hold the capacity mask of the dictionary |
| 1484 // | 1151 // |
| 1485 // t2 - used for the index into the dictionary. | 1152 // t2 - used for the index into the dictionary. |
| 1486 Label done; | 1153 Label done; |
| 1487 | 1154 |
| 1488 GetNumberHash(t0, t1); | 1155 GetNumberHash(t0, t1); |
| 1489 | 1156 |
| 1490 // Compute the capacity mask. | 1157 // Compute the capacity mask. |
| 1491 ldr(t1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset)); | 1158 ldr(t1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset)); |
| 1492 mov(t1, Operand(t1, ASR, kSmiTagSize)); // convert smi to int | 1159 asr(t1, t1, Operand(kSmiTagSize)); // convert smi to int |
| 1493 sub(t1, t1, Operand(1)); | 1160 sub(t1, t1, Operand(1)); |
| 1494 | 1161 |
| 1495 // Generate an unrolled loop that performs a few probes before giving up. | 1162 // Generate an unrolled loop that performs a few probes before giving up. |
| 1496 static const int kProbes = 4; | 1163 static const int kProbes = 4; |
| 1497 for (int i = 0; i < kProbes; i++) { | 1164 for (int i = 0; i < kProbes; i++) { |
| 1498 // Use t2 for index calculations and keep the hash intact in t0. | 1165 // Use t2 for index calculations and keep the hash intact in t0. |
| 1499 mov(t2, t0); | 1166 mov(t2, t0); |
| 1500 // Compute the masked index: (hash + i + i * i) & mask. | 1167 // Compute the masked index: (hash + i + i * i) & mask. |
| 1501 if (i > 0) { | 1168 if (i > 0) { |
| 1502 add(t2, t2, Operand(SeededNumberDictionary::GetProbeOffset(i))); | 1169 add(t2, t2, Operand(SeededNumberDictionary::GetProbeOffset(i))); |
| 1503 } | 1170 } |
| 1504 and_(t2, t2, Operand(t1)); | 1171 land(t2, t2, t1); |
| 1505 | 1172 |
| 1506 // Scale the index by multiplying by the element size. | 1173 // Scale the index by multiplying by the element size. |
| 1507 ASSERT(SeededNumberDictionary::kEntrySize == 3); | 1174 ASSERT(SeededNumberDictionary::kEntrySize == 3); |
| 1508 add(t2, t2, Operand(t2, LSL, 1)); // t2 = t2 * 3 | 1175 lsl(ip, t2, Operand(1)); |
| 1176 add(t2, t2, ip); // t2 = t2 * 3 |
| 1509 | 1177 |
| 1510 // Check if the key is identical to the name. | 1178 // Check if the key is identical to the name. |
| 1511 add(t2, elements, Operand(t2, LSL, kPointerSizeLog2)); | 1179 lsl(ip, t2, Operand(kPointerSizeLog2)); |
| 1180 add(t2, elements, ip); |
| 1512 ldr(ip, FieldMemOperand(t2, SeededNumberDictionary::kElementsStartOffset)); | 1181 ldr(ip, FieldMemOperand(t2, SeededNumberDictionary::kElementsStartOffset)); |
| 1513 cmp(key, Operand(ip)); | 1182 cmp(key, ip); |
| 1514 if (i != kProbes - 1) { | 1183 if (i != kProbes - 1) { |
| 1515 b(eq, &done); | 1184 b(eq, &done); |
| 1516 } else { | 1185 } else { |
| 1517 b(ne, miss); | 1186 b(ne, miss); |
| 1518 } | 1187 } |
| 1519 } | 1188 } |
| 1520 | 1189 |
| 1521 bind(&done); | 1190 bind(&done); |
| 1522 // Check that the value is a normal property. | 1191 // Check that the value is a normal property. |
| 1523 // t2: elements + (index * kPointerSize) | 1192 // t2: elements + (index * kPointerSize) |
| 1524 const int kDetailsOffset = | 1193 const int kDetailsOffset = |
| 1525 SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize; | 1194 SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize; |
| 1526 ldr(t1, FieldMemOperand(t2, kDetailsOffset)); | 1195 ldr(t1, FieldMemOperand(t2, kDetailsOffset)); |
| 1527 tst(t1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask))); | 1196 tst(t1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask))); |
| 1528 b(ne, miss); | 1197 b(ne, miss); |
| 1529 | 1198 |
| 1530 // Get the value at the masked, scaled index and return. | 1199 // Get the value at the masked, scaled index and return. |
| 1531 const int kValueOffset = | 1200 const int kValueOffset = |
| 1532 SeededNumberDictionary::kElementsStartOffset + kPointerSize; | 1201 SeededNumberDictionary::kElementsStartOffset + kPointerSize; |
| 1533 ldr(result, FieldMemOperand(t2, kValueOffset)); | 1202 ldr(result, FieldMemOperand(t2, kValueOffset)); |
| 1534 } | 1203 } |
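
The unrolled loop probes an open-addressed dictionary whose entries are three words: key, value, details. The index arithmetic it performs, sketched with free functions (illustrative names only):

    // Sketch: masked probe index and the word offset of the probed entry.
    // GetProbeOffset(0) is 0, which is why the first iteration skips the add.
    uint32_t ProbeIndex(uint32_t hash, uint32_t capacity_mask, int i) {
      return (hash + SeededNumberDictionary::GetProbeOffset(i)) & capacity_mask;
    }
    uint32_t EntryWordOffset(uint32_t index) {
      return index * 3;  // kEntrySize == 3: [key, value, details]
    }
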
| 1535 | 1204 |
| 1536 | 1205 |
| 1537 void MacroAssembler::AllocateInNewSpace(int object_size, | 1206 void MacroAssembler::AllocateInNewSpace(int object_size, |
| 1538 Register result, | 1207 Register result, |
| 1539 Register scratch1, | 1208 Register scratch1, |
| 1540 Register scratch2, | 1209 Register scratch2, |
| 1541 Label* gc_required, | 1210 Label* gc_required, |
| 1542 AllocationFlags flags) { | 1211 AllocationFlags flags) { |
| 1212 RECORD_LINE(); |
| 1543 if (!FLAG_inline_new) { | 1213 if (!FLAG_inline_new) { |
| 1544 if (emit_debug_code()) { | 1214 if (emit_debug_code()) { |
| 1545 // Trash the registers to simulate an allocation failure. | 1215 // Trash the registers to simulate an allocation failure. |
| 1216 RECORD_LINE(); |
| 1546 mov(result, Operand(0x7091)); | 1217 mov(result, Operand(0x7091)); |
| 1547 mov(scratch1, Operand(0x7191)); | 1218 mov(scratch1, Operand(0x7191)); |
| 1548 mov(scratch2, Operand(0x7291)); | 1219 mov(scratch2, Operand(0x7291)); |
| 1549 } | 1220 } |
| 1221 RECORD_LINE(); |
| 1550 jmp(gc_required); | 1222 jmp(gc_required); |
| 1551 return; | 1223 return; |
| 1552 } | 1224 } |
| 1553 | 1225 |
| 1554 ASSERT(!result.is(scratch1)); | 1226 // Assert that the register arguments are different and that none of |
| 1555 ASSERT(!result.is(scratch2)); | 1227 // them are ip. ip is used explicitly in the code generated below. |
| 1556 ASSERT(!scratch1.is(scratch2)); | 1228 ASSERT(!result.is(scratch1) && !result.is(scratch2) && |
| 1557 ASSERT(!scratch1.is(ip)); | 1229 !scratch1.is(scratch2)); |
| 1558 ASSERT(!scratch2.is(ip)); | 1230 ASSERT(!result.is(sh4_ip) && !scratch1.is(sh4_ip) && |
| 1231 !scratch2.is(sh4_ip)); |
| 1232 ASSERT(!result.is(sh4_rtmp) && !scratch1.is(sh4_rtmp) && |
| 1233 !scratch2.is(sh4_rtmp)); |
| 1559 | 1234 |
| 1560 // Make object size into bytes. | 1235 // Make object size into bytes. |
| 1561 if ((flags & SIZE_IN_WORDS) != 0) { | 1236 if ((flags & SIZE_IN_WORDS) != 0) { |
| 1562 object_size *= kPointerSize; | 1237 object_size *= kPointerSize; |
| 1563 } | 1238 } |
| 1564 ASSERT_EQ(0, object_size & kObjectAlignmentMask); | 1239 ASSERT_EQ(0, object_size & kObjectAlignmentMask); |
| 1565 | 1240 |
| 1566 // Check relative positions of allocation top and limit addresses. | 1241 // Check relative positions of allocation top and limit addresses. |
| 1567 // The values must be adjacent in memory to allow the use of LDM. | 1242 // The values must be adjacent in memory to allow the use of LDM. |
| 1568 // Also, assert that the registers are numbered such that the values | 1243 // Also, assert that the registers are numbered such that the values |
| 1569 // are loaded in the correct order. | 1244 // are loaded in the correct order. |
| 1570 ExternalReference new_space_allocation_top = | 1245 ExternalReference new_space_allocation_top = |
| 1571 ExternalReference::new_space_allocation_top_address(isolate()); | 1246 ExternalReference::new_space_allocation_top_address(isolate()); |
| 1572 ExternalReference new_space_allocation_limit = | 1247 ExternalReference new_space_allocation_limit = |
| 1573 ExternalReference::new_space_allocation_limit_address(isolate()); | 1248 ExternalReference::new_space_allocation_limit_address(isolate()); |
| 1574 intptr_t top = | 1249 intptr_t top = |
| 1575 reinterpret_cast<intptr_t>(new_space_allocation_top.address()); | 1250 reinterpret_cast<intptr_t>(new_space_allocation_top.address()); |
| 1576 intptr_t limit = | 1251 intptr_t limit = |
| 1577 reinterpret_cast<intptr_t>(new_space_allocation_limit.address()); | 1252 reinterpret_cast<intptr_t>(new_space_allocation_limit.address()); |
| 1578 ASSERT((limit - top) == kPointerSize); | 1253 ASSERT((limit - top) == kPointerSize); |
| 1579 ASSERT(result.code() < ip.code()); | 1254 ASSERT(result.code() < ip.code()); |
| 1580 | 1255 |
| 1581 // Set up allocation top address and object size registers. | 1256 // Set up allocation top address and object size registers. |
| 1582 Register topaddr = scratch1; | 1257 Register topaddr = scratch1; |
| 1583 Register obj_size_reg = scratch2; | 1258 Register obj_size_reg = scratch2; |
| 1259 RECORD_LINE(); |
| 1584 mov(topaddr, Operand(new_space_allocation_top)); | 1260 mov(topaddr, Operand(new_space_allocation_top)); |
| 1585 Operand obj_size_operand = Operand(object_size); | 1261 mov(obj_size_reg, Operand(object_size)); |
| 1586 if (!obj_size_operand.is_single_instruction(this)) { | |
| 1587 // We are about to steal IP, so we need to load this value first | |
| 1588 mov(obj_size_reg, obj_size_operand); | |
| 1589 } | |
| 1590 | 1262 |
| 1591 // This code stores a temporary value in ip. This is OK, as the code below | 1263 // This code stores a temporary value in ip. This is OK, as the code below |
| 1592 // does not need ip for implicit literal generation. | 1264 // does not need ip for implicit literal generation. |
| 1593 if ((flags & RESULT_CONTAINS_TOP) == 0) { | 1265 if ((flags & RESULT_CONTAINS_TOP) == 0) { |
| 1266 RECORD_LINE(); |
| 1594 // Load allocation top into result and allocation limit into ip. | 1267 // Load allocation top into result and allocation limit into ip. |
| 1595 ldm(ia, topaddr, result.bit() | ip.bit()); | 1268 ldr(result, MemOperand(topaddr)); |
| 1269 ldr(ip, MemOperand(topaddr, 4)); |
| 1596 } else { | 1270 } else { |
| 1597 if (emit_debug_code()) { | 1271 if (emit_debug_code()) { |
| 1272 RECORD_LINE(); |
| 1598 // Assert that result actually contains top on entry. ip is used | 1273 // Assert that result actually contains top on entry. ip is used |
| 1599 // immediately below so this use of ip does not cause difference with | 1274 // immediately below so this use of ip does not cause difference with |
| 1600 // respect to register content between debug and release mode. | 1275 // respect to register content between debug and release mode. |
| 1601 ldr(ip, MemOperand(topaddr)); | 1276 ldr(ip, MemOperand(topaddr)); |
| 1602 cmp(result, ip); | 1277 cmp(result, ip); |
| 1603 Check(eq, "Unexpected allocation top"); | 1278 Check(eq, "Unexpected allocation top"); |
| 1604 } | 1279 } |
| 1280 RECORD_LINE(); |
| 1605 // Load allocation limit into ip. Result already contains allocation top. | 1281 // Load allocation limit into ip. Result already contains allocation top. |
| 1606 ldr(ip, MemOperand(topaddr, limit - top)); | 1282 ldr(ip, MemOperand(topaddr, limit - top)); |
| 1607 } | 1283 } |
| 1608 | 1284 |
| 1285 RECORD_LINE(); |
| 1609 // Calculate new top and bail out if new space is exhausted. Use result | 1286 // Calculate new top and bail out if new space is exhausted. Use result |
| 1610 // to calculate the new top. | 1287 // to calculate the new top. |
| 1611 if (obj_size_operand.is_single_instruction(this)) { | 1288 addc(scratch2, result, obj_size_reg); |
| 1612 // We can add the size as an immediate | 1289 b(t, gc_required); |
| 1613 add(scratch2, result, obj_size_operand, SetCC); | 1290 |
| 1614 } else { | 1291 RECORD_LINE(); |
| 1615 // Doesn't fit in an immediate, we have to use the register | 1292 cmphi(scratch2, sh4_ip); |
| 1616 add(scratch2, result, obj_size_reg, SetCC); | 1293 bt(gc_required); |
| 1617 } | 1294 |
| 1618 b(cs, gc_required); | 1295 RECORD_LINE(); |
| 1619 cmp(scratch2, Operand(ip)); | |
| 1620 b(hi, gc_required); | |
| 1621 str(scratch2, MemOperand(topaddr)); | 1296 str(scratch2, MemOperand(topaddr)); |
| 1622 | 1297 |
| 1623 // Tag object if requested. | 1298 // Tag object if requested. |
| 1624 if ((flags & TAG_OBJECT) != 0) { | 1299 if ((flags & TAG_OBJECT) != 0) { |
| 1300 RECORD_LINE(); |
| 1625 add(result, result, Operand(kHeapObjectTag)); | 1301 add(result, result, Operand(kHeapObjectTag)); |
| 1626 } | 1302 } |
| 1627 } | 1303 } |
| 1628 | 1304 |
| 1629 | 1305 |
| 1630 void MacroAssembler::AllocateInNewSpace(Register object_size, | 1306 void MacroAssembler::AllocateInNewSpace(Register object_size, |
| 1631 Register result, | 1307 Register result, |
| 1632 Register scratch1, | 1308 Register scratch1, |
| 1633 Register scratch2, | 1309 Register scratch2, |
| 1634 Label* gc_required, | 1310 Label* gc_required, |
| 1635 AllocationFlags flags) { | 1311 AllocationFlags flags) { |
| 1312 RECORD_LINE(); |
| 1636 if (!FLAG_inline_new) { | 1313 if (!FLAG_inline_new) { |
| 1637 if (emit_debug_code()) { | 1314 if (emit_debug_code()) { |
| 1638 // Trash the registers to simulate an allocation failure. | 1315 // Trash the registers to simulate an allocation failure. |
| 1316 RECORD_LINE(); |
| 1639 mov(result, Operand(0x7091)); | 1317 mov(result, Operand(0x7091)); |
| 1640 mov(scratch1, Operand(0x7191)); | 1318 mov(scratch1, Operand(0x7191)); |
| 1641 mov(scratch2, Operand(0x7291)); | 1319 mov(scratch2, Operand(0x7291)); |
| 1642 } | 1320 } |
| 1321 RECORD_LINE(); |
| 1643 jmp(gc_required); | 1322 jmp(gc_required); |
| 1644 return; | 1323 return; |
| 1645 } | 1324 } |
| 1646 | 1325 |
| 1647 // Assert that the register arguments are different and that none of | 1326 // Assert that the register arguments are different and that none of |
| 1648 // them are ip. ip is used explicitly in the code generated below. | 1327 // them are ip. ip is used explicitly in the code generated below. |
| 1328 // Also assert that rtmp is not used as it is used in assembler-sh4.cc. |
| 1649 ASSERT(!result.is(scratch1)); | 1329 ASSERT(!result.is(scratch1)); |
| 1650 ASSERT(!result.is(scratch2)); | 1330 ASSERT(!result.is(scratch2)); |
| 1651 ASSERT(!scratch1.is(scratch2)); | 1331 ASSERT(!scratch1.is(scratch2)); |
| 1652 ASSERT(!object_size.is(ip)); | 1332 ASSERT(!result.is(sh4_ip)); |
| 1653 ASSERT(!result.is(ip)); | 1333 ASSERT(!scratch1.is(sh4_ip)); |
| 1654 ASSERT(!scratch1.is(ip)); | 1334 ASSERT(!scratch2.is(sh4_ip)); |
| 1655 ASSERT(!scratch2.is(ip)); | 1335 ASSERT(!result.is(sh4_rtmp)); |
| 1336 ASSERT(!scratch1.is(sh4_rtmp)); |
| 1337 ASSERT(!scratch2.is(sh4_rtmp)); |
| 1656 | 1338 |
| 1657 // Check relative positions of allocation top and limit addresses. | 1339 // Check relative positions of allocation top and limit addresses. |
| 1658 // The values must be adjacent in memory to allow the use of LDM. | 1340 // The values must be adjacent in memory to allow the use of LDM. |
| 1659 // Also, assert that the registers are numbered such that the values | 1341 // Also, assert that the registers are numbered such that the values |
| 1660 // are loaded in the correct order. | 1342 // are loaded in the correct order. |
| 1661 ExternalReference new_space_allocation_top = | 1343 ExternalReference new_space_allocation_top = |
| 1662 ExternalReference::new_space_allocation_top_address(isolate()); | 1344 ExternalReference::new_space_allocation_top_address(isolate()); |
| 1663 ExternalReference new_space_allocation_limit = | 1345 ExternalReference new_space_allocation_limit = |
| 1664 ExternalReference::new_space_allocation_limit_address(isolate()); | 1346 ExternalReference::new_space_allocation_limit_address(isolate()); |
| 1665 intptr_t top = | 1347 intptr_t top = |
| 1666 reinterpret_cast<intptr_t>(new_space_allocation_top.address()); | 1348 reinterpret_cast<intptr_t>(new_space_allocation_top.address()); |
| 1667 intptr_t limit = | 1349 intptr_t limit = |
| 1668 reinterpret_cast<intptr_t>(new_space_allocation_limit.address()); | 1350 reinterpret_cast<intptr_t>(new_space_allocation_limit.address()); |
| 1669 ASSERT((limit - top) == kPointerSize); | 1351 ASSERT((limit - top) == kPointerSize); |
| 1670 ASSERT(result.code() < ip.code()); | 1352 ASSERT(result.code() < ip.code()); |
| 1671 | 1353 |
| 1672 // Set up allocation top address. | 1354 // Set up allocation top address. |
| 1673 Register topaddr = scratch1; | 1355 Register topaddr = scratch1; |
| 1356 |
| 1357 RECORD_LINE(); |
| 1674 mov(topaddr, Operand(new_space_allocation_top)); | 1358 mov(topaddr, Operand(new_space_allocation_top)); |
| 1675 | 1359 |
| 1676 // This code stores a temporary value in ip. This is OK, as the code below | 1360 // This code stores a temporary value in ip. This is OK, as the code below |
| 1677 // does not need ip for implicit literal generation. | 1361 // does not need ip for implicit literal generation. |
| 1678 if ((flags & RESULT_CONTAINS_TOP) == 0) { | 1362 if ((flags & RESULT_CONTAINS_TOP) == 0) { |
| 1363 RECORD_LINE(); |
| 1679 // Load allocation top into result and allocation limit into ip. | 1364 // Load allocation top into result and allocation limit into ip. |
| 1680 ldm(ia, topaddr, result.bit() | ip.bit()); | 1365 ldr(result, MemOperand(topaddr)); |
| 1366 ldr(ip, MemOperand(topaddr, 4)); |
| 1681 } else { | 1367 } else { |
| 1682 if (emit_debug_code()) { | 1368 if (emit_debug_code()) { |
| 1369 RECORD_LINE(); |
| 1683 // Assert that result actually contains top on entry. ip is used | 1370 // Assert that result actually contains top on entry. ip is used |
| 1684 // immediately below so this use of ip does not cause difference with | 1371 // immediately below so this use of ip does not cause difference with |
| 1685 // respect to register content between debug and release mode. | 1372 // respect to register content between debug and release mode. |
| 1686 ldr(ip, MemOperand(topaddr)); | 1373 ldr(ip, MemOperand(topaddr)); |
| 1687 cmp(result, ip); | 1374 cmp(result, ip); |
| 1688 Check(eq, "Unexpected allocation top"); | 1375 Check(eq, "Unexpected allocation top"); |
| 1689 } | 1376 } |
| 1377 RECORD_LINE(); |
| 1690 // Load allocation limit into ip. Result already contains allocation top. | 1378 // Load allocation limit into ip. Result already contains allocation top. |
| 1691 ldr(ip, MemOperand(topaddr, limit - top)); | 1379 ldr(ip, MemOperand(topaddr, limit - top)); |
| 1692 } | 1380 } |
| 1693 | 1381 |
| 1382 RECORD_LINE(); |
| 1694 // Calculate new top and bail out if new space is exhausted. Use result | 1383 // Calculate new top and bail out if new space is exhausted. Use result |
| 1695 // to calculate the new top. Object size may be in words so a shift is | 1384 // to calculate the new top. Object size may be in words so a shift is |
| 1696 // required to get the number of bytes. | 1385 // required to get the number of bytes. |
| 1697 if ((flags & SIZE_IN_WORDS) != 0) { | 1386 if ((flags & SIZE_IN_WORDS) != 0) { |
| 1698 add(scratch2, result, Operand(object_size, LSL, kPointerSizeLog2), SetCC); | 1387 RECORD_LINE(); |
| 1388 lsl(scratch2, object_size, Operand(kPointerSizeLog2)); |
| 1389 addc(scratch2, result, scratch2); |
| 1699 } else { | 1390 } else { |
| 1700 add(scratch2, result, Operand(object_size), SetCC); | 1391 RECORD_LINE(); |
| 1392 addc(scratch2, result, object_size); |
| 1701 } | 1393 } |
| 1702 b(cs, gc_required); | 1394 RECORD_LINE(); |
| 1703 cmp(scratch2, Operand(ip)); | 1395 b(t, gc_required); |
| 1704 b(hi, gc_required); | 1396 RECORD_LINE(); |
| 1397 cmphi(scratch2, sh4_ip); |
| 1398 bt(gc_required); |
| 1399 RECORD_LINE(); |
| 1705 | 1400 |
| 1706 // Update allocation top. result temporarily holds the new top. | 1401 // Update allocation top. result temporarily holds the new top. |
| 1707 if (emit_debug_code()) { | 1402 if (emit_debug_code()) { |
| 1403 RECORD_LINE(); |
| 1708 tst(scratch2, Operand(kObjectAlignmentMask)); | 1404 tst(scratch2, Operand(kObjectAlignmentMask)); |
| 1709 Check(eq, "Unaligned allocation in new space"); | 1405 Check(eq, "Unaligned allocation in new space"); |
| 1710 } | 1406 } |
| 1407 RECORD_LINE(); |
| 1711 str(scratch2, MemOperand(topaddr)); | 1408 str(scratch2, MemOperand(topaddr)); |
| 1712 | 1409 |
| 1713 // Tag object if requested. | 1410 // Tag object if requested. |
| 1714 if ((flags & TAG_OBJECT) != 0) { | 1411 if ((flags & TAG_OBJECT) != 0) { |
| 1412 RECORD_LINE(); |
| 1715 add(result, result, Operand(kHeapObjectTag)); | 1413 add(result, result, Operand(kHeapObjectTag)); |
| 1716 } | 1414 } |
| 1717 } | 1415 } |
| 1718 | 1416 |
| 1719 | 1417 |
| 1720 void MacroAssembler::UndoAllocationInNewSpace(Register object, | 1418 void MacroAssembler::UndoAllocationInNewSpace(Register object, |
| 1721 Register scratch) { | 1419 Register scratch) { |
| 1722 ExternalReference new_space_allocation_top = | 1420 ExternalReference new_space_allocation_top = |
| 1723 ExternalReference::new_space_allocation_top_address(isolate()); | 1421 ExternalReference::new_space_allocation_top_address(isolate()); |
| 1724 | 1422 |
| 1725 // Make sure the object has no tag before resetting top. | 1423 // Make sure the object has no tag before resetting top. |
| 1726 and_(object, object, Operand(~kHeapObjectTagMask)); | 1424 land(object, object, Operand(~kHeapObjectTagMask)); |
| 1727 #ifdef DEBUG | 1425 #ifdef DEBUG |
| 1728 // Check that the object un-allocated is below the current top. | 1426 // Check that the object un-allocated is below the current top. |
| 1729 mov(scratch, Operand(new_space_allocation_top)); | 1427 mov(scratch, Operand(new_space_allocation_top)); |
| 1730 ldr(scratch, MemOperand(scratch)); | 1428 ldr(scratch, MemOperand(scratch)); |
| 1731 cmp(object, scratch); | 1429 cmpge(object, scratch); |
| 1732 Check(lt, "Undo allocation of non allocated memory"); | 1430 Check(ne, "Undo allocation of non allocated memory"); |
| 1733 #endif | 1431 #endif |
| 1734 // Write the address of the object to un-allocate as the current top. | 1432 // Write the address of the object to un-allocate as the current top. |
| 1735 mov(scratch, Operand(new_space_allocation_top)); | 1433 mov(scratch, Operand(new_space_allocation_top)); |
| 1736 str(object, MemOperand(scratch)); | 1434 str(object, MemOperand(scratch)); |
| 1737 } | 1435 } |
| 1738 | 1436 |
| 1739 | 1437 |
| 1740 void MacroAssembler::AllocateTwoByteString(Register result, | 1438 void MacroAssembler::AllocateTwoByteString(Register result, |
| 1741 Register length, | 1439 Register length, |
| 1742 Register scratch1, | 1440 Register scratch1, |
| 1743 Register scratch2, | 1441 Register scratch2, |
| 1744 Register scratch3, | 1442 Register scratch3, |
| 1745 Label* gc_required) { | 1443 Label* gc_required) { |
| 1746 // Calculate the number of bytes needed for the characters in the string while | 1444 // Calculate the number of bytes needed for the characters in the string while |
| 1747 // observing object alignment. | 1445 // observing object alignment. |
| 1748 ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0); | 1446 ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0); |
| 1749 mov(scratch1, Operand(length, LSL, 1)); // Length in bytes, not chars. | 1447 RECORD_LINE(); |
| 1448 lsl(scratch1, length, Operand(1)); // Length in bytes, not chars. |
| 1750 add(scratch1, scratch1, | 1449 add(scratch1, scratch1, |
| 1751 Operand(kObjectAlignmentMask + SeqTwoByteString::kHeaderSize)); | 1450 Operand(kObjectAlignmentMask + SeqTwoByteString::kHeaderSize)); |
| 1752 and_(scratch1, scratch1, Operand(~kObjectAlignmentMask)); | 1451 land(scratch1, scratch1, Operand(~kObjectAlignmentMask)); |
| 1753 | 1452 |
| 1754 // Allocate two-byte string in new space. | 1453 // Allocate two-byte string in new space. |
| 1755 AllocateInNewSpace(scratch1, | 1454 AllocateInNewSpace(scratch1, |
| 1756 result, | 1455 result, |
| 1757 scratch2, | 1456 scratch2, |
| 1758 scratch3, | 1457 scratch3, |
| 1759 gc_required, | 1458 gc_required, |
| 1760 TAG_OBJECT); | 1459 TAG_OBJECT); |
| 1761 | 1460 |
| 1762 // Set the map, length and hash field. | 1461 // Set the map, length and hash field. |
| 1462 RECORD_LINE(); |
| 1763 InitializeNewString(result, | 1463 InitializeNewString(result, |
| 1764 length, | 1464 length, |
| 1765 Heap::kStringMapRootIndex, | 1465 Heap::kStringMapRootIndex, |
| 1766 scratch1, | 1466 scratch1, |
| 1767 scratch2); | 1467 scratch2); |
| 1768 } | 1468 } |
| 1769 | 1469 |
| 1770 | 1470 |
| 1771 void MacroAssembler::AllocateAsciiString(Register result, | 1471 void MacroAssembler::AllocateAsciiString(Register result, |
| 1772 Register length, | 1472 Register length, |
| 1773 Register scratch1, | 1473 Register scratch1, |
| 1774 Register scratch2, | 1474 Register scratch2, |
| 1775 Register scratch3, | 1475 Register scratch3, |
| 1776 Label* gc_required) { | 1476 Label* gc_required) { |
| 1777 // Calculate the number of bytes needed for the characters in the string while | 1477 // Calculate the number of bytes needed for the characters in the string while |
| 1778 // observing object alignment. | 1478 // observing object alignment. |
| 1779 ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0); | 1479 ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0); |
| 1780 ASSERT(kCharSize == 1); | 1480 ASSERT(kCharSize == 1); |
| 1481 RECORD_LINE(); |
| 1781 add(scratch1, length, | 1482 add(scratch1, length, |
| 1782 Operand(kObjectAlignmentMask + SeqAsciiString::kHeaderSize)); | 1483 Operand(kObjectAlignmentMask + SeqAsciiString::kHeaderSize)); |
| 1783 and_(scratch1, scratch1, Operand(~kObjectAlignmentMask)); | 1484 land(scratch1, scratch1, Operand(~kObjectAlignmentMask)); |
| 1784 | 1485 |
| 1785 // Allocate ASCII string in new space. | 1486 // Allocate ASCII string in new space. |
| 1786 AllocateInNewSpace(scratch1, | 1487 AllocateInNewSpace(scratch1, |
| 1787 result, | 1488 result, |
| 1788 scratch2, | 1489 scratch2, |
| 1789 scratch3, | 1490 scratch3, |
| 1790 gc_required, | 1491 gc_required, |
| 1791 TAG_OBJECT); | 1492 TAG_OBJECT); |
| 1792 | 1493 |
| 1494 RECORD_LINE(); |
| 1793 // Set the map, length and hash field. | 1495 // Set the map, length and hash field. |
| 1794 InitializeNewString(result, | 1496 InitializeNewString(result, |
| 1795 length, | 1497 length, |
| 1796 Heap::kAsciiStringMapRootIndex, | 1498 Heap::kAsciiStringMapRootIndex, |
| 1797 scratch1, | 1499 scratch1, |
| 1798 scratch2); | 1500 scratch2); |
| 1799 } | 1501 } |
| 1800 | 1502 |
| 1801 | 1503 |
| 1802 void MacroAssembler::AllocateTwoByteConsString(Register result, | 1504 void MacroAssembler::AllocateTwoByteConsString(Register result, |
| 1803 Register length, | 1505 Register length, |
| 1804 Register scratch1, | 1506 Register scratch1, |
| 1805 Register scratch2, | 1507 Register scratch2, |
| 1806 Label* gc_required) { | 1508 Label* gc_required) { |
| 1509 RECORD_LINE(); |
| 1807 AllocateInNewSpace(ConsString::kSize, | 1510 AllocateInNewSpace(ConsString::kSize, |
| 1808 result, | 1511 result, |
| 1809 scratch1, | 1512 scratch1, |
| 1810 scratch2, | 1513 scratch2, |
| 1811 gc_required, | 1514 gc_required, |
| 1812 TAG_OBJECT); | 1515 TAG_OBJECT); |
| 1813 | 1516 |
| 1517 RECORD_LINE(); |
| 1814 InitializeNewString(result, | 1518 InitializeNewString(result, |
| 1815 length, | 1519 length, |
| 1816 Heap::kConsStringMapRootIndex, | 1520 Heap::kConsStringMapRootIndex, |
| 1817 scratch1, | 1521 scratch1, |
| 1818 scratch2); | 1522 scratch2); |
| 1819 } | 1523 } |
| 1820 | 1524 |
| 1821 | 1525 |
| 1822 void MacroAssembler::AllocateAsciiConsString(Register result, | 1526 void MacroAssembler::AllocateAsciiConsString(Register result, |
| 1823 Register length, | 1527 Register length, |
| 1824 Register scratch1, | 1528 Register scratch1, |
| 1825 Register scratch2, | 1529 Register scratch2, |
| 1826 Label* gc_required) { | 1530 Label* gc_required) { |
| 1531 RECORD_LINE(); |
| 1827 AllocateInNewSpace(ConsString::kSize, | 1532 AllocateInNewSpace(ConsString::kSize, |
| 1828 result, | 1533 result, |
| 1829 scratch1, | 1534 scratch1, |
| 1830 scratch2, | 1535 scratch2, |
| 1831 gc_required, | 1536 gc_required, |
| 1832 TAG_OBJECT); | 1537 TAG_OBJECT); |
| 1833 | 1538 |
| 1539 RECORD_LINE(); |
| 1834 InitializeNewString(result, | 1540 InitializeNewString(result, |
| 1835 length, | 1541 length, |
| 1836 Heap::kConsAsciiStringMapRootIndex, | 1542 Heap::kConsAsciiStringMapRootIndex, |
| 1837 scratch1, | 1543 scratch1, |
| 1838 scratch2); | 1544 scratch2); |
| 1839 } | 1545 } |
| 1840 | 1546 |
| 1841 | 1547 |
| 1842 void MacroAssembler::AllocateTwoByteSlicedString(Register result, | 1548 void MacroAssembler::AllocateTwoByteSlicedString(Register result, |
| 1843 Register length, | 1549 Register length, |
| (...skipping 31 matching lines...) |
| 1875 length, | 1581 length, |
| 1876 Heap::kSlicedAsciiStringMapRootIndex, | 1582 Heap::kSlicedAsciiStringMapRootIndex, |
| 1877 scratch1, | 1583 scratch1, |
| 1878 scratch2); | 1584 scratch2); |
| 1879 } | 1585 } |
| 1880 | 1586 |
| 1881 | 1587 |
| 1882 void MacroAssembler::CompareObjectType(Register object, | 1588 void MacroAssembler::CompareObjectType(Register object, |
| 1883 Register map, | 1589 Register map, |
| 1884 Register type_reg, | 1590 Register type_reg, |
| 1885 InstanceType type) { | 1591 InstanceType type, |
| 1592 Condition cond) { |
| 1593 ASSERT(!object.is(sh4_ip) && !map.is(sh4_ip) && !type_reg.is(sh4_ip)); |
| 1594 ASSERT(!object.is(sh4_rtmp) && !map.is(sh4_rtmp) && !type_reg.is(sh4_rtmp)); |
| 1595 |
| 1596 RECORD_LINE(); |
| 1886 ldr(map, FieldMemOperand(object, HeapObject::kMapOffset)); | 1597 ldr(map, FieldMemOperand(object, HeapObject::kMapOffset)); |
| 1887 CompareInstanceType(map, type_reg, type); | 1598 CompareInstanceType(map, type_reg, type, cond); |
| 1888 } | 1599 } |
| 1889 | 1600 |
| 1890 | 1601 |
| 1891 void MacroAssembler::CompareInstanceType(Register map, | 1602 void MacroAssembler::CompareInstanceType(Register map, |
| 1892 Register type_reg, | 1603 Register type_reg, |
| 1893 InstanceType type) { | 1604 InstanceType type, |
| 1605 Condition cond) { |
| 1606 ASSERT(!map.is(sh4_rtmp) && !type_reg.is(sh4_rtmp)); |
| 1607 |
| 1608 RECORD_LINE(); |
| 1894 ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset)); | 1609 ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset)); |
| 1895 cmp(type_reg, Operand(type)); | 1610 switch (cond) { |
| 1611 case eq: |
| 1612 RECORD_LINE(); |
| 1613 cmpeq(type_reg, Operand(type)); |
| 1614 break; |
| 1615 case ge: |
| 1616 RECORD_LINE(); |
| 1617 cmpge(type_reg, Operand(type)); |
| 1618 break; |
| 1619 case hs: |
| 1620 RECORD_LINE(); |
| 1621 cmphs(type_reg, Operand(type)); |
| 1622 break; |
| 1623 case gt: |
| 1624 RECORD_LINE(); |
| 1625 cmpgt(type_reg, Operand(type)); |
| 1626 break; |
| 1627 default: |
| 1628 UNIMPLEMENTED(); |
| 1629 } |
| 1896 } | 1630 } |
| 1897 | 1631 |
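A note on the switch above: ARM's cmp sets a full set of condition flags, so the original CompareInstanceType could leave the condition choice to the branch; SH4 has only a single T bit, written by a condition-specific compare (cmpeq, cmpge, cmphs, cmpgt) and tested with bt/bf, so the port threads the requested condition into the compare itself. A minimal standalone sketch of that mapping (plain host C++ with made-up names, not the V8 API):

    #include <cassert>
    #include <cstdint>

    // Hypothetical stand-ins for the conditions handled by the switch above.
    enum Condition { eq, ge, hs, gt };

    // Evaluate "lhs <cond> rhs" the way the SH4 port does: pick the compare
    // instruction that matches the condition and return the resulting T bit.
    bool CompareSetsT(Condition cond, uint32_t lhs, uint32_t rhs) {
      switch (cond) {
        case eq: return lhs == rhs;                                 // cmpeq
        case ge: return static_cast<int32_t>(lhs) >=
                        static_cast<int32_t>(rhs);                  // cmpge (signed)
        case hs: return lhs >= rhs;                                 // cmphs (unsigned)
        case gt: return static_cast<int32_t>(lhs) >
                        static_cast<int32_t>(rhs);                  // cmpgt (signed)
      }
      return false;  // unreachable for the four conditions above
    }

    int main() {
      // A caller that wrote "cmp; b(ge, ...)" on ARM now requests the ge
      // compare up front and branches on the T bit (bt/bf) afterwards.
      assert(CompareSetsT(ge, 0x80u, 0x7Fu));
      assert(!CompareSetsT(gt, 5u, 5u));
      return 0;
    }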
| 1898 | 1632 |
| 1899 void MacroAssembler::CompareRoot(Register obj, | 1633 void MacroAssembler::CompareRoot(Register obj, |
| 1900 Heap::RootListIndex index) { | 1634 Heap::RootListIndex index) { |
| 1901 ASSERT(!obj.is(ip)); | 1635 ASSERT(!obj.is(sh4_ip)); |
| 1636 ASSERT(!obj.is(sh4_rtmp)); |
| 1637 RECORD_LINE(); |
| 1902 LoadRoot(ip, index); | 1638 LoadRoot(ip, index); |
| 1903 cmp(obj, ip); | 1639 cmpeq(obj, ip); |
| 1904 } | 1640 } |
| 1905 | 1641 |
| 1906 | 1642 |
| 1907 void MacroAssembler::CheckFastElements(Register map, | 1643 void MacroAssembler::CheckFastElements(Register map, |
| 1908 Register scratch, | 1644 Register scratch, |
| 1909 Label* fail) { | 1645 Label* fail) { |
| 1910 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); | 1646 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); |
| 1911 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); | 1647 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); |
| 1912 STATIC_ASSERT(FAST_ELEMENTS == 2); | 1648 STATIC_ASSERT(FAST_ELEMENTS == 2); |
| 1913 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3); | 1649 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3); |
| 1914 ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset)); | 1650 ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset)); |
| 1915 cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue)); | 1651 cmphi(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue)); |
| 1916 b(hi, fail); | 1652 bt(fail); |
| 1917 } | 1653 } |
| 1918 | 1654 |
| 1919 | 1655 |
| 1920 void MacroAssembler::CheckFastObjectElements(Register map, | 1656 void MacroAssembler::CheckFastObjectElements(Register map, |
| 1921 Register scratch, | 1657 Register scratch, |
| 1922 Label* fail) { | 1658 Label* fail) { |
| 1923 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); | 1659 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); |
| 1924 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); | 1660 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); |
| 1925 STATIC_ASSERT(FAST_ELEMENTS == 2); | 1661 STATIC_ASSERT(FAST_ELEMENTS == 2); |
| 1926 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3); | 1662 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3); |
| 1927 ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset)); | 1663 ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset)); |
| 1928 cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue)); | 1664 cmpgt(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue)); |
| 1929 b(ls, fail); | 1665 bf(fail); |
| 1930 cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue)); | 1666 cmphi(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue)); |
| 1931 b(hi, fail); | 1667 bt(fail); |
| 1932 } | 1668 } |
| 1933 | 1669 |
| 1934 | |
| 1935 void MacroAssembler::CheckFastSmiElements(Register map, | 1670 void MacroAssembler::CheckFastSmiElements(Register map, |
| 1936 Register scratch, | 1671 Register scratch, |
| 1937 Label* fail) { | 1672 Label* fail) { |
| 1938 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); | 1673 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); |
| 1939 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); | 1674 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); |
| 1940 ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset)); | 1675 ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset)); |
| 1941 cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue)); | 1676 cmphi(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue)); |
| 1942 b(hi, fail); | 1677 bt(fail); |
| 1943 } | 1678 } |
| 1944 | 1679 |
| 1945 | 1680 |
| 1946 void MacroAssembler::StoreNumberToDoubleElements(Register value_reg, | 1681 void MacroAssembler::StoreNumberToDoubleElements(Register value_reg, |
| 1947 Register key_reg, | 1682 Register key_reg, |
| 1948 Register receiver_reg, | 1683 Register receiver_reg, |
| 1949 Register elements_reg, | 1684 Register elements_reg, |
| 1950 Register scratch1, | 1685 Register scratch1, |
| 1951 Register scratch2, | 1686 Register scratch2, |
| 1952 Register scratch3, | 1687 Register scratch3, |
| 1953 Register scratch4, | 1688 Register scratch4, |
| 1954 Label* fail) { | 1689 Label* fail) { |
| 1955 Label smi_value, maybe_nan, have_double_value, is_nan, done; | 1690 UNIMPLEMENTED_BREAK(); |
| 1956 Register mantissa_reg = scratch2; | 1691 } |
| 1957 Register exponent_reg = scratch3; | |
| 1958 | 1692 |
| 1959 // Handle smi values specially. | |
| 1960 JumpIfSmi(value_reg, &smi_value); | |
| 1961 | |
| 1962 // Ensure that the object is a heap number | |
| 1963 CheckMap(value_reg, | |
| 1964 scratch1, | |
| 1965 isolate()->factory()->heap_number_map(), | |
| 1966 fail, | |
| 1967 DONT_DO_SMI_CHECK); | |
| 1968 | |
| 1969 // Check for nan: all NaN values have a value greater (signed) than 0x7ff00000 | |
| 1970 // in the exponent. | |
| 1971 mov(scratch1, Operand(kNaNOrInfinityLowerBoundUpper32)); | |
| 1972 ldr(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset)); | |
| 1973 cmp(exponent_reg, scratch1); | |
| 1974 b(ge, &maybe_nan); | |
| 1975 | |
| 1976 ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset)); | |
| 1977 | |
| 1978 bind(&have_double_value); | |
| 1979 add(scratch1, elements_reg, | |
| 1980 Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize)); | |
| 1981 str(mantissa_reg, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize)); | |
| 1982 uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32); | |
| 1983 str(exponent_reg, FieldMemOperand(scratch1, offset)); | |
| 1984 jmp(&done); | |
| 1985 | |
| 1986 bind(&maybe_nan); | |
| 1987 // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise | |
| 1988 // it's an Infinity, and the non-NaN code path applies. | |
| 1989 b(gt, &is_nan); | |
| 1990 ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset)); | |
| 1991 cmp(mantissa_reg, Operand(0)); | |
| 1992 b(eq, &have_double_value); | |
| 1993 bind(&is_nan); | |
| 1994 // Load canonical NaN for storing into the double array. | |
| 1995 uint64_t nan_int64 = BitCast<uint64_t>( | |
| 1996 FixedDoubleArray::canonical_not_the_hole_nan_as_double()); | |
| 1997 mov(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64))); | |
| 1998 mov(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32))); | |
| 1999 jmp(&have_double_value); | |
| 2000 | |
| 2001 bind(&smi_value); | |
| 2002 add(scratch1, elements_reg, | |
| 2003 Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag)); | |
| 2004 add(scratch1, scratch1, | |
| 2005 Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize)); | |
| 2006 // scratch1 is now effective address of the double element | |
| 2007 | |
| 2008 FloatingPointHelper::Destination destination; | |
| 2009 if (CpuFeatures::IsSupported(VFP2)) { | |
| 2010 destination = FloatingPointHelper::kVFPRegisters; | |
| 2011 } else { | |
| 2012 destination = FloatingPointHelper::kCoreRegisters; | |
| 2013 } | |
| 2014 | |
| 2015 Register untagged_value = elements_reg; | |
| 2016 SmiUntag(untagged_value, value_reg); | |
| 2017 FloatingPointHelper::ConvertIntToDouble(this, | |
| 2018 untagged_value, | |
| 2019 destination, | |
| 2020 d0, | |
| 2021 mantissa_reg, | |
| 2022 exponent_reg, | |
| 2023 scratch4, | |
| 2024 s2); | |
| 2025 if (destination == FloatingPointHelper::kVFPRegisters) { | |
| 2026 CpuFeatures::Scope scope(VFP2); | |
| 2027 vstr(d0, scratch1, 0); | |
| 2028 } else { | |
| 2029 str(mantissa_reg, MemOperand(scratch1, 0)); | |
| 2030 str(exponent_reg, MemOperand(scratch1, Register::kSizeInBytes)); | |
| 2031 } | |
| 2032 bind(&done); | |
| 2033 } | |
| 2034 | 1693 |
| 2035 | 1694 |
| 2036 void MacroAssembler::CompareMap(Register obj, | 1695 void MacroAssembler::CompareMap(Register obj, |
| 2037 Register scratch, | 1696 Register scratch, |
| 2038 Handle<Map> map, | 1697 Handle<Map> map, |
| 2039 Label* early_success, | 1698 Label* early_success, |
| 2040 CompareMapMode mode) { | 1699 CompareMapMode mode) { |
| 2041 ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); | 1700 ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); |
| 2042 CompareMap(scratch, map, early_success, mode); | 1701 CompareMap(scratch, map, early_success, mode); |
| 2043 } | 1702 } |
| 2044 | 1703 |
| 2045 | 1704 |
| 2046 void MacroAssembler::CompareMap(Register obj_map, | 1705 void MacroAssembler::CompareMap(Register obj_map, |
| 2047 Handle<Map> map, | 1706 Handle<Map> map, |
| 2048 Label* early_success, | 1707 Label* early_success, |
| 2049 CompareMapMode mode) { | 1708 CompareMapMode mode) { |
| 2050 cmp(obj_map, Operand(map)); | 1709 cmpeq(obj_map, Operand(map)); |
| 2051 if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) { | 1710 if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) { |
| 2052 ElementsKind kind = map->elements_kind(); | 1711 ElementsKind kind = map->elements_kind(); |
| 2053 if (IsFastElementsKind(kind)) { | 1712 if (IsFastElementsKind(kind)) { |
| 2054 bool packed = IsFastPackedElementsKind(kind); | 1713 bool packed = IsFastPackedElementsKind(kind); |
| 2055 Map* current_map = *map; | 1714 Map* current_map = *map; |
| 2056 while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) { | 1715 while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) { |
| 2057 kind = GetNextMoreGeneralFastElementsKind(kind, packed); | 1716 kind = GetNextMoreGeneralFastElementsKind(kind, packed); |
| 2058 current_map = current_map->LookupElementsTransitionMap(kind); | 1717 current_map = current_map->LookupElementsTransitionMap(kind); |
| 2059 if (!current_map) break; | 1718 if (!current_map) break; |
| 2060 b(eq, early_success); | 1719 b(eq, early_success); |
| 2061 cmp(obj_map, Operand(Handle<Map>(current_map))); | 1720 cmpeq(obj_map, Operand(Handle<Map>(current_map))); |
| 2062 } | 1721 } |
| 2063 } | 1722 } |
| 2064 } | 1723 } |
| 2065 } | 1724 } |
| 2066 | 1725 |
| 2067 | 1726 |
| 1727 |
| 2068 void MacroAssembler::CheckMap(Register obj, | 1728 void MacroAssembler::CheckMap(Register obj, |
| 2069 Register scratch, | 1729 Register scratch, |
| 2070 Handle<Map> map, | 1730 Handle<Map> map, |
| 2071 Label* fail, | 1731 Label* fail, |
| 2072 SmiCheckType smi_check_type, | 1732 SmiCheckType smi_check_type, |
| 2073 CompareMapMode mode) { | 1733 CompareMapMode mode) { |
| 1734 ASSERT(!obj.is(sh4_ip) && !scratch.is(sh4_ip)); |
| 1735 ASSERT(!obj.is(sh4_rtmp) && !scratch.is(sh4_rtmp)); |
| 1736 RECORD_LINE(); |
| 2074 if (smi_check_type == DO_SMI_CHECK) { | 1737 if (smi_check_type == DO_SMI_CHECK) { |
| 1738 RECORD_LINE(); |
| 2075 JumpIfSmi(obj, fail); | 1739 JumpIfSmi(obj, fail); |
| 2076 } | 1740 } |
| 2077 | 1741 |
| 1742 RECORD_LINE(); |
| 2078 Label success; | 1743 Label success; |
| 2079 CompareMap(obj, scratch, map, &success, mode); | 1744 CompareMap(obj, scratch, map, &success, mode); |
| 2080 b(ne, fail); | 1745 b(ne, fail); |
| 2081 bind(&success); | 1746 bind(&success); |
| 2082 } | 1747 } |
| 2083 | 1748 |
| 2084 | 1749 |
| 2085 void MacroAssembler::CheckMap(Register obj, | 1750 void MacroAssembler::CheckMap(Register obj, |
| 2086 Register scratch, | 1751 Register scratch, |
| 2087 Heap::RootListIndex index, | 1752 Heap::RootListIndex index, |
| 2088 Label* fail, | 1753 Label* fail, |
| 2089 SmiCheckType smi_check_type) { | 1754 SmiCheckType smi_check_type) { |
| 1755 ASSERT(!obj.is(sh4_ip) && !scratch.is(sh4_ip)); |
| 1756 ASSERT(!obj.is(sh4_rtmp) && !scratch.is(sh4_rtmp)); |
| 1757 RECORD_LINE(); |
| 2090 if (smi_check_type == DO_SMI_CHECK) { | 1758 if (smi_check_type == DO_SMI_CHECK) { |
| 1759 RECORD_LINE(); |
| 2091 JumpIfSmi(obj, fail); | 1760 JumpIfSmi(obj, fail); |
| 2092 } | 1761 } |
| 1762 RECORD_LINE(); |
| 2093 ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); | 1763 ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); |
| 2094 LoadRoot(ip, index); | 1764 LoadRoot(ip, index); |
| 2095 cmp(scratch, ip); | 1765 cmp(scratch, ip); |
| 2096 b(ne, fail); | 1766 b(ne, fail); |
| 2097 } | 1767 } |
| 2098 | 1768 |
| 2099 | 1769 |
| 2100 void MacroAssembler::DispatchMap(Register obj, | 1770 void MacroAssembler::DispatchMap(Register obj, |
| 2101 Register scratch, | 1771 Register scratch, |
| 2102 Handle<Map> map, | 1772 Handle<Map> map, |
| 2103 Handle<Code> success, | 1773 Handle<Code> success, |
| 2104 SmiCheckType smi_check_type) { | 1774 SmiCheckType smi_check_type) { |
| 2105 Label fail; | 1775 Label fail; |
| 2106 if (smi_check_type == DO_SMI_CHECK) { | 1776 if (smi_check_type == DO_SMI_CHECK) { |
| 2107 JumpIfSmi(obj, &fail); | 1777 JumpIfSmi(obj, &fail, Label::kNear); |
| 2108 } | 1778 } |
| 2109 ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); | 1779 ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); |
| 2110 mov(ip, Operand(map)); | 1780 mov(ip, Operand(map)); |
| 2111 cmp(scratch, ip); | 1781 cmp(scratch, ip); |
| 2112 Jump(success, RelocInfo::CODE_TARGET, eq); | 1782 Label skip; |
| 1783 bf(&skip); |
| 1784 Jump(success, RelocInfo::CODE_TARGET); |
| 1785 bind(&skip); |
| 2113 bind(&fail); | 1786 bind(&fail); |
| 2114 } | 1787 } |
| 2115 | 1788 |
| 2116 | 1789 |
| 2117 void MacroAssembler::TryGetFunctionPrototype(Register function, | 1790 void MacroAssembler::TryGetFunctionPrototype(Register function, |
| 2118 Register result, | 1791 Register result, |
| 2119 Register scratch, | 1792 Register scratch, |
| 2120 Label* miss, | 1793 Label* miss, |
| 2121 bool miss_on_bound_function) { | 1794 bool miss_on_bound_function) { |
| 1795 ASSERT(!function.is(sh4_ip) && !result.is(sh4_ip) && !scratch.is(sh4_ip)); |
| 1796 ASSERT(!function.is(sh4_rtmp) && !result.is(sh4_rtmp) && |
| 1797 !scratch.is(sh4_rtmp)); |
| 1798 |
| 1799 RECORD_LINE(); |
| 2122 // Check that the receiver isn't a smi. | 1800 // Check that the receiver isn't a smi. |
| 2123 JumpIfSmi(function, miss); | 1801 JumpIfSmi(function, miss); |
| 2124 | 1802 |
| 2125 // Check that the function really is a function. Load map into result reg. | 1803 // Check that the function really is a function. Load map into result reg. |
| 2126 CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE); | 1804 CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE, eq); |
| 2127 b(ne, miss); | 1805 bf(miss); |
| 2128 | 1806 |
| 2129 if (miss_on_bound_function) { | 1807 if (miss_on_bound_function) { |
| 1808 RECORD_LINE(); |
| 2130 ldr(scratch, | 1809 ldr(scratch, |
| 2131 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset)); | 1810 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset)); |
| 2132 ldr(scratch, | 1811 ldr(scratch, |
| 2133 FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset)); | 1812 FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset)); |
| 2134 tst(scratch, | 1813 tst(scratch, |
| 2135 Operand(Smi::FromInt(1 << SharedFunctionInfo::kBoundFunction))); | 1814 Operand(Smi::FromInt(1 << SharedFunctionInfo::kBoundFunction))); |
| 2136 b(ne, miss); | 1815 b(ne, miss); |
| 2137 } | 1816 } |
| 2138 | 1817 |
| 1818 RECORD_LINE(); |
| 2139 // Make sure that the function has an instance prototype. | 1819 // Make sure that the function has an instance prototype. |
| 2140 Label non_instance; | 1820 Label non_instance; |
| 2141 ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset)); | 1821 ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset)); |
| 2142 tst(scratch, Operand(1 << Map::kHasNonInstancePrototype)); | 1822 tst(scratch, Operand(1 << Map::kHasNonInstancePrototype)); |
| 2143 b(ne, &non_instance); | 1823 bf_near(&non_instance); |
| 2144 | 1824 |
| 1825 RECORD_LINE(); |
| 2145 // Get the prototype or initial map from the function. | 1826 // Get the prototype or initial map from the function. |
| 2146 ldr(result, | 1827 ldr(result, |
| 2147 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); | 1828 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); |
| 2148 | 1829 |
| 2149 // If the prototype or initial map is the hole, don't return it and | 1830 // If the prototype or initial map is the hole, don't return it and |
| 2150 // simply miss the cache instead. This will allow us to allocate a | 1831 // simply miss the cache instead. This will allow us to allocate a |
| 2151 // prototype object on-demand in the runtime system. | 1832 // prototype object on-demand in the runtime system. |
| 2152 LoadRoot(ip, Heap::kTheHoleValueRootIndex); | 1833 LoadRoot(ip, Heap::kTheHoleValueRootIndex); |
| 2153 cmp(result, ip); | 1834 cmp(result, ip); |
| 2154 b(eq, miss); | 1835 b(eq, miss); |
| 2155 | 1836 |
| 1837 RECORD_LINE(); |
| 2156 // If the function does not have an initial map, we're done. | 1838 // If the function does not have an initial map, we're done. |
| 2157 Label done; | 1839 Label done; |
| 2158 CompareObjectType(result, scratch, scratch, MAP_TYPE); | 1840 CompareObjectType(result, scratch, scratch, MAP_TYPE, eq); |
| 2159 b(ne, &done); | 1841 bf_near(&done); |
| 2160 | 1842 |
| 1843 RECORD_LINE(); |
| 2161 // Get the prototype from the initial map. | 1844 // Get the prototype from the initial map. |
| 2162 ldr(result, FieldMemOperand(result, Map::kPrototypeOffset)); | 1845 ldr(result, FieldMemOperand(result, Map::kPrototypeOffset)); |
| 2163 jmp(&done); | 1846 jmp_near(&done); |
| 2164 | 1847 |
| 1848 RECORD_LINE(); |
| 2165 // Non-instance prototype: Fetch prototype from constructor field | 1849 // Non-instance prototype: Fetch prototype from constructor field |
| 2166 // in initial map. | 1850 // in initial map. |
| 2167 bind(&non_instance); | 1851 bind(&non_instance); |
| 2168 ldr(result, FieldMemOperand(result, Map::kConstructorOffset)); | 1852 ldr(result, FieldMemOperand(result, Map::kConstructorOffset)); |
| 2169 | 1853 |
| 2170 // All done. | 1854 // All done. |
| 2171 bind(&done); | 1855 bind(&done); |
| 2172 } | 1856 } |
| 2173 | 1857 |
| 2174 | 1858 |
| 2175 void MacroAssembler::CallStub(CodeStub* stub, Condition cond) { | 1859 void MacroAssembler::CallStub(CodeStub* stub) { |
| 2176 ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs. | 1860 ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs. |
| 2177 Call(stub->GetCode(), RelocInfo::CODE_TARGET, TypeFeedbackId::None(), cond); | 1861 Call(stub->GetCode(), RelocInfo::CODE_TARGET, TypeFeedbackId::None()); |
| 2178 } | 1862 } |
| 2179 | 1863 |
| 2180 | 1864 |
| 2181 void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) { | 1865 void MacroAssembler::TailCallStub(CodeStub* stub) { |
| 2182 ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe()); | 1866 ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe()); |
| 2183 Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond); | 1867 Jump(stub->GetCode(), RelocInfo::CODE_TARGET); |
| 2184 } | 1868 } |
| 2185 | 1869 |
| 2186 | 1870 |
| 2187 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) { | 1871 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) { |
| 2188 return ref0.address() - ref1.address(); | 1872 return ref0.address() - ref1.address(); |
| 2189 } | 1873 } |
| 2190 | 1874 |
| 2191 | 1875 |
| 2192 void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function, | 1876 void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function, |
| 2193 int stack_space) { | 1877 int stack_space) { |
| 2194 ExternalReference next_address = | 1878 ExternalReference next_address = |
| 2195 ExternalReference::handle_scope_next_address(); | 1879 ExternalReference::handle_scope_next_address(); |
| 2196 const int kNextOffset = 0; | 1880 const int kNextOffset = 0; |
| 2197 const int kLimitOffset = AddressOffset( | 1881 const int kLimitOffset = AddressOffset( |
| 2198 ExternalReference::handle_scope_limit_address(), | 1882 ExternalReference::handle_scope_limit_address(), |
| 2199 next_address); | 1883 next_address); |
| 2200 const int kLevelOffset = AddressOffset( | 1884 const int kLevelOffset = AddressOffset( |
| 2201 ExternalReference::handle_scope_level_address(), | 1885 ExternalReference::handle_scope_level_address(), |
| 2202 next_address); | 1886 next_address); |
| 2203 | 1887 |
| 1888 mov(r4, r0); |
| 1889 mov(r5, r1); |
| 1890 |
| 2204 // Allocate HandleScope in callee-save registers. | 1891 // Allocate HandleScope in callee-save registers. |
| 2205 mov(r7, Operand(next_address)); | 1892 // TODO(stm): using r10 and r11 (ip and rtmp) is dangerous here. |
| 2206 ldr(r4, MemOperand(r7, kNextOffset)); | 1893 // We must make sure they are not clobbered before the actual call. |
| 2207 ldr(r5, MemOperand(r7, kLimitOffset)); | 1894 mov(sh4_r11, Operand(next_address)); |
| 2208 ldr(r6, MemOperand(r7, kLevelOffset)); | 1895 ldr(sh4_r8, MemOperand(sh4_r11, kNextOffset), r0); |
| 2209 add(r6, r6, Operand(1)); | 1896 ldr(r9, MemOperand(sh4_r11, kLimitOffset), r0); |
| 2210 str(r6, MemOperand(r7, kLevelOffset)); | 1897 ldr(sh4_r10, MemOperand(sh4_r11, kLevelOffset), r0); |
| 1898 add(sh4_r10, sh4_r10, Operand(1), r0); |
| 1899 str(sh4_r10, MemOperand(sh4_r11, kLevelOffset), r0); |
| 2211 | 1900 |
| 2212 // Native call returns to the DirectCEntry stub which redirects to the | 1901 // Native call returns to the DirectCEntry stub which redirects to the |
| 2213 // return address pushed on stack (could have moved after GC). | 1902 // return address pushed on stack (could have moved after GC). |
| 2214 // DirectCEntry stub itself is generated early and never moves. | 1903 // DirectCEntry stub itself is generated early and never moves. |
| 2215 DirectCEntryStub stub; | 1904 // The scratch register must not be the return value register. |
| 2216 stub.GenerateCall(this, function); | 1905 DirectCEntryStub stub(r2); |
| 1906 stub.GenerateCall(this, function, r0, r1); |
| 1907 |
| 1908 // Move back the registers [r8, r11] => [r4, r7] |
| 1909 mov(r4, sh4_r8); |
| 1910 mov(r5, r9); |
| 1911 mov(r6, sh4_r10); |
| 1912 mov(r7, sh4_r11); |
| 2217 | 1913 |
| 2218 Label promote_scheduled_exception; | 1914 Label promote_scheduled_exception; |
| 2219 Label delete_allocated_handles; | 1915 Label delete_allocated_handles; |
| 2220 Label leave_exit_frame; | 1916 Label leave_exit_frame; |
| 2221 | 1917 |
| 2222 // If result is non-zero, dereference to get the result value | 1918 // If result is non-zero, dereference to get the result value |
| 2223 // otherwise set it to undefined. | 1919 // otherwise set it to undefined. |
| 1920 Label ltrue, lfalse; |
| 2224 cmp(r0, Operand(0)); | 1921 cmp(r0, Operand(0)); |
| 2225 LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq); | 1922 bf_near(&lfalse); |
| 2226 ldr(r0, MemOperand(r0), ne); | 1923 LoadRoot(r0, Heap::kUndefinedValueRootIndex); |
| 1924 jmp_near(<rue); |
| 1925 bind(&lfalse); |
| 1926 ldr(r0, MemOperand(r0)); |
| 1927 bind(<rue); |
| 2227 | 1928 |
| 2228 // No more valid handles (the result handle was the last one). Restore | 1929 // No more valid handles (the result handle was the last one). Restore |
| 2229 // previous handle scope. | 1930 // previous handle scope. |
| 2230 str(r4, MemOperand(r7, kNextOffset)); | 1931 str(r4, MemOperand(r7, kNextOffset)); |
| 2231 if (emit_debug_code()) { | 1932 if (emit_debug_code()) { |
| 2232 ldr(r1, MemOperand(r7, kLevelOffset)); | 1933 ldr(r1, MemOperand(r7, kLevelOffset)); |
| 2233 cmp(r1, r6); | 1934 cmp(r1, r6); |
| 2234 Check(eq, "Unexpected level after return from api call"); | 1935 Check(eq, "Unexpected level after return from api call"); |
| 2235 } | 1936 } |
| 2236 sub(r6, r6, Operand(1)); | 1937 sub(r6, r6, Operand(1)); |
| 2237 str(r6, MemOperand(r7, kLevelOffset)); | 1938 str(r6, MemOperand(r7, kLevelOffset)); |
| 2238 ldr(ip, MemOperand(r7, kLimitOffset)); | 1939 ldr(sh4_ip, MemOperand(r7, kLimitOffset)); |
| 2239 cmp(r5, ip); | 1940 cmp(r5, sh4_ip); |
| 2240 b(ne, &delete_allocated_handles); | 1941 b(ne, &delete_allocated_handles); |
| 2241 | 1942 |
| 2242 // Check if the function scheduled an exception. | 1943 // Check if the function scheduled an exception. |
| 2243 bind(&leave_exit_frame); | 1944 bind(&leave_exit_frame); |
| 2244 LoadRoot(r4, Heap::kTheHoleValueRootIndex); | 1945 LoadRoot(r4, Heap::kTheHoleValueRootIndex); |
| 2245 mov(ip, Operand(ExternalReference::scheduled_exception_address(isolate()))); | 1946 mov(sh4_ip, |
| 2246 ldr(r5, MemOperand(ip)); | 1947 Operand(ExternalReference::scheduled_exception_address(isolate()))); |
| 1948 ldr(r5, MemOperand(sh4_ip)); |
| 2247 cmp(r4, r5); | 1949 cmp(r4, r5); |
| 2248 b(ne, &promote_scheduled_exception); | 1950 b(ne, &promote_scheduled_exception); |
| 2249 | 1951 |
| 2250 // LeaveExitFrame expects unwind space to be in a register. | 1952 // LeaveExitFrame expects unwind space to be in a register. |
| 2251 mov(r4, Operand(stack_space)); | 1953 mov(r4, Operand(stack_space)); |
| 2252 LeaveExitFrame(false, r4); | 1954 LeaveExitFrame(false, r4); |
| 2253 mov(pc, lr); | 1955 rts(); |
| 2254 | 1956 |
| 2255 bind(&promote_scheduled_exception); | 1957 bind(&promote_scheduled_exception); |
| 2256 TailCallExternalReference( | 1958 TailCallExternalReference( |
| 2257 ExternalReference(Runtime::kPromoteScheduledException, isolate()), | 1959 ExternalReference(Runtime::kPromoteScheduledException, isolate()), |
| 2258 0, | 1960 0, |
| 2259 1); | 1961 1); |
| 2260 | 1962 |
| 2261 // HandleScope limit has changed. Delete allocated extensions. | 1963 // HandleScope limit has changed. Delete allocated extensions. |
| 2262 bind(&delete_allocated_handles); | 1964 bind(&delete_allocated_handles); |
| 2263 str(r5, MemOperand(r7, kLimitOffset)); | 1965 // use r9 instead of r5 to keep PrepareCallCFunction() happy |
| 2264 mov(r4, r0); | 1966 str(r9, MemOperand(r7, kLimitOffset)); |
| 2265 PrepareCallCFunction(1, r5); | 1967 mov(sh4_r8, r0); // preserve the result (r0) in a callee-saved reg |
| 2266 mov(r0, Operand(ExternalReference::isolate_address())); | 1968 PrepareCallCFunction(1, r9); |
| 1969 mov(r4, Operand(ExternalReference::isolate_address())); // C-ABI parameter |
| 2267 CallCFunction( | 1970 CallCFunction( |
| 2268 ExternalReference::delete_handle_scope_extensions(isolate()), 1); | 1971 ExternalReference::delete_handle_scope_extensions(isolate()), 1); |
| 2269 mov(r0, r4); | 1972 mov(r0, sh4_r8); // restore result (r0) |
| 2270 jmp(&leave_exit_frame); | 1973 jmp(&leave_exit_frame); |
| 2271 } | 1974 } |
| 2272 | 1975 |
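For reference, the three MemOperand slots juggled above are the handle scope's next/limit/level fields: level is bumped before the native call, next and level are restored afterwards, and a moved limit means extensions were allocated and must be freed via delete_handle_scope_extensions. A rough host-side C++ model of that bookkeeping, with assumed field names rather than the real isolate layout:

    #include <cstddef>

    // Assumed layout of the per-isolate handle scope data the assembly pokes at.
    struct HandleScopeData {
      void** next;   // kNextOffset
      void** limit;  // kLimitOffset
      int level;     // kLevelOffset
    };

    // What the generated code does around the native API call, in C++ form.
    void** CallApiAndRestoreScope(HandleScopeData* data, void** (*native_call)()) {
      void** saved_next = data->next;
      void** saved_limit = data->limit;
      int saved_level = data->level;
      data->level = saved_level + 1;  // enter the scope before the call

      void** result = native_call();  // may allocate handles and move the limit

      data->next = saved_next;        // drop every handle created by the call
      data->level = saved_level;      // Check(eq, ...) verifies this in debug mode
      if (data->limit != saved_limit) {
        // The limit moved: allocated extensions must be freed, which the
        // assembly does by calling delete_handle_scope_extensions().
        data->limit = saved_limit;
      }
      return result;
    }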
| 2273 | 1976 |
| 2274 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) { | 1977 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) { |
| 2275 if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false; | 1978 if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false; |
| 2276 return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe(); | 1979 return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe(); |
| 2277 } | 1980 } |
| 2278 | 1981 |
| 2279 | 1982 |
| 2280 void MacroAssembler::IllegalOperation(int num_arguments) { | 1983 void MacroAssembler::IllegalOperation(int num_arguments) { |
| 1984 RECORD_LINE(); |
| 2281 if (num_arguments > 0) { | 1985 if (num_arguments > 0) { |
| 2282 add(sp, sp, Operand(num_arguments * kPointerSize)); | 1986 add(sp, sp, Operand(num_arguments * kPointerSize)); |
| 2283 } | 1987 } |
| 2284 LoadRoot(r0, Heap::kUndefinedValueRootIndex); | 1988 LoadRoot(r0, Heap::kUndefinedValueRootIndex); |
| 2285 } | 1989 } |
| 2286 | 1990 |
| 1991 void MacroAssembler::SmiToDoubleFPURegister(Register smi, |
| 1992 DwVfpRegister value, |
| 1993 Register scratch) { |
| 1994 asr(scratch, smi, Operand(kSmiTagSize)); |
| 1995 dfloat(value, scratch); |
| 1996 } |
| 1997 |
| 2287 | 1998 |
| 2288 void MacroAssembler::IndexFromHash(Register hash, Register index) { | 1999 void MacroAssembler::IndexFromHash(Register hash, Register index) { |
| 2289 // If the hash field contains an array index pick it out. The assert checks | 2000 // If the hash field contains an array index pick it out. The assert checks |
| 2290 // that the constants for the maximum number of digits for an array index | 2001 // that the constants for the maximum number of digits for an array index |
| 2291 // cached in the hash field and the number of bits reserved for it does not | 2002 // cached in the hash field and the number of bits reserved for it does not |
| 2292 // conflict. | 2003 // conflict. |
| 2293 ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) < | 2004 ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) < |
| 2294 (1 << String::kArrayIndexValueBits)); | 2005 (1 << String::kArrayIndexValueBits)); |
| 2295 // We want the smi-tagged index in key. kArrayIndexValueMask has zeros in | 2006 // We want the smi-tagged index in key. kArrayIndexValueMask has zeros in |
| 2296 // the low kHashShift bits. | 2007 // the low kHashShift bits. |
| 2297 STATIC_ASSERT(kSmiTag == 0); | 2008 STATIC_ASSERT(kSmiTag == 0); |
| 2009 ASSERT(!hash.is(sh4_ip) && !index.is(sh4_ip)); |
| 2010 ASSERT(!hash.is(sh4_rtmp) && !index.is(sh4_rtmp)); |
| 2011 RECORD_LINE(); |
| 2298 Ubfx(hash, hash, String::kHashShift, String::kArrayIndexValueBits); | 2012 Ubfx(hash, hash, String::kHashShift, String::kArrayIndexValueBits); |
| 2299 mov(index, Operand(hash, LSL, kSmiTagSize)); | 2013 lsl(index, hash, Operand(kSmiTagSize)); |
| 2300 } | 2014 } |
| 2301 | 2015 |
| 2302 | 2016 |
| 2303 void MacroAssembler::IntegerToDoubleConversionWithVFP3(Register inReg, | |
| 2304 Register outHighReg, | |
| 2305 Register outLowReg) { | |
| 2306 // ARMv7 VFP3 instructions to implement integer to double conversion. | |
| 2307 mov(r7, Operand(inReg, ASR, kSmiTagSize)); | |
| 2308 vmov(s15, r7); | |
| 2309 vcvt_f64_s32(d7, s15); | |
| 2310 vmov(outLowReg, outHighReg, d7); | |
| 2311 } | |
| 2312 | |
| 2313 | |
| 2314 void MacroAssembler::ObjectToDoubleVFPRegister(Register object, | |
| 2315 DwVfpRegister result, | |
| 2316 Register scratch1, | |
| 2317 Register scratch2, | |
| 2318 Register heap_number_map, | |
| 2319 SwVfpRegister scratch3, | |
| 2320 Label* not_number, | |
| 2321 ObjectToDoubleFlags flags) { | |
| 2322 Label done; | |
| 2323 if ((flags & OBJECT_NOT_SMI) == 0) { | |
| 2324 Label not_smi; | |
| 2325 JumpIfNotSmi(object, ¬_smi); | |
| 2326 // Remove smi tag and convert to double. | |
| 2327 mov(scratch1, Operand(object, ASR, kSmiTagSize)); | |
| 2328 vmov(scratch3, scratch1); | |
| 2329 vcvt_f64_s32(result, scratch3); | |
| 2330 b(&done); | |
| 2331 bind(¬_smi); | |
| 2332 } | |
| 2333 // Check for heap number and load double value from it. | |
| 2334 ldr(scratch1, FieldMemOperand(object, HeapObject::kMapOffset)); | |
| 2335 sub(scratch2, object, Operand(kHeapObjectTag)); | |
| 2336 cmp(scratch1, heap_number_map); | |
| 2337 b(ne, not_number); | |
| 2338 if ((flags & AVOID_NANS_AND_INFINITIES) != 0) { | |
| 2339 // If exponent is all ones the number is either a NaN or +/-Infinity. | |
| 2340 ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset)); | |
| 2341 Sbfx(scratch1, | |
| 2342 scratch1, | |
| 2343 HeapNumber::kExponentShift, | |
| 2344 HeapNumber::kExponentBits); | |
| 2345 // All-one value sign extend to -1. | |
| 2346 cmp(scratch1, Operand(-1)); | |
| 2347 b(eq, not_number); | |
| 2348 } | |
| 2349 vldr(result, scratch2, HeapNumber::kValueOffset); | |
| 2350 bind(&done); | |
| 2351 } | |
| 2352 | |
| 2353 | |
| 2354 void MacroAssembler::SmiToDoubleVFPRegister(Register smi, | |
| 2355 DwVfpRegister value, | |
| 2356 Register scratch1, | |
| 2357 SwVfpRegister scratch2) { | |
| 2358 mov(scratch1, Operand(smi, ASR, kSmiTagSize)); | |
| 2359 vmov(scratch2, scratch1); | |
| 2360 vcvt_f64_s32(value, scratch2); | |
| 2361 } | |
| 2362 | |
| 2363 | |
| 2364 // Tries to get a signed int32 out of a double precision floating point heap | 2017 // Tries to get a signed int32 out of a double precision floating point heap |
| 2365 // number. Rounds towards 0. Branches to 'not_int32' if the double is out of the | 2018 // number. Rounds towards 0. Branches to 'not_int32' if the double is out of the |
| 2366 // 32-bit signed integer range. | 2019 // 32-bit signed integer range. |
| 2367 void MacroAssembler::ConvertToInt32(Register source, | 2020 void MacroAssembler::ConvertToInt32(Register source, |
| 2368 Register dest, | 2021 Register dest, |
| 2369 Register scratch, | 2022 Register scratch, |
| 2370 Register scratch2, | 2023 Register scratch2, |
| 2371 DwVfpRegister double_scratch, | 2024 DwVfpRegister double_scratch, |
| 2372 Label *not_int32) { | 2025 Label *not_int32) { |
| 2373 if (CpuFeatures::IsSupported(VFP2)) { | 2026 ASSERT(!source.is(sh4_ip) && !dest.is(sh4_ip) && !scratch.is(sh4_ip) && |
| 2374 CpuFeatures::Scope scope(VFP2); | 2027 !scratch2.is(sh4_ip)); |
| 2028 ASSERT(!source.is(sh4_rtmp) && !dest.is(sh4_rtmp) && !scratch.is(sh4_rtmp) && |
| 2029 !scratch2.is(sh4_rtmp)); |
| 2030 ASSERT(!source.is(dest) && !source.is(scratch) && !source.is(scratch2) && |
| 2031 !dest.is(scratch) && !dest.is(scratch2) && !scratch.is(scratch2)); |
| 2032 |
| 2033 if (CpuFeatures::IsSupported(FPU)) { |
| 2375 sub(scratch, source, Operand(kHeapObjectTag)); | 2034 sub(scratch, source, Operand(kHeapObjectTag)); |
| 2376 vldr(double_scratch, scratch, HeapNumber::kValueOffset); | 2035 dldr(double_scratch, MemOperand(scratch, HeapNumber::kValueOffset)); |
| 2377 vcvt_s32_f64(double_scratch.low(), double_scratch); | 2036 idouble(dest, double_scratch); |
| 2378 vmov(dest, double_scratch.low()); | |
| 2379 // Signed vcvt instruction will saturate to the minimum (0x80000000) or | 2037 // Signed vcvt instruction will saturate to the minimum (0x80000000) or |
| 2380 // maximun (0x7fffffff) signed 32bits integer when the double is out of | 2038 // maximun (0x7fffffff) signed 32bits integer when the double is out of |
| 2381 // range. When substracting one, the minimum signed integer becomes the | 2039 // range. When substracting one, the minimum signed integer becomes the |
| 2382 // maximun signed integer. | 2040 // maximun signed integer. |
| 2383 sub(scratch, dest, Operand(1)); | 2041 sub(scratch, dest, Operand(1)); |
| 2384 cmp(scratch, Operand(LONG_MAX - 1)); | 2042 cmpge(scratch, Operand(LONG_MAX - 1)); |
| 2385 // If equal then dest was LONG_MAX, if greater dest was LONG_MIN. | 2043 // If equal then dest was LONG_MAX, if greater dest was LONG_MIN. |
| 2386 b(ge, not_int32); | 2044 bt(not_int32); |
| 2387 } else { | 2045 } else { |
| 2388 // This code is faster for doubles that are in the ranges -0x7fffffff to | 2046 // This code is faster for doubles that are in the ranges -0x7fffffff to |
| 2389 // -0x40000000 or 0x40000000 to 0x7fffffff. This corresponds almost to | 2047 // -0x40000000 or 0x40000000 to 0x7fffffff. This corresponds almost to |
| 2390 // the range of signed int32 values that are not Smis. Jumps to the label | 2048 // the range of signed int32 values that are not Smis. Jumps to the label |
| 2391 // 'not_int32' if the double isn't in the range -0x80000000.0 to | 2049 // 'not_int32' if the double isn't in the range -0x80000000.0 to |
| 2392 // 0x80000000.0 (excluding the endpoints). | 2050 // 0x80000000.0 (excluding the endpoints). |
| 2393 Label right_exponent, done; | 2051 Label right_exponent, done; |
| 2394 // Get exponent word. | 2052 // Get exponent word. |
| 2395 ldr(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset)); | 2053 ldr(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset)); |
| 2396 // Get exponent alone in scratch2. | 2054 // Get exponent alone in scratch2. |
| (...skipping 14 matching lines...) |
| 2411 // for cmp because of the overflow flag, but we know the exponent is in the | 2069 // for cmp because of the overflow flag, but we know the exponent is in the |
| 2412 // range 0-2047 so there is no overflow. | 2070 // range 0-2047 so there is no overflow. |
| 2413 int fudge_factor = 0x400; | 2071 int fudge_factor = 0x400; |
| 2414 sub(scratch2, scratch2, Operand(fudge_factor)); | 2072 sub(scratch2, scratch2, Operand(fudge_factor)); |
| 2415 cmp(scratch2, Operand(non_smi_exponent - fudge_factor)); | 2073 cmp(scratch2, Operand(non_smi_exponent - fudge_factor)); |
| 2416 // If we have a match of the int32-but-not-Smi exponent then skip some | 2074 // If we have a match of the int32-but-not-Smi exponent then skip some |
| 2417 // logic. | 2075 // logic. |
| 2418 b(eq, &right_exponent); | 2076 b(eq, &right_exponent); |
| 2419 // If the exponent is higher than that then go to slow case. This catches | 2077 // If the exponent is higher than that then go to slow case. This catches |
| 2420 // numbers that don't fit in a signed int32, infinities and NaNs. | 2078 // numbers that don't fit in a signed int32, infinities and NaNs. |
| 2421 b(gt, not_int32); | 2079 cmpgt(scratch2, Operand(non_smi_exponent - fudge_factor)); |
| 2080 bt(not_int32); |
| 2422 | 2081 |
| 2423 // We know the exponent is smaller than 30 (biased). If it is less than | 2082 // We know the exponent is smaller than 30 (biased). If it is less than |
| 2424 // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e. | 2083 // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e. |
| 2425 // it rounds to zero. | 2084 // it rounds to zero. |
| 2426 const uint32_t zero_exponent = HeapNumber::kExponentBias + 0; | 2085 const uint32_t zero_exponent = HeapNumber::kExponentBias + 0; |
| 2427 sub(scratch2, scratch2, Operand(zero_exponent - fudge_factor), SetCC); | 2086 cmpge(scratch2, Operand(zero_exponent - fudge_factor)); // for branch below |
| 2087 sub(scratch2, scratch2, Operand(zero_exponent - fudge_factor)); |
| 2428 // Dest already has a Smi zero. | 2088 // Dest already has a Smi zero. |
| 2429 b(lt, &done); | 2089 bf(&done); |
| 2430 | 2090 |
| 2431 // We have an exponent between 0 and 30 in scratch2. Subtract from 30 to | 2091 // We have an exponent between 0 and 30 in scratch2. Subtract from 30 to |
| 2432 // get how much to shift down. | 2092 // get how much to shift down. |
| 2433 rsb(dest, scratch2, Operand(30)); | 2093 rsb(dest, scratch2, Operand(30)); |
| 2434 | 2094 |
| 2435 bind(&right_exponent); | 2095 bind(&right_exponent); |
| 2436 // Get the top bits of the mantissa. | 2096 // Get the top bits of the mantissa. |
| 2437 and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask)); | 2097 land(scratch2, scratch, Operand(HeapNumber::kMantissaMask)); |
| 2438 // Put back the implicit 1. | 2098 // Put back the implicit 1. |
| 2439 orr(scratch2, scratch2, Operand(1 << HeapNumber::kExponentShift)); | 2099 lor(scratch2, scratch2, Operand(1 << HeapNumber::kExponentShift)); |
| 2440 // Shift up the mantissa bits to take up the space the exponent used to | 2100 // Shift up the mantissa bits to take up the space the exponent used to |
| 2441 // take. We just orred in the implicit bit so that took care of one and | 2101 // take. We just orred in the implicit bit so that took care of one and |
| 2442 // we want to leave the sign bit 0 so we subtract 2 bits from the shift | 2102 // we want to leave the sign bit 0 so we subtract 2 bits from the shift |
| 2443 // distance. | 2103 // distance. |
| 2444 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2; | 2104 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2; |
| 2445 mov(scratch2, Operand(scratch2, LSL, shift_distance)); | 2105 lsl(scratch2, scratch2, Operand(shift_distance)); |
| 2446 // Put sign in zero flag. | 2106 // Put sign in zero flag. |
| 2447 tst(scratch, Operand(HeapNumber::kSignMask)); | 2107 tst(scratch, Operand(HeapNumber::kSignMask)); |
| 2448 // Get the second half of the double. For some exponents we don't | 2108 // Get the second half of the double. For some exponents we don't |
| 2449 // actually need this because the bits get shifted out again, but | 2109 // actually need this because the bits get shifted out again, but |
| 2450 // it's probably slower to test than just to do it. | 2110 // it's probably slower to test than just to do it. |
| 2451 ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset)); | 2111 ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset)); |
| 2452 // Shift down 22 bits to get the last 10 bits. | 2112 // Shift down 22 bits to get the last 10 bits. |
| 2453 orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance)); | 2113 lsr(scratch, scratch, Operand(32 - shift_distance)); |
| 2114 lor(scratch, scratch2, scratch); |
| 2454 // Move down according to the exponent. | 2115 // Move down according to the exponent. |
| 2455 mov(dest, Operand(scratch, LSR, dest)); | 2116 lsr(dest, scratch, dest); |
| 2456 // Fix sign if sign bit was set. | 2117 // Fix sign if sign bit was set. |
| 2457 rsb(dest, dest, Operand(0, RelocInfo::NONE), LeaveCC, ne); | 2118 rsb(dest, dest, Operand(0, RelocInfo::NONE), ne); |
| 2458 bind(&done); | 2119 bind(&done); |
| 2459 } | 2120 } |
| 2460 } | 2121 } |
| 2461 | 2122 |
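The non-FPU path above works directly on the two 32-bit words of the IEEE-754 double: extract and un-bias the exponent, or the implicit 1 back into the mantissa, shift so only the integer part survives, and reapply the sign. A host-side illustration of the same bit layout (plain C++, truncation toward zero, NaN/Infinity and out-of-int32-range handling omitted; not the generated code):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      double value = -12345.0;
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof bits);

      uint32_t high = static_cast<uint32_t>(bits >> 32);  // HeapNumber exponent word
      uint32_t low = static_cast<uint32_t>(bits);         // HeapNumber mantissa word

      int biased = static_cast<int>((high >> 20) & 0x7FF);
      int exponent = biased - 1023;                        // remove the bias
      // Put the implicit 1 back above the 52 stored mantissa bits.
      uint64_t mantissa =
          (static_cast<uint64_t>(high & 0xFFFFF) << 32) | low | (1ULL << 52);

      int64_t magnitude;
      if (exponent < 0) {
        magnitude = 0;  // |value| < 1 truncates to zero
      } else if (exponent <= 52) {
        magnitude = static_cast<int64_t>(mantissa >> (52 - exponent));
      } else {
        // Exponents past 52 no longer fit in an int32 anyway.
        magnitude = static_cast<int64_t>(mantissa << (exponent - 52));
      }
      int32_t result = (high & 0x80000000u) ? static_cast<int32_t>(-magnitude)
                                            : static_cast<int32_t>(magnitude);
      std::printf("%f -> %d\n", value, result);  // prints -12345.000000 -> -12345
      return 0;
    }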
| 2462 | 2123 |
| 2463 void MacroAssembler::EmitVFPTruncate(VFPRoundingMode rounding_mode, | 2124 void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode, |
| 2464 Register result, | 2125 Register result, |
| 2465 DwVfpRegister double_input, | 2126 DwVfpRegister double_input, |
| 2466 Register scratch, | 2127 Register scratch, |
| 2467 DwVfpRegister double_scratch, | |
| 2468 CheckForInexactConversion check_inexact) { | 2128 CheckForInexactConversion check_inexact) { |
| 2469 ASSERT(!result.is(scratch)); | 2129 ASSERT(rounding_mode == kRoundToZero); |
| 2470 ASSERT(!double_input.is(double_scratch)); | 2130 int32_t check_inexact_conversion = |
| 2131 (check_inexact == kCheckForInexactConversion) ? kFPUInexactExceptionBit : 0; |
| 2471 | 2132 |
| 2472 ASSERT(CpuFeatures::IsSupported(VFP2)); | 2133 idouble(result, double_input, scratch); |
| 2473 CpuFeatures::Scope scope(VFP2); | |
| 2474 Register prev_fpscr = result; | |
| 2475 Label done; | |
| 2476 | 2134 |
| 2477 // Test for values that can be exactly represented as a signed 32-bit integer. | 2135 // Check for FPU exceptions |
| 2478 vcvt_s32_f64(double_scratch.low(), double_input); | 2136 tst(scratch, Operand(kFPUExceptionMask | check_inexact_conversion)); |
| 2479 vmov(result, double_scratch.low()); | |
| 2480 vcvt_f64_s32(double_scratch, double_scratch.low()); | |
| 2481 VFPCompareAndSetFlags(double_input, double_scratch); | |
| 2482 b(eq, &done); | |
| 2483 | |
| 2484 // Convert to integer, respecting rounding mode. | |
| 2485 int32_t check_inexact_conversion = | |
| 2486 (check_inexact == kCheckForInexactConversion) ? kVFPInexactExceptionBit : 0; | |
| 2487 | |
| 2488 // Set custom FPCSR: | |
| 2489 // - Set rounding mode. | |
| 2490 // - Clear vfp cumulative exception flags. | |
| 2491 // - Make sure Flush-to-zero mode control bit is unset. | |
| 2492 vmrs(prev_fpscr); | |
| 2493 bic(scratch, | |
| 2494 prev_fpscr, | |
| 2495 Operand(kVFPExceptionMask | | |
| 2496 check_inexact_conversion | | |
| 2497 kVFPRoundingModeMask | | |
| 2498 kVFPFlushToZeroMask)); | |
| 2499 // 'Round To Nearest' is encoded by 0b00 so no bits need to be set. | |
| 2500 if (rounding_mode != kRoundToNearest) { | |
| 2501 orr(scratch, scratch, Operand(rounding_mode)); | |
| 2502 } | |
| 2503 vmsr(scratch); | |
| 2504 | |
| 2505 // Convert the argument to an integer. | |
| 2506 vcvt_s32_f64(double_scratch.low(), | |
| 2507 double_input, | |
| 2508 (rounding_mode == kRoundToZero) ? kDefaultRoundToZero | |
| 2509 : kFPSCRRounding); | |
| 2510 | |
| 2511 // Retrieve FPSCR. | |
| 2512 vmrs(scratch); | |
| 2513 // Restore FPSCR. | |
| 2514 vmsr(prev_fpscr); | |
| 2515 // Move the converted value into the result register. | |
| 2516 vmov(result, double_scratch.low()); | |
| 2517 // Check for vfp exceptions. | |
| 2518 tst(scratch, Operand(kVFPExceptionMask | check_inexact_conversion)); | |
| 2519 | |
| 2520 bind(&done); | |
| 2521 } | 2137 } |
| 2522 | 2138 |
| 2523 | 2139 |
| 2524 void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result, | 2140 void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result, |
| 2525 Register input_high, | 2141 Register input_high, |
| 2526 Register input_low, | 2142 Register input_low, |
| 2527 Register scratch) { | 2143 Register scratch) { |
| 2528 Label done, normal_exponent, restore_sign; | 2144 Label done, normal_exponent, restore_sign; |
| 2529 | 2145 |
| 2530 // Extract the biased exponent in result. | 2146 // Extract the biased exponent in result. |
| 2531 Ubfx(result, | 2147 Ubfx(result, |
| 2532 input_high, | 2148 input_high, |
| 2533 HeapNumber::kExponentShift, | 2149 HeapNumber::kExponentShift, |
| 2534 HeapNumber::kExponentBits); | 2150 HeapNumber::kExponentBits); |
| 2535 | 2151 |
| 2536 // Check for Infinity and NaNs, which should return 0. | 2152 // Check for Infinity and NaNs, which should return 0. |
| 2537 cmp(result, Operand(HeapNumber::kExponentMask)); | 2153 cmp(result, Operand(HeapNumber::kExponentMask)); |
| 2538 mov(result, Operand(0), LeaveCC, eq); | 2154 mov(result, Operand(0), eq); |
| 2539 b(eq, &done); | 2155 b(eq, &done); |
| 2540 | 2156 |
| 2541 // Express exponent as delta to (number of mantissa bits + 31). | 2157 // Express exponent as delta to (number of mantissa bits + 31). |
| 2542 sub(result, | 2158 sub(result, |
| 2543 result, | 2159 result, |
| 2544 Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31), | 2160 Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31)); |
| 2545 SetCC); | 2161 cmpgt(result, Operand(0)); |
| 2546 | 2162 |
| 2547 // If the delta is strictly positive, all bits would be shifted away, | 2163 // If the delta is strictly positive, all bits would be shifted away, |
| 2548 // which means that we can return 0. | 2164 // which means that we can return 0. |
| 2549 b(le, &normal_exponent); | 2165 b(f, &normal_exponent); |
| 2550 mov(result, Operand(0)); | 2166 mov(result, Operand(0)); |
| 2551 b(&done); | 2167 b(&done); |
| 2552 | 2168 |
| 2553 bind(&normal_exponent); | 2169 bind(&normal_exponent); |
| 2554 const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1; | 2170 const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1; |
| 2555 // Calculate shift. | 2171 // Calculate shift. |
| 2556 add(scratch, result, Operand(kShiftBase + HeapNumber::kMantissaBits), SetCC); | 2172 add(scratch, result, Operand(kShiftBase + HeapNumber::kMantissaBits)); |
| 2557 | 2173 |
| 2558 // Save the sign. | 2174 // Save the sign. |
| 2559 Register sign = result; | 2175 Register sign = result; |
| 2560 result = no_reg; | 2176 result = no_reg; |
| 2561 and_(sign, input_high, Operand(HeapNumber::kSignMask)); | 2177 land(sign, input_high, Operand(HeapNumber::kSignMask)); |
| 2562 | 2178 |
| 2563 // Set the implicit 1 before the mantissa part in input_high. | 2179 // Set the implicit 1 before the mantissa part in input_high. |
| 2564 orr(input_high, | 2180 orr(input_high, |
| 2565 input_high, | 2181 input_high, |
| 2566 Operand(1 << HeapNumber::kMantissaBitsInTopWord)); | 2182 Operand(1 << HeapNumber::kMantissaBitsInTopWord)); |
| 2567 // Shift the mantissa bits to the correct position. | 2183 // Shift the mantissa bits to the correct position. |
| 2568 // We don't need to clear non-mantissa bits as they will be shifted away. | 2184 // We don't need to clear non-mantissa bits as they will be shifted away. |
| 2569 // If they weren't, it would mean that the answer is in the 32bit range. | 2185 // If they weren't, it would mean that the answer is in the 32bit range. |
| 2570 mov(input_high, Operand(input_high, LSL, scratch)); | 2186 lsl(input_high, input_high, scratch); |
| 2571 | 2187 |
| 2572 // Replace the shifted bits with bits from the lower mantissa word. | 2188 // Replace the shifted bits with bits from the lower mantissa word. |
| 2573 Label pos_shift, shift_done; | 2189 Label pos_shift, shift_done; |
| 2574 rsb(scratch, scratch, Operand(32), SetCC); | 2190 rsb(scratch, scratch, Operand(32)); |
| 2575 b(&pos_shift, ge); | 2191 cmpge(scratch, Operand(0)); |
| 2192 bt(&pos_shift); |
| 2576 | 2193 |
| 2577 // Negate scratch. | 2194 // Negate scratch. |
| 2578 rsb(scratch, scratch, Operand(0)); | 2195 rsb(scratch, scratch, Operand(0)); |
| 2579 mov(input_low, Operand(input_low, LSL, scratch)); | 2196 lsl(input_low, input_low, scratch); |
| 2580 b(&shift_done); | 2197 b(&shift_done); |
| 2581 | 2198 |
| 2582 bind(&pos_shift); | 2199 bind(&pos_shift); |
| 2583 mov(input_low, Operand(input_low, LSR, scratch)); | 2200 lsr(input_low, input_low, scratch); |
| 2584 | 2201 |
| 2585 bind(&shift_done); | 2202 bind(&shift_done); |
| 2586 orr(input_high, input_high, Operand(input_low)); | 2203 orr(input_high, input_high, input_low); |
| 2587 // Restore sign if necessary. | 2204 // Restore sign if necessary. |
| 2588 cmp(sign, Operand(0)); | 2205 cmp(sign, Operand(0)); |
| 2589 result = sign; | 2206 result = sign; |
| 2590 sign = no_reg; | 2207 sign = no_reg; |
| 2591 rsb(result, input_high, Operand(0), LeaveCC, ne); | 2208 rsb(result, input_high, Operand(0)); |
| 2592 mov(result, input_high, LeaveCC, eq); | 2209 mov(result, input_high, eq); |
| 2593 bind(&done); | 2210 bind(&done); |
| 2594 } | 2211 } |
| 2595 | 2212 |
| 2596 | 2213 |
| 2597 void MacroAssembler::EmitECMATruncate(Register result, | 2214 void MacroAssembler::EmitECMATruncate(Register result, |
| 2598 DwVfpRegister double_input, | 2215 DwVfpRegister double_input, |
| 2599 SwVfpRegister single_scratch, | 2216 SwVfpRegister single_scratch, |
| 2600 Register scratch, | 2217 Register scratch, |
| 2601 Register input_high, | 2218 Register input_high, |
| 2602 Register input_low) { | 2219 Register input_low) { |
| 2603 CpuFeatures::Scope scope(VFP2); | 2220 ASSERT(CpuFeatures::IsSupported(FPU)); |
| 2604 ASSERT(!input_high.is(result)); | 2221 ASSERT(!input_high.is(result)); |
| 2605 ASSERT(!input_low.is(result)); | 2222 ASSERT(!input_low.is(result)); |
| 2606 ASSERT(!input_low.is(input_high)); | 2223 ASSERT(!input_low.is(input_high)); |
| 2607 ASSERT(!scratch.is(result) && | 2224 ASSERT(!scratch.is(result) && |
| 2608 !scratch.is(input_high) && | 2225 !scratch.is(input_high) && |
| 2609 !scratch.is(input_low)); | 2226 !scratch.is(input_low)); |
| 2610 ASSERT(!single_scratch.is(double_input.low()) && | 2227 ASSERT(!single_scratch.is(double_input.low()) && |
| 2611 !single_scratch.is(double_input.high())); | 2228 !single_scratch.is(double_input.high())); |
| 2612 | 2229 |
| 2613 Label done; | 2230 Label done; |
| 2614 | 2231 |
| 2615 // Clear cumulative exception flags. | 2232 // Do the conversion |
| 2616 ClearFPSCRBits(kVFPExceptionMask, scratch); | 2233 idouble(result, double_input); |
| 2617 // Try a conversion to a signed integer. | 2234 // Retrieve the FPSCR. |
| 2618 vcvt_s32_f64(single_scratch, double_input); | 2235 str_fpscr(scratch); |
| 2619 vmov(result, single_scratch); | |
| 2620 // Retrieve he FPSCR. | |
| 2621 vmrs(scratch); | |
| 2622 // Check for overflow and NaNs. | 2236 // Check for overflow and NaNs. |
| 2623 tst(scratch, Operand(kVFPOverflowExceptionBit | | 2237 tst(scratch, Operand(kFPUOverflowExceptionBit | |
| 2624 kVFPUnderflowExceptionBit | | 2238 kFPUUnderflowExceptionBit | |
| 2625 kVFPInvalidOpExceptionBit)); | 2239 kFPUInvalidExceptionBit | |
| 2240 kFPUInexactExceptionBit)); |
| 2626 // If we had no exceptions we are done. | 2241 // If we had no exceptions we are done. |
| 2627 b(eq, &done); | 2242 b(eq, &done); |
| 2628 | 2243 |
| 2629 // Load the double value and perform a manual truncation. | 2244 // Load the double value and perform a manual truncation. |
| 2630 vmov(input_low, input_high, double_input); | 2245 movd(input_low, input_high, double_input); |
| 2631 EmitOutOfInt32RangeTruncate(result, | 2246 EmitOutOfInt32RangeTruncate(result, |
| 2632 input_high, | 2247 input_high, |
| 2633 input_low, | 2248 input_low, |
| 2634 scratch); | 2249 scratch); |
| 2635 bind(&done); | 2250 bind(&done); |
| 2636 } | 2251 } |
| 2637 | 2252 |
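EmitECMATruncate implements ECMA-262 ToInt32 semantics: a value that converts cleanly is taken from the FPU result, and anything that raises an exception flag is reduced modulo 2^32 from its raw mantissa words and reinterpreted as signed, which is what the out-of-range fallback above computes. A small host-side reference for those semantics (an assumption-level sketch, not the generated code):

    #include <cassert>
    #include <cmath>
    #include <cstdint>

    // Reference semantics of ECMA-262 ToInt32 for doubles: NaN/Infinity map to
    // zero; otherwise truncate toward zero, wrap modulo 2^32, and reinterpret
    // as a signed 32-bit value.
    int32_t EcmaToInt32(double value) {
      if (std::isnan(value) || std::isinf(value)) return 0;
      double truncated = std::trunc(value);
      // fmod keeps the sign of the dividend, so fold negatives into [0, 2^32).
      double wrapped = std::fmod(truncated, 4294967296.0);
      if (wrapped < 0) wrapped += 4294967296.0;
      return static_cast<int32_t>(static_cast<uint32_t>(wrapped));
    }

    int main() {
      assert(EcmaToInt32(1.9) == 1);
      assert(EcmaToInt32(-1.9) == -1);
      assert(EcmaToInt32(4294967296.0 + 5.0) == 5);    // wraps modulo 2^32
      assert(EcmaToInt32(2147483648.0) == INT32_MIN);  // 2^31 becomes INT32_MIN
      return 0;
    }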
| 2638 | 2253 |
| 2639 void MacroAssembler::GetLeastBitsFromSmi(Register dst, | 2254 void MacroAssembler::GetLeastBitsFromSmi(Register dst, |
| 2640 Register src, | 2255 Register src, |
| 2641 int num_least_bits) { | 2256 int num_least_bits) { |
| 2642 if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) { | 2257 Ubfx(dst, src, kSmiTagSize, num_least_bits); |
| 2643 ubfx(dst, src, kSmiTagSize, num_least_bits); | |
| 2644 } else { | |
| 2645 mov(dst, Operand(src, ASR, kSmiTagSize)); | |
| 2646 and_(dst, dst, Operand((1 << num_least_bits) - 1)); | |
| 2647 } | |
| 2648 } | 2258 } |
| 2649 | 2259 |
| 2650 | 2260 |
| 2651 void MacroAssembler::GetLeastBitsFromInt32(Register dst, | 2261 void MacroAssembler::GetLeastBitsFromInt32(Register dst, |
| 2652 Register src, | 2262 Register src, |
| 2653 int num_least_bits) { | 2263 int num_least_bits) { |
| 2654 and_(dst, src, Operand((1 << num_least_bits) - 1)); | 2264 ASSERT(!dst.is(sh4_rtmp) && !src.is(sh4_rtmp)); |
| 2265 land(dst, src, Operand((1 << num_least_bits) - 1)); |
| 2655 } | 2266 } |
| 2656 | 2267 |
| 2657 | 2268 |
| 2658 void MacroAssembler::CallRuntime(const Runtime::Function* f, | 2269 void MacroAssembler::CallRuntime(const Runtime::Function* f, |
| 2659 int num_arguments) { | 2270 int num_arguments) { |
| 2271 // No register conventions on entry. |
| 2660 // All parameters are on the stack. r0 has the return value after call. | 2272 // All parameters are on the stack. r0 has the return value after call. |
| 2273 #ifdef DEBUG |
| 2274 // Clobber parameter registers on entry. |
| 2275 Dead(r0, r1, r2, r3); |
| 2276 Dead(r4, r5, r6, r7); |
| 2277 #endif |
| 2661 | 2278 |
| 2662 // If the expected number of arguments of the runtime function is | 2279 // If the expected number of arguments of the runtime function is |
| 2663 // constant, we check that the actual number of arguments match the | 2280 // constant, we check that the actual number of arguments match the |
| 2664 // expectation. | 2281 // expectation. |
| 2282 RECORD_LINE(); |
| 2665 if (f->nargs >= 0 && f->nargs != num_arguments) { | 2283 if (f->nargs >= 0 && f->nargs != num_arguments) { |
| 2666 IllegalOperation(num_arguments); | 2284 IllegalOperation(num_arguments); |
| 2667 return; | 2285 return; |
| 2668 } | 2286 } |
| 2669 | 2287 |
| 2670 // TODO(1236192): Most runtime routines don't need the number of | 2288 // TODO(1236192): Most runtime routines don't need the number of |
| 2671 // arguments passed in because it is constant. At some point we | 2289 // arguments passed in because it is constant. At some point we |
| 2672 // should remove this need and make the runtime routine entry code | 2290 // should remove this need and make the runtime routine entry code |
| 2673 // smarter. | 2291 // smarter. |
| 2674 mov(r0, Operand(num_arguments)); | 2292 mov(r0, Operand(num_arguments)); |
| 2675 mov(r1, Operand(ExternalReference(f, isolate()))); | 2293 mov(r1, Operand(ExternalReference(f, isolate()))); |
| 2676 CEntryStub stub(1); | 2294 CEntryStub stub(1); |
| 2677 CallStub(&stub); | 2295 CallStub(&stub); |
| 2678 } | 2296 } |
| 2679 | 2297 |
| 2680 | 2298 |
| 2681 void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) { | 2299 void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) { |
| 2300 RECORD_LINE(); |
| 2682 CallRuntime(Runtime::FunctionForId(fid), num_arguments); | 2301 CallRuntime(Runtime::FunctionForId(fid), num_arguments); |
| 2683 } | 2302 } |
| 2684 | 2303 |
| 2685 | 2304 |
| 2686 void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) { | |
| 2687 const Runtime::Function* function = Runtime::FunctionForId(id); | |
| 2688 mov(r0, Operand(function->nargs)); | |
| 2689 mov(r1, Operand(ExternalReference(function, isolate()))); | |
| 2690 CEntryStub stub(1, kSaveFPRegs); | |
| 2691 CallStub(&stub); | |
| 2692 } | |
| 2693 | |
| 2694 | |
| 2695 void MacroAssembler::CallExternalReference(const ExternalReference& ext, | 2305 void MacroAssembler::CallExternalReference(const ExternalReference& ext, |
| 2696 int num_arguments) { | 2306 int num_arguments) { |
| 2697 mov(r0, Operand(num_arguments)); | 2307 mov(r0, Operand(num_arguments)); |
| 2698 mov(r1, Operand(ext)); | 2308 mov(r1, Operand(ext)); |
| 2699 | 2309 |
| 2700 CEntryStub stub(1); | 2310 CEntryStub stub(1); |
| 2701 CallStub(&stub); | 2311 CallStub(&stub); |
| 2702 } | 2312 } |
| 2703 | 2313 |
| 2704 | 2314 |
| 2705 void MacroAssembler::TailCallExternalReference(const ExternalReference& ext, | 2315 void MacroAssembler::TailCallExternalReference(const ExternalReference& ext, |
| 2706 int num_arguments, | 2316 int num_arguments, |
| 2707 int result_size) { | 2317 int result_size) { |
| 2708 // TODO(1236192): Most runtime routines don't need the number of | 2318 // TODO(1236192): Most runtime routines don't need the number of |
| 2709 // arguments passed in because it is constant. At some point we | 2319 // arguments passed in because it is constant. At some point we |
| 2710 // should remove this need and make the runtime routine entry code | 2320 // should remove this need and make the runtime routine entry code |
| 2711 // smarter. | 2321 // smarter. |
| 2322 RECORD_LINE(); |
| 2712 mov(r0, Operand(num_arguments)); | 2323 mov(r0, Operand(num_arguments)); |
| 2713 JumpToExternalReference(ext); | 2324 JumpToExternalReference(ext); |
| 2714 } | 2325 } |
| 2715 | 2326 |
| 2716 | 2327 |
| 2717 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid, | 2328 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid, |
| 2718 int num_arguments, | 2329 int num_arguments, |
| 2719 int result_size) { | 2330 int result_size) { |
| 2331 RECORD_LINE(); |
| 2720 TailCallExternalReference(ExternalReference(fid, isolate()), | 2332 TailCallExternalReference(ExternalReference(fid, isolate()), |
| 2721 num_arguments, | 2333 num_arguments, |
| 2722 result_size); | 2334 result_size); |
| 2723 } | 2335 } |
| 2724 | 2336 |
| 2725 | 2337 |
| 2726 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) { | 2338 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) { |
| 2727 #if defined(__thumb__) | 2339 RECORD_LINE(); |
| 2728 // Thumb mode builtin. | |
| 2729 ASSERT((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1); | |
| 2730 #endif | |
| 2731 mov(r1, Operand(builtin)); | 2340 mov(r1, Operand(builtin)); |
| 2732 CEntryStub stub(1); | 2341 CEntryStub stub(1); |
| 2733 Jump(stub.GetCode(), RelocInfo::CODE_TARGET); | 2342 RECORD_LINE(); |
| 2343 jmp(stub.GetCode(), RelocInfo::CODE_TARGET); |
| 2734 } | 2344 } |
| 2735 | 2345 |
| 2736 | 2346 |
| 2737 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, | 2347 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, |
| 2738 InvokeFlag flag, | 2348 InvokeFlag flag, |
| 2739 const CallWrapper& call_wrapper) { | 2349 const CallWrapper& call_wrapper) { |
| 2740 // You can't call a builtin without a valid frame. | 2350 // You can't call a builtin without a valid frame. |
| 2741 ASSERT(flag == JUMP_FUNCTION || has_frame()); | 2351 ASSERT(flag == JUMP_FUNCTION || has_frame()); |
| 2742 | 2352 |
| 2353 // No register conventions on entry. |
| 2354 // All parameters are on stack. |
| 2355 // Return value in r0 after call. |
| 2356 #ifdef DEBUG |
| 2357 // Clobber parameter registers on entry. |
| 2358 Dead(r0, r1, r2, r3); |
| 2359 Dead(r4, r5, r6, r7); |
| 2360 #endif |
| 2361 |
| 2362 RECORD_LINE(); |
| 2743 GetBuiltinEntry(r2, id); | 2363 GetBuiltinEntry(r2, id); |
| 2744 if (flag == CALL_FUNCTION) { | 2364 if (flag == CALL_FUNCTION) { |
| 2745 call_wrapper.BeforeCall(CallSize(r2)); | 2365 RECORD_LINE(); |
| 2366 call_wrapper.BeforeCall(2 * kInstrSize); |
| 2746 SetCallKind(r5, CALL_AS_METHOD); | 2367 SetCallKind(r5, CALL_AS_METHOD); |
| 2747 Call(r2); | 2368 jsr(r2); |
| 2748 call_wrapper.AfterCall(); | 2369 call_wrapper.AfterCall(); |
| 2749 } else { | 2370 } else { |
| 2750 ASSERT(flag == JUMP_FUNCTION); | 2371 ASSERT(flag == JUMP_FUNCTION); |
| 2372 RECORD_LINE(); |
| 2751 SetCallKind(r5, CALL_AS_METHOD); | 2373 SetCallKind(r5, CALL_AS_METHOD); |
| 2752 Jump(r2); | 2374 jmp(r2); |
| 2753 } | 2375 } |
| 2754 } | 2376 } |
| 2755 | 2377 |
| 2756 | 2378 |
| 2757 void MacroAssembler::GetBuiltinFunction(Register target, | 2379 void MacroAssembler::GetBuiltinFunction(Register target, |
| 2758 Builtins::JavaScript id) { | 2380 Builtins::JavaScript id) { |
| 2381 ASSERT(!target.is(sh4_ip)); |
| 2382 ASSERT(!target.is(sh4_rtmp)); |
| 2383 RECORD_LINE(); |
| 2759 // Load the builtins object into target register. | 2384 // Load the builtins object into target register. |
| 2760 ldr(target, | 2385 ldr(target, |
| 2761 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); | 2386 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); |
| 2762 ldr(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset)); | 2387 ldr(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset)); |
| 2763 // Load the JavaScript builtin function from the builtins object. | 2388 // Load the JavaScript builtin function from the builtins object. |
| 2764 ldr(target, FieldMemOperand(target, | 2389 ldr(target, FieldMemOperand(target, |
| 2765 JSBuiltinsObject::OffsetOfFunctionWithId(id))); | 2390 JSBuiltinsObject::OffsetOfFunctionWithId(id))); |
| 2766 } | 2391 } |
| 2767 | 2392 |
| 2768 | 2393 |
| 2769 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) { | 2394 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) { |
| 2395 // FIXME(stm): why r1 ?? |
| 2770 ASSERT(!target.is(r1)); | 2396 ASSERT(!target.is(r1)); |
| 2397 ASSERT(!target.is(sh4_rtmp)); |
| 2398 ASSERT(!target.is(sh4_ip)); |
| 2399 RECORD_LINE(); |
| 2771 GetBuiltinFunction(r1, id); | 2400 GetBuiltinFunction(r1, id); |
| 2401 RECORD_LINE(); |
| 2772 // Load the code entry point from the builtins object. | 2402 // Load the code entry point from the builtins object. |
| 2773 ldr(target, FieldMemOperand(r1, JSFunction::kCodeEntryOffset)); | 2403 ldr(target, FieldMemOperand(r1, JSFunction::kCodeEntryOffset)); |
| 2774 } | 2404 } |
| 2775 | 2405 |
| 2776 | 2406 |
| 2777 void MacroAssembler::SetCounter(StatsCounter* counter, int value, | 2407 void MacroAssembler::SetCounter(StatsCounter* counter, int value, |
| 2778 Register scratch1, Register scratch2) { | 2408 Register scratch1, Register scratch2) { |
| 2409 RECORD_LINE(); |
| 2410 ASSERT(!scratch1.is(scratch2)); |
| 2411 ASSERT(!scratch1.is(sh4_rtmp) && !scratch2.is(sh4_rtmp)); |
| 2412 ASSERT(!scratch1.is(sh4_ip) && !scratch2.is(sh4_ip)); |
| 2779 if (FLAG_native_code_counters && counter->Enabled()) { | 2413 if (FLAG_native_code_counters && counter->Enabled()) { |
| 2414 RECORD_LINE(); |
| 2780 mov(scratch1, Operand(value)); | 2415 mov(scratch1, Operand(value)); |
| 2781 mov(scratch2, Operand(ExternalReference(counter))); | 2416 mov(scratch2, Operand(ExternalReference(counter))); |
| 2782 str(scratch1, MemOperand(scratch2)); | 2417 str(scratch1, MemOperand(scratch2)); |
| 2783 } | 2418 } |
| 2784 } | 2419 } |
| 2785 | 2420 |
| 2786 | 2421 |
| 2787 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value, | 2422 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value, |
| 2788 Register scratch1, Register scratch2) { | 2423 Register scratch1, Register scratch2) { |
| 2789 ASSERT(value > 0); | 2424 ASSERT(value > 0); |
| 2425 ASSERT(!scratch1.is(scratch2)); |
| 2426 ASSERT(!scratch1.is(sh4_rtmp) && !scratch2.is(sh4_rtmp)); |
| 2427 ASSERT(!scratch1.is(sh4_ip) && !scratch2.is(sh4_ip)); |
| 2428 RECORD_LINE(); |
| 2790 if (FLAG_native_code_counters && counter->Enabled()) { | 2429 if (FLAG_native_code_counters && counter->Enabled()) { |
| 2430 RECORD_LINE(); |
| 2791 mov(scratch2, Operand(ExternalReference(counter))); | 2431 mov(scratch2, Operand(ExternalReference(counter))); |
| 2792 ldr(scratch1, MemOperand(scratch2)); | 2432 ldr(scratch1, MemOperand(scratch2)); |
| 2793 add(scratch1, scratch1, Operand(value)); | 2433 add(scratch1, scratch1, Operand(value)); |
| 2794 str(scratch1, MemOperand(scratch2)); | 2434 str(scratch1, MemOperand(scratch2)); |
| 2795 } | 2435 } |
| 2796 } | 2436 } |
| 2797 | 2437 |
| 2798 | 2438 |
| 2799 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value, | 2439 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value, |
| 2800 Register scratch1, Register scratch2) { | 2440 Register scratch1, Register scratch2) { |
| 2801 ASSERT(value > 0); | 2441 ASSERT(value > 0); |
| 2442 ASSERT(!scratch1.is(scratch2)); |
| 2443 ASSERT(!scratch1.is(sh4_rtmp) && !scratch2.is(sh4_rtmp)); |
| 2444 ASSERT(!scratch1.is(sh4_ip) && !scratch2.is(sh4_ip)); |
| 2445 |
| 2446 RECORD_LINE(); |
| 2802 if (FLAG_native_code_counters && counter->Enabled()) { | 2447 if (FLAG_native_code_counters && counter->Enabled()) { |
| 2448 RECORD_LINE(); |
| 2803 mov(scratch2, Operand(ExternalReference(counter))); | 2449 mov(scratch2, Operand(ExternalReference(counter))); |
| 2804 ldr(scratch1, MemOperand(scratch2)); | 2450 ldr(scratch1, MemOperand(scratch2)); |
| 2805 sub(scratch1, scratch1, Operand(value)); | 2451 sub(scratch1, scratch1, Operand(value)); |
| 2806 str(scratch1, MemOperand(scratch2)); | 2452 str(scratch1, MemOperand(scratch2)); |
| 2807 } | 2453 } |
| 2808 } | 2454 } |
| 2809 | 2455 |
| 2810 | 2456 |
| 2811 void MacroAssembler::Assert(Condition cond, const char* msg) { | 2457 void MacroAssembler::Assert(Condition cond, const char* msg) { |
| 2812 if (emit_debug_code()) | 2458 if (emit_debug_code()) |
| 2813 Check(cond, msg); | 2459 Check(cond, msg); |
| 2814 } | 2460 } |
| 2815 | 2461 |
| 2816 | 2462 |
| 2817 void MacroAssembler::AssertRegisterIsRoot(Register reg, | 2463 void MacroAssembler::AssertRegisterIsRoot(Register reg, Register scratch, |
| 2818 Heap::RootListIndex index) { | 2464 Heap::RootListIndex index) { |
| 2465 // TODO(STM): is a scratch register needed here, or is ip ok? |

| 2466 ASSERT(!reg.is(scratch)); |
| 2819 if (emit_debug_code()) { | 2467 if (emit_debug_code()) { |
| 2820 LoadRoot(ip, index); | 2468 LoadRoot(scratch, index); |
| 2821 cmp(reg, ip); | 2469 cmp(reg, scratch); |
| 2822 Check(eq, "Register did not match expected root"); | 2470 Check(eq, "Register did not match expected root"); |
| 2823 } | 2471 } |
| 2824 } | 2472 } |
| 2825 | 2473 |
| 2826 | 2474 |
| 2827 void MacroAssembler::AssertFastElements(Register elements) { | 2475 void MacroAssembler::AssertFastElements(Register elements) { |
| 2476 RECORD_LINE(); |
| 2828 if (emit_debug_code()) { | 2477 if (emit_debug_code()) { |
| 2829 ASSERT(!elements.is(ip)); | 2478 ASSERT(!elements.is(sh4_rtmp)); |
| 2479 ASSERT(!elements.is(sh4_ip)); |
| 2830 Label ok; | 2480 Label ok; |
| 2481 RECORD_LINE(); |
| 2831 push(elements); | 2482 push(elements); |
| 2832 ldr(elements, FieldMemOperand(elements, HeapObject::kMapOffset)); | 2483 ldr(elements, FieldMemOperand(elements, HeapObject::kMapOffset)); |
| 2833 LoadRoot(ip, Heap::kFixedArrayMapRootIndex); | 2484 LoadRoot(ip, Heap::kFixedArrayMapRootIndex); |
| 2834 cmp(elements, ip); | 2485 cmp(elements, ip); |
| 2835 b(eq, &ok); | 2486 b(eq, &ok); |
| 2487 RECORD_LINE(); |
| 2836 LoadRoot(ip, Heap::kFixedDoubleArrayMapRootIndex); | 2488 LoadRoot(ip, Heap::kFixedDoubleArrayMapRootIndex); |
| 2837 cmp(elements, ip); | 2489 cmp(elements, ip); |
| 2838 b(eq, &ok); | 2490 b(eq, &ok); |
| 2491 RECORD_LINE(); |
| 2839 LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex); | 2492 LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex); |
| 2840 cmp(elements, ip); | 2493 cmp(elements, ip); |
| 2841 b(eq, &ok); | 2494 b(eq, &ok); |
| 2495 RECORD_LINE(); |
| 2842 Abort("JSObject with fast elements map has slow elements"); | 2496 Abort("JSObject with fast elements map has slow elements"); |
| 2843 bind(&ok); | 2497 bind(&ok); |
| 2498 RECORD_LINE(); |
| 2844 pop(elements); | 2499 pop(elements); |
| 2845 } | 2500 } |
| 2846 } | 2501 } |
| 2847 | 2502 |
| 2848 | 2503 |
| 2849 void MacroAssembler::Check(Condition cond, const char* msg) { | 2504 void MacroAssembler::Check(Condition cond, const char* msg) { |
| 2850 Label L; | 2505 Label L; |
| 2506 RECORD_LINE(); |
| 2851 b(cond, &L); | 2507 b(cond, &L); |
| 2852 Abort(msg); | 2508 Abort(msg); |
| 2853 // will not return here | 2509 // will not return here |
| 2854 bind(&L); | 2510 bind(&L); |
| 2855 } | 2511 } |
| 2856 | 2512 |
| 2513 void MacroAssembler::DebugPrint(Register obj) { |
| 2514 RECORD_LINE(); |
| 2515 push(obj); |
| 2516 CallRuntime(Runtime::kDebugPrint, 1); |
| 2517 } |
| 2857 | 2518 |
| 2858 void MacroAssembler::Abort(const char* msg) { | 2519 void MacroAssembler::Abort(const char* msg) { |
| 2859 Label abort_start; | 2520 Label abort_start; |
| 2860 bind(&abort_start); | 2521 bind(&abort_start); |
| 2522 RECORD_LINE(); |
| 2861 // We want to pass the msg string like a smi to avoid GC | 2523 // We want to pass the msg string like a smi to avoid GC |
| 2862 // problems, however msg is not guaranteed to be aligned | 2524 // problems, however msg is not guaranteed to be aligned |
| 2863 // properly. Instead, we pass an aligned pointer that is | 2525 // properly. Instead, we pass an aligned pointer that is |
| 2864 // a proper v8 smi, but also pass the alignment difference | 2526 // a proper v8 smi, but also pass the alignment difference |
| 2865 // from the real pointer as a smi. | 2527 // from the real pointer as a smi. |
| 2866 intptr_t p1 = reinterpret_cast<intptr_t>(msg); | 2528 intptr_t p1 = reinterpret_cast<intptr_t>(msg); |
| 2867 intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag; | 2529 intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag; |
| 2868 ASSERT(reinterpret_cast<Object*>(p0)->IsSmi()); | 2530 ASSERT(reinterpret_cast<Object*>(p0)->IsSmi()); |
| 2869 #ifdef DEBUG | 2531 #ifdef DEBUG |
| 2870 if (msg != NULL) { | 2532 if (msg != NULL) { |
| 2871 RecordComment("Abort message: "); | 2533 RecordComment("Abort message: "); |
| 2872 RecordComment(msg); | 2534 RecordComment(msg); |
| 2873 } | 2535 } |
| 2874 #endif | 2536 #endif |
| 2875 | |
| 2876 mov(r0, Operand(p0)); | 2537 mov(r0, Operand(p0)); |
| 2877 push(r0); | 2538 push(r0); |
| 2878 mov(r0, Operand(Smi::FromInt(p1 - p0))); | 2539 mov(r0, Operand(Smi::FromInt(p1 - p0))); |
| 2879 push(r0); | 2540 push(r0); |
| 2880 // Disable stub call restrictions to always allow calls to abort. | 2541 // Disable stub call restrictions to always allow calls to abort. |
| 2881 if (!has_frame_) { | 2542 if (!has_frame_) { |
| 2882 // We don't actually want to generate a pile of code for this, so just | 2543 // We don't actually want to generate a pile of code for this, so just |
| 2883 // claim there is a stack frame, without generating one. | 2544 // claim there is a stack frame, without generating one. |
| 2884 FrameScope scope(this, StackFrame::NONE); | 2545 FrameScope scope(this, StackFrame::NONE); |
| 2885 CallRuntime(Runtime::kAbort, 2); | 2546 CallRuntime(Runtime::kAbort, 2); |
| 2886 } else { | 2547 } else { |
| 2887 CallRuntime(Runtime::kAbort, 2); | 2548 CallRuntime(Runtime::kAbort, 2); |
| 2888 } | 2549 } |
| 2889 // will not return here | 2550 // will not return here |
| 2890 if (is_const_pool_blocked()) { | 2551 // TODO(STM): implement this when const pool manager is active |
| 2891 // If the calling code cares about the exact number of | 2552 // if (is_const_pool_blocked()) { |
| 2892 // instructions generated, we insert padding here to keep the size | 2553 // } |
| 2893 // of the Abort macro constant. | |
| 2894 static const int kExpectedAbortInstructions = 10; | |
| 2895 int abort_instructions = InstructionsGeneratedSince(&abort_start); | |
| 2896 ASSERT(abort_instructions <= kExpectedAbortInstructions); | |
| 2897 while (abort_instructions++ < kExpectedAbortInstructions) { | |
| 2898 nop(); | |
| 2899 } | |
| 2900 } | |
| 2901 } | 2554 } |
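Reviewer note, not part of the patch: the smi trick used above for the abort message can be checked with ordinary pointer arithmetic. A minimal sketch, assuming kSmiTagMask == 1 and kSmiTag == 0 as in V8's smi encoding:

#include <cassert>
#include <cstdint>

int main() {
  const intptr_t kSmiTagMask = 1;
  const intptr_t kSmiTag = 0;
  const char* msg = "example abort message";
  intptr_t p1 = reinterpret_cast<intptr_t>(msg);
  intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;  // aligned down: looks like a smi to the GC
  intptr_t delta = p1 - p0;                     // 0 or 1, itself passed as Smi::FromInt(delta)
  assert((p0 & kSmiTagMask) == kSmiTag);
  assert(p0 + delta == p1);                     // the runtime recovers the real pointer
  return 0;
}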
| 2902 | 2555 |
| 2903 | 2556 |
| 2557 // Clobbers: sh4_rtmp, dst |
| 2558 // live-in: cp |
| 2559 // live-out: cp, dst |
| 2904 void MacroAssembler::LoadContext(Register dst, int context_chain_length) { | 2560 void MacroAssembler::LoadContext(Register dst, int context_chain_length) { |
| 2561 ASSERT(!dst.is(sh4_rtmp)); |
| 2562 RECORD_LINE(); |
| 2905 if (context_chain_length > 0) { | 2563 if (context_chain_length > 0) { |
| 2564 RECORD_LINE(); |
| 2906 // Move up the chain of contexts to the context containing the slot. | 2565 // Move up the chain of contexts to the context containing the slot. |
| 2907 ldr(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX))); | 2566 ldr(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX))); |
| 2908 for (int i = 1; i < context_chain_length; i++) { | 2567 for (int i = 1; i < context_chain_length; i++) { |
| 2568 RECORD_LINE(); |
| 2909 ldr(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX))); | 2569 ldr(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX))); |
| 2910 } | 2570 } |
| 2911 } else { | 2571 } else { |
| 2572 RECORD_LINE(); |
| 2912 // Slot is in the current function context. Move it into the | 2573 // Slot is in the current function context. Move it into the |
| 2913 // destination register in case we store into it (the write barrier | 2574 // destination register in case we store into it (the write barrier |
| 2914 // cannot be allowed to destroy the context in esi). | 2575 // cannot be allowed to destroy the context in esi). |
| 2915 mov(dst, cp); | 2576 mov(dst, cp); |
| 2916 } | 2577 } |
| 2917 } | 2578 } |
| 2918 | 2579 |
| 2919 | 2580 |
| 2920 void MacroAssembler::LoadTransitionedArrayMapConditional( | 2581 void MacroAssembler::LoadTransitionedArrayMapConditional( |
| 2921 ElementsKind expected_kind, | 2582 ElementsKind expected_kind, |
| (...skipping 56 matching lines...) |
| 2978 ldr(function, FieldMemOperand(function, | 2639 ldr(function, FieldMemOperand(function, |
| 2979 GlobalObject::kNativeContextOffset)); | 2640 GlobalObject::kNativeContextOffset)); |
| 2980 // Load the function from the native context. | 2641 // Load the function from the native context. |
| 2981 ldr(function, MemOperand(function, Context::SlotOffset(index))); | 2642 ldr(function, MemOperand(function, Context::SlotOffset(index))); |
| 2982 } | 2643 } |
| 2983 | 2644 |
| 2984 | 2645 |
| 2985 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function, | 2646 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function, |
| 2986 Register map, | 2647 Register map, |
| 2987 Register scratch) { | 2648 Register scratch) { |
| 2649 ASSERT(!scratch.is(sh4_ip)); |
| 2650 ASSERT(!scratch.is(sh4_rtmp)); |
| 2988 // Load the initial map. The global functions all have initial maps. | 2651 // Load the initial map. The global functions all have initial maps. |
| 2989 ldr(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); | 2652 ldr(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); |
| 2990 if (emit_debug_code()) { | 2653 if (emit_debug_code()) { |
| 2991 Label ok, fail; | 2654 Label ok; |
| 2655 Label fail; |
| 2992 CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK); | 2656 CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK); |
| 2993 b(&ok); | 2657 b_near(&ok); |
| 2994 bind(&fail); | 2658 bind(&fail); |
| 2995 Abort("Global functions must have initial map"); | 2659 Abort("Global functions must have initial map"); |
| 2996 bind(&ok); | 2660 bind(&ok); |
| 2997 } | 2661 } |
| 2998 } | 2662 } |
| 2999 | 2663 |
| 3000 | 2664 |
| 3001 void MacroAssembler::JumpIfNotPowerOfTwoOrZero( | 2665 void MacroAssembler::JumpIfNotPowerOfTwoOrZero( |
| 3002 Register reg, | 2666 Register reg, |
| 3003 Register scratch, | 2667 Register scratch, |
| 3004 Label* not_power_of_two_or_zero) { | 2668 Label* not_power_of_two_or_zero) { |
| 3005 sub(scratch, reg, Operand(1), SetCC); | 2669 ASSERT(!reg.is(sh4_rtmp) && !scratch.is(sh4_rtmp)); |
| 3006 b(mi, not_power_of_two_or_zero); | 2670 RECORD_LINE(); |
| 2671 // Note: actually the case 0x80000000 is considered a power of two |
| 2672 // (not a neg value) |
| 2673 sub(scratch, reg, Operand(1)); |
| 2674 cmpge(scratch, Operand(0)); |
| 2675 bf(not_power_of_two_or_zero); |
| 3007 tst(scratch, reg); | 2676 tst(scratch, reg); |
| 3008 b(ne, not_power_of_two_or_zero); | 2677 b(ne, not_power_of_two_or_zero); |
| 3009 } | 2678 } |
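For reference, not part of the patch: both power-of-two jumps rely on the classic (x - 1) & x test. A hedged C++ rendering of the predicate the branches implement, with the same caveat noted in the comment that 0x80000000 passes the test (the Ref name is illustrative):

#include <cstdint>

// True when reg is a non-zero power of two; zero and other negative values
// fail, but 0x80000000 is accepted, matching the sequence above.
bool IsPowerOfTwoNonZeroRef(uint32_t reg) {
  uint32_t scratch = reg - 1u;                           // sub(scratch, reg, 1)
  if (static_cast<int32_t>(scratch) < 0) return false;   // cmpge #0 fails only for reg == 0
  return (scratch & reg) == 0;                           // tst: a power of two clears its single bit
}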
| 3010 | 2679 |
| 3011 | 2680 |
| 3012 void MacroAssembler::JumpIfNotPowerOfTwoOrZeroAndNeg( | 2681 void MacroAssembler::JumpIfNotPowerOfTwoOrZeroAndNeg( |
| 3013 Register reg, | 2682 Register reg, |
| 3014 Register scratch, | 2683 Register scratch, |
| 3015 Label* zero_and_neg, | 2684 Label* zero_and_neg, |
| 3016 Label* not_power_of_two) { | 2685 Label* not_power_of_two) { |
| 3017 sub(scratch, reg, Operand(1), SetCC); | 2686 ASSERT(!reg.is(sh4_rtmp) && !scratch.is(sh4_rtmp)); |
| 3018 b(mi, zero_and_neg); | 2687 RECORD_LINE(); |
| 2688 // Note: actually the case 0x80000000 is considered a pozer of two |
| 2689 // (not a neg value) |

| 2690 sub(scratch, reg, Operand(1)); |
| 2691 cmpge(scratch, Operand(0)); |
| 2692 bf(zero_and_neg); |
| 3019 tst(scratch, reg); | 2693 tst(scratch, reg); |
| 3020 b(ne, not_power_of_two); | 2694 b(ne, not_power_of_two); |
| 3021 } | 2695 } |
| 3022 | 2696 |
| 3023 | 2697 |
| 3024 void MacroAssembler::JumpIfNotBothSmi(Register reg1, | 2698 void MacroAssembler::JumpIfNotBothSmi(Register reg1, |
| 3025 Register reg2, | 2699 Register reg2, |
| 3026 Label* on_not_both_smi) { | 2700 Label* on_not_both_smi, |
| 2701 Label::Distance distance) { |
| 2702 ASSERT(!reg1.is(sh4_rtmp) && !reg2.is(sh4_rtmp)); |
| 3027 STATIC_ASSERT(kSmiTag == 0); | 2703 STATIC_ASSERT(kSmiTag == 0); |
| 2704 RECORD_LINE(); |
| 3028 tst(reg1, Operand(kSmiTagMask)); | 2705 tst(reg1, Operand(kSmiTagMask)); |
| 3029 tst(reg2, Operand(kSmiTagMask), eq); | 2706 b(ne, on_not_both_smi, distance); |
| 3030 b(ne, on_not_both_smi); | 2707 tst(reg2, Operand(kSmiTagMask)); |
| 2708 b(ne, on_not_both_smi, distance); |
| 3031 } | 2709 } |
| 3032 | 2710 |
| 3033 | 2711 |
| 3034 void MacroAssembler::UntagAndJumpIfSmi( | 2712 void MacroAssembler::UntagAndJumpIfSmi( |
| 3035 Register dst, Register src, Label* smi_case) { | 2713 Register dst, Register src, Label* smi_case) { |
| 3036 STATIC_ASSERT(kSmiTag == 0); | 2714 STATIC_ASSERT(kSmiTag == 0); |
| 3037 mov(dst, Operand(src, ASR, kSmiTagSize), SetCC); | 2715 tst(src, Operand(kSmiTagMask)); |
| 3038 b(cc, smi_case); // Shifter carry is not set for a smi. | 2716 asr(dst, src, Operand(kSmiTagSize)); |
| 3039 } | 2717 bt(smi_case); |
| 3040 | |
| 3041 | |
| 3042 void MacroAssembler::UntagAndJumpIfNotSmi( | |
| 3043 Register dst, Register src, Label* non_smi_case) { | |
| 3044 STATIC_ASSERT(kSmiTag == 0); | |
| 3045 mov(dst, Operand(src, ASR, kSmiTagSize), SetCC); | |
| 3046 b(cs, non_smi_case); // Shifter carry is set for a non-smi. | |
| 3047 } | 2718 } |
| 3048 | 2719 |
| 3049 | 2720 |
| 3050 void MacroAssembler::JumpIfEitherSmi(Register reg1, | 2721 void MacroAssembler::JumpIfEitherSmi(Register reg1, |
| 3051 Register reg2, | 2722 Register reg2, |
| 3052 Label* on_either_smi) { | 2723 Label* on_either_smi, |
| 2724 Label::Distance distance) { |
| 2725 ASSERT(!reg1.is(sh4_rtmp) && !reg2.is(sh4_rtmp)); |
| 3053 STATIC_ASSERT(kSmiTag == 0); | 2726 STATIC_ASSERT(kSmiTag == 0); |
| 2727 RECORD_LINE(); |
| 3054 tst(reg1, Operand(kSmiTagMask)); | 2728 tst(reg1, Operand(kSmiTagMask)); |
| 3055 tst(reg2, Operand(kSmiTagMask), ne); | 2729 b(eq, on_either_smi, distance); |
| 3056 b(eq, on_either_smi); | 2730 tst(reg2, Operand(kSmiTagMask)); |
| 2731 b(eq, on_either_smi, distance); |
| 3057 } | 2732 } |
| 3058 | 2733 |
| 3059 | 2734 |
| 3060 void MacroAssembler::AssertNotSmi(Register object) { | 2735 void MacroAssembler::AbortIfSmi(Register object) { |
| 3061 if (emit_debug_code()) { | 2736 STATIC_ASSERT(kSmiTag == 0); |
| 3062 STATIC_ASSERT(kSmiTag == 0); | 2737 ASSERT(!object.is(sh4_rtmp)); |
| 3063 tst(object, Operand(kSmiTagMask)); | 2738 |
| 3064 Check(ne, "Operand is a smi"); | 2739 tst(object, Operand(kSmiTagMask)); |
| 3065 } | 2740 Assert(ne, "Operand is a smi"); |
| 3066 } | 2741 } |
| 3067 | 2742 |
| 3068 | 2743 |
| 3069 void MacroAssembler::AssertSmi(Register object) { | 2744 void MacroAssembler::AbortIfNotSmi(Register object) { |
| 3070 if (emit_debug_code()) { | 2745 STATIC_ASSERT(kSmiTag == 0); |
| 3071 STATIC_ASSERT(kSmiTag == 0); | 2746 ASSERT(!object.is(sh4_rtmp)); |
| 3072 tst(object, Operand(kSmiTagMask)); | 2747 |
| 3073 Check(eq, "Operand is not smi"); | 2748 tst(object, Operand(kSmiTagMask)); |
| 3074 } | 2749 Assert(eq, "Operand is not smi"); |
| 3075 } | 2750 } |
| 3076 | 2751 |
| 3077 | 2752 |
| 3078 void MacroAssembler::AssertString(Register object) { | 2753 void MacroAssembler::AbortIfNotString(Register object) { |
| 3079 if (emit_debug_code()) { | 2754 STATIC_ASSERT(kSmiTag == 0); |
| 3080 STATIC_ASSERT(kSmiTag == 0); | 2755 ASSERT(!object.is(sh4_ip)); |
| 3081 tst(object, Operand(kSmiTagMask)); | 2756 ASSERT(!object.is(sh4_rtmp)); |
| 3082 Check(ne, "Operand is a smi and not a string"); | 2757 |
| 3083 push(object); | 2758 tst(object, Operand(kSmiTagMask)); |
| 3084 ldr(object, FieldMemOperand(object, HeapObject::kMapOffset)); | 2759 Assert(ne, "Operand is not a string"); |
| 3085 CompareInstanceType(object, object, FIRST_NONSTRING_TYPE); | 2760 RECORD_LINE(); |
| 3086 pop(object); | 2761 push(object); |
| 3087 Check(lo, "Operand is not a string"); | 2762 ldr(object, FieldMemOperand(object, HeapObject::kMapOffset)); |
| 3088 } | 2763 CompareInstanceType(object, object, FIRST_NONSTRING_TYPE, hs); |
| 2764 pop(object); |
| 2765 Assert(ne, "Operand is not a string"); |
| 3089 } | 2766 } |
| 3090 | 2767 |
| 3091 | 2768 |
| 3092 | 2769 void MacroAssembler::AbortIfNotRootValue(Register src, |
| 3093 void MacroAssembler::AssertRootValue(Register src, | 2770 Heap::RootListIndex root_value_index, |
| 3094 Heap::RootListIndex root_value_index, | 2771 const char* message) { |
| 3095 const char* message) { | 2772 ASSERT(!src.is(sh4_ip)); |
| 3096 if (emit_debug_code()) { | 2773 ASSERT(!src.is(sh4_rtmp)); |
| 3097 CompareRoot(src, root_value_index); | 2774 CompareRoot(src, root_value_index); |
| 3098 Check(eq, message); | 2775 Assert(eq, message); |
| 3099 } | |
| 3100 } | 2776 } |
| 3101 | 2777 |
| 3102 | 2778 |
| 2779 void MacroAssembler::PrintRegisterValue(Register reg) { |
| 2780 ASSERT(!reg.is(r4) && !reg.is(r5) && !reg.is(r6) && !reg.is(r7)); |
| 2781 ASSERT(!reg.is(sh4_rtmp)); |
| 2782 Label gc_required, skip, not_smi; |
| 2783 RECORD_LINE(); |
| 2784 EnterInternalFrame(); |
| 2785 // Save reg as it is scratched by WriteInt32ToHeapNumberStub() |
| 2786 push(reg); |
| 2787 pushm(kJSCallerSaved); |
| 2788 TrySmiTag(reg, ¬_smi, r5/*scratch*/); |
| 2789 mov(r4, reg); |
| 2790 jmp(&skip); |
| 2791 bind(¬_smi); |
| 2792 RECORD_LINE(); |
| 2793 LoadRoot(r7, Heap::kHeapNumberMapRootIndex); |
| 2794 AllocateHeapNumber(r4/*result heap number*/, r5/*scratch*/, r6/*scratch*/, |
| 2795 r7/*heap_number_map*/, &gc_required); |
| 2796 WriteInt32ToHeapNumberStub stub(reg, r4, r5/*scratch*/); |
| 2797 CallStub(&stub); |
| 2798 jmp(&skip); |
| 2799 bind(&gc_required); |
| 2800 RECORD_LINE(); |
| 2801 Abort("GC required while dumping number"); |
| 2802 bind(&skip); |
| 2803 RECORD_LINE(); |
| 2804 push(r4); |
| 2805 CallRuntime(Runtime::kNumberToString, 1); |
| 2806 push(r0); |
| 2807 CallRuntime(Runtime::kGlobalPrint, 1); |
| 2808 popm(kJSCallerSaved); |
| 2809 pop(reg); |
| 2810 LeaveInternalFrame(); |
| 2811 } |
| 2812 |
| 2813 |
| 3103 void MacroAssembler::JumpIfNotHeapNumber(Register object, | 2814 void MacroAssembler::JumpIfNotHeapNumber(Register object, |
| 3104 Register heap_number_map, | 2815 Register heap_number_map, |
| 3105 Register scratch, | 2816 Register scratch, |
| 3106 Label* on_not_heap_number) { | 2817 Label* on_not_heap_number) { |
| 2818 RECORD_LINE(); |
| 2819 ASSERT(!scratch.is(sh4_ip)); |
| 2820 ASSERT(!scratch.is(sh4_rtmp)); |
| 2821 AssertRegisterIsRoot(heap_number_map, scratch, Heap::kHeapNumberMapRootIndex); |
| 3107 ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); | 2822 ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); |
| 3108 AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | |
| 3109 cmp(scratch, heap_number_map); | 2823 cmp(scratch, heap_number_map); |
| 3110 b(ne, on_not_heap_number); | 2824 b(ne, on_not_heap_number); |
| 3111 } | 2825 } |
| 3112 | 2826 |
| 3113 | 2827 |
| 3114 void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings( | 2828 void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings( |
| 3115 Register first, | 2829 Register first, |
| 3116 Register second, | 2830 Register second, |
| 3117 Register scratch1, | 2831 Register scratch1, |
| 3118 Register scratch2, | 2832 Register scratch2, |
| 3119 Label* failure) { | 2833 Label* failure) { |
| 2834 |
| 2835 ASSERT(!first.is(sh4_ip) && !second.is(sh4_ip) && !scratch1.is(sh4_ip) && |
| 2836 !scratch2.is(sh4_ip)); |
| 2837 ASSERT(!first.is(sh4_rtmp) && !second.is(sh4_rtmp) && |
| 2838 !scratch1.is(sh4_rtmp) && !scratch2.is(sh4_rtmp)); |
| 2839 RECORD_LINE(); |
| 3120 // Test that both first and second are sequential ASCII strings. | 2840 // Test that both first and second are sequential ASCII strings. |
| 3121 // Assume that they are non-smis. | 2841 // Assume that they are non-smis. |
| 3122 ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset)); | 2842 ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset)); |
| 3123 ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset)); | 2843 ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset)); |
| 3124 ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset)); | 2844 ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset)); |
| 3125 ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset)); | 2845 ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset)); |
| 3126 | 2846 |
| 3127 JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1, | 2847 JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1, |
| 3128 scratch2, | 2848 scratch2, |
| 3129 scratch1, | 2849 scratch1, |
| 3130 scratch2, | 2850 scratch2, |
| 3131 failure); | 2851 failure); |
| 3132 } | 2852 } |
| 3133 | 2853 |
| 2854 |
| 3134 void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first, | 2855 void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first, |
| 3135 Register second, | 2856 Register second, |
| 3136 Register scratch1, | 2857 Register scratch1, |
| 3137 Register scratch2, | 2858 Register scratch2, |
| 3138 Label* failure) { | 2859 Label* failure) { |
| 2860 ASSERT(!first.is(sh4_ip) && !second.is(sh4_ip) && !scratch1.is(sh4_ip) && |
| 2861 !scratch2.is(sh4_ip)); |
| 2862 ASSERT(!first.is(sh4_rtmp) && !second.is(sh4_rtmp) && |
| 2863 !scratch1.is(sh4_rtmp) && !scratch2.is(sh4_rtmp)); |
| 2864 RECORD_LINE(); |
| 3139 // Check that neither is a smi. | 2865 // Check that neither is a smi. |
| 3140 STATIC_ASSERT(kSmiTag == 0); | 2866 STATIC_ASSERT(kSmiTag == 0); |
| 3141 and_(scratch1, first, Operand(second)); | 2867 land(scratch1, first, second); |
| 3142 JumpIfSmi(scratch1, failure); | 2868 JumpIfSmi(scratch1, failure); |
| 3143 JumpIfNonSmisNotBothSequentialAsciiStrings(first, | 2869 JumpIfNonSmisNotBothSequentialAsciiStrings(first, |
| 3144 second, | 2870 second, |
| 3145 scratch1, | 2871 scratch1, |
| 3146 scratch2, | 2872 scratch2, |
| 3147 failure); | 2873 failure); |
| 3148 } | 2874 } |
| 3149 | 2875 |
| 3150 | 2876 |
| 3151 // Allocates a heap number or jumps to the need_gc label if the young space | 2877 // Allocates a heap number or jumps to the need_gc label if the young space |
| 3152 // is full and a scavenge is needed. | 2878 // is full and a scavenge is needed. |
| 3153 void MacroAssembler::AllocateHeapNumber(Register result, | 2879 void MacroAssembler::AllocateHeapNumber(Register result, |
| 3154 Register scratch1, | 2880 Register scratch1, |
| 3155 Register scratch2, | 2881 Register scratch2, |
| 3156 Register heap_number_map, | 2882 Register heap_number_map, |
| 3157 Label* gc_required, | 2883 Label* gc_required) { |
| 3158 TaggingMode tagging_mode) { | |
| 3159 // Allocate an object in the heap for the heap number and tag it as a heap | 2884 // Allocate an object in the heap for the heap number and tag it as a heap |
| 3160 // object. | 2885 // object. |
| 2886 RECORD_LINE(); |
| 3161 AllocateInNewSpace(HeapNumber::kSize, | 2887 AllocateInNewSpace(HeapNumber::kSize, |
| 3162 result, | 2888 result, |
| 3163 scratch1, | 2889 scratch1, |
| 3164 scratch2, | 2890 scratch2, |
| 3165 gc_required, | 2891 gc_required, |
| 3166 tagging_mode == TAG_RESULT ? TAG_OBJECT : | 2892 TAG_OBJECT); |
| 3167 NO_ALLOCATION_FLAGS); | |
| 3168 | 2893 |
| 3169 // Store heap number map in the allocated object. | 2894 // Store heap number map in the allocated object. |
| 3170 AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | 2895 RECORD_LINE(); |
| 3171 if (tagging_mode == TAG_RESULT) { | 2896 AssertRegisterIsRoot(heap_number_map, scratch1, |
| 3172 str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset)); | 2897 Heap::kHeapNumberMapRootIndex); |
| 3173 } else { | 2898 RECORD_LINE(); |
| 3174 str(heap_number_map, MemOperand(result, HeapObject::kMapOffset)); | 2899 str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset)); |
| 3175 } | |
| 3176 } | 2900 } |
| 3177 | 2901 |
| 3178 | 2902 |
| 3179 void MacroAssembler::AllocateHeapNumberWithValue(Register result, | |
| 3180 DwVfpRegister value, | |
| 3181 Register scratch1, | |
| 3182 Register scratch2, | |
| 3183 Register heap_number_map, | |
| 3184 Label* gc_required) { | |
| 3185 AllocateHeapNumber(result, scratch1, scratch2, heap_number_map, gc_required); | |
| 3186 sub(scratch1, result, Operand(kHeapObjectTag)); | |
| 3187 vstr(value, scratch1, HeapNumber::kValueOffset); | |
| 3188 } | |
| 3189 | |
| 3190 | |
| 3191 // Copies a fixed number of fields of heap objects from src to dst. | 2903 // Copies a fixed number of fields of heap objects from src to dst. |
| 3192 void MacroAssembler::CopyFields(Register dst, | 2904 void MacroAssembler::CopyFields(Register dst, |
| 3193 Register src, | 2905 Register src, |
| 3194 RegList temps, | 2906 RegList temps, |
| 3195 int field_count) { | 2907 int field_count) { |
| 3196 // At least one bit set in the first 15 registers. | 2908 // At least one bit set in the first 15 registers. |
| 3197 ASSERT((temps & ((1 << 15) - 1)) != 0); | 2909 ASSERT((temps & ((1 << 15) - 1)) != 0); |
| 3198 ASSERT((temps & dst.bit()) == 0); | 2910 ASSERT((temps & dst.bit()) == 0); |
| 3199 ASSERT((temps & src.bit()) == 0); | 2911 ASSERT((temps & src.bit()) == 0); |
| 3200 // Primitive implementation using only one temporary register. | 2912 // Primitive implementation using only one temporary register. |
| 3201 | 2913 |
| 3202 Register tmp = no_reg; | 2914 Register tmp = no_reg; |
| 3203 // Find a temp register in temps list. | 2915 // Find a temp register in temps list. |
| 3204 for (int i = 0; i < 15; i++) { | 2916 for (int i = 0; i < 15; i++) { |
| 3205 if ((temps & (1 << i)) != 0) { | 2917 if ((temps & (1 << i)) != 0) { |
| 3206 tmp.set_code(i); | 2918 tmp.set_code(i); |
| 3207 break; | 2919 break; |
| 3208 } | 2920 } |
| 3209 } | 2921 } |
| 3210 ASSERT(!tmp.is(no_reg)); | 2922 ASSERT(!tmp.is(no_reg)); |
| 3211 | 2923 RECORD_LINE(); |
| 3212 for (int i = 0; i < field_count; i++) { | 2924 for (int i = 0; i < field_count; i++) { |
| 2925 RECORD_LINE(); |
| 3213 ldr(tmp, FieldMemOperand(src, i * kPointerSize)); | 2926 ldr(tmp, FieldMemOperand(src, i * kPointerSize)); |
| 3214 str(tmp, FieldMemOperand(dst, i * kPointerSize)); | 2927 str(tmp, FieldMemOperand(dst, i * kPointerSize)); |
| 3215 } | 2928 } |
| 3216 } | 2929 } |
| 3217 | 2930 |
| 3218 | 2931 |
| 3219 void MacroAssembler::CopyBytes(Register src, | 2932 void MacroAssembler::CopyBytes(Register src, |
| 3220 Register dst, | 2933 Register dst, |
| 3221 Register length, | 2934 Register length, |
| 3222 Register scratch) { | 2935 Register scratch) { |
| 3223 Label align_loop, align_loop_1, word_loop, byte_loop, byte_loop_1, done; | 2936 Label align_loop, align_loop_1, word_loop, byte_loop, byte_loop_1, done; |
| 3224 | 2937 |
| 3225 // Align src before copying in word size chunks. | 2938 // Align src before copying in word size chunks. |
| 3226 bind(&align_loop); | 2939 bind(&align_loop); |
| 3227 cmp(length, Operand(0)); | 2940 cmp(length, Operand(0)); |
| 3228 b(eq, &done); | 2941 b(eq, &done, Label::kNear); |
| 3229 bind(&align_loop_1); | 2942 bind(&align_loop_1); |
| 3230 tst(src, Operand(kPointerSize - 1)); | 2943 tst(src, Operand(kPointerSize - 1)); |
| 3231 b(eq, &word_loop); | 2944 b(eq, &word_loop, Label::kNear); |
| 3232 ldrb(scratch, MemOperand(src, 1, PostIndex)); | 2945 ldrb(scratch, MemOperand(src)); |
| 3233 strb(scratch, MemOperand(dst, 1, PostIndex)); | 2946 add(src, src, Operand(1)); |
| 3234 sub(length, length, Operand(1), SetCC); | 2947 strb(scratch, MemOperand(dst)); |
| 3235 b(ne, &byte_loop_1); | 2948 add(dst, dst, Operand(1)); |
| 2949 dt(length); |
| 2950 b(ne, &byte_loop_1, Label::kNear); |
| 3236 | 2951 |
| 3237 // Copy bytes in word size chunks. | 2952 // Copy bytes in word size chunks. |
| 3238 bind(&word_loop); | 2953 bind(&word_loop); |
| 3239 if (emit_debug_code()) { | 2954 if (emit_debug_code()) { |
| 3240 tst(src, Operand(kPointerSize - 1)); | 2955 tst(src, Operand(kPointerSize - 1)); |
| 3241 Assert(eq, "Expecting alignment for CopyBytes"); | 2956 Assert(eq, "Expecting alignment for CopyBytes"); |
| 3242 } | 2957 } |
| 3243 cmp(length, Operand(kPointerSize)); | 2958 cmpge(length, Operand(kPointerSize)); |
| 3244 b(lt, &byte_loop); | 2959 bf_near(&byte_loop); |
| 3245 ldr(scratch, MemOperand(src, kPointerSize, PostIndex)); | 2960 ldr(scratch, MemOperand(src)); |
| 3246 if (CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) { | 2961 add(src, src, Operand(kPointerSize)); |
| 3247 str(scratch, MemOperand(dst, kPointerSize, PostIndex)); | 2962 #if CAN_USE_UNALIGNED_ACCESSES |
| 3248 } else { | 2963 str(scratch, MemOperand(dst)); |
| 3249 strb(scratch, MemOperand(dst, 1, PostIndex)); | 2964 add(dst, dst, Operand(kPointerSize)); |
| 3250 mov(scratch, Operand(scratch, LSR, 8)); | 2965 #else |
| 3251 strb(scratch, MemOperand(dst, 1, PostIndex)); | 2966 strb(scratch, MemOperand(dst)); |
| 3252 mov(scratch, Operand(scratch, LSR, 8)); | 2967 add(dst, dst, Operand(1)); |
| 3253 strb(scratch, MemOperand(dst, 1, PostIndex)); | 2968 lsr(scratch, scratch, Operand(8)); |
| 3254 mov(scratch, Operand(scratch, LSR, 8)); | 2969 strb(scratch, MemOperand(dst)); |
| 3255 strb(scratch, MemOperand(dst, 1, PostIndex)); | 2970 add(dst, dst, Operand(1)); |
| 3256 } | 2971 lsr(scratch, scratch, Operand(8)); |
| 2972 strb(scratch, MemOperand(dst)); |
| 2973 add(dst, dst, Operand(1)); |
| 2974 lsr(scratch, scratch, Operand(8)); |
| 2975 strb(scratch, MemOperand(dst)); |
| 2976 add(dst, dst, Operand(1)); |
| 2977 #endif |
| 3257 sub(length, length, Operand(kPointerSize)); | 2978 sub(length, length, Operand(kPointerSize)); |
| 3258 b(&word_loop); | 2979 b_near(&word_loop); |
| 3259 | 2980 |
| 3260 // Copy the last bytes if any left. | 2981 // Copy the last bytes if any left. |
| 3261 bind(&byte_loop); | 2982 bind(&byte_loop); |
| 3262 cmp(length, Operand(0)); | 2983 cmp(length, Operand(0)); |
| 3263 b(eq, &done); | 2984 b(eq, &done, Label::kNear); |
| 3264 bind(&byte_loop_1); | 2985 bind(&byte_loop_1); |
| 3265 ldrb(scratch, MemOperand(src, 1, PostIndex)); | 2986 ldrb(scratch, MemOperand(src)); |
| 3266 strb(scratch, MemOperand(dst, 1, PostIndex)); | 2987 add(src, src, Operand(1)); |
| 3267 sub(length, length, Operand(1), SetCC); | 2988 strb(scratch, MemOperand(dst)); |
| 3268 b(ne, &byte_loop_1); | 2989 add(dst, dst, Operand(1)); |
| 2990 dt(length); |
| 2991 b(ne, &byte_loop_1, Label::kNear); |
| 3269 bind(&done); | 2992 bind(&done); |
| 3270 } | 2993 } |
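For orientation only, not part of the patch: the SH4 sequence above implements the usual align-then-word-copy strategy without post-increment addressing. A rough C++ equivalent, under the assumption that the unaligned-store #else branch is modelled by byte-wise stores (the Ref name is illustrative):

#include <cstddef>
#include <cstdint>
#include <cstring>

void CopyBytesRef(const uint8_t* src, uint8_t* dst, size_t length) {
  // Align src before copying in word-size chunks.
  while (length > 0 &&
         (reinterpret_cast<uintptr_t>(src) % sizeof(uint32_t)) != 0) {
    *dst++ = *src++;
    --length;
  }
  // Copy word-size chunks; memcpy stands in for either the aligned str or the
  // four strb stores, depending on CAN_USE_UNALIGNED_ACCESSES.
  while (length >= sizeof(uint32_t)) {
    uint32_t word;
    std::memcpy(&word, src, sizeof(word));
    std::memcpy(dst, &word, sizeof(word));
    src += sizeof(word);
    dst += sizeof(word);
    length -= sizeof(word);
  }
  // Copy the last bytes, if any are left.
  while (length-- > 0) *dst++ = *src++;
}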
| 3271 | 2994 |
| 3272 | 2995 |
| 3273 void MacroAssembler::InitializeFieldsWithFiller(Register start_offset, | 2996 void MacroAssembler::InitializeFieldsWithFiller(Register start_offset, |
| 3274 Register end_offset, | 2997 Register end_offset, |
| 3275 Register filler) { | 2998 Register filler) { |
| 3276 Label loop, entry; | 2999 Label loop, entry; |
| 3277 b(&entry); | 3000 jmp(&entry); |
| 3278 bind(&loop); | 3001 bind(&loop); |
| 3279 str(filler, MemOperand(start_offset, kPointerSize, PostIndex)); | 3002 str(filler, MemOperand(start_offset, kPointerSize, PostIndex)); |
| 3280 bind(&entry); | 3003 bind(&entry); |
| 3281 cmp(start_offset, end_offset); | 3004 cmpge(start_offset, end_offset); |
| 3282 b(lt, &loop); | 3005 bf(&loop); |
| 3283 } | 3006 } |
| 3284 | 3007 |
| 3285 | 3008 |
| 3286 void MacroAssembler::CountLeadingZeros(Register zeros, // Answer. | 3009 void MacroAssembler::CountLeadingZeros(Register zeros, // Answer. |
| 3287 Register source, // Input. | 3010 Register source, // Input. |
| 3288 Register scratch) { | 3011 Register scratch) { |
| 3289 ASSERT(!zeros.is(source) || !source.is(scratch)); | 3012 ASSERT(!zeros.is(source) || !source.is(scratch)); |
| 3290 ASSERT(!zeros.is(scratch)); | 3013 ASSERT(!zeros.is(scratch)); |
| 3291 ASSERT(!scratch.is(ip)); | 3014 ASSERT(!scratch.is(sh4_rtmp)); |
| 3292 ASSERT(!source.is(ip)); | 3015 ASSERT(!source.is(sh4_rtmp)); |
| 3293 ASSERT(!zeros.is(ip)); | 3016 ASSERT(!zeros.is(sh4_rtmp)); |
| 3294 #ifdef CAN_USE_ARMV5_INSTRUCTIONS | 3017 ASSERT(!scratch.is(sh4_ip)); |
| 3295 clz(zeros, source); // This instruction is only supported after ARM5. | 3018 ASSERT(!source.is(sh4_ip)); |
| 3296 #else | 3019 ASSERT(!zeros.is(sh4_ip)); |
| 3297 // Order of the next two lines is important: zeros register | 3020 RECORD_LINE(); |
| 3298 // can be the same as source register. | 3021 |
| 3299 Move(scratch, source); | 3022 Label l0, l1, l2, l3, l4, l5; |
| 3300 mov(zeros, Operand(0, RelocInfo::NONE)); | 3023 cmpeq(source, Operand(0)); |
| 3024 bf_near(&l0); |
| 3025 mov(zeros, Operand(32)); |
| 3026 jmp_near(&l5); |
| 3027 |
| 3028 bind(&l0); |
| 3029 // Be carefull to save source in scratch, source and zeros may be the same |
| 3030 // register |
| 3031 mov(scratch, source); |
| 3032 mov(zeros, Operand(0)); |
| 3301 // Top 16. | 3033 // Top 16. |
| 3302 tst(scratch, Operand(0xffff0000)); | 3034 tst(scratch, Operand(0xffff0000)); |
| 3303 add(zeros, zeros, Operand(16), LeaveCC, eq); | 3035 bf_near(&l1); |
| 3304 mov(scratch, Operand(scratch, LSL, 16), LeaveCC, eq); | 3036 add(zeros, zeros, Operand(16)); |
| 3037 lsl(scratch, scratch, Operand(16)); |
| 3305 // Top 8. | 3038 // Top 8. |
| 3039 bind(&l1); |
| 3306 tst(scratch, Operand(0xff000000)); | 3040 tst(scratch, Operand(0xff000000)); |
| 3307 add(zeros, zeros, Operand(8), LeaveCC, eq); | 3041 bf_near(&l2); |
| 3308 mov(scratch, Operand(scratch, LSL, 8), LeaveCC, eq); | 3042 add(zeros, zeros, Operand(8)); |
| 3043 lsl(scratch, scratch, Operand(8)); |
| 3309 // Top 4. | 3044 // Top 4. |
| 3045 bind(&l2); |
| 3310 tst(scratch, Operand(0xf0000000)); | 3046 tst(scratch, Operand(0xf0000000)); |
| 3311 add(zeros, zeros, Operand(4), LeaveCC, eq); | 3047 bf_near(&l3); |
| 3312 mov(scratch, Operand(scratch, LSL, 4), LeaveCC, eq); | 3048 add(zeros, zeros, Operand(4)); |
| 3049 lsl(scratch, scratch, Operand(4)); |
| 3313 // Top 2. | 3050 // Top 2. |
| 3051 bind(&l3); |
| 3314 tst(scratch, Operand(0xc0000000)); | 3052 tst(scratch, Operand(0xc0000000)); |
| 3315 add(zeros, zeros, Operand(2), LeaveCC, eq); | 3053 bf_near(&l4); |
| 3316 mov(scratch, Operand(scratch, LSL, 2), LeaveCC, eq); | 3054 add(zeros, zeros, Operand(2)); |
| 3055 lsl(scratch, scratch, Operand(2)); |
| 3317 // Top bit. | 3056 // Top bit. |
| 3057 bind(&l4); |
| 3318 tst(scratch, Operand(0x80000000u)); | 3058 tst(scratch, Operand(0x80000000u)); |
| 3319 add(zeros, zeros, Operand(1), LeaveCC, eq); | 3059 bf_near(&l5); |
| 3320 #endif | 3060 add(zeros, zeros, Operand(1)); |
| 3061 bind(&l5); |
| 3321 } | 3062 } |
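Reviewer aside, not part of the patch: SH4 has no clz instruction, so the routine above narrows the search by halves. A compact C++ rendering of the same 16/8/4/2/1 scheme (the Ref name is illustrative):

#include <cstdint>

int CountLeadingZerosRef(uint32_t x) {
  if (x == 0) return 32;
  int zeros = 0;
  if ((x & 0xffff0000u) == 0) { zeros += 16; x <<= 16; }  // top 16
  if ((x & 0xff000000u) == 0) { zeros += 8;  x <<= 8;  }  // top 8
  if ((x & 0xf0000000u) == 0) { zeros += 4;  x <<= 4;  }  // top 4
  if ((x & 0xc0000000u) == 0) { zeros += 2;  x <<= 2;  }  // top 2
  if ((x & 0x80000000u) == 0) { zeros += 1; }             // top bit
  return zeros;
}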
| 3322 | 3063 |
| 3323 | 3064 |
| 3324 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii( | 3065 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii( |
| 3325 Register first, | 3066 Register first, |
| 3326 Register second, | 3067 Register second, |
| 3327 Register scratch1, | 3068 Register scratch1, |
| 3328 Register scratch2, | 3069 Register scratch2, |
| 3329 Label* failure) { | 3070 Label* failure) { |
| 3071 ASSERT(!first.is(sh4_ip) && !second.is(sh4_ip) && !scratch1.is(sh4_ip) && |
| 3072 !scratch2.is(sh4_ip)); |
| 3073 ASSERT(!first.is(sh4_rtmp) && !second.is(sh4_rtmp) && |
| 3074 !scratch1.is(sh4_rtmp) && !scratch2.is(sh4_rtmp)); |
| 3075 |
| 3330 int kFlatAsciiStringMask = | 3076 int kFlatAsciiStringMask = |
| 3331 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask; | 3077 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask; |
| 3332 int kFlatAsciiStringTag = ASCII_STRING_TYPE; | 3078 int kFlatAsciiStringTag = ASCII_STRING_TYPE; |
| 3333 and_(scratch1, first, Operand(kFlatAsciiStringMask)); | 3079 RECORD_LINE(); |
| 3334 and_(scratch2, second, Operand(kFlatAsciiStringMask)); | 3080 land(scratch1, first, Operand(kFlatAsciiStringMask)); |
| 3081 land(scratch2, second, Operand(kFlatAsciiStringMask)); |
| 3335 cmp(scratch1, Operand(kFlatAsciiStringTag)); | 3082 cmp(scratch1, Operand(kFlatAsciiStringTag)); |
| 3336 // Ignore second test if first test failed. | 3083 // Ignore second test if first test failed. |
| 3337 cmp(scratch2, Operand(kFlatAsciiStringTag), eq); | 3084 b(ne, failure); |
| 3085 RECORD_LINE(); |
| 3086 cmp(scratch2, Operand(kFlatAsciiStringTag)); |
| 3338 b(ne, failure); | 3087 b(ne, failure); |
| 3339 } | 3088 } |
| 3340 | 3089 |
| 3341 | 3090 |
| 3342 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type, | 3091 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type, |
| 3343 Register scratch, | 3092 Register scratch, |
| 3344 Label* failure) { | 3093 Label* failure) { |
| 3094 ASSERT(!type.is(sh4_rtmp) && !scratch.is(sh4_rtmp)); |
| 3095 |
| 3345 int kFlatAsciiStringMask = | 3096 int kFlatAsciiStringMask = |
| 3346 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask; | 3097 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask; |
| 3347 int kFlatAsciiStringTag = ASCII_STRING_TYPE; | 3098 int kFlatAsciiStringTag = ASCII_STRING_TYPE; |
| 3348 and_(scratch, type, Operand(kFlatAsciiStringMask)); | 3099 RECORD_LINE(); |
| 3100 land(scratch, type, Operand(kFlatAsciiStringMask)); |
| 3349 cmp(scratch, Operand(kFlatAsciiStringTag)); | 3101 cmp(scratch, Operand(kFlatAsciiStringTag)); |
| 3350 b(ne, failure); | 3102 b(ne, failure); |
| 3351 } | 3103 } |
| 3352 | 3104 |
| 3353 static const int kRegisterPassedArguments = 4; | 3105 static const int kRegisterPassedArguments = 4; |
| 3354 | 3106 |
| 3355 | 3107 |
| 3356 int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments, | 3108 int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments, |
| 3357 int num_double_arguments) { | 3109 int num_double_arguments) { |
| 3358 int stack_passed_words = 0; | 3110 int stack_passed_words = 0; |
| 3359 if (use_eabi_hardfloat()) { | 3111 // Up to 4 simple arguments are passed in [r4, r7] |
| 3360 // In the hard floating point calling convention, we can use | |
| 3361 // all double registers to pass doubles. | |
| 3362 if (num_double_arguments > DoubleRegister::kNumRegisters) { | |
| 3363 stack_passed_words += | |
| 3364 2 * (num_double_arguments - DoubleRegister::kNumRegisters); | |
| 3365 } | |
| 3366 } else { | |
| 3367 // In the soft floating point calling convention, every double | |
| 3368 // argument is passed using two registers. | |
| 3369 num_reg_arguments += 2 * num_double_arguments; | |
| 3370 } | |
| 3371 // Up to four simple arguments are passed in registers r0..r3. | |
| 3372 if (num_reg_arguments > kRegisterPassedArguments) { | 3112 if (num_reg_arguments > kRegisterPassedArguments) { |
| 3373 stack_passed_words += num_reg_arguments - kRegisterPassedArguments; | 3113 stack_passed_words += num_reg_arguments - kRegisterPassedArguments; |
| 3374 } | 3114 } |
| 3115 |
| 3116 // Up to 4 double arguments are passed in [dr4, dr10] |
| 3117 if (num_double_arguments > kRegisterPassedArguments) { |
| 3118 stack_passed_words += 2 * (num_double_arguments - kRegisterPassedArguments); |
| 3119 } |
| 3120 |
| 3375 return stack_passed_words; | 3121 return stack_passed_words; |
| 3376 } | 3122 } |
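For reference, not part of the patch: the register budget described in the comments (four integer arguments in r4-r7, four doubles in dr4-dr10) makes the stack-word count a small arithmetic exercise. A hedged standalone sketch with a worked example:

// Mirrors the function above; the Ref suffix marks it as illustrative.
int CalculateStackPassedWordsRef(int num_reg_arguments,
                                 int num_double_arguments) {
  const int kRegisterPassedArguments = 4;
  int stack_passed_words = 0;
  if (num_reg_arguments > kRegisterPassedArguments) {
    stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
  }
  if (num_double_arguments > kRegisterPassedArguments) {
    stack_passed_words += 2 * (num_double_arguments - kRegisterPassedArguments);
  }
  return stack_passed_words;
}
// e.g. 6 integer arguments and 5 doubles -> (6 - 4) + 2 * (5 - 4) = 4 words.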
| 3377 | 3123 |
| 3378 | 3124 |
| 3379 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, | 3125 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, |
| 3380 int num_double_arguments, | 3126 int num_double_arguments, |
| 3381 Register scratch) { | 3127 Register scratch) { |
| 3382 int frame_alignment = ActivationFrameAlignment(); | 3128 ASSERT(!scratch.is(sh4_ip)); |
| 3383 int stack_passed_arguments = CalculateStackPassedWords( | 3129 ASSERT(!scratch.is(sh4_rtmp)); |
| 3384 num_reg_arguments, num_double_arguments); | 3130 // Depending on the number of registers used, assert on the right scratch |
| 3131 // registers. |
| 3132 ASSERT((num_reg_arguments < 1 || !scratch.is(r4)) && |
| 3133 (num_reg_arguments < 2 || !scratch.is(r5)) && |
| 3134 (num_reg_arguments < 3 || !scratch.is(r6)) && |
| 3135 (num_reg_arguments < 4 || !scratch.is(r7))); |
| 3136 int frame_alignment = OS::ActivationFrameAlignment(); |
| 3137 int stack_passed_arguments = CalculateStackPassedWords(num_reg_arguments, |
| 3138 num_double_arguments); |
| 3385 if (frame_alignment > kPointerSize) { | 3139 if (frame_alignment > kPointerSize) { |
| 3140 RECORD_LINE(); |
| 3386 // Make stack end at alignment and make room for num_arguments - 4 words | 3141 // Make stack end at alignment and make room for num_arguments - 4 words |
| 3387 // and the original value of sp. | 3142 // and the original value of sp. |
| 3388 mov(scratch, sp); | 3143 mov(scratch, sp); |
| 3389 sub(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize)); | 3144 sub(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize)); |
| 3390 ASSERT(IsPowerOf2(frame_alignment)); | 3145 ASSERT(IsPowerOf2(frame_alignment)); |
| 3391 and_(sp, sp, Operand(-frame_alignment)); | 3146 land(sp, sp, Operand(-frame_alignment)); |
| 3392 str(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize)); | 3147 str(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize)); |
| 3393 } else { | 3148 } else { |
| 3149 RECORD_LINE(); |
| 3394 sub(sp, sp, Operand(stack_passed_arguments * kPointerSize)); | 3150 sub(sp, sp, Operand(stack_passed_arguments * kPointerSize)); |
| 3395 } | 3151 } |
| 3396 } | 3152 } |
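A last reviewer aside, not part of the patch: the frame-alignment branch above reserves the stack-passed words plus one slot for the saved sp, then rounds sp down with an AND against the negated alignment. A minimal sketch of that arithmetic, assuming kPointerSize == 4 and a power-of-two alignment (the Ref name is illustrative):

#include <cstdint>

uintptr_t PrepareCallCFunctionSpRef(uintptr_t sp, int stack_passed_words,
                                    int frame_alignment /* power of two */) {
  sp -= (stack_passed_words + 1) * sizeof(uint32_t);    // room for args + saved sp
  sp &= ~static_cast<uintptr_t>(frame_alignment - 1);   // land(sp, sp, -frame_alignment)
  return sp;  // the original sp is stored at sp + stack_passed_words * kPointerSize
}
// e.g. sp == 0x7fffffec, 2 stack words, 8-byte alignment -> 0x7fffffe0.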
| 3397 | 3153 |
| 3398 | 3154 |
| 3399 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, | 3155 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, |
| 3400 Register scratch) { | 3156 Register scratch) { |
| 3401 PrepareCallCFunction(num_reg_arguments, 0, scratch); | 3157 PrepareCallCFunction(num_reg_arguments, 0, scratch); |
| 3402 } | 3158 } |
| 3403 | 3159 |
| 3404 | 3160 |
| 3405 void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) { | |
| 3406 ASSERT(CpuFeatures::IsSupported(VFP2)); | |
| 3407 if (use_eabi_hardfloat()) { | |
| 3408 Move(d0, dreg); | |
| 3409 } else { | |
| 3410 vmov(r0, r1, dreg); | |
| 3411 } | |
| 3412 } | |
| 3413 | |
| 3414 | |
| 3415 void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1, | |
| 3416 DoubleRegister dreg2) { | |
| 3417 ASSERT(CpuFeatures::IsSupported(VFP2)); | |
| 3418 if (use_eabi_hardfloat()) { | |
| 3419 if (dreg2.is(d0)) { | |
| 3420 ASSERT(!dreg1.is(d1)); | |
| 3421 Move(d1, dreg2); | |
| 3422 Move(d0, dreg1); | |
| 3423 } else { | |
| 3424 Move(d0, dreg1); | |
| 3425 Move(d1, dreg2); | |
| 3426 } | |
| 3427 } else { | |
| 3428 vmov(r0, r1, dreg1); | |
| 3429 vmov(r2, r3, dreg2); | |
| 3430 } | |
| 3431 } | |
| 3432 | |
| 3433 | |
| 3434 void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg, | |
| 3435 Register reg) { | |
| 3436 ASSERT(CpuFeatures::IsSupported(VFP2)); | |
| 3437 if (use_eabi_hardfloat()) { | |
| 3438 Move(d0, dreg); | |
| 3439 Move(r0, reg); | |
| 3440 } else { | |
| 3441 Move(r2, reg); | |
| 3442 vmov(r0, r1, dreg); | |
| 3443 } | |
| 3444 } | |
| 3445 | |
| 3446 | |
| 3447 void MacroAssembler::CallCFunction(ExternalReference function, | 3161 void MacroAssembler::CallCFunction(ExternalReference function, |
| 3448 int num_reg_arguments, | 3162 int num_reg_arguments, |
| 3449 int num_double_arguments) { | 3163 int num_double_arguments) { |
| 3450 mov(ip, Operand(function)); | 3164 RECORD_LINE(); |
| 3451 CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments); | 3165 mov(r3, Operand(function)); |
| 3166 CallCFunctionHelper(r3, num_reg_arguments, num_double_arguments); |
| 3452 } | 3167 } |
| 3453 | 3168 |
| 3454 | 3169 |
| 3455 void MacroAssembler::CallCFunction(Register function, | |
| 3456 int num_reg_arguments, | |
| 3457 int num_double_arguments) { | |
| 3458 CallCFunctionHelper(function, num_reg_arguments, num_double_arguments); | |
| 3459 } | |
| 3460 | |
| 3461 | |
| 3462 void MacroAssembler::CallCFunction(ExternalReference function, | |
| 3463 int num_arguments) { | |
| 3464 CallCFunction(function, num_arguments, 0); | |
| 3465 } | |
| 3466 | |
| 3467 | |
| 3468 void MacroAssembler::CallCFunction(Register function, | |
| 3469 int num_arguments) { | |
| 3470 CallCFunction(function, num_arguments, 0); | |
| 3471 } | |
| 3472 | |
| 3473 | |
| 3474 void MacroAssembler::CallCFunctionHelper(Register function, | 3170 void MacroAssembler::CallCFunctionHelper(Register function, |
| 3475 int num_reg_arguments, | 3171 int num_reg_arguments, |
| 3476 int num_double_arguments) { | 3172 int num_double_arguments) { |
| 3477 ASSERT(has_frame()); | 3173 ASSERT(has_frame()); |
| 3478 // Make sure that the stack is aligned before calling a C function unless | 3174 ASSERT(!function.is(sh4_ip)); |
| 3479 // running in the simulator. The simulator has its own alignment check which | 3175 ASSERT(!function.is(sh4_rtmp)); |
| 3480 // provides more information. | 3176 #if defined(V8_HOST_ARCH_SH4) |
| 3481 #if defined(V8_HOST_ARCH_ARM) | |
| 3482 if (emit_debug_code()) { | 3177 if (emit_debug_code()) { |
| 3483 int frame_alignment = OS::ActivationFrameAlignment(); | 3178 int frame_alignment = OS::ActivationFrameAlignment(); |
| 3484 int frame_alignment_mask = frame_alignment - 1; | 3179 int frame_alignment_mask = frame_alignment - 1; |
| 3485 if (frame_alignment > kPointerSize) { | 3180 if (frame_alignment > kPointerSize) { |
| 3486 ASSERT(IsPowerOf2(frame_alignment)); | 3181 ASSERT(IsPowerOf2(frame_alignment)); |
| 3487 Label alignment_as_expected; | 3182 Label alignment_as_expected; |
| 3488 tst(sp, Operand(frame_alignment_mask)); | 3183 tst(sp, Operand(frame_alignment_mask)); |
| 3489 b(eq, &alignment_as_expected); | 3184 b(eq, &alignment_as_expected); |
| 3490 // Don't use Check here, as it will call Runtime_Abort possibly | 3185 // Don't use Check here, as it will call Runtime_Abort possibly |
| 3491 // re-entering here. | 3186 // re-entering here. |
| 3492 stop("Unexpected alignment"); | 3187 stop("Unexpected alignment"); |
| 3493 bind(&alignment_as_expected); | 3188 bind(&alignment_as_expected); |
| 3494 } | 3189 } |
| 3495 } | 3190 } |
| 3496 #endif | 3191 #endif |
| 3497 | 3192 |
| 3498 // Just call directly. The function called cannot cause a GC, or | 3193 // Just call directly. The function called cannot cause a GC, or |
| 3499 // allow preemption, so the return address in the link register | 3194 // allow preemption, so the return address in the link register |
| 3500 // stays correct. | 3195 // stays correct. |
| 3501 Call(function); | 3196 jsr(function); |
| 3502 int stack_passed_arguments = CalculateStackPassedWords( | 3197 int stack_passed_arguments = CalculateStackPassedWords( |
| 3503 num_reg_arguments, num_double_arguments); | 3198 num_reg_arguments, num_double_arguments); |
| 3504 if (ActivationFrameAlignment() > kPointerSize) { | 3199 if (OS::ActivationFrameAlignment() > kPointerSize) { |
| 3505 ldr(sp, MemOperand(sp, stack_passed_arguments * kPointerSize)); | 3200 ldr(sp, MemOperand(sp, stack_passed_arguments * kPointerSize)); |
| 3506 } else { | 3201 } else { |
| 3507 add(sp, sp, Operand(stack_passed_arguments * sizeof(kPointerSize))); | 3202 add(sp, sp, Operand(stack_passed_arguments * sizeof(kPointerSize))); |
| 3508 } | 3203 } |
| 3509 } | 3204 } |
| 3510 | 3205 |
| 3511 | 3206 |
| 3512 void MacroAssembler::GetRelocatedValueLocation(Register ldr_location, | 3207 void MacroAssembler::GetRelocatedValueLocation(Register ldr_location, |
| 3513 Register result) { | 3208 Register result) { |
| 3514 const uint32_t kLdrOffsetMask = (1 << 12) - 1; | 3209 UNIMPLEMENTED_BREAK(); |
| 3515 const int32_t kPCRegOffset = 2 * kPointerSize; | |
| 3516 ldr(result, MemOperand(ldr_location)); | |
| 3517 if (emit_debug_code()) { | |
| 3518 // Check that the instruction is a ldr reg, [pc + offset] . | |
| 3519 and_(result, result, Operand(kLdrPCPattern)); | |
| 3520 cmp(result, Operand(kLdrPCPattern)); | |
| 3521 Check(eq, "The instruction to patch should be a load from pc."); | |
| 3522 // Result was clobbered. Restore it. | |
| 3523 ldr(result, MemOperand(ldr_location)); | |
| 3524 } | |
| 3525 // Get the address of the constant. | |
| 3526 and_(result, result, Operand(kLdrOffsetMask)); | |
| 3527 add(result, ldr_location, Operand(result)); | |
| 3528 add(result, result, Operand(kPCRegOffset)); | |
| 3529 } | 3210 } |
| 3530 | 3211 |
| 3531 | 3212 |
| 3532 void MacroAssembler::CheckPageFlag( | 3213 void MacroAssembler::CheckPageFlag( |
| 3533 Register object, | 3214 Register object, |
| 3534 Register scratch, | 3215 Register scratch, |
| 3535 int mask, | 3216 int mask, |
| 3536 Condition cc, | 3217 Condition cc, |
| 3537 Label* condition_met) { | 3218 Label* condition_met) { |
| 3538 Bfc(scratch, object, 0, kPageSizeBits); | 3219 land(scratch, object, Operand(~Page::kPageAlignmentMask)); |
| 3539 ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset)); | 3220 ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset)); |
| 3540 tst(scratch, Operand(mask)); | 3221 cmphs(scratch, Operand(mask)); |
| 3541 b(cc, condition_met); | 3222 bf(condition_met); |
| 3542 } | 3223 } |
| 3543 | 3224 |
| 3544 | 3225 |
| 3545 void MacroAssembler::JumpIfBlack(Register object, | 3226 void MacroAssembler::JumpIfBlack(Register object, |
| 3546 Register scratch0, | 3227 Register scratch0, |
| 3547 Register scratch1, | 3228 Register scratch1, |
| 3548 Label* on_black) { | 3229 Label* on_black) { |
| 3549 HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern. | 3230 HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern. |
| 3550 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); | 3231 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); |
| 3551 } | 3232 } |
| 3552 | 3233 |
| 3553 | 3234 |
| 3554 void MacroAssembler::HasColor(Register object, | 3235 void MacroAssembler::HasColor(Register object, |
| 3555 Register bitmap_scratch, | 3236 Register bitmap_scratch, |
| 3556 Register mask_scratch, | 3237 Register mask_scratch, |
| 3557 Label* has_color, | 3238 Label* has_color, |
| 3558 int first_bit, | 3239 int first_bit, |
| 3559 int second_bit) { | 3240 int second_bit) { |
| 3560 ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg)); | 3241 ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg)); |
| 3561 | 3242 |
| 3562 GetMarkBits(object, bitmap_scratch, mask_scratch); | 3243 GetMarkBits(object, bitmap_scratch, mask_scratch); |
| 3563 | 3244 |
| 3564 Label other_color, word_boundary; | 3245 Label other_color, word_boundary; |
| 3565 ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); | 3246 ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); |
| 3566 tst(ip, Operand(mask_scratch)); | 3247 tst(ip, mask_scratch); |
| 3567 b(first_bit == 1 ? eq : ne, &other_color); | 3248 b(first_bit == 1 ? eq : ne, &other_color); |
| 3568 // Shift left 1 by adding. | 3249 // Shift left 1 by adding. |
| 3569 add(mask_scratch, mask_scratch, Operand(mask_scratch), SetCC); | 3250 add(mask_scratch, mask_scratch, mask_scratch); |
| 3251 tst(mask_scratch, mask_scratch); |
| 3570 b(eq, &word_boundary); | 3252 b(eq, &word_boundary); |
| 3571 tst(ip, Operand(mask_scratch)); | 3253 tst(ip, mask_scratch); |
| 3572 b(second_bit == 1 ? ne : eq, has_color); | 3254 b(second_bit == 1 ? ne : eq, has_color); |
| 3573 jmp(&other_color); | 3255 jmp(&other_color); |
| 3574 | 3256 |
| 3575 bind(&word_boundary); | 3257 bind(&word_boundary); |
| 3576 ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize)); | 3258 ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize)); |
| 3577 tst(ip, Operand(1)); | 3259 tst(ip, Operand(1)); |
| 3578 b(second_bit == 1 ? ne : eq, has_color); | 3260 b(second_bit == 1 ? ne : eq, has_color); |
| 3579 bind(&other_color); | 3261 bind(&other_color); |
| 3580 } | 3262 } |
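Reviewer note: the two-bit mark encoding tested here (black is "10", white is "00", per the ASSERT in JumpIfBlack) can be summarized as the sketch below. It deliberately ignores the word-boundary case that the assembly handles explicitly, and the helper name is illustrative only.

    #include <cstdint>

    // Illustrative only: checks two consecutive mark bits against the
    // expected pattern, ignoring the cell (word) boundary case.
    static bool HasColorSketch(uint32_t cell, uint32_t mask,
                               int first_bit, int second_bit) {
      if (((cell & mask) != 0) != (first_bit == 1)) return false;  // other color
      mask += mask;  // "shift left 1 by adding", as in the assembly
      return ((cell & mask) != 0) == (second_bit == 1);
    }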
| 3581 | 3263 |
| 3582 | 3264 |
| 3583 // Detect some, but not all, common pointer-free objects. This is used by the | |
| 3584 // incremental write barrier which doesn't care about oddballs (they are always | |
| 3585 // marked black immediately so this code is not hit). | |
| 3586 void MacroAssembler::JumpIfDataObject(Register value, | |
| 3587 Register scratch, | |
| 3588 Label* not_data_object) { | |
| 3589 Label is_data_object; | |
| 3590 ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset)); | |
| 3591 CompareRoot(scratch, Heap::kHeapNumberMapRootIndex); | |
| 3592 b(eq, &is_data_object); | |
| 3593 ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); | |
| 3594 ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); | |
| 3595 // If it's a string and it's not a cons string then it's an object containing | |
| 3596 // no GC pointers. | |
| 3597 ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); | |
| 3598 tst(scratch, Operand(kIsIndirectStringMask | kIsNotStringMask)); | |
| 3599 b(ne, not_data_object); | |
| 3600 bind(&is_data_object); | |
| 3601 } | |
| 3602 | |
| 3603 | |
| 3604 void MacroAssembler::GetMarkBits(Register addr_reg, | 3265 void MacroAssembler::GetMarkBits(Register addr_reg, |
| 3605 Register bitmap_reg, | 3266 Register bitmap_reg, |
| 3606 Register mask_reg) { | 3267 Register mask_reg) { |
| 3607 ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg)); | 3268 ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg)); |
| 3608 and_(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask)); | 3269 land(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask)); |
| 3609 Ubfx(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2); | 3270 Ubfx(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2); |
| 3610 const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2; | 3271 const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2; |
| 3611 Ubfx(ip, addr_reg, kLowBits, kPageSizeBits - kLowBits); | 3272 Ubfx(ip, addr_reg, kLowBits, kPageSizeBits - kLowBits); |
| 3612 add(bitmap_reg, bitmap_reg, Operand(ip, LSL, kPointerSizeLog2)); | 3273 lsl(ip, ip, Operand(kPointerSizeLog2)); |
| 3274 add(bitmap_reg, bitmap_reg, ip); |
| 3613 mov(ip, Operand(1)); | 3275 mov(ip, Operand(1)); |
| 3614 mov(mask_reg, Operand(ip, LSL, mask_reg)); | 3276 lsl(mask_reg, ip, mask_reg); |
| 3615 } | 3277 } |
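Reviewer note: a hedged sketch of the address arithmetic GetMarkBits performs, with the V8 constants passed in as parameters; the struct and function names are illustrative. The MemoryChunk::kHeaderSize offset is applied later, at the load site.

    #include <cstdint>

    struct MarkBitLocation {
      uintptr_t cell_address;  // bitmap cell address (before kHeaderSize)
      uint32_t mask;           // single-bit mask within that cell
    };

    static MarkBitLocation GetMarkBitsSketch(uintptr_t addr,
                                             uintptr_t page_alignment_mask,  // Page::kPageAlignmentMask
                                             int pointer_size_log2,          // kPointerSizeLog2
                                             int bits_per_cell_log2,         // Bitmap::kBitsPerCellLog2
                                             int page_size_bits) {           // kPageSizeBits
      uintptr_t page = addr & ~page_alignment_mask;                 // land(bitmap_reg, ...)
      uint32_t bit_index = static_cast<uint32_t>(                   // first Ubfx
          (addr >> pointer_size_log2) & ((1u << bits_per_cell_log2) - 1));
      int low_bits = pointer_size_log2 + bits_per_cell_log2;
      uintptr_t cell_index =                                        // second Ubfx
          (addr >> low_bits) &
          ((static_cast<uintptr_t>(1) << (page_size_bits - low_bits)) - 1);
      MarkBitLocation loc;
      loc.cell_address = page + (cell_index << pointer_size_log2);  // one word per cell
      loc.mask = 1u << bit_index;                                   // mov ip, 1; lsl
      return loc;
    }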
| 3616 | 3278 |
| 3617 | 3279 |
| 3618 void MacroAssembler::EnsureNotWhite( | 3280 void MacroAssembler::EnsureNotWhite( |
| 3619 Register value, | 3281 Register value, |
| 3620 Register bitmap_scratch, | 3282 Register bitmap_scratch, |
| 3621 Register mask_scratch, | 3283 Register mask_scratch, |
| 3622 Register load_scratch, | 3284 Register load_scratch, |
| 3623 Label* value_is_white_and_not_data) { | 3285 Label* value_is_white_and_not_data) { |
| 3624 ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, ip)); | 3286 ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, ip)); |
| (...skipping 10 matching lines...) |
| 3635 // Since both black and grey have a 1 in the first position and white does | 3297 // Since both black and grey have a 1 in the first position and white does |
| 3636 // not have a 1 there we only need to check one bit. | 3298 // not have a 1 there we only need to check one bit. |
| 3637 ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); | 3299 ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); |
| 3638 tst(mask_scratch, load_scratch); | 3300 tst(mask_scratch, load_scratch); |
| 3639 b(ne, &done); | 3301 b(ne, &done); |
| 3640 | 3302 |
| 3641 if (emit_debug_code()) { | 3303 if (emit_debug_code()) { |
| 3642 // Check for impossible bit pattern. | 3304 // Check for impossible bit pattern. |
| 3643 Label ok; | 3305 Label ok; |
| 3644 // LSL may overflow, making the check conservative. | 3306 // LSL may overflow, making the check conservative. |
| 3645 tst(load_scratch, Operand(mask_scratch, LSL, 1)); | 3307 lsl(ip, mask_scratch, Operand(1)); |
| 3308 tst(load_scratch, ip); |
| 3646 b(eq, &ok); | 3309 b(eq, &ok); |
| 3647 stop("Impossible marking bit pattern"); | 3310 stop("Impossible marking bit pattern"); |
| 3648 bind(&ok); | 3311 bind(&ok); |
| 3649 } | 3312 } |
| 3650 | 3313 |
| 3651 // Value is white. We check whether it is data that doesn't need scanning. | 3314 // Value is white. We check whether it is data that doesn't need scanning. |
| 3652 // Currently only checks for HeapNumber and non-cons strings. | 3315 // Currently only checks for HeapNumber and non-cons strings. |
| 3653 Register map = load_scratch; // Holds map while checking type. | 3316 Register map = load_scratch; // Holds map while checking type. |
| 3654 Register length = load_scratch; // Holds length of object after testing type. | 3317 Register length = load_scratch; // Holds length of object after testing type. |
| 3655 Label is_data_object; | 3318 Label is_data_object; |
| 3656 | 3319 |
| 3657 // Check for heap-number | 3320 // Check for heap-number |
| 3658 ldr(map, FieldMemOperand(value, HeapObject::kMapOffset)); | 3321 ldr(map, FieldMemOperand(value, HeapObject::kMapOffset)); |
| 3659 CompareRoot(map, Heap::kHeapNumberMapRootIndex); | 3322 CompareRoot(map, Heap::kHeapNumberMapRootIndex); |
| 3660 mov(length, Operand(HeapNumber::kSize), LeaveCC, eq); | 3323 mov(length, Operand(HeapNumber::kSize), eq); |
| 3661 b(eq, &is_data_object); | 3324 b(eq, &is_data_object); |
| 3662 | 3325 |
| 3663 // Check for strings. | 3326 // Check for strings. |
| 3664 ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); | 3327 ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); |
| 3665 ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); | 3328 ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); |
| 3666 // If it's a string and it's not a cons string then it's an object containing | 3329 // If it's a string and it's not a cons string then it's an object containing |
| 3667 // no GC pointers. | 3330 // no GC pointers. |
| 3668 Register instance_type = load_scratch; | 3331 Register instance_type = load_scratch; |
| 3669 ldrb(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset)); | 3332 ldrb(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset)); |
| 3670 tst(instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask)); | 3333 tst(instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask)); |
| 3671 b(ne, value_is_white_and_not_data); | 3334 b(ne, value_is_white_and_not_data); |
| 3672 // It's a non-indirect (non-cons and non-slice) string. | 3335 // It's a non-indirect (non-cons and non-slice) string. |
| 3673 // If it's external, the length is just ExternalString::kSize. | 3336 // If it's external, the length is just ExternalString::kSize. |
| 3674 // Otherwise it's String::kHeaderSize + string->length() * (1 or 2). | 3337 // Otherwise it's String::kHeaderSize + string->length() * (1 or 2). |
| 3675 // External strings are the only ones with the kExternalStringTag bit | 3338 // External strings are the only ones with the kExternalStringTag bit |
| 3676 // set. | 3339 // set. |
| 3677 ASSERT_EQ(0, kSeqStringTag & kExternalStringTag); | 3340 ASSERT_EQ(0, kSeqStringTag & kExternalStringTag); |
| 3678 ASSERT_EQ(0, kConsStringTag & kExternalStringTag); | 3341 ASSERT_EQ(0, kConsStringTag & kExternalStringTag); |
| 3679 tst(instance_type, Operand(kExternalStringTag)); | 3342 tst(instance_type, Operand(kExternalStringTag)); |
| 3680 mov(length, Operand(ExternalString::kSize), LeaveCC, ne); | 3343 mov(length, Operand(ExternalString::kSize), ne); |
| 3681 b(ne, &is_data_object); | 3344 b(ne, &is_data_object); |
| 3682 | 3345 |
| 3683 // Sequential string, either ASCII or UC16. | 3346 // Sequential string, either ASCII or UC16. |
| 3684 // For ASCII (char-size of 1) we shift the smi tag away to get the length. | 3347 // For ASCII (char-size of 1) we shift the smi tag away to get the length. |
| 3685 // For UC16 (char-size of 2) we just leave the smi tag in place, thereby | 3348 // For UC16 (char-size of 2) we just leave the smi tag in place, thereby |
| 3686 // getting the length multiplied by 2. | 3349 // getting the length multiplied by 2. |
| 3687 ASSERT(kAsciiStringTag == 4 && kStringEncodingMask == 4); | 3350 ASSERT(kAsciiStringTag == 4 && kStringEncodingMask == 4); |
| 3688 ASSERT(kSmiTag == 0 && kSmiTagSize == 1); | 3351 ASSERT(kSmiTag == 0 && kSmiTagSize == 1); |
| 3689 ldr(ip, FieldMemOperand(value, String::kLengthOffset)); | 3352 ldr(ip, FieldMemOperand(value, String::kLengthOffset)); |
| 3690 tst(instance_type, Operand(kStringEncodingMask)); | 3353 tst(instance_type, Operand(kStringEncodingMask)); |
| 3691 mov(ip, Operand(ip, LSR, 1), LeaveCC, ne); | 3354 Label skip; |
| 3355 bt(&skip); |
| 3356 lsr(ip, ip, Operand(1)); |
| 3357 bind(&skip); |
| 3692 add(length, ip, Operand(SeqString::kHeaderSize + kObjectAlignmentMask)); | 3358 add(length, ip, Operand(SeqString::kHeaderSize + kObjectAlignmentMask)); |
| 3693 and_(length, length, Operand(~kObjectAlignmentMask)); | 3359 land(length, length, Operand(~kObjectAlignmentMask)); |
| 3694 | 3360 |
| 3695 bind(&is_data_object); | 3361 bind(&is_data_object); |
| 3696 // Value is a data object, and it is white. Mark it black. Since we know | 3362 // Value is a data object, and it is white. Mark it black. Since we know |
| 3697 // that the object is white we can make it black by flipping one bit. | 3363 // that the object is white we can make it black by flipping one bit. |
| 3698 ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); | 3364 ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); |
| 3699 orr(ip, ip, Operand(mask_scratch)); | 3365 orr(ip, ip, mask_scratch); |
| 3700 str(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); | 3366 str(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); |
| 3701 | 3367 |
| 3702 and_(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask)); | 3368 land(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask)); |
| 3703 ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset)); | 3369 ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset)); |
| 3704 add(ip, ip, Operand(length)); | 3370 add(ip, ip, length); |
| 3705 str(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset)); | 3371 str(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset)); |
| 3706 | 3372 |
| 3707 bind(&done); | 3373 bind(&done); |
| 3708 } | 3374 } |
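Reviewer note: the live-byte count added at the end uses the object size computed above; for sequential strings that computation is, in essence, the sketch below (illustrative names, assuming the 2012-era layout with a one-bit smi tag and one byte per ASCII character).

    #include <cstdint>

    // smi_length is the value at String::kLengthOffset: the character count
    // shifted left by the 1-bit smi tag.
    static uint32_t SeqStringSizeSketch(uint32_t smi_length, bool is_ascii,
                                        uint32_t header_size,       // SeqString::kHeaderSize
                                        uint32_t alignment_mask) {  // kObjectAlignmentMask
      uint32_t byte_length = is_ascii ? (smi_length >> 1)  // drop tag: 1 byte/char
                                      : smi_length;        // keep tag: 2 bytes/char
      // Round the total up to the object alignment, as the add + land above do.
      return (byte_length + header_size + alignment_mask) & ~alignment_mask;
    }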
| 3709 | 3375 |
| 3710 | 3376 |
| 3711 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) { | |
| 3712 Usat(output_reg, 8, Operand(input_reg)); | |
| 3713 } | |
| 3714 | |
| 3715 | |
| 3716 void MacroAssembler::ClampDoubleToUint8(Register result_reg, | |
| 3717 DoubleRegister input_reg, | |
| 3718 DoubleRegister temp_double_reg) { | |
| 3719 Label above_zero; | |
| 3720 Label done; | |
| 3721 Label in_bounds; | |
| 3722 | |
| 3723 Vmov(temp_double_reg, 0.0); | |
| 3724 VFPCompareAndSetFlags(input_reg, temp_double_reg); | |
| 3725 b(gt, &above_zero); | |
| 3726 | |
| 3727 // Double value is less than zero, NaN or Inf, return 0. | |
| 3728 mov(result_reg, Operand(0)); | |
| 3729 b(al, &done); | |
| 3730 | |
| 3731 // Double value is >= 255, return 255. | |
| 3732 bind(&above_zero); | |
| 3733 Vmov(temp_double_reg, 255.0, result_reg); | |
| 3734 VFPCompareAndSetFlags(input_reg, temp_double_reg); | |
| 3735 b(le, &in_bounds); | |
| 3736 mov(result_reg, Operand(255)); | |
| 3737 b(al, &done); | |
| 3738 | |
| 3739 // In 0-255 range, round and truncate. | |
| 3740 bind(&in_bounds); | |
| 3741 // Save FPSCR. | |
| 3742 vmrs(ip); | |
| 3743 // Set rounding mode to round to the nearest integer by clearing bits[23:22]. | |
| 3744 bic(result_reg, ip, Operand(kVFPRoundingModeMask)); | |
| 3745 vmsr(result_reg); | |
| 3746 vcvt_s32_f64(input_reg.low(), input_reg, kFPSCRRounding); | |
| 3747 vmov(result_reg, input_reg.low()); | |
| 3748 // Restore FPSCR. | |
| 3749 vmsr(ip); | |
| 3750 bind(&done); | |
| 3751 } | |
| 3752 | |
| 3753 | |
| 3754 void MacroAssembler::LoadInstanceDescriptors(Register map, | 3377 void MacroAssembler::LoadInstanceDescriptors(Register map, |
| 3755 Register descriptors) { | 3378 Register descriptors, |
| 3379 Register scratch) { |
| 3756 ldr(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset)); | 3380 ldr(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset)); |
| 3757 } | 3381 } |
| 3758 | 3382 |
| 3759 | 3383 |
| 3760 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) { | 3384 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) { |
| 3761 ldr(dst, FieldMemOperand(map, Map::kBitField3Offset)); | 3385 ldr(dst, FieldMemOperand(map, Map::kBitField3Offset)); |
| 3762 DecodeField<Map::NumberOfOwnDescriptorsBits>(dst); | 3386 DecodeField<Map::NumberOfOwnDescriptorsBits>(dst); |
| 3763 } | 3387 } |
| 3764 | 3388 |
| 3765 | 3389 |
| 3766 void MacroAssembler::EnumLength(Register dst, Register map) { | 3390 void MacroAssembler::EnumLength(Register dst, Register map) { |
| 3767 STATIC_ASSERT(Map::EnumLengthBits::kShift == 0); | 3391 STATIC_ASSERT(Map::EnumLengthBits::kShift == 0); |
| 3768 ldr(dst, FieldMemOperand(map, Map::kBitField3Offset)); | 3392 ldr(dst, FieldMemOperand(map, Map::kBitField3Offset)); |
| 3769 and_(dst, dst, Operand(Smi::FromInt(Map::EnumLengthBits::kMask))); | 3393 land(dst, dst, Operand(Smi::FromInt(Map::EnumLengthBits::kMask))); |
| 3770 } | 3394 } |
| 3771 | 3395 |
| 3772 | 3396 |
| 3773 void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) { | 3397 void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) { |
| 3774 Register empty_fixed_array_value = r6; | 3398 Register empty_fixed_array_value = r6; |
| 3775 LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex); | 3399 LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex); |
| 3776 Label next, start; | 3400 Label next, start; |
| 3777 mov(r2, r0); | 3401 mov(r2, r0); |
| 3778 | 3402 |
| 3779 // Check if the enum length field is properly initialized, indicating that | 3403 // Check if the enum length field is properly initialized, indicating that |
| (...skipping 45 matching lines...) |
| 3825 if (reg4.is_valid()) regs |= reg4.bit(); | 3449 if (reg4.is_valid()) regs |= reg4.bit(); |
| 3826 if (reg5.is_valid()) regs |= reg5.bit(); | 3450 if (reg5.is_valid()) regs |= reg5.bit(); |
| 3827 if (reg6.is_valid()) regs |= reg6.bit(); | 3451 if (reg6.is_valid()) regs |= reg6.bit(); |
| 3828 int n_of_non_aliasing_regs = NumRegs(regs); | 3452 int n_of_non_aliasing_regs = NumRegs(regs); |
| 3829 | 3453 |
| 3830 return n_of_valid_regs != n_of_non_aliasing_regs; | 3454 return n_of_valid_regs != n_of_non_aliasing_regs; |
| 3831 } | 3455 } |
| 3832 #endif | 3456 #endif |
| 3833 | 3457 |
| 3834 | 3458 |
| 3459 void MacroAssembler::Drop(int stack_elements) { |
| 3460 RECORD_LINE(); |
| 3461 if (stack_elements > 0) { |
| 3462 RECORD_LINE(); |
| 3463 add(sp, sp, Operand(stack_elements * kPointerSize)); |
| 3464 } |
| 3465 } |
| 3466 |
| 3467 |
| 3468 void MacroAssembler::UnimplementedBreak(const char *file, int line) { |
| 3469 uint32_t file_id = 0; |
| 3470 const char *base = strrchr(file, '/'); |
| 3471 if (base == NULL) |
| 3472 base = file; |
| 3473 while (*base) { |
| 3474 file_id += *base; |
| 3475 base++; |
| 3476 } |
| 3477 RECORD_LINE(); |
| 3478 mov(r0, Operand(file_id)); |
| 3479 mov(r1, Operand(line)); |
| 3480 bkpt(); |
| 3481 } |
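Reviewer note: the file_id placed in r0 is just a byte sum over the file's basename, so it identifies the call site only loosely (collisions are possible). A sketch of the same computation, keeping the quirk that the leading '/' is included in the sum:

    #include <cstdint>
    #include <cstring>

    static uint32_t FileIdSketch(const char* file) {
      const char* base = strrchr(file, '/');
      if (base == NULL) base = file;     // no slash: sum the whole path
      uint32_t id = 0;
      for (; *base != '\0'; ++base) {    // note: includes the '/' itself
        id += static_cast<unsigned char>(*base);
      }
      return id;
    }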
| 3482 |
| 3483 |
| 3484 void MacroAssembler::Ret(Condition cond) { |
| 3485 ASSERT(cond == al || cond == eq || cond == ne); |
| 3486 if (cond == al) { |
| 3487 RECORD_LINE(); |
| 3488 rts(); |
| 3489 } else { |
| 3490 RECORD_LINE(); |
| 3491 Label skip; |
| 3492 if (cond == eq) { |
| 3493 bf_near(&skip); |
| 3494 } else { |
| 3495 bt_near(&skip); |
| 3496 } |
| 3497 rts(); |
| 3498 bind(&skip); |
| 3499 } |
| 3500 } |
| 3501 |
| 3502 |
| 3503 MacroAssembler* MacroAssembler::RecordFunctionLine(const char* function, |
| 3504 int line) { |
| 3505 if (FLAG_code_comments) { |
| 3506 /* 10(strlen of MAXINT) + 1(separator) +1(nul). */ |
| 3507 int size = strlen("/line/")+strlen(function) + 10 + 1 + 1; |
| 3508 char *buffer = new char[size]; |
| 3509 snprintf(buffer, size, "/line/%s/%d", function, line); |
| 3510 buffer[size-1] = '\0'; |
| 3511 RecordComment(buffer); |
| 3512 } |
| 3513 return this; |
| 3514 } |
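Reviewer note: the buffer is sized as strlen("/line/") + strlen(function) + 10 digits for the largest int + 1 separator + 1 terminating NUL; snprintf already NUL-terminates, so the explicit buffer[size-1] write is belt-and-braces. A sketch of the resulting comment string (illustrative helper, not part of the port):

    #include <cstdio>
    #include <cstring>
    #include <string>

    // Builds the same "/line/<function>/<line>" tag that RecordComment receives.
    static std::string LineCommentSketch(const char* function, int line) {
      int size = static_cast<int>(strlen("/line/") + strlen(function)) + 10 + 1 + 1;
      std::string buffer(static_cast<size_t>(size), '\0');
      snprintf(&buffer[0], buffer.size(), "/line/%s/%d", function, line);
      buffer.resize(strlen(buffer.c_str()));  // trim to the formatted length
      return buffer;
    }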
| 3515 |
| 3516 |
| 3835 CodePatcher::CodePatcher(byte* address, int instructions) | 3517 CodePatcher::CodePatcher(byte* address, int instructions) |
| 3836 : address_(address), | 3518 : address_(address), |
| 3837 instructions_(instructions), | 3519 instructions_(instructions), |
| 3838 size_(instructions * Assembler::kInstrSize), | 3520 size_(instructions * Assembler::kInstrSize), |
| 3839 masm_(NULL, address, size_ + Assembler::kGap) { | 3521 masm_(NULL, address, size_ + Assembler::kGap) { |
| 3840 // Create a new macro assembler pointing to the address of the code to patch. | 3522 // Create a new macro assembler pointing to the address of the code to patch. |
| 3841 // The size is adjusted with kGap in order for the assembler to generate size | 3523 // The size is adjusted with kGap in order for the assembler to generate size |
| 3842 // bytes of instructions without failing with buffer size constraints. | 3524 // bytes of instructions without failing with buffer size constraints. |
| 3843 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); | 3525 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); |
| 3844 } | 3526 } |
| 3845 | 3527 |
| 3846 | 3528 |
| 3847 CodePatcher::~CodePatcher() { | 3529 CodePatcher::~CodePatcher() { |
| 3848 // Indicate that code has changed. | 3530 // Indicate that code has changed. |
| 3849 CPU::FlushICache(address_, size_); | 3531 CPU::FlushICache(address_, size_); |
| 3850 | 3532 |
| 3851 // Check that the code was patched as expected. | 3533 // Check that the code was patched as expected. |
| 3852 ASSERT(masm_.pc_ == address_ + size_); | 3534 ASSERT(masm_.pc_ == address_ + size_); |
| 3853 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); | 3535 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); |
| 3854 } | 3536 } |
| 3855 | 3537 |
| 3856 | 3538 |
| 3857 void CodePatcher::Emit(Instr instr) { | |
| 3858 masm()->emit(instr); | |
| 3859 } | |
| 3860 | |
| 3861 | |
| 3862 void CodePatcher::Emit(Address addr) { | |
| 3863 masm()->emit(reinterpret_cast<Instr>(addr)); | |
| 3864 } | |
| 3865 | |
| 3866 | |
| 3867 void CodePatcher::EmitCondition(Condition cond) { | 3539 void CodePatcher::EmitCondition(Condition cond) { |
| 3868 Instr instr = Assembler::instr_at(masm_.pc_); | 3540 Instr instr = Assembler::instr_at(masm_.pc_); |
| 3869 instr = (instr & ~kCondMask) | cond; | 3541 ASSERT(cond == eq || cond == ne); |
| 3542 ASSERT(Assembler::IsBranch(instr)); |
| 3543 instr = (instr & ~0x200); // Changed to bt |
| 3544 if (cond == ne) |
| 3545 instr |= 0x200; // Changed to bf |
| 3870 masm_.emit(instr); | 3546 masm_.emit(instr); |
| 3871 } | 3547 } |
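Reviewer note: EmitCondition relies on the SH-4 encodings of BT (0x89xx) and BF (0x8Bxx) differing only in bit 9, which is what the 0x200 manipulation toggles (eq emits BT, ne emits BF). A sketch of the same transformation, assuming a 16-bit instruction word; the helper name is illustrative:

    #include <cstdint>

    static uint16_t SetBranchSenseSketch(uint16_t instr, bool branch_if_false) {
      instr = static_cast<uint16_t>(instr & ~0x0200);  // force BT (0x89xx)
      if (branch_if_false) instr |= 0x0200;            // flip to BF (0x8Bxx)
      return instr;
    }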
| 3872 | 3548 |
| 3873 | 3549 |
| 3874 } } // namespace v8::internal | 3550 } } // namespace v8::internal |
| 3875 | 3551 |
| 3876 #endif // V8_TARGET_ARCH_ARM | 3552 #endif // V8_TARGET_ARCH_SH4 |