Chromium Code Reviews

Side by Side Diff: src/arm/assembler-arm.cc

Issue 6685088: Merge isolates to bleeding_edge. (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: '' Created 9 years, 9 months ago
1 // Copyright (c) 1994-2006 Sun Microsystems Inc. 1 // Copyright (c) 1994-2006 Sun Microsystems Inc.
2 // All Rights Reserved. 2 // All Rights Reserved.
3 // 3 //
4 // Redistribution and use in source and binary forms, with or without 4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions 5 // modification, are permitted provided that the following conditions
6 // are met: 6 // are met:
7 // 7 //
8 // - Redistributions of source code must retain the above copyright notice, 8 // - Redistributions of source code must retain the above copyright notice,
9 // this list of conditions and the following disclaimer. 9 // this list of conditions and the following disclaimer.
10 // 10 //
(...skipping 26 matching lines...)
37 #include "v8.h" 37 #include "v8.h"
38 38
39 #if defined(V8_TARGET_ARCH_ARM) 39 #if defined(V8_TARGET_ARCH_ARM)
40 40
41 #include "arm/assembler-arm-inl.h" 41 #include "arm/assembler-arm-inl.h"
42 #include "serialize.h" 42 #include "serialize.h"
43 43
44 namespace v8 { 44 namespace v8 {
45 namespace internal { 45 namespace internal {
46 46
47 // Safe default is no features. 47 CpuFeatures::CpuFeatures()
48 unsigned CpuFeatures::supported_ = 0; 48 : supported_(0),
49 unsigned CpuFeatures::enabled_ = 0; 49 enabled_(0),
50 unsigned CpuFeatures::found_by_runtime_probing_ = 0; 50 found_by_runtime_probing_(0) {
51 51 }
52 52
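The first isolate-related change above turns CpuFeatures from three class-static bitmasks into an object with a constructor, so each isolate can carry its own feature state. A minimal sketch of the shape this diff implies (hypothetical; the real class is declared in the assembler headers and also has a Probe() step that fills in the runtime-detected bits):

```cpp
// Sketch only: a per-isolate CpuFeatures holder, as implied by the new
// constructor above. The enum is a small subset of V8's real CpuFeature list.
enum CpuFeature { VFP3, ARMv7 };

class CpuFeatures {
 public:
  CpuFeatures() : supported_(0), enabled_(0), found_by_runtime_probing_(0) {}

  bool IsSupported(CpuFeature f) const { return (supported_ & (1u << f)) != 0; }
  bool IsEnabled(CpuFeature f) const { return (enabled_ & (1u << f)) != 0; }

 private:
  unsigned supported_;                 // features generated code may rely on
  unsigned enabled_;                   // features currently enabled
  unsigned found_by_runtime_probing_;  // subset found only by runtime probing
};
```

Call sites throughout the rest of this file change accordingly, from the old static `CpuFeatures::IsSupported(ARMv7)` / `CpuFeatures::IsEnabled(VFP3)` to a lookup through the current isolate: `Isolate::Current()->cpu_features()->IsSupported(ARMv7)`.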
53 #ifdef __arm__ 53 #ifdef __arm__
54 static uint64_t CpuFeaturesImpliedByCompiler() { 54 static uint64_t CpuFeaturesImpliedByCompiler() {
55 uint64_t answer = 0; 55 uint64_t answer = 0;
56 #ifdef CAN_USE_ARMV7_INSTRUCTIONS 56 #ifdef CAN_USE_ARMV7_INSTRUCTIONS
57 answer |= 1u << ARMv7; 57 answer |= 1u << ARMv7;
58 #endif // def CAN_USE_ARMV7_INSTRUCTIONS 58 #endif // def CAN_USE_ARMV7_INSTRUCTIONS
59 // If the compiler is allowed to use VFP then we can use VFP too in our code 59 // If the compiler is allowed to use VFP then we can use VFP too in our code
60 // generation even when generating snapshots. This won't work for cross 60 // generation even when generating snapshots. This won't work for cross
61 // compilation. 61 // compilation.
(...skipping 79 matching lines...)
141 141
142 142
143 // ----------------------------------------------------------------------------- 143 // -----------------------------------------------------------------------------
144 // Implementation of Operand and MemOperand 144 // Implementation of Operand and MemOperand
145 // See assembler-arm-inl.h for inlined constructors 145 // See assembler-arm-inl.h for inlined constructors
146 146
147 Operand::Operand(Handle<Object> handle) { 147 Operand::Operand(Handle<Object> handle) {
148 rm_ = no_reg; 148 rm_ = no_reg;
149 // Verify all Objects referred by code are NOT in new space. 149 // Verify all Objects referred by code are NOT in new space.
150 Object* obj = *handle; 150 Object* obj = *handle;
151 ASSERT(!Heap::InNewSpace(obj)); 151 ASSERT(!HEAP->InNewSpace(obj));
152 if (obj->IsHeapObject()) { 152 if (obj->IsHeapObject()) {
153 imm32_ = reinterpret_cast<intptr_t>(handle.location()); 153 imm32_ = reinterpret_cast<intptr_t>(handle.location());
154 rmode_ = RelocInfo::EMBEDDED_OBJECT; 154 rmode_ = RelocInfo::EMBEDDED_OBJECT;
155 } else { 155 } else {
156 // no relocation needed 156 // no relocation needed
157 imm32_ = reinterpret_cast<intptr_t>(obj); 157 imm32_ = reinterpret_cast<intptr_t>(obj);
158 rmode_ = RelocInfo::NONE; 158 rmode_ = RelocInfo::NONE;
159 } 159 }
160 } 160 }
161 161
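The assert in `Operand::Operand(Handle<Object>)` above switches from the static `Heap::InNewSpace(obj)` to `HEAP->InNewSpace(obj)`: with isolates there is one heap per isolate, reached through the isolate rather than through static Heap methods. A sketch of the assumption behind the `HEAP` spelling (the real macro lives in isolate.h and its exact definition is not shown in this patch):

```cpp
// Assumed expansion of the HEAP convenience macro used above; the essential
// point is that heap queries now go through the current Isolate.
#define HEAP (v8::internal::Isolate::Current()->heap())

// With that, the assertion reads as an instance call:
//   ASSERT(!HEAP->InNewSpace(obj));
```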
(...skipping 97 matching lines...)
259 al | B26 | L | NegOffset | fp.code() * B16; 259 al | B26 | L | NegOffset | fp.code() * B16;
260 const Instr kStrRegFpNegOffsetPattern = 260 const Instr kStrRegFpNegOffsetPattern =
261 al | B26 | NegOffset | fp.code() * B16; 261 al | B26 | NegOffset | fp.code() * B16;
262 const Instr kLdrStrInstrTypeMask = 0xffff0000; 262 const Instr kLdrStrInstrTypeMask = 0xffff0000;
263 const Instr kLdrStrInstrArgumentMask = 0x0000ffff; 263 const Instr kLdrStrInstrArgumentMask = 0x0000ffff;
264 const Instr kLdrStrOffsetMask = 0x00000fff; 264 const Instr kLdrStrOffsetMask = 0x00000fff;
265 265
266 266
267 // Spare buffer. 267 // Spare buffer.
268 static const int kMinimalBufferSize = 4*KB; 268 static const int kMinimalBufferSize = 4*KB;
269 static byte* spare_buffer_ = NULL;
270 269
271 270
272 Assembler::Assembler(void* buffer, int buffer_size) 271 Assembler::Assembler(void* buffer, int buffer_size)
273 : positions_recorder_(this), 272 : positions_recorder_(this),
274 allow_peephole_optimization_(false), 273 allow_peephole_optimization_(false),
275 emit_debug_code_(FLAG_debug_code) { 274 emit_debug_code_(FLAG_debug_code) {
275 Isolate* isolate = Isolate::Current();
276 allow_peephole_optimization_ = FLAG_peephole_optimization; 276 allow_peephole_optimization_ = FLAG_peephole_optimization;
277 if (buffer == NULL) { 277 if (buffer == NULL) {
278 // Do our own buffer management. 278 // Do our own buffer management.
279 if (buffer_size <= kMinimalBufferSize) { 279 if (buffer_size <= kMinimalBufferSize) {
280 buffer_size = kMinimalBufferSize; 280 buffer_size = kMinimalBufferSize;
281 281
282 if (spare_buffer_ != NULL) { 282 if (isolate->assembler_spare_buffer() != NULL) {
283 buffer = spare_buffer_; 283 buffer = isolate->assembler_spare_buffer();
284 spare_buffer_ = NULL; 284 isolate->set_assembler_spare_buffer(NULL);
285 } 285 }
286 } 286 }
287 if (buffer == NULL) { 287 if (buffer == NULL) {
288 buffer_ = NewArray<byte>(buffer_size); 288 buffer_ = NewArray<byte>(buffer_size);
289 } else { 289 } else {
290 buffer_ = static_cast<byte*>(buffer); 290 buffer_ = static_cast<byte*>(buffer);
291 } 291 }
292 buffer_size_ = buffer_size; 292 buffer_size_ = buffer_size;
293 own_buffer_ = true; 293 own_buffer_ = true;
294 294
(...skipping 12 matching lines...)
307 num_prinfo_ = 0; 307 num_prinfo_ = 0;
308 next_buffer_check_ = 0; 308 next_buffer_check_ = 0;
309 const_pool_blocked_nesting_ = 0; 309 const_pool_blocked_nesting_ = 0;
310 no_const_pool_before_ = 0; 310 no_const_pool_before_ = 0;
311 last_const_pool_end_ = 0; 311 last_const_pool_end_ = 0;
312 last_bound_pos_ = 0; 312 last_bound_pos_ = 0;
313 } 313 }
314 314
315 315
316 Assembler::~Assembler() { 316 Assembler::~Assembler() {
317 Isolate* isolate = Isolate::Current();
317 ASSERT(const_pool_blocked_nesting_ == 0); 318 ASSERT(const_pool_blocked_nesting_ == 0);
318 if (own_buffer_) { 319 if (own_buffer_) {
319 if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) { 320 if (isolate->assembler_spare_buffer() == NULL &&
320 spare_buffer_ = buffer_; 321 buffer_size_ == kMinimalBufferSize) {
322 isolate->set_assembler_spare_buffer(buffer_);
321 } else { 323 } else {
322 DeleteArray(buffer_); 324 DeleteArray(buffer_);
323 } 325 }
324 } 326 }
325 } 327 }
326 328
327 329
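The constructor and destructor above stop using the file-static `spare_buffer_` (its declaration is deleted in this patch) and instead park the spare minimal-size buffer on the Isolate. A minimal sketch of the accessor pair this relies on, assuming declarations along these lines in isolate.h:

```cpp
#include <cstddef>

typedef unsigned char byte;  // matches V8's byte typedef

// Assumed shape of the Isolate accessors used above; the real declarations
// live in isolate.h. Each isolate caches at most one spare buffer of
// kMinimalBufferSize instead of sharing a process-wide static pointer.
class Isolate {
 public:
  Isolate() : assembler_spare_buffer_(NULL) {}

  byte* assembler_spare_buffer() const { return assembler_spare_buffer_; }
  void set_assembler_spare_buffer(byte* buffer) {
    assembler_spare_buffer_ = buffer;
  }

 private:
  byte* assembler_spare_buffer_;  // NULL when no buffer is cached
};
```

The recycling logic itself is unchanged: a minimal-size buffer freed by ~Assembler is kept for the next Assembler rather than deleted, but the cache is now per isolate rather than process-wide.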
328 void Assembler::GetCode(CodeDesc* desc) { 330 void Assembler::GetCode(CodeDesc* desc) {
329 // Emit constant pool if necessary. 331 // Emit constant pool if necessary.
330 CheckConstPool(true, false); 332 CheckConstPool(true, false);
(...skipping 376 matching lines...)
707 } 709 }
708 } 710 }
709 // If the opcode is one with a complementary version and the complementary 711 // If the opcode is one with a complementary version and the complementary
710 // immediate fits, change the opcode. 712 // immediate fits, change the opcode.
711 if (instr != NULL) { 713 if (instr != NULL) {
712 if ((*instr & kMovMvnMask) == kMovMvnPattern) { 714 if ((*instr & kMovMvnMask) == kMovMvnPattern) {
713 if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) { 715 if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
714 *instr ^= kMovMvnFlip; 716 *instr ^= kMovMvnFlip;
715 return true; 717 return true;
716 } else if ((*instr & kMovLeaveCCMask) == kMovLeaveCCPattern) { 718 } else if ((*instr & kMovLeaveCCMask) == kMovLeaveCCPattern) {
717 if (CpuFeatures::IsSupported(ARMv7)) { 719 if (Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
718 if (imm32 < 0x10000) { 720 if (imm32 < 0x10000) {
719 *instr ^= kMovwLeaveCCFlip; 721 *instr ^= kMovwLeaveCCFlip;
720 *instr |= EncodeMovwImmediate(imm32); 722 *instr |= EncodeMovwImmediate(imm32);
721 *rotate_imm = *immed_8 = 0; // Not used for movw. 723 *rotate_imm = *immed_8 = 0; // Not used for movw.
722 return true; 724 return true;
723 } 725 }
724 } 726 }
725 } 727 }
726 } else if ((*instr & kCmpCmnMask) == kCmpCmnPattern) { 728 } else if ((*instr & kCmpCmnMask) == kCmpCmnPattern) {
727 if (fits_shifter(-imm32, rotate_imm, immed_8, NULL)) { 729 if (fits_shifter(-imm32, rotate_imm, immed_8, NULL)) {
(...skipping 42 matching lines...)
770 772
771 bool Operand::is_single_instruction(Instr instr) const { 773 bool Operand::is_single_instruction(Instr instr) const {
772 if (rm_.is_valid()) return true; 774 if (rm_.is_valid()) return true;
773 uint32_t dummy1, dummy2; 775 uint32_t dummy1, dummy2;
774 if (must_use_constant_pool() || 776 if (must_use_constant_pool() ||
775 !fits_shifter(imm32_, &dummy1, &dummy2, &instr)) { 777 !fits_shifter(imm32_, &dummy1, &dummy2, &instr)) {
776 // The immediate operand cannot be encoded as a shifter operand, or use of 778 // The immediate operand cannot be encoded as a shifter operand, or use of
777 // constant pool is required. For a mov instruction not setting the 779 // constant pool is required. For a mov instruction not setting the
778 // condition code additional instruction conventions can be used. 780 // condition code additional instruction conventions can be used.
779 if ((instr & ~kCondMask) == 13*B21) { // mov, S not set 781 if ((instr & ~kCondMask) == 13*B21) { // mov, S not set
780 if (must_use_constant_pool() || !CpuFeatures::IsSupported(ARMv7)) { 782 if (must_use_constant_pool() ||
783 !Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
781 // mov instruction will be an ldr from constant pool (one instruction). 784 // mov instruction will be an ldr from constant pool (one instruction).
782 return true; 785 return true;
783 } else { 786 } else {
784 // mov instruction will be a mov or movw followed by movt (two 787 // mov instruction will be a mov or movw followed by movt (two
785 // instructions). 788 // instructions).
786 return false; 789 return false;
787 } 790 }
788 } else { 791 } else {
789 // If this is not a mov or mvn instruction there will always an additional 792 // If this is not a mov or mvn instruction there will always an additional
790 // instructions - either mov or ldr. The mov might actually be two 793 // instructions - either mov or ldr. The mov might actually be two
(...skipping 21 matching lines...)
812 uint32_t immed_8; 815 uint32_t immed_8;
813 if (x.must_use_constant_pool() || 816 if (x.must_use_constant_pool() ||
814 !fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) { 817 !fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) {
815 // The immediate operand cannot be encoded as a shifter operand, so load 818 // The immediate operand cannot be encoded as a shifter operand, so load
816 // it first to register ip and change the original instruction to use ip. 819 // it first to register ip and change the original instruction to use ip.
817 // However, if the original instruction is a 'mov rd, x' (not setting the 820 // However, if the original instruction is a 'mov rd, x' (not setting the
818 // condition code), then replace it with a 'ldr rd, [pc]'. 821 // condition code), then replace it with a 'ldr rd, [pc]'.
819 CHECK(!rn.is(ip)); // rn should never be ip, or will be trashed 822 CHECK(!rn.is(ip)); // rn should never be ip, or will be trashed
820 Condition cond = Instruction::ConditionField(instr); 823 Condition cond = Instruction::ConditionField(instr);
821 if ((instr & ~kCondMask) == 13*B21) { // mov, S not set 824 if ((instr & ~kCondMask) == 13*B21) { // mov, S not set
822 if (x.must_use_constant_pool() || !CpuFeatures::IsSupported(ARMv7)) { 825 if (x.must_use_constant_pool() ||
826 !Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
823 RecordRelocInfo(x.rmode_, x.imm32_); 827 RecordRelocInfo(x.rmode_, x.imm32_);
824 ldr(rd, MemOperand(pc, 0), cond); 828 ldr(rd, MemOperand(pc, 0), cond);
825 } else { 829 } else {
826 // Will probably use movw, will certainly not use constant pool. 830 // Will probably use movw, will certainly not use constant pool.
827 mov(rd, Operand(x.imm32_ & 0xffff), LeaveCC, cond); 831 mov(rd, Operand(x.imm32_ & 0xffff), LeaveCC, cond);
828 movt(rd, static_cast<uint32_t>(x.imm32_) >> 16, cond); 832 movt(rd, static_cast<uint32_t>(x.imm32_) >> 16, cond);
829 } 833 }
830 } else { 834 } else {
831 // If this is not a mov or mvn instruction we may still be able to avoid 835 // If this is not a mov or mvn instruction we may still be able to avoid
832 // a constant pool entry by using mvn or movw. 836 // a constant pool entry by using mvn or movw.
(...skipping 422 matching lines...)
1255 1259
1256 1260
1257 // Saturating instructions. 1261 // Saturating instructions.
1258 1262
1259 // Unsigned saturate. 1263 // Unsigned saturate.
1260 void Assembler::usat(Register dst, 1264 void Assembler::usat(Register dst,
1261 int satpos, 1265 int satpos,
1262 const Operand& src, 1266 const Operand& src,
1263 Condition cond) { 1267 Condition cond) {
1264 // v6 and above. 1268 // v6 and above.
1265 ASSERT(CpuFeatures::IsSupported(ARMv7)); 1269 ASSERT(Isolate::Current()->cpu_features()->IsSupported(ARMv7));
1266 ASSERT(!dst.is(pc) && !src.rm_.is(pc)); 1270 ASSERT(!dst.is(pc) && !src.rm_.is(pc));
1267 ASSERT((satpos >= 0) && (satpos <= 31)); 1271 ASSERT((satpos >= 0) && (satpos <= 31));
1268 ASSERT((src.shift_op_ == ASR) || (src.shift_op_ == LSL)); 1272 ASSERT((src.shift_op_ == ASR) || (src.shift_op_ == LSL));
1269 ASSERT(src.rs_.is(no_reg)); 1273 ASSERT(src.rs_.is(no_reg));
1270 1274
1271 int sh = 0; 1275 int sh = 0;
1272 if (src.shift_op_ == ASR) { 1276 if (src.shift_op_ == ASR) {
1273 sh = 1; 1277 sh = 1;
1274 } 1278 }
1275 1279
1276 emit(cond | 0x6*B24 | 0xe*B20 | satpos*B16 | dst.code()*B12 | 1280 emit(cond | 0x6*B24 | 0xe*B20 | satpos*B16 | dst.code()*B12 |
1277 src.shift_imm_*B7 | sh*B6 | 0x1*B4 | src.rm_.code()); 1281 src.shift_imm_*B7 | sh*B6 | 0x1*B4 | src.rm_.code());
1278 } 1282 }
1279 1283
1280 1284
1281 // Bitfield manipulation instructions. 1285 // Bitfield manipulation instructions.
1282 1286
1283 // Unsigned bit field extract. 1287 // Unsigned bit field extract.
1284 // Extracts #width adjacent bits from position #lsb in a register, and 1288 // Extracts #width adjacent bits from position #lsb in a register, and
1285 // writes them to the low bits of a destination register. 1289 // writes them to the low bits of a destination register.
1286 // ubfx dst, src, #lsb, #width 1290 // ubfx dst, src, #lsb, #width
1287 void Assembler::ubfx(Register dst, 1291 void Assembler::ubfx(Register dst,
1288 Register src, 1292 Register src,
1289 int lsb, 1293 int lsb,
1290 int width, 1294 int width,
1291 Condition cond) { 1295 Condition cond) {
1292 // v7 and above. 1296 // v7 and above.
1293 ASSERT(CpuFeatures::IsSupported(ARMv7)); 1297 ASSERT(Isolate::Current()->cpu_features()->IsSupported(ARMv7));
1294 ASSERT(!dst.is(pc) && !src.is(pc)); 1298 ASSERT(!dst.is(pc) && !src.is(pc));
1295 ASSERT((lsb >= 0) && (lsb <= 31)); 1299 ASSERT((lsb >= 0) && (lsb <= 31));
1296 ASSERT((width >= 1) && (width <= (32 - lsb))); 1300 ASSERT((width >= 1) && (width <= (32 - lsb)));
1297 emit(cond | 0xf*B23 | B22 | B21 | (width - 1)*B16 | dst.code()*B12 | 1301 emit(cond | 0xf*B23 | B22 | B21 | (width - 1)*B16 | dst.code()*B12 |
1298 lsb*B7 | B6 | B4 | src.code()); 1302 lsb*B7 | B6 | B4 | src.code());
1299 } 1303 }
1300 1304
1301 1305
1302 // Signed bit field extract. 1306 // Signed bit field extract.
1303 // Extracts #width adjacent bits from position #lsb in a register, and 1307 // Extracts #width adjacent bits from position #lsb in a register, and
1304 // writes them to the low bits of a destination register. The extracted 1308 // writes them to the low bits of a destination register. The extracted
1305 // value is sign extended to fill the destination register. 1309 // value is sign extended to fill the destination register.
1306 // sbfx dst, src, #lsb, #width 1310 // sbfx dst, src, #lsb, #width
1307 void Assembler::sbfx(Register dst, 1311 void Assembler::sbfx(Register dst,
1308 Register src, 1312 Register src,
1309 int lsb, 1313 int lsb,
1310 int width, 1314 int width,
1311 Condition cond) { 1315 Condition cond) {
1312 // v7 and above. 1316 // v7 and above.
1313 ASSERT(CpuFeatures::IsSupported(ARMv7)); 1317 ASSERT(Isolate::Current()->cpu_features()->IsSupported(ARMv7));
1314 ASSERT(!dst.is(pc) && !src.is(pc)); 1318 ASSERT(!dst.is(pc) && !src.is(pc));
1315 ASSERT((lsb >= 0) && (lsb <= 31)); 1319 ASSERT((lsb >= 0) && (lsb <= 31));
1316 ASSERT((width >= 1) && (width <= (32 - lsb))); 1320 ASSERT((width >= 1) && (width <= (32 - lsb)));
1317 emit(cond | 0xf*B23 | B21 | (width - 1)*B16 | dst.code()*B12 | 1321 emit(cond | 0xf*B23 | B21 | (width - 1)*B16 | dst.code()*B12 |
1318 lsb*B7 | B6 | B4 | src.code()); 1322 lsb*B7 | B6 | B4 | src.code());
1319 } 1323 }
1320 1324
1321 1325
1322 // Bit field clear. 1326 // Bit field clear.
1323 // Sets #width adjacent bits at position #lsb in the destination register 1327 // Sets #width adjacent bits at position #lsb in the destination register
1324 // to zero, preserving the value of the other bits. 1328 // to zero, preserving the value of the other bits.
1325 // bfc dst, #lsb, #width 1329 // bfc dst, #lsb, #width
1326 void Assembler::bfc(Register dst, int lsb, int width, Condition cond) { 1330 void Assembler::bfc(Register dst, int lsb, int width, Condition cond) {
1327 // v7 and above. 1331 // v7 and above.
1328 ASSERT(CpuFeatures::IsSupported(ARMv7)); 1332 ASSERT(Isolate::Current()->cpu_features()->IsSupported(ARMv7));
1329 ASSERT(!dst.is(pc)); 1333 ASSERT(!dst.is(pc));
1330 ASSERT((lsb >= 0) && (lsb <= 31)); 1334 ASSERT((lsb >= 0) && (lsb <= 31));
1331 ASSERT((width >= 1) && (width <= (32 - lsb))); 1335 ASSERT((width >= 1) && (width <= (32 - lsb)));
1332 int msb = lsb + width - 1; 1336 int msb = lsb + width - 1;
1333 emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 | 0xf); 1337 emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 | 0xf);
1334 } 1338 }
1335 1339
1336 1340
1337 // Bit field insert. 1341 // Bit field insert.
1338 // Inserts #width adjacent bits from the low bits of the source register 1342 // Inserts #width adjacent bits from the low bits of the source register
1339 // into position #lsb of the destination register. 1343 // into position #lsb of the destination register.
1340 // bfi dst, src, #lsb, #width 1344 // bfi dst, src, #lsb, #width
1341 void Assembler::bfi(Register dst, 1345 void Assembler::bfi(Register dst,
1342 Register src, 1346 Register src,
1343 int lsb, 1347 int lsb,
1344 int width, 1348 int width,
1345 Condition cond) { 1349 Condition cond) {
1346 // v7 and above. 1350 // v7 and above.
1347 ASSERT(CpuFeatures::IsSupported(ARMv7)); 1351 ASSERT(Isolate::Current()->cpu_features()->IsSupported(ARMv7));
1348 ASSERT(!dst.is(pc) && !src.is(pc)); 1352 ASSERT(!dst.is(pc) && !src.is(pc));
1349 ASSERT((lsb >= 0) && (lsb <= 31)); 1353 ASSERT((lsb >= 0) && (lsb <= 31));
1350 ASSERT((width >= 1) && (width <= (32 - lsb))); 1354 ASSERT((width >= 1) && (width <= (32 - lsb)));
1351 int msb = lsb + width - 1; 1355 int msb = lsb + width - 1;
1352 emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 | 1356 emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 |
1353 src.code()); 1357 src.code());
1354 } 1358 }
1355 1359
1356 1360
1357 // Status register access instructions. 1361 // Status register access instructions.
(...skipping 251 matching lines...)
1609 } 1613 }
1610 1614
1611 1615
1612 void Assembler::ldrsh(Register dst, const MemOperand& src, Condition cond) { 1616 void Assembler::ldrsh(Register dst, const MemOperand& src, Condition cond) {
1613 addrmod3(cond | L | B7 | S6 | H | B4, dst, src); 1617 addrmod3(cond | L | B7 | S6 | H | B4, dst, src);
1614 } 1618 }
1615 1619
1616 1620
1617 void Assembler::ldrd(Register dst1, Register dst2, 1621 void Assembler::ldrd(Register dst1, Register dst2,
1618 const MemOperand& src, Condition cond) { 1622 const MemOperand& src, Condition cond) {
1619 ASSERT(CpuFeatures::IsEnabled(ARMv7)); 1623 ASSERT(Isolate::Current()->cpu_features()->IsEnabled(ARMv7));
1620 ASSERT(src.rm().is(no_reg)); 1624 ASSERT(src.rm().is(no_reg));
1621 ASSERT(!dst1.is(lr)); // r14. 1625 ASSERT(!dst1.is(lr)); // r14.
1622 ASSERT_EQ(0, dst1.code() % 2); 1626 ASSERT_EQ(0, dst1.code() % 2);
1623 ASSERT_EQ(dst1.code() + 1, dst2.code()); 1627 ASSERT_EQ(dst1.code() + 1, dst2.code());
1624 addrmod3(cond | B7 | B6 | B4, dst1, src); 1628 addrmod3(cond | B7 | B6 | B4, dst1, src);
1625 } 1629 }
1626 1630
1627 1631
1628 void Assembler::strd(Register src1, Register src2, 1632 void Assembler::strd(Register src1, Register src2,
1629 const MemOperand& dst, Condition cond) { 1633 const MemOperand& dst, Condition cond) {
1630 ASSERT(dst.rm().is(no_reg)); 1634 ASSERT(dst.rm().is(no_reg));
1631 ASSERT(!src1.is(lr)); // r14. 1635 ASSERT(!src1.is(lr)); // r14.
1632 ASSERT_EQ(0, src1.code() % 2); 1636 ASSERT_EQ(0, src1.code() % 2);
1633 ASSERT_EQ(src1.code() + 1, src2.code()); 1637 ASSERT_EQ(src1.code() + 1, src2.code());
1634 ASSERT(CpuFeatures::IsEnabled(ARMv7)); 1638 ASSERT(Isolate::Current()->cpu_features()->IsEnabled(ARMv7));
1635 addrmod3(cond | B7 | B6 | B5 | B4, src1, dst); 1639 addrmod3(cond | B7 | B6 | B5 | B4, src1, dst);
1636 } 1640 }
1637 1641
1638 // Load/Store multiple instructions. 1642 // Load/Store multiple instructions.
1639 void Assembler::ldm(BlockAddrMode am, 1643 void Assembler::ldm(BlockAddrMode am,
1640 Register base, 1644 Register base,
1641 RegList dst, 1645 RegList dst,
1642 Condition cond) { 1646 Condition cond) {
1643 // ABI stack constraint: ldmxx base, {..sp..} base != sp is not restartable. 1647 // ABI stack constraint: ldmxx base, {..sp..} base != sp is not restartable.
1644 ASSERT(base.is(sp) || (dst & sp.bit()) == 0); 1648 ASSERT(base.is(sp) || (dst & sp.bit()) == 0);
(...skipping 215 matching lines...)
1860 // Support for VFP. 1864 // Support for VFP.
1861 1865
1862 void Assembler::vldr(const DwVfpRegister dst, 1866 void Assembler::vldr(const DwVfpRegister dst,
1863 const Register base, 1867 const Register base,
1864 int offset, 1868 int offset,
1865 const Condition cond) { 1869 const Condition cond) {
1866 // Ddst = MEM(Rbase + offset). 1870 // Ddst = MEM(Rbase + offset).
1867 // Instruction details available in ARM DDI 0406A, A8-628. 1871 // Instruction details available in ARM DDI 0406A, A8-628.
1868 // cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) | 1872 // cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) |
1869 // Vdst(15-12) | 1011(11-8) | offset 1873 // Vdst(15-12) | 1011(11-8) | offset
1870 ASSERT(CpuFeatures::IsEnabled(VFP3)); 1874 ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3));
1871 int u = 1; 1875 int u = 1;
1872 if (offset < 0) { 1876 if (offset < 0) {
1873 offset = -offset; 1877 offset = -offset;
1874 u = 0; 1878 u = 0;
1875 } 1879 }
1876 1880
1877 ASSERT(offset >= 0); 1881 ASSERT(offset >= 0);
1878 if ((offset % 4) == 0 && (offset / 4) < 256) { 1882 if ((offset % 4) == 0 && (offset / 4) < 256) {
1879 emit(cond | u*B23 | 0xD1*B20 | base.code()*B16 | dst.code()*B12 | 1883 emit(cond | u*B23 | 0xD1*B20 | base.code()*B16 | dst.code()*B12 |
1880 0xB*B8 | ((offset / 4) & 255)); 1884 0xB*B8 | ((offset / 4) & 255));
(...skipping 21 matching lines...)
1902 1906
1903 1907
1904 void Assembler::vldr(const SwVfpRegister dst, 1908 void Assembler::vldr(const SwVfpRegister dst,
1905 const Register base, 1909 const Register base,
1906 int offset, 1910 int offset,
1907 const Condition cond) { 1911 const Condition cond) {
1908 // Sdst = MEM(Rbase + offset). 1912 // Sdst = MEM(Rbase + offset).
1909 // Instruction details available in ARM DDI 0406A, A8-628. 1913 // Instruction details available in ARM DDI 0406A, A8-628.
1910 // cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) | 1914 // cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) |
1911 // Vdst(15-12) | 1010(11-8) | offset 1915 // Vdst(15-12) | 1010(11-8) | offset
1912 ASSERT(CpuFeatures::IsEnabled(VFP3)); 1916 ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3));
1913 int u = 1; 1917 int u = 1;
1914 if (offset < 0) { 1918 if (offset < 0) {
1915 offset = -offset; 1919 offset = -offset;
1916 u = 0; 1920 u = 0;
1917 } 1921 }
1918 int sd, d; 1922 int sd, d;
1919 dst.split_code(&sd, &d); 1923 dst.split_code(&sd, &d);
1920 ASSERT(offset >= 0); 1924 ASSERT(offset >= 0);
1921 1925
1922 if ((offset % 4) == 0 && (offset / 4) < 256) { 1926 if ((offset % 4) == 0 && (offset / 4) < 256) {
(...skipping 23 matching lines...)
1946 1950
1947 1951
1948 void Assembler::vstr(const DwVfpRegister src, 1952 void Assembler::vstr(const DwVfpRegister src,
1949 const Register base, 1953 const Register base,
1950 int offset, 1954 int offset,
1951 const Condition cond) { 1955 const Condition cond) {
1952 // MEM(Rbase + offset) = Dsrc. 1956 // MEM(Rbase + offset) = Dsrc.
1953 // Instruction details available in ARM DDI 0406A, A8-786. 1957 // Instruction details available in ARM DDI 0406A, A8-786.
1954 // cond(31-28) | 1101(27-24)| U000(23-20) | | Rbase(19-16) | 1958 // cond(31-28) | 1101(27-24)| U000(23-20) | | Rbase(19-16) |
1955 // Vsrc(15-12) | 1011(11-8) | (offset/4) 1959 // Vsrc(15-12) | 1011(11-8) | (offset/4)
1956 ASSERT(CpuFeatures::IsEnabled(VFP3)); 1960 ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3));
1957 int u = 1; 1961 int u = 1;
1958 if (offset < 0) { 1962 if (offset < 0) {
1959 offset = -offset; 1963 offset = -offset;
1960 u = 0; 1964 u = 0;
1961 } 1965 }
1962 ASSERT(offset >= 0); 1966 ASSERT(offset >= 0);
1963 if ((offset % 4) == 0 && (offset / 4) < 256) { 1967 if ((offset % 4) == 0 && (offset / 4) < 256) {
1964 emit(cond | u*B23 | 0xD0*B20 | base.code()*B16 | src.code()*B12 | 1968 emit(cond | u*B23 | 0xD0*B20 | base.code()*B16 | src.code()*B12 |
1965 0xB*B8 | ((offset / 4) & 255)); 1969 0xB*B8 | ((offset / 4) & 255));
1966 } else { 1970 } else {
(...skipping 20 matching lines...)
1987 1991
1988 1992
1989 void Assembler::vstr(const SwVfpRegister src, 1993 void Assembler::vstr(const SwVfpRegister src,
1990 const Register base, 1994 const Register base,
1991 int offset, 1995 int offset,
1992 const Condition cond) { 1996 const Condition cond) {
1993 // MEM(Rbase + offset) = SSrc. 1997 // MEM(Rbase + offset) = SSrc.
1994 // Instruction details available in ARM DDI 0406A, A8-786. 1998 // Instruction details available in ARM DDI 0406A, A8-786.
1995 // cond(31-28) | 1101(27-24)| U000(23-20) | Rbase(19-16) | 1999 // cond(31-28) | 1101(27-24)| U000(23-20) | Rbase(19-16) |
1996 // Vdst(15-12) | 1010(11-8) | (offset/4) 2000 // Vdst(15-12) | 1010(11-8) | (offset/4)
1997 ASSERT(CpuFeatures::IsEnabled(VFP3)); 2001 ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3));
1998 int u = 1; 2002 int u = 1;
1999 if (offset < 0) { 2003 if (offset < 0) {
2000 offset = -offset; 2004 offset = -offset;
2001 u = 0; 2005 u = 0;
2002 } 2006 }
2003 int sd, d; 2007 int sd, d;
2004 src.split_code(&sd, &d); 2008 src.split_code(&sd, &d);
2005 ASSERT(offset >= 0); 2009 ASSERT(offset >= 0);
2006 if ((offset % 4) == 0 && (offset / 4) < 256) { 2010 if ((offset % 4) == 0 && (offset / 4) < 256) {
2007 emit(cond | u*B23 | d*B22 | 0xD0*B20 | base.code()*B16 | sd*B12 | 2011 emit(cond | u*B23 | d*B22 | 0xD0*B20 | base.code()*B16 | sd*B12 |
(...skipping 25 matching lines...)
2033 uint64_t i; 2037 uint64_t i;
2034 memcpy(&i, &d, 8); 2038 memcpy(&i, &d, 8);
2035 2039
2036 *lo = i & 0xffffffff; 2040 *lo = i & 0xffffffff;
2037 *hi = i >> 32; 2041 *hi = i >> 32;
2038 } 2042 }
2039 2043
2040 // Only works for little endian floating point formats. 2044 // Only works for little endian floating point formats.
2041 // We don't support VFP on the mixed endian floating point platform. 2045 // We don't support VFP on the mixed endian floating point platform.
2042 static bool FitsVMOVDoubleImmediate(double d, uint32_t *encoding) { 2046 static bool FitsVMOVDoubleImmediate(double d, uint32_t *encoding) {
2043 ASSERT(CpuFeatures::IsEnabled(VFP3)); 2047 ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3));
2044 2048
2045 // VMOV can accept an immediate of the form: 2049 // VMOV can accept an immediate of the form:
2046 // 2050 //
2047 // +/- m * 2^(-n) where 16 <= m <= 31 and 0 <= n <= 7 2051 // +/- m * 2^(-n) where 16 <= m <= 31 and 0 <= n <= 7
2048 // 2052 //
2049 // The immediate is encoded using an 8-bit quantity, comprised of two 2053 // The immediate is encoded using an 8-bit quantity, comprised of two
2050 // 4-bit fields. For an 8-bit immediate of the form: 2054 // 4-bit fields. For an 8-bit immediate of the form:
2051 // 2055 //
2052 // [abcdefgh] 2056 // [abcdefgh]
2053 // 2057 //
(...skipping 32 matching lines...)
2086 2090
2087 return true; 2091 return true;
2088 } 2092 }
2089 2093
2090 2094
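`FitsVMOVDoubleImmediate` above (partially elided) decides whether a double can be encoded directly in the vmov instruction, i.e. whether it has the form +/- m * 2^(-n) with 16 <= m <= 31 and 0 <= n <= 7. A standalone sketch of just that form check, using a hypothetical helper name and omitting the 8-bit [abcdefgh] encoding the real function also produces:

```cpp
// Sketch: does |d| equal m * 2^-n with 16 <= m <= 31 and 0 <= n <= 7?
// (Hypothetical helper, not V8's FitsVMOVDoubleImmediate.)
#include <cmath>
#include <cstdio>

static bool FitsVmovImmediateForm(double d) {
  d = std::fabs(d);
  for (int n = 0; n <= 7; ++n) {
    double m = d * std::ldexp(1.0, n);  // d * 2^n
    if (m >= 16.0 && m <= 31.0 && m == std::floor(m)) return true;
  }
  return false;
}

int main() {
  std::printf("1.0   -> %d\n", FitsVmovImmediateForm(1.0));    // 16 * 2^-4
  std::printf("0.5   -> %d\n", FitsVmovImmediateForm(0.5));    // 16 * 2^-5
  std::printf("3.875 -> %d\n", FitsVmovImmediateForm(3.875));  // 31 * 2^-3
  std::printf("0.3   -> %d\n", FitsVmovImmediateForm(0.3));    // does not fit
  return 0;
}
```

Doubles that do not fit fall through to the else branch of vmov below, which splits the value with DoubleAsTwoUInt32 and synthesises it from ARM immediates (or, per the comment, could instead load it with vldr from the constant pool).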
2091 void Assembler::vmov(const DwVfpRegister dst, 2095 void Assembler::vmov(const DwVfpRegister dst,
2092 double imm, 2096 double imm,
2093 const Condition cond) { 2097 const Condition cond) {
2094 // Dd = immediate 2098 // Dd = immediate
2095 // Instruction details available in ARM DDI 0406B, A8-640. 2099 // Instruction details available in ARM DDI 0406B, A8-640.
2096 ASSERT(CpuFeatures::IsEnabled(VFP3)); 2100 ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3));
2097 2101
2098 uint32_t enc; 2102 uint32_t enc;
2099 if (FitsVMOVDoubleImmediate(imm, &enc)) { 2103 if (FitsVMOVDoubleImmediate(imm, &enc)) {
2100 // The double can be encoded in the instruction. 2104 // The double can be encoded in the instruction.
2101 emit(cond | 0xE*B24 | 0xB*B20 | dst.code()*B12 | 0xB*B8 | enc); 2105 emit(cond | 0xE*B24 | 0xB*B20 | dst.code()*B12 | 0xB*B8 | enc);
2102 } else { 2106 } else {
2103 // Synthesise the double from ARM immediates. This could be implemented 2107 // Synthesise the double from ARM immediates. This could be implemented
2104 // using vldr from a constant pool. 2108 // using vldr from a constant pool.
2105 uint32_t lo, hi; 2109 uint32_t lo, hi;
2106 DoubleAsTwoUInt32(imm, &lo, &hi); 2110 DoubleAsTwoUInt32(imm, &lo, &hi);
(...skipping 16 matching lines...)
2123 } 2127 }
2124 } 2128 }
2125 } 2129 }
2126 2130
2127 2131
2128 void Assembler::vmov(const SwVfpRegister dst, 2132 void Assembler::vmov(const SwVfpRegister dst,
2129 const SwVfpRegister src, 2133 const SwVfpRegister src,
2130 const Condition cond) { 2134 const Condition cond) {
2131 // Sd = Sm 2135 // Sd = Sm
2132 // Instruction details available in ARM DDI 0406B, A8-642. 2136 // Instruction details available in ARM DDI 0406B, A8-642.
2133 ASSERT(CpuFeatures::IsEnabled(VFP3)); 2137 ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3));
2134 int sd, d, sm, m; 2138 int sd, d, sm, m;
2135 dst.split_code(&sd, &d); 2139 dst.split_code(&sd, &d);
2136 src.split_code(&sm, &m); 2140 src.split_code(&sm, &m);
2137 emit(cond | 0xE*B24 | d*B22 | 0xB*B20 | sd*B12 | 0xA*B8 | B6 | m*B5 | sm); 2141 emit(cond | 0xE*B24 | d*B22 | 0xB*B20 | sd*B12 | 0xA*B8 | B6 | m*B5 | sm);
2138 } 2142 }
2139 2143
2140 2144
2141 void Assembler::vmov(const DwVfpRegister dst, 2145 void Assembler::vmov(const DwVfpRegister dst,
2142 const DwVfpRegister src, 2146 const DwVfpRegister src,
2143 const Condition cond) { 2147 const Condition cond) {
2144 // Dd = Dm 2148 // Dd = Dm
2145 // Instruction details available in ARM DDI 0406B, A8-642. 2149 // Instruction details available in ARM DDI 0406B, A8-642.
2146 ASSERT(CpuFeatures::IsEnabled(VFP3)); 2150 ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3));
2147 emit(cond | 0xE*B24 | 0xB*B20 | 2151 emit(cond | 0xE*B24 | 0xB*B20 |
2148 dst.code()*B12 | 0x5*B9 | B8 | B6 | src.code()); 2152 dst.code()*B12 | 0x5*B9 | B8 | B6 | src.code());
2149 } 2153 }
2150 2154
2151 2155
2152 void Assembler::vmov(const DwVfpRegister dst, 2156 void Assembler::vmov(const DwVfpRegister dst,
2153 const Register src1, 2157 const Register src1,
2154 const Register src2, 2158 const Register src2,
2155 const Condition cond) { 2159 const Condition cond) {
2156 // Dm = <Rt,Rt2>. 2160 // Dm = <Rt,Rt2>.
2157 // Instruction details available in ARM DDI 0406A, A8-646. 2161 // Instruction details available in ARM DDI 0406A, A8-646.
2158 // cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) | 2162 // cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) |
2159 // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm 2163 // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
2160 ASSERT(CpuFeatures::IsEnabled(VFP3)); 2164 ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3));
2161 ASSERT(!src1.is(pc) && !src2.is(pc)); 2165 ASSERT(!src1.is(pc) && !src2.is(pc));
2162 emit(cond | 0xC*B24 | B22 | src2.code()*B16 | 2166 emit(cond | 0xC*B24 | B22 | src2.code()*B16 |
2163 src1.code()*B12 | 0xB*B8 | B4 | dst.code()); 2167 src1.code()*B12 | 0xB*B8 | B4 | dst.code());
2164 } 2168 }
2165 2169
2166 2170
2167 void Assembler::vmov(const Register dst1, 2171 void Assembler::vmov(const Register dst1,
2168 const Register dst2, 2172 const Register dst2,
2169 const DwVfpRegister src, 2173 const DwVfpRegister src,
2170 const Condition cond) { 2174 const Condition cond) {
2171 // <Rt,Rt2> = Dm. 2175 // <Rt,Rt2> = Dm.
2172 // Instruction details available in ARM DDI 0406A, A8-646. 2176 // Instruction details available in ARM DDI 0406A, A8-646.
2173 // cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) | 2177 // cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) |
2174 // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm 2178 // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
2175 ASSERT(CpuFeatures::IsEnabled(VFP3)); 2179 ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3));
2176 ASSERT(!dst1.is(pc) && !dst2.is(pc)); 2180 ASSERT(!dst1.is(pc) && !dst2.is(pc));
2177 emit(cond | 0xC*B24 | B22 | B20 | dst2.code()*B16 | 2181 emit(cond | 0xC*B24 | B22 | B20 | dst2.code()*B16 |
2178 dst1.code()*B12 | 0xB*B8 | B4 | src.code()); 2182 dst1.code()*B12 | 0xB*B8 | B4 | src.code());
2179 } 2183 }
2180 2184
2181 2185
2182 void Assembler::vmov(const SwVfpRegister dst, 2186 void Assembler::vmov(const SwVfpRegister dst,
2183 const Register src, 2187 const Register src,
2184 const Condition cond) { 2188 const Condition cond) {
2185 // Sn = Rt. 2189 // Sn = Rt.
2186 // Instruction details available in ARM DDI 0406A, A8-642. 2190 // Instruction details available in ARM DDI 0406A, A8-642.
2187 // cond(31-28) | 1110(27-24)| 000(23-21) | op=0(20) | Vn(19-16) | 2191 // cond(31-28) | 1110(27-24)| 000(23-21) | op=0(20) | Vn(19-16) |
2188 // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0) 2192 // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
2189 ASSERT(CpuFeatures::IsEnabled(VFP3)); 2193 ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3));
2190 ASSERT(!src.is(pc)); 2194 ASSERT(!src.is(pc));
2191 int sn, n; 2195 int sn, n;
2192 dst.split_code(&sn, &n); 2196 dst.split_code(&sn, &n);
2193 emit(cond | 0xE*B24 | sn*B16 | src.code()*B12 | 0xA*B8 | n*B7 | B4); 2197 emit(cond | 0xE*B24 | sn*B16 | src.code()*B12 | 0xA*B8 | n*B7 | B4);
2194 } 2198 }
2195 2199
2196 2200
2197 void Assembler::vmov(const Register dst, 2201 void Assembler::vmov(const Register dst,
2198 const SwVfpRegister src, 2202 const SwVfpRegister src,
2199 const Condition cond) { 2203 const Condition cond) {
2200 // Rt = Sn. 2204 // Rt = Sn.
2201 // Instruction details available in ARM DDI 0406A, A8-642. 2205 // Instruction details available in ARM DDI 0406A, A8-642.
2202 // cond(31-28) | 1110(27-24)| 000(23-21) | op=1(20) | Vn(19-16) | 2206 // cond(31-28) | 1110(27-24)| 000(23-21) | op=1(20) | Vn(19-16) |
2203 // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0) 2207 // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
2204 ASSERT(CpuFeatures::IsEnabled(VFP3)); 2208 ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3));
2205 ASSERT(!dst.is(pc)); 2209 ASSERT(!dst.is(pc));
2206 int sn, n; 2210 int sn, n;
2207 src.split_code(&sn, &n); 2211 src.split_code(&sn, &n);
2208 emit(cond | 0xE*B24 | B20 | sn*B16 | dst.code()*B12 | 0xA*B8 | n*B7 | B4); 2212 emit(cond | 0xE*B24 | B20 | sn*B16 | dst.code()*B12 | 0xA*B8 | n*B7 | B4);
2209 } 2213 }
2210 2214
2211 2215
2212 // Type of data to read from or write to VFP register. 2216 // Type of data to read from or write to VFP register.
2213 // Used as specifier in generic vcvt instruction. 2217 // Used as specifier in generic vcvt instruction.
2214 enum VFPType { S32, U32, F32, F64 }; 2218 enum VFPType { S32, U32, F32, F64 };
(...skipping 104 matching lines...)
2319 return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | 0x7*B16 | 2323 return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | 0x7*B16 |
2320 Vd*B12 | 0x5*B9 | sz*B8 | B7 | B6 | M*B5 | Vm); 2324 Vd*B12 | 0x5*B9 | sz*B8 | B7 | B6 | M*B5 | Vm);
2321 } 2325 }
2322 } 2326 }
2323 2327
2324 2328
2325 void Assembler::vcvt_f64_s32(const DwVfpRegister dst, 2329 void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
2326 const SwVfpRegister src, 2330 const SwVfpRegister src,
2327 VFPConversionMode mode, 2331 VFPConversionMode mode,
2328 const Condition cond) { 2332 const Condition cond) {
2329 ASSERT(CpuFeatures::IsEnabled(VFP3)); 2333 ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3));
2330 emit(EncodeVCVT(F64, dst.code(), S32, src.code(), mode, cond)); 2334 emit(EncodeVCVT(F64, dst.code(), S32, src.code(), mode, cond));
2331 } 2335 }
2332 2336
2333 2337
2334 void Assembler::vcvt_f32_s32(const SwVfpRegister dst, 2338 void Assembler::vcvt_f32_s32(const SwVfpRegister dst,
2335 const SwVfpRegister src, 2339 const SwVfpRegister src,
2336 VFPConversionMode mode, 2340 VFPConversionMode mode,
2337 const Condition cond) { 2341 const Condition cond) {
2338 ASSERT(CpuFeatures::IsEnabled(VFP3)); 2342 ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3));
2339 emit(EncodeVCVT(F32, dst.code(), S32, src.code(), mode, cond)); 2343 emit(EncodeVCVT(F32, dst.code(), S32, src.code(), mode, cond));
2340 } 2344 }
2341 2345
2342 2346
2343 void Assembler::vcvt_f64_u32(const DwVfpRegister dst, 2347 void Assembler::vcvt_f64_u32(const DwVfpRegister dst,
2344 const SwVfpRegister src, 2348 const SwVfpRegister src,
2345 VFPConversionMode mode, 2349 VFPConversionMode mode,
2346 const Condition cond) { 2350 const Condition cond) {
2347 ASSERT(CpuFeatures::IsEnabled(VFP3)); 2351 ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3));
2348 emit(EncodeVCVT(F64, dst.code(), U32, src.code(), mode, cond)); 2352 emit(EncodeVCVT(F64, dst.code(), U32, src.code(), mode, cond));
2349 } 2353 }
2350 2354
2351 2355
2352 void Assembler::vcvt_s32_f64(const SwVfpRegister dst, 2356 void Assembler::vcvt_s32_f64(const SwVfpRegister dst,
2353 const DwVfpRegister src, 2357 const DwVfpRegister src,
2354 VFPConversionMode mode, 2358 VFPConversionMode mode,
2355 const Condition cond) { 2359 const Condition cond) {
2356 ASSERT(CpuFeatures::IsEnabled(VFP3)); 2360 ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3));
2357 emit(EncodeVCVT(S32, dst.code(), F64, src.code(), mode, cond)); 2361 emit(EncodeVCVT(S32, dst.code(), F64, src.code(), mode, cond));
2358 } 2362 }
2359 2363
2360 2364
2361 void Assembler::vcvt_u32_f64(const SwVfpRegister dst, 2365 void Assembler::vcvt_u32_f64(const SwVfpRegister dst,
2362 const DwVfpRegister src, 2366 const DwVfpRegister src,
2363 VFPConversionMode mode, 2367 VFPConversionMode mode,
2364 const Condition cond) { 2368 const Condition cond) {
2365 ASSERT(CpuFeatures::IsEnabled(VFP3)); 2369 ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3));
2366 emit(EncodeVCVT(U32, dst.code(), F64, src.code(), mode, cond)); 2370 emit(EncodeVCVT(U32, dst.code(), F64, src.code(), mode, cond));
2367 } 2371 }
2368 2372
2369 2373
2370 void Assembler::vcvt_f64_f32(const DwVfpRegister dst, 2374 void Assembler::vcvt_f64_f32(const DwVfpRegister dst,
2371 const SwVfpRegister src, 2375 const SwVfpRegister src,
2372 VFPConversionMode mode, 2376 VFPConversionMode mode,
2373 const Condition cond) { 2377 const Condition cond) {
2374 ASSERT(CpuFeatures::IsEnabled(VFP3)); 2378 ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3));
2375 emit(EncodeVCVT(F64, dst.code(), F32, src.code(), mode, cond)); 2379 emit(EncodeVCVT(F64, dst.code(), F32, src.code(), mode, cond));
2376 } 2380 }
2377 2381
2378 2382
2379 void Assembler::vcvt_f32_f64(const SwVfpRegister dst, 2383 void Assembler::vcvt_f32_f64(const SwVfpRegister dst,
2380 const DwVfpRegister src, 2384 const DwVfpRegister src,
2381 VFPConversionMode mode, 2385 VFPConversionMode mode,
2382 const Condition cond) { 2386 const Condition cond) {
2383 ASSERT(CpuFeatures::IsEnabled(VFP3)); 2387 ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3));
2384 emit(EncodeVCVT(F32, dst.code(), F64, src.code(), mode, cond)); 2388 emit(EncodeVCVT(F32, dst.code(), F64, src.code(), mode, cond));
2385 } 2389 }
2386 2390
2387 2391
2388 void Assembler::vneg(const DwVfpRegister dst, 2392 void Assembler::vneg(const DwVfpRegister dst,
2389 const DwVfpRegister src, 2393 const DwVfpRegister src,
2390 const Condition cond) { 2394 const Condition cond) {
2391 emit(cond | 0xE*B24 | 0xB*B20 | B16 | dst.code()*B12 | 2395 emit(cond | 0xE*B24 | 0xB*B20 | B16 | dst.code()*B12 |
2392 0x5*B9 | B8 | B6 | src.code()); 2396 0x5*B9 | B8 | B6 | src.code());
2393 } 2397 }
2394 2398
2395 2399
2396 void Assembler::vabs(const DwVfpRegister dst, 2400 void Assembler::vabs(const DwVfpRegister dst,
2397 const DwVfpRegister src, 2401 const DwVfpRegister src,
2398 const Condition cond) { 2402 const Condition cond) {
2399 emit(cond | 0xE*B24 | 0xB*B20 | dst.code()*B12 | 2403 emit(cond | 0xE*B24 | 0xB*B20 | dst.code()*B12 |
2400 0x5*B9 | B8 | 0x3*B6 | src.code()); 2404 0x5*B9 | B8 | 0x3*B6 | src.code());
2401 } 2405 }
2402 2406
2403 2407
2404 void Assembler::vadd(const DwVfpRegister dst, 2408 void Assembler::vadd(const DwVfpRegister dst,
2405 const DwVfpRegister src1, 2409 const DwVfpRegister src1,
2406 const DwVfpRegister src2, 2410 const DwVfpRegister src2,
2407 const Condition cond) { 2411 const Condition cond) {
2408 // Dd = vadd(Dn, Dm) double precision floating point addition. 2412 // Dd = vadd(Dn, Dm) double precision floating point addition.
2409 // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm. 2413 // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
2410 // Instruction details available in ARM DDI 0406A, A8-536. 2414 // Instruction details available in ARM DDI 0406A, A8-536.
2411 // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) | 2415 // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
2412 // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0) 2416 // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
2413 ASSERT(CpuFeatures::IsEnabled(VFP3)); 2417 ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3));
2414 emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 | 2418 emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
2415 dst.code()*B12 | 0x5*B9 | B8 | src2.code()); 2419 dst.code()*B12 | 0x5*B9 | B8 | src2.code());
2416 } 2420 }
2417 2421
2418 2422
2419 void Assembler::vsub(const DwVfpRegister dst, 2423 void Assembler::vsub(const DwVfpRegister dst,
2420 const DwVfpRegister src1, 2424 const DwVfpRegister src1,
2421 const DwVfpRegister src2, 2425 const DwVfpRegister src2,
2422 const Condition cond) { 2426 const Condition cond) {
2423 // Dd = vsub(Dn, Dm) double precision floating point subtraction. 2427 // Dd = vsub(Dn, Dm) double precision floating point subtraction.
2424 // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm. 2428 // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
2425 // Instruction details available in ARM DDI 0406A, A8-784. 2429 // Instruction details available in ARM DDI 0406A, A8-784.
2426 // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) | 2430 // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
2427 // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 1(6) | M=?(5) | 0(4) | Vm(3-0) 2431 // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 1(6) | M=?(5) | 0(4) | Vm(3-0)
2428 ASSERT(CpuFeatures::IsEnabled(VFP3)); 2432 ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3));
2429 emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 | 2433 emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
2430 dst.code()*B12 | 0x5*B9 | B8 | B6 | src2.code()); 2434 dst.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
2431 } 2435 }
2432 2436
2433 2437
2434 void Assembler::vmul(const DwVfpRegister dst, 2438 void Assembler::vmul(const DwVfpRegister dst,
2435 const DwVfpRegister src1, 2439 const DwVfpRegister src1,
2436 const DwVfpRegister src2, 2440 const DwVfpRegister src2,
2437 const Condition cond) { 2441 const Condition cond) {
2438 // Dd = vmul(Dn, Dm) double precision floating point multiplication. 2442 // Dd = vmul(Dn, Dm) double precision floating point multiplication.
2439 // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm. 2443 // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
2440 // Instruction details available in ARM DDI 0406A, A8-784. 2444 // Instruction details available in ARM DDI 0406A, A8-784.
2441 // cond(31-28) | 11100(27-23)| D=?(22) | 10(21-20) | Vn(19-16) | 2445 // cond(31-28) | 11100(27-23)| D=?(22) | 10(21-20) | Vn(19-16) |
2442 // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0) 2446 // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
2443 ASSERT(CpuFeatures::IsEnabled(VFP3)); 2447 ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3));
2444 emit(cond | 0xE*B24 | 0x2*B20 | src1.code()*B16 | 2448 emit(cond | 0xE*B24 | 0x2*B20 | src1.code()*B16 |
2445 dst.code()*B12 | 0x5*B9 | B8 | src2.code()); 2449 dst.code()*B12 | 0x5*B9 | B8 | src2.code());
2446 } 2450 }
2447 2451
2448 2452
2449 void Assembler::vdiv(const DwVfpRegister dst, 2453 void Assembler::vdiv(const DwVfpRegister dst,
2450 const DwVfpRegister src1, 2454 const DwVfpRegister src1,
2451 const DwVfpRegister src2, 2455 const DwVfpRegister src2,
2452 const Condition cond) { 2456 const Condition cond) {
2453 // Dd = vdiv(Dn, Dm) double precision floating point division. 2457 // Dd = vdiv(Dn, Dm) double precision floating point division.
2454 // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm. 2458 // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
2455 // Instruction details available in ARM DDI 0406A, A8-584. 2459 // Instruction details available in ARM DDI 0406A, A8-584.
2456 // cond(31-28) | 11101(27-23)| D=?(22) | 00(21-20) | Vn(19-16) | 2460 // cond(31-28) | 11101(27-23)| D=?(22) | 00(21-20) | Vn(19-16) |
2457 // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=? | 0(6) | M=?(5) | 0(4) | Vm(3-0) 2461 // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=? | 0(6) | M=?(5) | 0(4) | Vm(3-0)
2458 ASSERT(CpuFeatures::IsEnabled(VFP3)); 2462 ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3));
2459 emit(cond | 0xE*B24 | B23 | src1.code()*B16 | 2463 emit(cond | 0xE*B24 | B23 | src1.code()*B16 |
2460 dst.code()*B12 | 0x5*B9 | B8 | src2.code()); 2464 dst.code()*B12 | 0x5*B9 | B8 | src2.code());
2461 } 2465 }
2462 2466
2463 2467
2464 void Assembler::vcmp(const DwVfpRegister src1, 2468 void Assembler::vcmp(const DwVfpRegister src1,
2465 const DwVfpRegister src2, 2469 const DwVfpRegister src2,
2466 const Condition cond) { 2470 const Condition cond) {
2467 // vcmp(Dd, Dm) double precision floating point comparison. 2471 // vcmp(Dd, Dm) double precision floating point comparison.
2468 // Instruction details available in ARM DDI 0406A, A8-570. 2472 // Instruction details available in ARM DDI 0406A, A8-570.
2469 // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0100 (19-16) | 2473 // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0100 (19-16) |
2470 // Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=0 | 1(6) | M(5)=? | 0(4) | Vm(3-0) 2474 // Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=0 | 1(6) | M(5)=? | 0(4) | Vm(3-0)
2471 ASSERT(CpuFeatures::IsEnabled(VFP3)); 2475 ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3));
2472 emit(cond | 0xE*B24 |B23 | 0x3*B20 | B18 | 2476 emit(cond | 0xE*B24 |B23 | 0x3*B20 | B18 |
2473 src1.code()*B12 | 0x5*B9 | B8 | B6 | src2.code()); 2477 src1.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
2474 } 2478 }
2475 2479
2476 2480
2477 void Assembler::vcmp(const DwVfpRegister src1, 2481 void Assembler::vcmp(const DwVfpRegister src1,
2478 const double src2, 2482 const double src2,
2479 const Condition cond) { 2483 const Condition cond) {
2480 // vcmp(Dd, Dm) double precision floating point comparison. 2484 // vcmp(Dd, Dm) double precision floating point comparison.
2481 // Instruction details available in ARM DDI 0406A, A8-570. 2485 // Instruction details available in ARM DDI 0406A, A8-570.
2482 // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0101 (19-16) | 2486 // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0101 (19-16) |
2483 // Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=0 | 1(6) | M(5)=? | 0(4) | 0000(3-0) 2487 // Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=0 | 1(6) | M(5)=? | 0(4) | 0000(3-0)
2484 ASSERT(CpuFeatures::IsEnabled(VFP3)); 2488 ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3));
2485 ASSERT(src2 == 0.0); 2489 ASSERT(src2 == 0.0);
2486 emit(cond | 0xE*B24 |B23 | 0x3*B20 | B18 | B16 | 2490 emit(cond | 0xE*B24 |B23 | 0x3*B20 | B18 | B16 |
2487 src1.code()*B12 | 0x5*B9 | B8 | B6); 2491 src1.code()*B12 | 0x5*B9 | B8 | B6);
2488 } 2492 }
2489 2493
2490 2494
2491 void Assembler::vmsr(Register dst, Condition cond) { 2495 void Assembler::vmsr(Register dst, Condition cond) {
2492 // Instruction details available in ARM DDI 0406A, A8-652. 2496 // Instruction details available in ARM DDI 0406A, A8-652.
2493 // cond(31-28) | 1110 (27-24) | 1110(23-20)| 0001 (19-16) | 2497 // cond(31-28) | 1110 (27-24) | 1110(23-20)| 0001 (19-16) |
2494 // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0) 2498 // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
2495 ASSERT(CpuFeatures::IsEnabled(VFP3)); 2499 ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3));
2496 emit(cond | 0xE*B24 | 0xE*B20 | B16 | 2500 emit(cond | 0xE*B24 | 0xE*B20 | B16 |
2497 dst.code()*B12 | 0xA*B8 | B4); 2501 dst.code()*B12 | 0xA*B8 | B4);
2498 } 2502 }
2499 2503
2500 2504
2501 void Assembler::vmrs(Register dst, Condition cond) { 2505 void Assembler::vmrs(Register dst, Condition cond) {
2502 // Instruction details available in ARM DDI 0406A, A8-652. 2506 // Instruction details available in ARM DDI 0406A, A8-652.
2503 // cond(31-28) | 1110 (27-24) | 1111(23-20)| 0001 (19-16) | 2507 // cond(31-28) | 1110 (27-24) | 1111(23-20)| 0001 (19-16) |
2504 // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0) 2508 // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
2505 ASSERT(CpuFeatures::IsEnabled(VFP3)); 2509 ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3));
2506 emit(cond | 0xE*B24 | 0xF*B20 | B16 | 2510 emit(cond | 0xE*B24 | 0xF*B20 | B16 |
2507 dst.code()*B12 | 0xA*B8 | B4); 2511 dst.code()*B12 | 0xA*B8 | B4);
2508 } 2512 }
2509 2513
2510 2514
2511 void Assembler::vsqrt(const DwVfpRegister dst, 2515 void Assembler::vsqrt(const DwVfpRegister dst,
2512 const DwVfpRegister src, 2516 const DwVfpRegister src,
2513 const Condition cond) { 2517 const Condition cond) {
2514 // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0001 (19-16) | 2518 // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0001 (19-16) |
2515 // Vd(15-12) | 101(11-9) | sz(8)=1 | 11 (7-6) | M(5)=? | 0(4) | Vm(3-0) 2519 // Vd(15-12) | 101(11-9) | sz(8)=1 | 11 (7-6) | M(5)=? | 0(4) | Vm(3-0)
2516 ASSERT(CpuFeatures::IsEnabled(VFP3)); 2520 ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3));
2517 emit(cond | 0xE*B24 | B23 | 0x3*B20 | B16 | 2521 emit(cond | 0xE*B24 | B23 | 0x3*B20 | B16 |
2518 dst.code()*B12 | 0x5*B9 | B8 | 3*B6 | src.code()); 2522 dst.code()*B12 | 0x5*B9 | B8 | 3*B6 | src.code());
2519 } 2523 }
2520 2524
2521 2525
2522 // Pseudo instructions. 2526 // Pseudo instructions.
2523 void Assembler::nop(int type) { 2527 void Assembler::nop(int type) {
2524 // This is mov rx, rx. 2528 // This is mov rx, rx.
2525 ASSERT(0 <= type && type <= 14); // mov pc, pc is not a nop. 2529 ASSERT(0 <= type && type <= 14); // mov pc, pc is not a nop.
2526 emit(al | 13*B21 | type*B12 | type); 2530 emit(al | 13*B21 | type*B12 | type);
(...skipping 253 matching lines...)
2780 2784
2781 // Since a constant pool was just emitted, move the check offset forward by 2785 // Since a constant pool was just emitted, move the check offset forward by
2782 // the standard interval. 2786 // the standard interval.
2783 next_buffer_check_ = pc_offset() + kCheckConstInterval; 2787 next_buffer_check_ = pc_offset() + kCheckConstInterval;
2784 } 2788 }
2785 2789
2786 2790
2787 } } // namespace v8::internal 2791 } } // namespace v8::internal
2788 2792
2789 #endif // V8_TARGET_ARCH_ARM 2793 #endif // V8_TARGET_ARCH_ARM