Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(524)

Side by Side Diff: src/arm/stub-cache-arm.cc

Issue 7535004: Merge bleeding edge up to 8774 into the GC branch. (Closed) Base URL: http://v8.googlecode.com/svn/branches/experimental/gc/
Patch Set: Created 9 years, 4 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « src/arm/macro-assembler-arm.cc ('k') | src/array.js » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2011 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 4168 matching lines...) Expand 10 before | Expand all | Expand 10 after
4179 __ mov(r0, r4); 4179 __ mov(r0, r4);
4180 __ Ret(); 4180 __ Ret();
4181 4181
4182 __ bind(&miss_force_generic); 4182 __ bind(&miss_force_generic);
4183 Code* stub = masm->isolate()->builtins()->builtin( 4183 Code* stub = masm->isolate()->builtins()->builtin(
4184 Builtins::kKeyedLoadIC_MissForceGeneric); 4184 Builtins::kKeyedLoadIC_MissForceGeneric);
4185 __ Jump(Handle<Code>(stub), RelocInfo::CODE_TARGET); 4185 __ Jump(Handle<Code>(stub), RelocInfo::CODE_TARGET);
4186 } 4186 }
4187 4187
4188 4188
4189 void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
4190 MacroAssembler* masm) {
4191 // ----------- S t a t e -------------
4192 // -- lr : return address
4193 // -- r0 : key
4194 // -- r1 : receiver
4195 // -----------------------------------
4196 Label miss_force_generic, slow_allocate_heapnumber;
4197
4198 Register key_reg = r0;
4199 Register receiver_reg = r1;
4200 Register elements_reg = r2;
4201 Register heap_number_reg = r2;
4202 Register indexed_double_offset = r3;
4203 Register scratch = r4;
4204 Register scratch2 = r5;
4205 Register scratch3 = r6;
4206 Register heap_number_map = r7;
4207
4208 // This stub is meant to be tail-jumped to, the receiver must already
4209 // have been verified by the caller to not be a smi.
4210
4211 // Check that the key is a smi.
4212 __ JumpIfNotSmi(key_reg, &miss_force_generic);
4213
4214 // Get the elements array.
4215 __ ldr(elements_reg,
4216 FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
4217
4218 // Check that the key is within bounds.
4219 __ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
4220 __ cmp(key_reg, Operand(scratch));
4221 __ b(hs, &miss_force_generic);
4222
4223 // Load the upper word of the double in the fixed array and test for NaN.
4224 __ add(indexed_double_offset, elements_reg,
4225 Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
4226 uint32_t upper_32_offset = FixedArray::kHeaderSize + sizeof(kHoleNanLower32);
4227 __ ldr(scratch, FieldMemOperand(indexed_double_offset, upper_32_offset));
4228 __ cmp(scratch, Operand(kHoleNanUpper32));
4229 __ b(&miss_force_generic, eq);
4230
4231 // Non-NaN. Allocate a new heap number and copy the double value into it.
4232 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
4233 __ AllocateHeapNumber(heap_number_reg, scratch2, scratch3,
4234 heap_number_map, &slow_allocate_heapnumber);
4235
4236 // Don't need to reload the upper 32 bits of the double, it's already in
4237 // scratch.
4238 __ str(scratch, FieldMemOperand(heap_number_reg,
4239 HeapNumber::kExponentOffset));
4240 __ ldr(scratch, FieldMemOperand(indexed_double_offset,
4241 FixedArray::kHeaderSize));
4242 __ str(scratch, FieldMemOperand(heap_number_reg,
4243 HeapNumber::kMantissaOffset));
4244
4245 __ mov(r0, heap_number_reg);
4246 __ Ret();
4247
4248 __ bind(&slow_allocate_heapnumber);
4249 Handle<Code> slow_ic =
4250 masm->isolate()->builtins()->KeyedLoadIC_Slow();
4251 __ Jump(slow_ic, RelocInfo::CODE_TARGET);
4252
4253 __ bind(&miss_force_generic);
4254 Handle<Code> miss_ic =
4255 masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
4256 __ Jump(miss_ic, RelocInfo::CODE_TARGET);
4257 }
4258
4259
4189 void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm, 4260 void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
4190 bool is_js_array) { 4261 bool is_js_array) {
4191 // ----------- S t a t e ------------- 4262 // ----------- S t a t e -------------
4192 // -- r0 : value 4263 // -- r0 : value
4193 // -- r1 : key 4264 // -- r1 : key
4194 // -- r2 : receiver 4265 // -- r2 : receiver
4195 // -- lr : return address 4266 // -- lr : return address
4196 // -- r3 : scratch 4267 // -- r3 : scratch
4197 // -- r4 : scratch (elements) 4268 // -- r4 : scratch (elements)
4198 // ----------------------------------- 4269 // -----------------------------------
(...skipping 48 matching lines...) Expand 10 before | Expand all | Expand 10 after
4247 // Done. 4318 // Done.
4248 __ Ret(); 4319 __ Ret();
4249 4320
4250 __ bind(&miss_force_generic); 4321 __ bind(&miss_force_generic);
4251 Handle<Code> ic = 4322 Handle<Code> ic =
4252 masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric(); 4323 masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
4253 __ Jump(ic, RelocInfo::CODE_TARGET); 4324 __ Jump(ic, RelocInfo::CODE_TARGET);
4254 } 4325 }
4255 4326
4256 4327
// Generates the fast-path stub for keyed stores into arrays with unboxed
// (FAST_DOUBLE) elements.  Writes the value as a raw 64-bit double,
// canonicalizing all NaNs so the hole NaN bit pattern can never be stored,
// and bails out to the force-generic IC when the key is not a smi, the
// index is out of bounds, or the value is neither a smi nor a heap number.
void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
    MacroAssembler* masm,
    bool is_js_array) {
  // ----------- S t a t e -------------
  //  -- r0    : value
  //  -- r1    : key
  //  -- r2    : receiver
  //  -- lr    : return address
  //  -- r3    : scratch
  //  -- r4    : scratch
  //  -- r5    : scratch
  // -----------------------------------
  // NOTE(review): r6 (exponent_reg) and r7 (scratch4) are also clobbered
  // below although the state comment above does not list them — confirm
  // callers treat r6/r7 as scratch as well.
  Label miss_force_generic, smi_value, is_nan, maybe_nan, have_double_value;

  Register value_reg = r0;
  Register key_reg = r1;
  Register receiver_reg = r2;
  Register scratch = r3;
  Register elements_reg = r4;
  Register mantissa_reg = r5;
  Register exponent_reg = r6;
  Register scratch4 = r7;

  // This stub is meant to be tail-jumped to, the receiver must already
  // have been verified by the caller to not be a smi.
  __ JumpIfNotSmi(key_reg, &miss_force_generic);

  // Get the elements backing store of the receiver.
  __ ldr(elements_reg,
         FieldMemOperand(receiver_reg, JSObject::kElementsOffset));

  // Check that the key is within bounds.  JSArrays are bounded by their
  // length property; other objects by the backing store's length.
  if (is_js_array) {
    __ ldr(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
  } else {
    __ ldr(scratch,
           FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
  }
  // Compare smis, unsigned compare catches both negative and out-of-bound
  // indexes.
  __ cmp(key_reg, scratch);
  __ b(hs, &miss_force_generic);

  // Handle smi values specially (converted to double further below).
  __ JumpIfSmi(value_reg, &smi_value);

  // Ensure that the object is a heap number
  __ CheckMap(value_reg,
              scratch,
              masm->isolate()->factory()->heap_number_map(),
              &miss_force_generic,
              DONT_DO_SMI_CHECK);

  // Check for nan: all NaN values have a value greater (signed) than 0x7ff00000
  // in the exponent.
  __ mov(scratch, Operand(kNaNOrInfinityLowerBoundUpper32));
  __ ldr(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
  __ cmp(exponent_reg, scratch);
  __ b(ge, &maybe_nan);

  // Ordinary (non-NaN, non-Infinity) double: load the low word too.
  __ ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));

  __ bind(&have_double_value);
  // Address the element (key is a smi, see the shift-amount comment in the
  // bounds check above) and store the two 32-bit halves: mantissa word at
  // the element start, exponent word 4 bytes above it.
  __ add(scratch, elements_reg,
         Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
  __ str(mantissa_reg, FieldMemOperand(scratch, FixedDoubleArray::kHeaderSize));
  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
  __ str(exponent_reg, FieldMemOperand(scratch, offset));
  __ Ret();

  __ bind(&maybe_nan);
  // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
  // it's an Infinity, and the non-NaN code path applies.
  // (Condition flags here are still those of the exponent cmp above; the
  // intervening ldr/b do not modify the flags.)
  __ b(gt, &is_nan);
  __ ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
  __ cmp(mantissa_reg, Operand(0));
  __ b(eq, &have_double_value);
  __ bind(&is_nan);
  // Load canonical NaN for storing into the double array.
  uint64_t nan_int64 = BitCast<uint64_t>(
      FixedDoubleArray::canonical_not_the_hole_nan_as_double());
  __ mov(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64)));
  __ mov(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32)));
  __ jmp(&have_double_value);

  __ bind(&smi_value);
  // Compute the untagged element address (FieldMemOperand cannot be used
  // with vstr, so subtract the heap-object tag explicitly).
  __ add(scratch, elements_reg,
         Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
  __ add(scratch, scratch,
         Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
  // scratch is now effective address of the double element

  FloatingPointHelper::Destination destination;
  if (CpuFeatures::IsSupported(VFP3)) {
    destination = FloatingPointHelper::kVFPRegisters;
  } else {
    destination = FloatingPointHelper::kCoreRegisters;
  }
  // NOTE(review): this clobbers r0 (value_reg), which is also the stub's
  // return register — confirm no caller relies on r0 still holding the
  // tagged value after this stub returns on the smi path.
  __ SmiUntag(value_reg, value_reg);
  FloatingPointHelper::ConvertIntToDouble(
      masm, value_reg, destination,
      d0, mantissa_reg, exponent_reg,  // These are: double_dst, dst1, dst2.
      scratch4, s2);  // These are: scratch2, single_scratch.
  if (destination == FloatingPointHelper::kVFPRegisters) {
    CpuFeatures::Scope scope(VFP3);
    __ vstr(d0, scratch, 0);
  } else {
    // No VFP: store the two core-register halves directly.
    __ str(mantissa_reg, MemOperand(scratch, 0));
    __ str(exponent_reg, MemOperand(scratch, Register::kSizeInBytes));
  }
  __ Ret();

  // Handle store cache miss, replacing the ic with the generic stub.
  __ bind(&miss_force_generic);
  Handle<Code> ic =
      masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
  __ Jump(ic, RelocInfo::CODE_TARGET);
}
4445
4446
4257 #undef __ 4447 #undef __
4258 4448
4259 } } // namespace v8::internal 4449 } } // namespace v8::internal
4260 4450
4261 #endif // V8_TARGET_ARCH_ARM 4451 #endif // V8_TARGET_ARCH_ARM
OLDNEW
« no previous file with comments | « src/arm/macro-assembler-arm.cc ('k') | src/array.js » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698