Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(433)

Unified Diff: src/ia32/codegen-ia32.cc

Issue 6880010: Merge (7265, 7271] from bleeding_edge to experimental/gc branch.... (Closed) Base URL: http://v8.googlecode.com/svn/branches/experimental/gc/
Patch Set: '' Created 9 years, 8 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View side-by-side diff with in-line comments
Download patch
Index: src/ia32/codegen-ia32.cc
===================================================================
--- src/ia32/codegen-ia32.cc (revision 7563)
+++ src/ia32/codegen-ia32.cc (working copy)
@@ -154,7 +154,8 @@
safe_int32_mode_enabled_(true),
function_return_is_shadowed_(false),
in_spilled_code_(false),
- jit_cookie_((FLAG_mask_constants_with_cookie) ? V8::RandomPrivate() : 0) {
+ jit_cookie_((FLAG_mask_constants_with_cookie) ?
+ V8::RandomPrivate(Isolate::Current()) : 0) {
}
@@ -182,7 +183,7 @@
ASSERT_EQ(0, loop_nesting_);
loop_nesting_ = info->is_in_loop() ? 1 : 0;
- JumpTarget::set_compiling_deferred_code(false);
+ Isolate::Current()->set_jump_target_compiling_deferred_code(false);
{
CodeGenState state(this);
@@ -288,7 +289,7 @@
// Initialize ThisFunction reference if present.
if (scope()->is_function_scope() && scope()->function() != NULL) {
- frame_->Push(Factory::the_hole_value());
+ frame_->Push(FACTORY->the_hole_value());
StoreToSlot(scope()->function()->AsSlot(), NOT_CONST_INIT);
}
@@ -324,7 +325,7 @@
if (!scope()->HasIllegalRedeclaration()) {
Comment cmnt(masm_, "[ function body");
#ifdef DEBUG
- bool is_builtin = Bootstrapper::IsActive();
+ bool is_builtin = info->isolate()->bootstrapper()->IsActive();
bool should_trace =
is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls;
if (should_trace) {
@@ -341,7 +342,7 @@
ASSERT(!function_return_is_shadowed_);
CodeForReturnPosition(info->function());
frame_->PrepareForReturn();
- Result undefined(Factory::undefined_value());
+ Result undefined(FACTORY->undefined_value());
if (function_return_.is_bound()) {
function_return_.Jump(&undefined);
} else {
@@ -373,9 +374,9 @@
// Process any deferred code using the register allocator.
if (!HasStackOverflow()) {
- JumpTarget::set_compiling_deferred_code(true);
+ info->isolate()->set_jump_target_compiling_deferred_code(true);
ProcessDeferred();
- JumpTarget::set_compiling_deferred_code(false);
+ info->isolate()->set_jump_target_compiling_deferred_code(false);
}
// There is no need to delete the register allocator, it is a
@@ -559,7 +560,7 @@
__ sar(val, 1);
// If there was an overflow, bits 30 and 31 of the original number disagree.
__ xor_(val, 0x80000000u);
- if (CpuFeatures::IsSupported(SSE2)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
CpuFeatures::Scope fscope(SSE2);
__ cvtsi2sd(xmm0, Operand(val));
} else {
@@ -577,7 +578,7 @@
no_reg, &allocation_failed);
VirtualFrame* clone = new VirtualFrame(frame_);
scratch.Unuse();
- if (CpuFeatures::IsSupported(SSE2)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
CpuFeatures::Scope fscope(SSE2);
__ movdbl(FieldOperand(val, HeapNumber::kValueOffset), xmm0);
} else {
@@ -590,7 +591,7 @@
RegisterFile empty_regs;
SetFrame(clone, &empty_regs);
__ bind(&allocation_failed);
- if (!CpuFeatures::IsSupported(SSE2)) {
+ if (!Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
// Pop the value from the floating point stack.
__ fstp(0);
}
@@ -617,7 +618,7 @@
safe_int32_mode_enabled() &&
expr->side_effect_free() &&
expr->num_bit_ops() > 2 &&
- CpuFeatures::IsSupported(SSE2)) {
+ Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
BreakTarget unsafe_bailout;
JumpTarget done;
unsafe_bailout.set_expected_height(frame_->height());
@@ -638,12 +639,12 @@
if (dest.false_was_fall_through()) {
// The false target was just bound.
JumpTarget loaded;
- frame_->Push(Factory::false_value());
+ frame_->Push(FACTORY->false_value());
// There may be dangling jumps to the true target.
if (true_target.is_linked()) {
loaded.Jump();
true_target.Bind();
- frame_->Push(Factory::true_value());
+ frame_->Push(FACTORY->true_value());
loaded.Bind();
}
@@ -651,11 +652,11 @@
// There is true, and possibly false, control flow (with true as
// the fall through).
JumpTarget loaded;
- frame_->Push(Factory::true_value());
+ frame_->Push(FACTORY->true_value());
if (false_target.is_linked()) {
loaded.Jump();
false_target.Bind();
- frame_->Push(Factory::false_value());
+ frame_->Push(FACTORY->false_value());
loaded.Bind();
}
@@ -670,14 +671,14 @@
loaded.Jump(); // Don't lose the current TOS.
if (true_target.is_linked()) {
true_target.Bind();
- frame_->Push(Factory::true_value());
+ frame_->Push(FACTORY->true_value());
if (false_target.is_linked()) {
loaded.Jump();
}
}
if (false_target.is_linked()) {
false_target.Bind();
- frame_->Push(Factory::false_value());
+ frame_->Push(FACTORY->false_value());
}
loaded.Bind();
}
@@ -755,7 +756,7 @@
// When using lazy arguments allocation, we store the arguments marker value
// as a sentinel indicating that the arguments object hasn't been
// allocated yet.
- frame_->Push(Factory::arguments_marker());
+ frame_->Push(FACTORY->arguments_marker());
} else {
ArgumentsAccessStub stub(is_strict_mode()
? ArgumentsAccessStub::NEW_STRICT
@@ -787,7 +788,7 @@
// been assigned a proper value.
skip_arguments = !probe.handle()->IsArgumentsMarker();
} else {
- __ cmp(Operand(probe.reg()), Immediate(Factory::arguments_marker()));
+ __ cmp(Operand(probe.reg()), Immediate(FACTORY->arguments_marker()));
probe.Unuse();
done.Branch(not_equal);
}
@@ -918,15 +919,15 @@
} else {
// Fast case checks.
// 'false' => false.
- __ cmp(value.reg(), Factory::false_value());
+ __ cmp(value.reg(), FACTORY->false_value());
dest->false_target()->Branch(equal);
// 'true' => true.
- __ cmp(value.reg(), Factory::true_value());
+ __ cmp(value.reg(), FACTORY->true_value());
dest->true_target()->Branch(equal);
// 'undefined' => false.
- __ cmp(value.reg(), Factory::undefined_value());
+ __ cmp(value.reg(), FACTORY->undefined_value());
dest->false_target()->Branch(equal);
// Smi => false iff zero.
@@ -997,7 +998,8 @@
Label* DeferredInlineBinaryOperation::NonSmiInputLabel() {
- if (Token::IsBitOp(op_) && CpuFeatures::IsSupported(SSE2)) {
+ if (Token::IsBitOp(op_) &&
+ Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
return &non_smi_input_;
} else {
return entry_label();
@@ -1020,7 +1022,7 @@
void DeferredInlineBinaryOperation::Generate() {
// Registers are not saved implicitly for this stub, so we should not
// tread on the registers that were not passed to us.
- if (CpuFeatures::IsSupported(SSE2) &&
+ if (Isolate::Current()->cpu_features()->IsSupported(SSE2) &&
((op_ == Token::ADD) ||
(op_ == Token::SUB) ||
(op_ == Token::MUL) ||
@@ -1033,7 +1035,7 @@
__ j(zero, &left_smi);
if (!left_info_.IsNumber()) {
__ cmp(FieldOperand(left_, HeapObject::kMapOffset),
- Factory::heap_number_map());
+ FACTORY->heap_number_map());
__ j(not_equal, &call_runtime);
}
__ movdbl(xmm0, FieldOperand(left_, HeapNumber::kValueOffset));
@@ -1062,7 +1064,7 @@
__ j(zero, &right_smi);
if (!right_info_.IsNumber()) {
__ cmp(FieldOperand(right_, HeapObject::kMapOffset),
- Factory::heap_number_map());
+ FACTORY->heap_number_map());
__ j(not_equal, &call_runtime);
}
__ movdbl(xmm1, FieldOperand(right_, HeapNumber::kValueOffset));
@@ -1156,7 +1158,7 @@
// The left_ and right_ registers have not been initialized yet.
__ mov(right_, Immediate(smi_value_));
__ mov(left_, Operand(dst_));
- if (!CpuFeatures::IsSupported(SSE2)) {
+ if (!Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
__ jmp(entry_label());
return;
} else {
@@ -1269,7 +1271,8 @@
// This trashes right_.
__ AllocateHeapNumber(left_, right_, no_reg, &after_alloc_failure2);
__ bind(&allocation_ok);
- if (CpuFeatures::IsSupported(SSE2) && op_ != Token::SHR) {
+ if (Isolate::Current()->cpu_features()->IsSupported(SSE2) &&
+ op_ != Token::SHR) {
CpuFeatures::Scope use_sse2(SSE2);
ASSERT(Token::IsBitOp(op_));
// Signed conversion.
@@ -1511,7 +1514,7 @@
bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
- Object* answer_object = Heap::undefined_value();
+ Object* answer_object = HEAP->undefined_value();
switch (op) {
case Token::ADD:
if (Smi::IsValid(left + right)) {
@@ -1583,7 +1586,7 @@
UNREACHABLE();
break;
}
- if (answer_object == Heap::undefined_value()) {
+ if (answer_object->IsUndefined()) {
return false;
}
frame_->Push(Handle<Object>(answer_object));
@@ -3032,13 +3035,14 @@
// Jump or fall through to here if we are comparing a non-smi to a
// constant smi. If the non-smi is a heap number and this is not
// a loop condition, inline the floating point code.
- if (!is_loop_condition && CpuFeatures::IsSupported(SSE2)) {
+ if (!is_loop_condition &&
+ Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
// Right side is a constant smi and left side has been checked
// not to be a smi.
CpuFeatures::Scope use_sse2(SSE2);
JumpTarget not_number;
__ cmp(FieldOperand(left_reg, HeapObject::kMapOffset),
- Immediate(Factory::heap_number_map()));
+ Immediate(FACTORY->heap_number_map()));
not_number.Branch(not_equal, left_side);
__ movdbl(xmm1,
FieldOperand(left_reg, HeapNumber::kValueOffset));
@@ -3104,7 +3108,7 @@
__ test(operand->reg(), Immediate(kSmiTagMask));
__ j(zero, &done);
__ cmp(FieldOperand(operand->reg(), HeapObject::kMapOffset),
- Immediate(Factory::heap_number_map()));
+ Immediate(FACTORY->heap_number_map()));
not_numbers->Branch(not_equal, left_side, right_side, not_taken);
__ bind(&done);
}
@@ -3171,7 +3175,7 @@
__ j(zero, &smi);
if (!operand->type_info().IsNumber()) {
__ cmp(FieldOperand(operand->reg(), HeapObject::kMapOffset),
- Immediate(Factory::heap_number_map()));
+ Immediate(FACTORY->heap_number_map()));
not_numbers->Branch(not_equal, left_side, right_side, taken);
}
__ movdbl(xmm_reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset));
@@ -3196,7 +3200,7 @@
ASSERT(right_side->is_register());
JumpTarget not_numbers;
- if (CpuFeatures::IsSupported(SSE2)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
// Load left and right operand into registers xmm0 and xmm1 and compare.
@@ -3278,7 +3282,7 @@
// give us a megamorphic load site. Not super, but it works.
Load(applicand);
frame()->Dup();
- Handle<String> name = Factory::LookupAsciiSymbol("apply");
+ Handle<String> name = FACTORY->LookupAsciiSymbol("apply");
frame()->Push(name);
Result answer = frame()->CallLoadIC(RelocInfo::CODE_TARGET);
__ nop();
@@ -3310,7 +3314,7 @@
if (probe.is_constant()) {
try_lazy = probe.handle()->IsArgumentsMarker();
} else {
- __ cmp(Operand(probe.reg()), Immediate(Factory::arguments_marker()));
+ __ cmp(Operand(probe.reg()), Immediate(FACTORY->arguments_marker()));
probe.Unuse();
__ j(not_equal, &slow);
}
@@ -3346,7 +3350,8 @@
__ j(not_equal, &build_args);
__ mov(ecx, FieldOperand(eax, JSFunction::kCodeEntryOffset));
__ sub(Operand(ecx), Immediate(Code::kHeaderSize - kHeapObjectTag));
- Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
+ Handle<Code> apply_code(Isolate::Current()->builtins()->builtin(
+ Builtins::FunctionApply));
__ cmp(Operand(ecx), Immediate(apply_code));
__ j(not_equal, &build_args);
@@ -3573,7 +3578,7 @@
// 'undefined') because we may have a (legal) redeclaration and we
// must not destroy the current value.
if (node->mode() == Variable::CONST) {
- frame_->EmitPush(Immediate(Factory::the_hole_value()));
+ frame_->EmitPush(Immediate(FACTORY->the_hole_value()));
} else if (node->fun() != NULL) {
Load(node->fun());
} else {
@@ -3589,7 +3594,7 @@
// If we have a function or a constant, we need to initialize the variable.
Expression* val = NULL;
if (node->mode() == Variable::CONST) {
- val = new Literal(Factory::the_hole_value());
+ val = new Literal(FACTORY->the_hole_value());
} else {
val = node->fun(); // NULL if we don't have a function
}
@@ -4370,9 +4375,9 @@
frame_->EmitPop(eax);
// eax: value to be iterated over
- __ cmp(eax, Factory::undefined_value());
+ __ cmp(eax, FACTORY->undefined_value());
exit.Branch(equal);
- __ cmp(eax, Factory::null_value());
+ __ cmp(eax, FACTORY->null_value());
exit.Branch(equal);
// Stack layout in body:
@@ -4411,14 +4416,14 @@
loop.Bind();
// Check that there are no elements.
__ mov(edx, FieldOperand(ecx, JSObject::kElementsOffset));
- __ cmp(Operand(edx), Immediate(Factory::empty_fixed_array()));
+ __ cmp(Operand(edx), Immediate(FACTORY->empty_fixed_array()));
call_runtime.Branch(not_equal);
// Check that instance descriptors are not empty so that we can
// check for an enum cache. Leave the map in ebx for the subsequent
// prototype load.
__ mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));
__ mov(edx, FieldOperand(ebx, Map::kInstanceDescriptorsOffset));
- __ cmp(Operand(edx), Immediate(Factory::empty_descriptor_array()));
+ __ cmp(Operand(edx), Immediate(FACTORY->empty_descriptor_array()));
call_runtime.Branch(equal);
// Check that there is an enum cache in the non-empty instance
// descriptors. This is the case if the next enumeration index
@@ -4430,12 +4435,12 @@
__ cmp(ecx, Operand(eax));
check_prototype.Branch(equal);
__ mov(edx, FieldOperand(edx, DescriptorArray::kEnumCacheBridgeCacheOffset));
- __ cmp(Operand(edx), Immediate(Factory::empty_fixed_array()));
+ __ cmp(Operand(edx), Immediate(FACTORY->empty_fixed_array()));
call_runtime.Branch(not_equal);
check_prototype.Bind();
// Load the prototype from the map and loop if non-null.
__ mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset));
- __ cmp(Operand(ecx), Immediate(Factory::null_value()));
+ __ cmp(Operand(ecx), Immediate(FACTORY->null_value()));
loop.Branch(not_equal);
// The enum cache is valid. Load the map of the object being
// iterated over and use the cache for the iteration.
@@ -4454,7 +4459,7 @@
// Runtime::kGetPropertyNamesFast)
__ mov(edx, Operand(eax));
__ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
- __ cmp(ecx, Factory::meta_map());
+ __ cmp(ecx, FACTORY->meta_map());
fixed_array.Branch(not_equal);
use_cache.Bind();
@@ -4646,7 +4651,7 @@
function_return_is_shadowed_ = function_return_was_shadowed;
// Get an external reference to the handler address.
- ExternalReference handler_address(Top::k_handler_address);
+ ExternalReference handler_address(Isolate::k_handler_address);
// Make sure that there's nothing left on the stack above the
// handler structure.
@@ -4772,7 +4777,7 @@
function_return_is_shadowed_ = function_return_was_shadowed;
// Get an external reference to the handler address.
- ExternalReference handler_address(Top::k_handler_address);
+ ExternalReference handler_address(Isolate::k_handler_address);
// If we can fall off the end of the try block, unlink from the try
// chain and set the state on the frame to FALLING.
@@ -4784,7 +4789,7 @@
// Fake a top of stack value (unneeded when FALLING) and set the
// state in ecx, then jump around the unlink blocks if any.
- frame_->EmitPush(Immediate(Factory::undefined_value()));
+ frame_->EmitPush(Immediate(FACTORY->undefined_value()));
__ Set(ecx, Immediate(Smi::FromInt(FALLING)));
if (nof_unlinks > 0) {
finally_block.Jump();
@@ -4827,7 +4832,7 @@
frame_->EmitPush(eax);
} else {
// Fake TOS for targets that shadowed breaks and continues.
- frame_->EmitPush(Immediate(Factory::undefined_value()));
+ frame_->EmitPush(Immediate(FACTORY->undefined_value()));
}
__ Set(ecx, Immediate(Smi::FromInt(JUMPING + i)));
if (--nof_unlinks > 0) {
@@ -4935,8 +4940,8 @@
frame()->EmitPush(esi);
frame()->EmitPush(Immediate(function_info));
frame()->EmitPush(Immediate(pretenure
- ? Factory::true_value()
- : Factory::false_value()));
+ ? FACTORY->true_value()
+ : FACTORY->false_value()));
return frame()->CallRuntime(Runtime::kNewClosure, 3);
}
}
@@ -5044,9 +5049,9 @@
Comment cmnt(masm_, "[ Load const");
Label exit;
__ mov(ecx, SlotOperand(slot, ecx));
- __ cmp(ecx, Factory::the_hole_value());
+ __ cmp(ecx, FACTORY->the_hole_value());
__ j(not_equal, &exit);
- __ mov(ecx, Factory::undefined_value());
+ __ mov(ecx, FACTORY->undefined_value());
__ bind(&exit);
frame()->EmitPush(ecx);
@@ -5096,7 +5101,7 @@
// indicates that we haven't loaded the arguments object yet, we
// need to do it now.
JumpTarget exit;
- __ cmp(Operand(result.reg()), Immediate(Factory::arguments_marker()));
+ __ cmp(Operand(result.reg()), Immediate(FACTORY->arguments_marker()));
frame()->Push(&result);
exit.Branch(not_equal);
@@ -5150,7 +5155,7 @@
__ bind(&next);
// Terminate at global context.
__ cmp(FieldOperand(tmp.reg(), HeapObject::kMapOffset),
- Immediate(Factory::global_context_map()));
+ Immediate(FACTORY->global_context_map()));
__ j(equal, &fast);
// Check that extension is NULL.
__ cmp(ContextOperand(tmp.reg(), Context::EXTENSION_INDEX), Immediate(0));
@@ -5210,9 +5215,9 @@
__ mov(result->reg(),
ContextSlotOperandCheckExtensions(potential_slot, *result, slow));
if (potential_slot->var()->mode() == Variable::CONST) {
- __ cmp(result->reg(), Factory::the_hole_value());
+ __ cmp(result->reg(), FACTORY->the_hole_value());
done->Branch(not_equal, result);
- __ mov(result->reg(), Factory::undefined_value());
+ __ mov(result->reg(), FACTORY->undefined_value());
}
done->Jump(result);
} else if (rewrite != NULL) {
@@ -5299,7 +5304,7 @@
VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ Init const");
__ mov(ecx, SlotOperand(slot, ecx));
- __ cmp(ecx, Factory::the_hole_value());
+ __ cmp(ecx, FACTORY->the_hole_value());
exit.Branch(not_equal);
}
@@ -5475,7 +5480,7 @@
Register target,
int registers_to_save = 0)
: size_(size), target_(target), registers_to_save_(registers_to_save) {
- ASSERT(size >= kPointerSize && size <= Heap::MaxObjectSizeInNewSpace());
+ ASSERT(size >= kPointerSize && size <= HEAP->MaxObjectSizeInNewSpace());
ASSERT_EQ(0, registers_to_save & target.bit());
set_comment("[ DeferredAllocateInNewSpace");
}
@@ -5536,7 +5541,7 @@
// jump to the deferred code passing the literals array.
DeferredRegExpLiteral* deferred =
new DeferredRegExpLiteral(boilerplate.reg(), literals.reg(), node);
- __ cmp(boilerplate.reg(), Factory::undefined_value());
+ __ cmp(boilerplate.reg(), FACTORY->undefined_value());
deferred->Branch(equal);
deferred->BindExit();
@@ -5694,11 +5699,11 @@
frame_->Push(node->constant_elements());
int length = node->values()->length();
Result clone;
- if (node->constant_elements()->map() == Heap::fixed_cow_array_map()) {
+ if (node->constant_elements()->map() == HEAP->fixed_cow_array_map()) {
FastCloneShallowArrayStub stub(
FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
clone = frame_->CallStub(&stub, 3);
- __ IncrementCounter(&Counters::cow_arrays_created_stub, 1);
+ __ IncrementCounter(COUNTERS->cow_arrays_created_stub(), 1);
} else if (node->depth() > 1) {
clone = frame_->CallRuntime(Runtime::kCreateArrayLiteral, 3);
} else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
@@ -6105,7 +6110,7 @@
Load(function);
// Allocate a frame slot for the receiver.
- frame_->Push(Factory::undefined_value());
+ frame_->Push(FACTORY->undefined_value());
// Load the arguments.
int arg_count = args->length();
@@ -6137,7 +6142,7 @@
if (arg_count > 0) {
frame_->PushElementAt(arg_count);
} else {
- frame_->Push(Factory::undefined_value());
+ frame_->Push(FACTORY->undefined_value());
}
frame_->PushParameterAt(-1);
@@ -6159,7 +6164,7 @@
if (arg_count > 0) {
frame_->PushElementAt(arg_count);
} else {
- frame_->Push(Factory::undefined_value());
+ frame_->Push(FACTORY->undefined_value());
}
frame_->PushParameterAt(-1);
@@ -6453,7 +6458,7 @@
}
#endif
// Finally, we're expected to leave a value on the top of the stack.
- frame_->Push(Factory::undefined_value());
+ frame_->Push(FACTORY->undefined_value());
}
@@ -6496,13 +6501,13 @@
__ bind(&need_conversion_);
// Move the undefined value into the result register, which will
// trigger conversion.
- __ Set(result_, Immediate(Factory::undefined_value()));
+ __ Set(result_, Immediate(FACTORY->undefined_value()));
__ jmp(exit_label());
__ bind(&index_out_of_range_);
// When the index is out of range, the spec requires us to return
// NaN.
- __ Set(result_, Immediate(Factory::nan_value()));
+ __ Set(result_, Immediate(FACTORY->nan_value()));
__ jmp(exit_label());
}
@@ -6625,7 +6630,7 @@
__ bind(&index_out_of_range_);
// When the index is out of range, the spec requires us to return
// the empty string.
- __ Set(result_, Immediate(Factory::empty_string()));
+ __ Set(result_, Immediate(FACTORY->empty_string()));
__ jmp(exit_label());
}
@@ -6743,7 +6748,7 @@
__ mov(array_length, FieldOperand(array, JSArray::kLengthOffset));
__ sar(array_length, 1);
__ j(not_zero, &non_trivial_array);
- __ mov(result_operand, Factory::empty_string());
+ __ mov(result_operand, FACTORY->empty_string());
__ jmp(&done);
// Save the array length.
@@ -6954,7 +6959,7 @@
__ bind(&bailout);
- __ mov(result_operand, Factory::undefined_value());
+ __ mov(result_operand, FACTORY->undefined_value());
__ bind(&done);
__ mov(eax, result_operand);
// Drop temp values from the stack, and restore context register.
@@ -6995,7 +7000,7 @@
__ test(obj.reg(), Immediate(kSmiTagMask));
destination()->false_target()->Branch(zero);
- __ cmp(obj.reg(), Factory::null_value());
+ __ cmp(obj.reg(), FACTORY->null_value());
destination()->true_target()->Branch(equal);
Result map = allocator()->Allocate();
@@ -7066,7 +7071,7 @@
// Check for fast case object. Generate false result for slow case object.
__ mov(scratch1_, FieldOperand(object_, JSObject::kPropertiesOffset));
__ mov(scratch1_, FieldOperand(scratch1_, HeapObject::kMapOffset));
- __ cmp(scratch1_, Factory::hash_table_map());
+ __ cmp(scratch1_, FACTORY->hash_table_map());
__ j(equal, &false_result);
// Look for valueOf symbol in the descriptor array, and indicate false if
@@ -7093,7 +7098,7 @@
__ jmp(&entry);
__ bind(&loop);
__ mov(scratch2_, FieldOperand(map_result_, 0));
- __ cmp(scratch2_, Factory::value_of_symbol());
+ __ cmp(scratch2_, FACTORY->value_of_symbol());
__ j(equal, &false_result);
__ add(Operand(map_result_), Immediate(kPointerSize));
__ bind(&entry);
@@ -7308,17 +7313,17 @@
// Functions have class 'Function'.
function.Bind();
- frame_->Push(Factory::function_class_symbol());
+ frame_->Push(FACTORY->function_class_symbol());
leave.Jump();
// Objects with a non-function constructor have class 'Object'.
non_function_constructor.Bind();
- frame_->Push(Factory::Object_symbol());
+ frame_->Push(FACTORY->Object_symbol());
leave.Jump();
// Non-JS objects have class null.
null.Bind();
- frame_->Push(Factory::null_value());
+ frame_->Push(FACTORY->null_value());
// All done.
leave.Bind();
@@ -7462,7 +7467,7 @@
// by computing:
// ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
// This is implemented on both SSE2 and FPU.
- if (CpuFeatures::IsSupported(SSE2)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
CpuFeatures::Scope fscope(SSE2);
__ mov(ebx, Immediate(0x49800000)); // 1.0 x 2^20 as single.
__ movd(xmm1, Operand(ebx));
@@ -7679,10 +7684,10 @@
int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
Handle<FixedArray> jsfunction_result_caches(
- Top::global_context()->jsfunction_result_caches());
+ Isolate::Current()->global_context()->jsfunction_result_caches());
if (jsfunction_result_caches->length() <= cache_id) {
__ Abort("Attempt to use undefined cache.");
- frame_->Push(Factory::undefined_value());
+ frame_->Push(FACTORY->undefined_value());
return;
}
@@ -7799,7 +7804,7 @@
// Check the object's elements are in fast case and writable.
__ mov(tmp1.reg(), FieldOperand(object.reg(), JSObject::kElementsOffset));
__ cmp(FieldOperand(tmp1.reg(), HeapObject::kMapOffset),
- Immediate(Factory::fixed_array_map()));
+ Immediate(FACTORY->fixed_array_map()));
deferred->Branch(not_equal);
// Smi-tagging is equivalent to multiplying by 2.
@@ -7849,7 +7854,7 @@
__ bind(&done);
deferred->BindExit();
- frame_->Push(Factory::undefined_value());
+ frame_->Push(FACTORY->undefined_value());
}
@@ -7877,7 +7882,7 @@
ASSERT(args->length() == 2);
Load(args->at(0));
Load(args->at(1));
- if (!CpuFeatures::IsSupported(SSE2)) {
+ if (!Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
Result res = frame_->CallRuntime(Runtime::kMath_pow, 2);
frame_->Push(&res);
} else {
@@ -7918,7 +7923,7 @@
// exponent is smi and base is a heapnumber.
__ bind(&base_nonsmi);
__ cmp(FieldOperand(base.reg(), HeapObject::kMapOffset),
- Factory::heap_number_map());
+ FACTORY->heap_number_map());
call_runtime.Branch(not_equal);
__ movdbl(xmm0, FieldOperand(base.reg(), HeapNumber::kValueOffset));
@@ -7969,7 +7974,7 @@
// on doubles.
__ bind(&exponent_nonsmi);
__ cmp(FieldOperand(exponent.reg(), HeapObject::kMapOffset),
- Factory::heap_number_map());
+ FACTORY->heap_number_map());
call_runtime.Branch(not_equal);
__ movdbl(xmm1, FieldOperand(exponent.reg(), HeapNumber::kValueOffset));
// Test if exponent is nan.
@@ -7985,7 +7990,7 @@
__ jmp(&handle_special_cases);
__ bind(&base_not_smi);
__ cmp(FieldOperand(base.reg(), HeapObject::kMapOffset),
- Factory::heap_number_map());
+ FACTORY->heap_number_map());
call_runtime.Branch(not_equal);
__ mov(answer.reg(), FieldOperand(base.reg(), HeapNumber::kExponentOffset));
__ and_(answer.reg(), HeapNumber::kExponentMask);
@@ -8094,7 +8099,7 @@
ASSERT_EQ(args->length(), 1);
Load(args->at(0));
- if (!CpuFeatures::IsSupported(SSE2)) {
+ if (!Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
Result result = frame()->CallRuntime(Runtime::kMath_sqrt, 1);
frame()->Push(&result);
} else {
@@ -8116,7 +8121,7 @@
__ jmp(&load_done);
__ bind(&non_smi);
__ cmp(FieldOperand(result.reg(), HeapObject::kMapOffset),
- Factory::heap_number_map());
+ FACTORY->heap_number_map());
__ j(not_equal, &runtime);
__ movdbl(xmm0, FieldOperand(result.reg(), HeapNumber::kValueOffset));
@@ -8222,7 +8227,7 @@
ZoneList<Expression*>* args = node->arguments();
Comment cmnt(masm_, "[ CallRuntime");
- Runtime::Function* function = node->function();
+ const Runtime::Function* function = node->function();
if (function == NULL) {
// Push the builtins object found in the current global object.
@@ -8305,12 +8310,12 @@
} else {
// Default: Result of deleting non-global, not dynamically
// introduced variables is false.
- frame_->Push(Factory::false_value());
+ frame_->Push(FACTORY->false_value());
}
} else {
// Default: Result of deleting expressions is true.
Load(node->expression()); // may have side-effects
- frame_->SetElementAt(0, Factory::true_value());
+ frame_->SetElementAt(0, FACTORY->true_value());
}
} else if (op == Token::TYPEOF) {
@@ -8331,10 +8336,10 @@
expression->AsLiteral()->IsNull())) {
// Omit evaluating the value of the primitive literal.
// It will be discarded anyway, and can have no side effect.
- frame_->Push(Factory::undefined_value());
+ frame_->Push(FACTORY->undefined_value());
} else {
Load(node->expression());
- frame_->SetElementAt(0, Factory::undefined_value());
+ frame_->SetElementAt(0, FACTORY->undefined_value());
}
} else {
@@ -9136,16 +9141,16 @@
Result answer = frame_->Pop();
answer.ToRegister();
- if (check->Equals(Heap::number_symbol())) {
+ if (check->Equals(HEAP->number_symbol())) {
__ test(answer.reg(), Immediate(kSmiTagMask));
destination()->true_target()->Branch(zero);
frame_->Spill(answer.reg());
__ mov(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
- __ cmp(answer.reg(), Factory::heap_number_map());
+ __ cmp(answer.reg(), FACTORY->heap_number_map());
answer.Unuse();
destination()->Split(equal);
- } else if (check->Equals(Heap::string_symbol())) {
+ } else if (check->Equals(HEAP->string_symbol())) {
__ test(answer.reg(), Immediate(kSmiTagMask));
destination()->false_target()->Branch(zero);
@@ -9161,15 +9166,15 @@
answer.Unuse();
destination()->Split(below);
- } else if (check->Equals(Heap::boolean_symbol())) {
- __ cmp(answer.reg(), Factory::true_value());
+ } else if (check->Equals(HEAP->boolean_symbol())) {
+ __ cmp(answer.reg(), FACTORY->true_value());
destination()->true_target()->Branch(equal);
- __ cmp(answer.reg(), Factory::false_value());
+ __ cmp(answer.reg(), FACTORY->false_value());
answer.Unuse();
destination()->Split(equal);
- } else if (check->Equals(Heap::undefined_symbol())) {
- __ cmp(answer.reg(), Factory::undefined_value());
+ } else if (check->Equals(HEAP->undefined_symbol())) {
+ __ cmp(answer.reg(), FACTORY->undefined_value());
destination()->true_target()->Branch(equal);
__ test(answer.reg(), Immediate(kSmiTagMask));
@@ -9183,7 +9188,7 @@
answer.Unuse();
destination()->Split(not_zero);
- } else if (check->Equals(Heap::function_symbol())) {
+ } else if (check->Equals(HEAP->function_symbol())) {
__ test(answer.reg(), Immediate(kSmiTagMask));
destination()->false_target()->Branch(zero);
frame_->Spill(answer.reg());
@@ -9193,10 +9198,10 @@
__ CmpInstanceType(answer.reg(), JS_REGEXP_TYPE);
answer.Unuse();
destination()->Split(equal);
- } else if (check->Equals(Heap::object_symbol())) {
+ } else if (check->Equals(HEAP->object_symbol())) {
__ test(answer.reg(), Immediate(kSmiTagMask));
destination()->false_target()->Branch(zero);
- __ cmp(answer.reg(), Factory::null_value());
+ __ cmp(answer.reg(), FACTORY->null_value());
destination()->true_target()->Branch(equal);
Result map = allocator()->Allocate();
@@ -9239,7 +9244,7 @@
Result scratch = allocator()->Allocate();
ASSERT(scratch.is_valid());
__ mov(scratch.reg(), FieldOperand(lhs.reg(), HeapObject::kMapOffset));
- __ cmp(scratch.reg(), Factory::heap_number_map());
+ __ cmp(scratch.reg(), FACTORY->heap_number_map());
JumpTarget not_a_number;
not_a_number.Branch(not_equal, &lhs);
__ mov(scratch.reg(),
@@ -9326,7 +9331,7 @@
Load(node->expression());
Result operand = frame_->Pop();
operand.ToRegister();
- __ cmp(operand.reg(), Factory::null_value());
+ __ cmp(operand.reg(), FACTORY->null_value());
if (node->is_strict()) {
operand.Unuse();
destination()->Split(equal);
@@ -9334,7 +9339,7 @@
// The 'null' value is only equal to 'undefined' if using non-strict
// comparisons.
destination()->true_target()->Branch(equal);
- __ cmp(operand.reg(), Factory::undefined_value());
+ __ cmp(operand.reg(), FACTORY->undefined_value());
destination()->true_target()->Branch(equal);
__ test(operand.reg(), Immediate(kSmiTagMask));
destination()->false_target()->Branch(equal);
@@ -9407,7 +9412,8 @@
__ mov(eax, receiver_);
}
__ Set(ecx, Immediate(name_));
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ Handle<Code> ic(Isolate::Current()->builtins()->builtin(
+ Builtins::LoadIC_Initialize));
RelocInfo::Mode mode = is_contextual_
? RelocInfo::CODE_TARGET_CONTEXT
: RelocInfo::CODE_TARGET;
@@ -9426,13 +9432,13 @@
// instruction that gets patched and coverage code gets in the way.
if (is_contextual_) {
masm_->mov(is_dont_delete_ ? edx : ecx, -delta_to_patch_site);
- __ IncrementCounter(&Counters::named_load_global_inline_miss, 1);
+ __ IncrementCounter(COUNTERS->named_load_global_inline_miss(), 1);
if (is_dont_delete_) {
- __ IncrementCounter(&Counters::dont_delete_hint_miss, 1);
+ __ IncrementCounter(COUNTERS->dont_delete_hint_miss(), 1);
}
} else {
masm_->test(eax, Immediate(-delta_to_patch_site));
- __ IncrementCounter(&Counters::named_load_inline_miss, 1);
+ __ IncrementCounter(COUNTERS->named_load_inline_miss(), 1);
}
if (!dst_.is(eax)) __ mov(dst_, eax);
@@ -9486,7 +9492,8 @@
// it in the IC initialization code and patch the cmp instruction.
// This means that we cannot allow test instructions after calls to
// KeyedLoadIC stubs in other places.
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ Handle<Code> ic(Isolate::Current()->builtins()->builtin(
+ Builtins::KeyedLoadIC_Initialize));
__ call(ic, RelocInfo::CODE_TARGET);
// The delta from the start of the map-compare instruction to the
// test instruction. We use masm_-> directly here instead of the __
@@ -9497,7 +9504,7 @@
// Here we use masm_-> instead of the __ macro because this is the
// instruction that gets patched and coverage code gets in the way.
masm_->test(eax, Immediate(-delta_to_patch_site));
- __ IncrementCounter(&Counters::keyed_load_inline_miss, 1);
+ __ IncrementCounter(COUNTERS->keyed_load_inline_miss(), 1);
if (!dst_.is(eax)) __ mov(dst_, eax);
}
@@ -9533,7 +9540,7 @@
void DeferredReferenceSetKeyedValue::Generate() {
- __ IncrementCounter(&Counters::keyed_store_inline_miss, 1);
+ __ IncrementCounter(COUNTERS->keyed_store_inline_miss(), 1);
// Move value_ to eax, key_ to ecx, and receiver_ to edx.
Register old_value = value_;
@@ -9587,7 +9594,7 @@
}
// Call the IC stub.
- Handle<Code> ic(Builtins::builtin(
+ Handle<Code> ic(Isolate::Current()->builtins()->builtin(
(strict_mode_ == kStrictMode) ? Builtins::KeyedStoreIC_Initialize_Strict
: Builtins::KeyedStoreIC_Initialize));
__ call(ic, RelocInfo::CODE_TARGET);
@@ -9612,7 +9619,7 @@
bool contextual_load_in_builtin =
is_contextual &&
- (Bootstrapper::IsActive() ||
+ (Isolate::Current()->bootstrapper()->IsActive() ||
(!info_->closure().is_null() && info_->closure()->IsBuiltin()));
Result result;
@@ -9658,7 +9665,7 @@
// use the double underscore macro that may insert instructions).
// Initially use an invalid map to force a failure.
masm()->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
- Immediate(Factory::null_value()));
+ Immediate(FACTORY->null_value()));
// This branch is always a forwards branch so it's always a fixed size
// which allows the assert below to succeed and patching to work.
deferred->Branch(not_equal);
@@ -9670,14 +9677,16 @@
if (is_contextual) {
// Load the (initialy invalid) cell and get its value.
- masm()->mov(result.reg(), Factory::null_value());
+ masm()->mov(result.reg(), FACTORY->null_value());
if (FLAG_debug_code) {
__ cmp(FieldOperand(result.reg(), HeapObject::kMapOffset),
- Factory::global_property_cell_map());
+ FACTORY->global_property_cell_map());
__ Assert(equal, "Uninitialized inlined contextual load");
}
__ mov(result.reg(),
FieldOperand(result.reg(), JSGlobalPropertyCell::kValueOffset));
+ __ cmp(result.reg(), FACTORY->the_hole_value());
+ deferred->Branch(equal);
bool is_dont_delete = false;
if (!info_->closure().is_null()) {
// When doing lazy compilation we can check if the global cell
@@ -9696,15 +9705,15 @@
}
deferred->set_is_dont_delete(is_dont_delete);
if (!is_dont_delete) {
- __ cmp(result.reg(), Factory::the_hole_value());
+ __ cmp(result.reg(), FACTORY->the_hole_value());
deferred->Branch(equal);
} else if (FLAG_debug_code) {
- __ cmp(result.reg(), Factory::the_hole_value());
+ __ cmp(result.reg(), FACTORY->the_hole_value());
__ Check(not_equal, "DontDelete cells can't contain the hole");
}
- __ IncrementCounter(&Counters::named_load_global_inline, 1);
+ __ IncrementCounter(COUNTERS->named_load_global_inline(), 1);
if (is_dont_delete) {
- __ IncrementCounter(&Counters::dont_delete_hint_hit, 1);
+ __ IncrementCounter(COUNTERS->dont_delete_hint_hit(), 1);
}
} else {
// The initial (invalid) offset has to be large enough to force a 32-bit
@@ -9712,7 +9721,7 @@
// kMaxInt (minus kHeapObjectTag).
int offset = kMaxInt;
masm()->mov(result.reg(), FieldOperand(receiver.reg(), offset));
- __ IncrementCounter(&Counters::named_load_inline, 1);
+ __ IncrementCounter(COUNTERS->named_load_inline(), 1);
}
deferred->BindExit();
@@ -9758,7 +9767,7 @@
// Initially use an invalid map to force a failure.
__ bind(&patch_site);
masm()->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
- Immediate(Factory::null_value()));
+ Immediate(FACTORY->null_value()));
// This branch is always a forwards branch so it's always a fixed size
// which allows the assert below to succeed and patching to work.
slow.Branch(not_equal, &value, &receiver);
@@ -9873,7 +9882,7 @@
// Use masm-> here instead of the double underscore macro since extra
// coverage code can interfere with the patching.
masm_->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
- Immediate(Factory::null_value()));
+ Immediate(FACTORY->null_value()));
deferred->Branch(not_equal);
// Check that the key is a smi.
@@ -9903,9 +9912,9 @@
times_2,
FixedArray::kHeaderSize));
result = elements;
- __ cmp(Operand(result.reg()), Immediate(Factory::the_hole_value()));
+ __ cmp(Operand(result.reg()), Immediate(FACTORY->the_hole_value()));
deferred->Branch(equal);
- __ IncrementCounter(&Counters::keyed_load_inline, 1);
+ __ IncrementCounter(COUNTERS->keyed_load_inline(), 1);
deferred->BindExit();
} else {
@@ -9998,7 +10007,7 @@
// which will allow the debugger to break for fast case stores.
__ bind(deferred->patch_site());
__ cmp(FieldOperand(tmp.reg(), HeapObject::kMapOffset),
- Immediate(Factory::fixed_array_map()));
+ Immediate(FACTORY->fixed_array_map()));
deferred->Branch(not_equal);
// Check that the key is within bounds. Both the key and the length of
@@ -10011,7 +10020,7 @@
// Store the value.
__ mov(FixedArrayElementOperand(tmp.reg(), key.reg()), result.reg());
- __ IncrementCounter(&Counters::keyed_store_inline, 1);
+ __ IncrementCounter(COUNTERS->keyed_store_inline(), 1);
deferred->BindExit();
} else {
@@ -10214,7 +10223,7 @@
__ int3();
__ bind(&ok);
}
- if (CpuFeatures::IsSupported(SSE2)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
CpuFeatures::Scope enable(SSE2);
__ push(edi);
__ push(esi);
@@ -10242,7 +10251,7 @@
__ test(Operand(src), Immediate(0x0F));
__ j(not_zero, &unaligned_source);
{
- __ IncrementCounter(&Counters::memcopy_aligned, 1);
+ __ IncrementCounter(COUNTERS->memcopy_aligned(), 1);
// Copy loop for aligned source and destination.
__ mov(edx, count);
Register loop_count = ecx;
@@ -10290,7 +10299,7 @@
// Copy loop for unaligned source and aligned destination.
// If source is not aligned, we can't read it as efficiently.
__ bind(&unaligned_source);
- __ IncrementCounter(&Counters::memcopy_unaligned, 1);
+ __ IncrementCounter(COUNTERS->memcopy_unaligned(), 1);
__ mov(edx, ecx);
Register loop_count = ecx;
Register count = edx;
@@ -10334,7 +10343,7 @@
}
} else {
- __ IncrementCounter(&Counters::memcopy_noxmm, 1);
+ __ IncrementCounter(COUNTERS->memcopy_noxmm(), 1);
// SSE2 not supported. Unlikely to happen in practice.
__ push(edi);
__ push(esi);

Powered by Google App Engine
This is Rietveld 408576698