Chromium Code Reviews

Diff: src/IceTargetLoweringX86BaseImpl.h

Issue 1738443002: Subzero. Performance tweaks. (Closed) Base URL: https://chromium.googlesource.com/native_client/pnacl-subzero.git@master
Patch Set: Addresses comments -- all of them. Created 4 years, 10 months ago.
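Patch set context: the tweaks below replace std::unordered_map and std::vector with CfgUnorderedMap and CfgVector, switch llvm::SmallBitVector references to an unqualified SmallBitVector, and hoist a reserve() in one helper. As a rough, non-authoritative sketch of why the container swap can help -- assuming the Cfg* aliases are the std containers rebound onto a CFG-local arena allocator, which this diff does not show -- the pattern looks roughly like this:

// Illustrative sketch only; ArenaAllocator and these alias definitions are
// assumptions, not the actual Subzero declarations.
#include <cstddef>
#include <functional>
#include <new>
#include <unordered_map>
#include <utility>
#include <vector>

template <typename T> struct ArenaAllocator {
  using value_type = T;
  ArenaAllocator() = default;
  template <typename U> ArenaAllocator(const ArenaAllocator<U> &) {}
  T *allocate(std::size_t N) {
    // A real implementation would bump-allocate from the current function's
    // arena and release everything at once when lowering finishes.
    return static_cast<T *>(::operator new(N * sizeof(T)));
  }
  void deallocate(T *P, std::size_t) { ::operator delete(P); }
};
template <typename T, typename U>
bool operator==(const ArenaAllocator<T> &, const ArenaAllocator<U> &) {
  return true;
}
template <typename T, typename U>
bool operator!=(const ArenaAllocator<T> &, const ArenaAllocator<U> &) {
  return false;
}

// Container aliases in the spirit of CfgVector / CfgUnorderedMap: same
// interface as the std containers they replace, different allocation policy.
template <typename T> using CfgVector = std::vector<T, ArenaAllocator<T>>;

template <typename K, typename V, typename Hash = std::hash<K>,
          typename Eq = std::equal_to<K>>
using CfgUnorderedMap =
    std::unordered_map<K, V, Hash, Eq, ArenaAllocator<std::pair<const K, V>>>;

If the aliases are defined along these lines, call sites only change in the type name, which matches what the hunks below show.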
//===- subzero/src/IceTargetLoweringX86BaseImpl.h - x86 lowering -*- C++ -*-==//
//
// The Subzero Code Generator
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
(...skipping 134 matching lines...)
  void dump(const Cfg *Func) const;

private:
  /// Returns true if Producers contains a valid entry for the given VarNum.
  bool containsValid(SizeT VarNum) const {
    auto Element = Producers.find(VarNum);
    return Element != Producers.end() && Element->second.Instr != nullptr;
  }
  void setInvalid(SizeT VarNum) { Producers[VarNum].Instr = nullptr; }
  /// Producers maps Variable::Number to a BoolFoldingEntry.
-  std::unordered_map<SizeT, BoolFoldingEntry<Traits>> Producers;
+  CfgUnorderedMap<SizeT, BoolFoldingEntry<Traits>> Producers;
};

template <typename Traits>
BoolFoldingEntry<Traits>::BoolFoldingEntry(Inst *I)
    : Instr(I), IsComplex(BoolFolding<Traits>::hasComplexLowering(I)) {}

template <typename Traits>
typename BoolFolding<Traits>::BoolFoldingProducerKind
BoolFolding<Traits>::getProducerKind(const Inst *Instr) {
  if (llvm::isa<InstIcmp>(Instr)) {
(...skipping 789 matching lines...)
  // registers (as a side effect, this gives variables a second chance at
  // physical register assignment).
  //
  // A middle ground approach is to leverage sparsity and allocate one block of
  // space on the frame for globals (variables with multi-block lifetime), and
  // one block to share for locals (single-block lifetime).

  Context.init(Node);
  Context.setInsertPoint(Context.getCur());

-  llvm::SmallBitVector CalleeSaves =
-      getRegisterSet(RegSet_CalleeSave, RegSet_None);
-  RegsUsed = llvm::SmallBitVector(CalleeSaves.size());
+  SmallBitVector CalleeSaves = getRegisterSet(RegSet_CalleeSave, RegSet_None);
+  RegsUsed = SmallBitVector(CalleeSaves.size());
  VarList SortedSpilledVariables, VariablesLinkedToSpillSlots;
  size_t GlobalsSize = 0;
  // If there is a separate locals area, this represents that area. Otherwise
  // it counts any variable not counted by GlobalsSize.
  SpillAreaSizeBytes = 0;
  // If there is a separate locals area, this specifies the alignment for it.
  uint32_t LocalsSlotsAlignmentBytes = 0;
  // The entire spill locations area gets aligned to largest natural alignment
  // of the variables that have a spill slot.
  uint32_t SpillAreaAlignmentBytes = 0;
(...skipping 15 matching lines...)
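As an aside on the "middle ground" spill-area scheme described in the prologue comments above: globals (multi-block lifetime) each get a dedicated slot, while single-block locals from different blocks can overlay one shared block, since their live ranges never overlap across blocks. A minimal sketch of that sizing, using made-up types rather than Subzero's:

// Hypothetical illustration of the globals/locals frame split; SpilledVar and
// spillAreaSize are invented names, and alignment/padding is omitted.
#include <algorithm>
#include <cstddef>
#include <vector>

struct SpilledVar {
  std::size_t SizeBytes;
  bool MultiBlockLifetime; // a "global" in the comment's terminology
  std::size_t BlockIndex;  // only meaningful for single-block locals
};

std::size_t spillAreaSize(const std::vector<SpilledVar> &Vars,
                          std::size_t NumBlocks) {
  std::size_t GlobalsSize = 0;
  std::vector<std::size_t> LocalsPerBlock(NumBlocks, 0);
  for (const SpilledVar &V : Vars) {
    if (V.MultiBlockLifetime)
      GlobalsSize += V.SizeBytes; // dedicated slot per global
    else
      LocalsPerBlock[V.BlockIndex] += V.SizeBytes; // locals stack up per block
  }
  // Every block's locals overlay the same shared area, so that area only
  // needs to be as large as the most demanding block.
  std::size_t LocalsSize = 0;
  for (std::size_t S : LocalsPerBlock)
    LocalsSize = std::max(LocalsSize, S);
  return GlobalsSize + LocalsSize;
}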
  // Compute the list of spilled variables and bounds for GlobalsSize, etc.
  getVarStackSlotParams(SortedSpilledVariables, RegsUsed, &GlobalsSize,
                        &SpillAreaSizeBytes, &SpillAreaAlignmentBytes,
                        &LocalsSlotsAlignmentBytes, TargetVarHook);
  uint32_t LocalsSpillAreaSize = SpillAreaSizeBytes;
  SpillAreaSizeBytes += GlobalsSize;

  // Add push instructions for preserved registers.
  uint32_t NumCallee = 0;
  size_t PreservedRegsSizeBytes = 0;
-  llvm::SmallBitVector Pushed(CalleeSaves.size());
+  SmallBitVector Pushed(CalleeSaves.size());
  for (RegNumT i : RegNumBVIter(CalleeSaves)) {
    const auto Canonical = Traits::getBaseReg(i);
    assert(Canonical == Traits::getBaseReg(Canonical));
    if (RegsUsed[i]) {
      Pushed[Canonical] = true;
    }
  }
  for (RegNumT RegNum : RegNumBVIter(Pushed)) {
    assert(RegNum == Traits::getBaseReg(RegNum));
    ++NumCallee;
(...skipping 235 matching lines...)
  if (IsEbpBasedFrame) {
    _unlink_bp();
  } else {
    // add stackptr, SpillAreaSizeBytes
    if (SpillAreaSizeBytes != 0) {
      _add_sp(Ctx->getConstantInt32(SpillAreaSizeBytes));
    }
  }

  // Add pop instructions for preserved registers.
-  llvm::SmallBitVector CalleeSaves =
-      getRegisterSet(RegSet_CalleeSave, RegSet_None);
-  llvm::SmallBitVector Popped(CalleeSaves.size());
+  SmallBitVector CalleeSaves = getRegisterSet(RegSet_CalleeSave, RegSet_None);
+  SmallBitVector Popped(CalleeSaves.size());
  for (int32_t i = CalleeSaves.size() - 1; i >= 0; --i) {
    const auto RegNum = RegNumT::fromInt(i);
    if (RegNum == getFrameReg() && IsEbpBasedFrame)
      continue;
    const RegNumT Canonical = Traits::getBaseReg(RegNum);
    if (CalleeSaves[i] && RegsUsed[i]) {
      Popped[Canonical] = true;
    }
  }
  for (int32_t i = Popped.size() - 1; i >= 0; --i) {
(...skipping 82 matching lines...)
    // Test if the Offset is an eligible i32 constant for randomization and
    // pooling. Blind/pool it if it is. Otherwise return as an ordinary mem
    // operand.
    return legalize(MemOperand);
  }
  llvm_unreachable("Unsupported operand type");
  return nullptr;
}

template <typename TraitsType>
-llvm::SmallBitVector
+SmallBitVector
TargetX86Base<TraitsType>::getRegisterSet(RegSetMask Include,
                                          RegSetMask Exclude) const {
  return Traits::getRegisterSet(Ctx->getFlags(), Include, Exclude);
}

template <typename TraitsType>
void TargetX86Base<TraitsType>::lowerAlloca(const InstAlloca *Instr) {
  // Conservatively require the stack to be aligned. Some stack adjustment
  // operations implemented below assume that the stack is aligned before the
  // alloca. All the alloca code ensures that the stack alignment is preserved
(...skipping 2934 matching lines...)
  // I is currently the InstIntrinsicCall. Peek past that.
  // This assumes that the atomic cmpxchg has not been lowered yet,
  // so that the instructions seen in the scan from "Cur" are simple.
  assert(llvm::isa<InstIntrinsicCall>(*I));
  Inst *NextInst = Context.getNextInst(I);
  if (!NextInst)
    return false;
  // There might be phi assignments right before the compare+branch, since this
  // could be a backward branch for a loop. This placement of assignments is
  // determined by placePhiStores().
-  std::vector<InstAssign *> PhiAssigns;
+  CfgVector<InstAssign *> PhiAssigns;
  while (auto *PhiAssign = llvm::dyn_cast<InstAssign>(NextInst)) {
    if (PhiAssign->getDest() == Dest)
      return false;
    PhiAssigns.push_back(PhiAssign);
    NextInst = Context.getNextInst(I);
    if (!NextInst)
      return false;
  }
  if (auto *NextCmp = llvm::dyn_cast<InstIcmp>(NextInst)) {
    if (!(NextCmp->getCondition() == InstIcmp::Eq &&
(...skipping 2044 matching lines...)
    InstCall *Call = makeHelperCall(HelperName, CallDest, MaxSrcs);
    Call->addArg(Src0);
    StackArgumentsSize = getCallStackArgumentsSizeBytes(Call);
    Context.insert(Call);
    // The PNaCl ABI disallows i8/i16 return types, so truncate the helper call
    // result to the appropriate type as necessary.
    if (CallDest->getType() != Dest->getType())
      Context.insert<InstCast>(InstCast::Trunc, Dest, CallDest);
    Cast->setDeleted();
  } else if (auto *Intrinsic = llvm::dyn_cast<InstIntrinsicCall>(Instr)) {
-    std::vector<Type> ArgTypes;
+    CfgVector<Type> ArgTypes;
    Type ReturnType = IceType_void;
    switch (Intrinsics::IntrinsicID ID = Intrinsic->getIntrinsicInfo().ID) {
    default:
      return;
    case Intrinsics::Ctpop: {
      Operand *Val = Intrinsic->getArg(0);
      Type ValTy = Val->getType();
      if (ValTy == IceType_i64)
        ArgTypes = {IceType_i64};
      else
(...skipping 37 matching lines...)
    StackArgumentsSize = typeWidthInBytes(ReturnType);
  } else {
    return;
  }
  StackArgumentsSize = Traits::applyStackAlignment(StackArgumentsSize);
  updateMaxOutArgsSizeBytes(StackArgumentsSize);
}

template <typename TraitsType>
uint32_t TargetX86Base<TraitsType>::getCallStackArgumentsSizeBytes(
-    const std::vector<Type> &ArgTypes, Type ReturnType) {
+    const CfgVector<Type> &ArgTypes, Type ReturnType) {
  uint32_t OutArgumentsSizeBytes = 0;
  uint32_t XmmArgCount = 0;
  uint32_t GprArgCount = 0;
  for (Type Ty : ArgTypes) {
    // The PNaCl ABI requires the width of arguments to be at least 32 bits.
    assert(typeWidthInBytes(Ty) >= 4);
    if (isVectorType(Ty) && XmmArgCount < Traits::X86_MAX_XMM_ARGS) {
      ++XmmArgCount;
    } else if (isScalarIntegerType(Ty) &&
               GprArgCount < Traits::X86_MAX_GPR_ARGS) {
(...skipping 16 matching lines...)
        std::max(OutArgumentsSizeBytes,
                 static_cast<uint32_t>(typeWidthInBytesOnStack(ReturnType)));
  }
  return OutArgumentsSizeBytes;
}

template <typename TraitsType>
uint32_t TargetX86Base<TraitsType>::getCallStackArgumentsSizeBytes(
    const InstCall *Instr) {
  // Build a vector of the arguments' types.
-  std::vector<Type> ArgTypes;
-  for (SizeT i = 0, NumArgs = Instr->getNumArgs(); i < NumArgs; ++i) {
+  const SizeT NumArgs = Instr->getNumArgs();
+  CfgVector<Type> ArgTypes;
+  ArgTypes.reserve(NumArgs);
+  for (SizeT i = 0; i < NumArgs; ++i) {
    Operand *Arg = Instr->getArg(i);
    ArgTypes.emplace_back(Arg->getType());
  }
  // Compute the return type (if any).
  Type ReturnType = IceType_void;
  Variable *Dest = Instr->getDest();
  if (Dest != nullptr)
    ReturnType = Dest->getType();
  return getCallStackArgumentsSizeBytes(ArgTypes, ReturnType);
}
(...skipping 497 matching lines...)
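Besides the container swap, the hunk above hoists NumArgs and adds ArgTypes.reserve(NumArgs) before the fill loop, so the vector allocates once instead of growing repeatedly. The same pattern in isolation (illustrative code, not from Subzero):

#include <cstddef>
#include <vector>

// Reserving up front for a known element count avoids the repeated
// grow-and-copy cycles that emplace_back would otherwise trigger.
std::vector<int> collectSquares(std::size_t N) {
  std::vector<int> Out;
  Out.reserve(N); // one allocation instead of O(log N) regrowths
  for (std::size_t I = 0; I < N; ++I)
    Out.emplace_back(static_cast<int>(I * I));
  return Out;
}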
template <typename TraitsType> void TargetX86Base<TraitsType>::postLower() {
  if (Ctx->getFlags().getOptLevel() == Opt_m1)
    return;
  markRedefinitions();
  Context.availabilityUpdate();
}

template <typename TraitsType>
void TargetX86Base<TraitsType>::makeRandomRegisterPermutation(
    llvm::SmallVectorImpl<RegNumT> &Permutation,
-    const llvm::SmallBitVector &ExcludeRegisters, uint64_t Salt) const {
+    const SmallBitVector &ExcludeRegisters, uint64_t Salt) const {
  Traits::makeRandomRegisterPermutation(Ctx, Func, Permutation,
                                        ExcludeRegisters, Salt);
}

template <typename TraitsType>
void TargetX86Base<TraitsType>::emit(const ConstantInteger32 *C) const {
  if (!BuildDefs::dump())
    return;
  Ostream &Str = Ctx->getStrEmit();
  Str << "$" << C->getValue();
(...skipping 394 matching lines...)
        emitGlobal(*Var, SectionSuffix);
      }
    }
  } break;
  }
}
} // end of namespace X86NAMESPACE
} // end of namespace Ice

#endif // SUBZERO_SRC_ICETARGETLOWERINGX86BASEIMPL_H
