OLD | NEW |
| (Empty) |
1 //===-- JSBackend.cpp - Library for converting LLVM code to JS -----===// | |
2 // | |
3 // The LLVM Compiler Infrastructure | |
4 // | |
5 // This file is distributed under the University of Illinois Open Source | |
6 // License. See LICENSE.TXT for details. | |
7 // | |
8 //===----------------------------------------------------------------------===// | |
9 // | |
10 // This file implements compiling of LLVM IR, which is assumed to have been | |
11 // simplified using the PNaCl passes, i64 legalization, and other necessary | |
12 // transformations, into JavaScript in asm.js format, suitable for passing | |
13 // to emscripten for final processing. | |
14 // | |
15 //===----------------------------------------------------------------------===// | |
16 | |
17 #include "JSTargetMachine.h" | |
18 #include "MCTargetDesc/JSBackendMCTargetDesc.h" | |
19 #include "AllocaManager.h" | |
20 #include "llvm/Analysis/ValueTracking.h" | |
21 #include "llvm/ADT/SmallPtrSet.h" | |
22 #include "llvm/ADT/SmallString.h" | |
23 #include "llvm/ADT/StringExtras.h" | |
24 #include "llvm/ADT/STLExtras.h" | |
25 #include "llvm/Config/config.h" | |
26 #include "llvm/IR/Constants.h" | |
27 #include "llvm/IR/DerivedTypes.h" | |
28 #include "llvm/IR/InlineAsm.h" | |
29 #include "llvm/IR/Instruction.h" | |
30 #include "llvm/IR/Instructions.h" | |
31 #include "llvm/IR/Intrinsics.h" | |
32 #include "llvm/IR/Module.h" | |
33 #include "llvm/IR/Operator.h" | |
34 #include "llvm/Pass.h" | |
35 #include "llvm/IR/LegacyPassManager.h" | |
36 #include "llvm/IR/CallSite.h" | |
37 #include "llvm/Support/CommandLine.h" | |
38 #include "llvm/Support/ErrorHandling.h" | |
39 #include "llvm/IR/GetElementPtrTypeIterator.h" | |
40 #include "llvm/Support/MathExtras.h" | |
41 #include "llvm/Support/TargetRegistry.h" | |
42 #include "llvm/IR/DebugInfo.h" | |
43 #include "llvm/Transforms/NaCl.h" | |
44 #include <algorithm> | |
45 #include <cstdio> | |
46 #include <map> | |
47 #include <set> // TODO: unordered_set? | |
48 using namespace llvm; | |
49 | |
50 #include <OptPasses.h> | |
51 #include <Relooper.h> | |
52 | |
// In release builds (NDEBUG) the standard assert() compiles to nothing;
// redefine it so that failed internal checks still stop compilation with a
// fatal error instead of silently producing bad JS output.
#ifdef NDEBUG
#undef assert
#define assert(x) { if (!(x)) report_fatal_error(#x); }
#endif
57 | |
// Print a colored "warning:" prefix to stderr and return the stream, so the
// caller can append the warning text itself: prettyWarning() << "...";
raw_ostream &prettyWarning() {
  errs().changeColor(raw_ostream::YELLOW);
  errs() << "warning:";
  errs().resetColor();
  errs() << " ";
  return errs();
}
65 | |
// Backend command-line options. Each mirrors an emscripten setting (named in
// its description) that emcc forwards down to this backend.

static cl::opt<bool>
PreciseF32("emscripten-precise-f32",
           cl::desc("Enables Math.fround usage to implement precise float32 semantics and performance (see emscripten PRECISE_F32 option)"),
           cl::init(false));

static cl::opt<bool>
WarnOnUnaligned("emscripten-warn-unaligned",
                cl::desc("Warns about unaligned loads and stores (which can negatively affect performance)"),
                cl::init(false));

static cl::opt<int>
ReservedFunctionPointers("emscripten-reserved-function-pointers",
                         cl::desc("Number of reserved slots in function tables for functions to be added at runtime (see emscripten RESERVED_FUNCTION_POINTERS option)"),
                         cl::init(0));

static cl::opt<int>
EmscriptenAssertions("emscripten-assertions",
                     cl::desc("Additional JS-specific assertions (see emscripten ASSERTIONS)"),
                     cl::init(0));

static cl::opt<bool>
NoAliasingFunctionPointers("emscripten-no-aliasing-function-pointers",
                           cl::desc("Forces function pointers to not alias (this is more correct, but rarely needed, and has the cost of much larger function tables; it is useful for debugging though; see emscripten ALIASING_FUNCTION_POINTERS option)"),
                           cl::init(false));

static cl::opt<int>
GlobalBase("emscripten-global-base",
           cl::desc("Where global variables start out in memory (see emscripten GLOBAL_BASE option)"),
           cl::init(8));

95 | |
96 | |
// Entry point invoked by LLVM's target-registry initialization machinery;
// makes the JS backend available as a selectable target machine.
extern "C" void LLVMInitializeJSBackendTarget() {
  // Register the target.
  RegisterTargetMachine<JSTargetMachine> X(TheJSBackendTarget);
}
101 | |
namespace {
  // Flags controlling how a value is cast/coerced when emitted as asm.js.
  // These combine as a bitmask (AsmCast below).
  #define ASM_SIGNED 0
  #define ASM_UNSIGNED 1
  #define ASM_NONSPECIFIC 2 // nonspecific means to not differentiate ints. |0 for all, regardless of size and sign
  #define ASM_FFI_IN 4 // FFI return values are limited to things that work in ffis
  #define ASM_FFI_OUT 8 // params to FFIs are limited to things that work in ffis
  #define ASM_MUST_CAST 16 // this value must be explicitly cast (or be an integer constant)
  typedef unsigned AsmCast;

  // Upper/lowercase SIMD lane names, indexed by lane number (0..3).
  const char *const SIMDLane = "XYZW";
  const char *const simdLane = "xyzw";

  typedef std::map<const Value*,std::string> ValueMap;
  typedef std::set<std::string> NameSet;
  typedef std::vector<unsigned char> HeapData;
  // (offset within its HeapData block, element size in bits)
  typedef std::pair<unsigned, unsigned> Address;
  typedef std::map<std::string, Type *> VarMap;
  typedef std::map<std::string, Address> GlobalAddressMap;
  typedef std::vector<std::string> FunctionTable;
  typedef std::map<std::string, FunctionTable> FunctionTableMap;
  typedef std::map<std::string, std::string> StringMap;
  typedef std::map<std::string, unsigned> NameIntMap;
  typedef std::map<const BasicBlock*, unsigned> BlockIndexMap;
  typedef std::map<const Function*, BlockIndexMap> BlockAddressMap;
  typedef std::map<const BasicBlock*, Block*> LLVMToRelooperMap;

  /// JSWriter - This class is the main chunk of code that converts an LLVM
  /// module to JavaScript.
  class JSWriter : public ModulePass {
    raw_pwrite_stream &Out;          // destination stream for the emitted JS
    const Module *TheModule;
    unsigned UniqueNum;              // counter for naming anonymous values
    unsigned NextFunctionIndex; // used with NoAliasingFunctionPointers
    ValueMap ValueNames;             // cache of Value -> sanitized JS name
    VarMap UsedVars;                 // JS locals needing a `var` declaration, with their types
    AllocaManager Allocas;
    HeapData GlobalData8;            // static data segments, bucketed by alignment
    HeapData GlobalData32;
    HeapData GlobalData64;
    GlobalAddressMap GlobalAddresses;
    NameSet Externals; // vars
    NameSet Declares; // funcs
    StringMap Redirects; // library function redirects actually used, needed for wrapper funcs in tables
    std::string PostSets;            // JS statements run after static init to patch addresses
    NameIntMap NamedGlobals; // globals that we export as metadata to JS, so it can access them by name
    std::map<std::string, unsigned> IndexedFunctions; // name -> index
    FunctionTableMap FunctionTables; // sig => list of functions
    std::vector<std::string> GlobalInitializers;
    std::vector<std::string> Exports; // additional exports
    BlockAddressMap BlockAddresses;

    std::string CantValidate;        // non-empty explains why output won't validate as asm.js
    bool UsesSIMD;
    int InvokeState; // cycles between 0, 1 after preInvoke, 2 after call, 0 again after postInvoke. hackish, no argument there.
    CodeGenOpt::Level OptLevel;
    const DataLayout *DL;
    bool StackBumped;

    // Textually injects the call-handler member functions and the
    // CallHandlers/CallHandlerMap members into this class.
    #include "CallHandlers.h"

  public:
    static char ID;
    JSWriter(raw_pwrite_stream &o, CodeGenOpt::Level OptLevel)
      : ModulePass(ID), Out(o), UniqueNum(0), NextFunctionIndex(0), CantValidate(""), UsesSIMD(false), InvokeState(0),
        OptLevel(OptLevel), StackBumped(false) {}

    virtual const char *getPassName() const { return "JavaScript backend"; }

    virtual bool runOnModule(Module &M);

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesAll();
      ModulePass::getAnalysisUsage(AU);
    }

    void printProgram(const std::string& fname, const std::string& modName );
    void printModule(const std::string& fname, const std::string& modName );
    void printFunction(const Function *F);

    LLVM_ATTRIBUTE_NORETURN void error(const std::string& msg);

    raw_pwrite_stream& nl(raw_pwrite_stream &Out, int delta = 0);

  private:
    void printCommaSeparated(const HeapData v);

    // parsing of constants has two phases: calculate, and then emit
    void parseConstant(const std::string& name, const Constant* CV, bool calculate);

    // Alignment constants for static data and the stack.
    #define MEM_ALIGN 8
    #define MEM_ALIGN_BITS 64
    #define STACK_ALIGN 16
    #define STACK_ALIGN_BITS 128

    // Round x up to the stack alignment.
    unsigned stackAlign(unsigned x) {
      return RoundUpToAlignment(x, STACK_ALIGN);
    }
    // Emit a JS expression that rounds expression x up to the stack alignment.
    std::string stackAlignStr(std::string x) {
      return "((" + x + "+" + utostr(STACK_ALIGN-1) + ")&-" + utostr(STACK_ALIGN) + ")";
    }

    // Reserve (aligned) space for a global in the appropriate static data
    // block and record its relative address; returns the block to append
    // the initializer bytes to.
    HeapData *allocateAddress(const std::string& Name, unsigned Bits = MEM_ALIGN_BITS) {
      assert(Bits == 64); // FIXME when we use optimal alignments
      HeapData *GlobalData = NULL;
      switch (Bits) {
        case 8: GlobalData = &GlobalData8; break;
        case 32: GlobalData = &GlobalData32; break;
        case 64: GlobalData = &GlobalData64; break;
        default: llvm_unreachable("Unsupported data element size");
      }
      while (GlobalData->size() % (Bits/8) != 0) GlobalData->push_back(0);
      GlobalAddresses[Name] = Address(GlobalData->size(), Bits);
      return GlobalData;
    }

    // return the absolute offset of a global
    // Layout in memory is GlobalBase, then the 64-aligned block, then the
    // 32-aligned block, then the byte block.
    unsigned getGlobalAddress(const std::string &s) {
      GlobalAddressMap::const_iterator I = GlobalAddresses.find(s);
      if (I == GlobalAddresses.end()) {
        report_fatal_error("cannot find global address " + Twine(s));
      }
      Address a = I->second;
      assert(a.second == 64); // FIXME when we use optimal alignments
      unsigned Ret;
      switch (a.second) {
        case 64:
          assert((a.first + GlobalBase)%8 == 0);
          Ret = a.first + GlobalBase;
          break;
        case 32:
          assert((a.first + GlobalBase)%4 == 0);
          Ret = a.first + GlobalBase + GlobalData64.size();
          break;
        case 8:
          Ret = a.first + GlobalBase + GlobalData64.size() + GlobalData32.size();
          break;
        default:
          report_fatal_error("bad global address " + Twine(s) + ": "
                             "count=" + Twine(a.first) + " "
                             "elementsize=" + Twine(a.second));
      }
      return Ret;
    }
    // returns the internal offset inside the proper block: GlobalData8, 32, 64
    unsigned getRelativeGlobalAddress(const std::string &s) {
      GlobalAddressMap::const_iterator I = GlobalAddresses.find(s);
      if (I == GlobalAddresses.end()) {
        report_fatal_error("cannot find global address " + Twine(s));
      }
      Address a = I->second;
      return a.first;
    }
    // Map a type to its one-letter code in a function-signature string:
    // v=void, f=float32 (only with PreciseF32), d=double, I=int SIMD vector,
    // F=float SIMD vector, i=everything else (ints and pointers).
    char getFunctionSignatureLetter(Type *T) {
      if (T->isVoidTy()) return 'v';
      else if (T->isFloatingPointTy()) {
        if (PreciseF32 && T->isFloatTy()) {
          return 'f';
        } else {
          return 'd';
        }
      } else if (VectorType *VT = dyn_cast<VectorType>(T)) {
        checkVectorType(VT);
        if (VT->getElementType()->isIntegerTy()) {
          return 'I';
        } else {
          return 'F';
        }
      } else {
        return 'i';
      }
    }
    // Build the signature string for a function type: return-type letter
    // followed by one letter per parameter. (Name is currently unused.)
    std::string getFunctionSignature(const FunctionType *F, const std::string *Name=NULL) {
      std::string Ret;
      Ret += getFunctionSignatureLetter(F->getReturnType());
      for (FunctionType::param_iterator AI = F->param_begin(),
             AE = F->param_end(); AI != AE; ++AI) {
        Ret += getFunctionSignatureLetter(*AI);
      }
      return Ret;
    }
    // Get (creating if needed) the function-pointer table for FT's signature,
    // pre-padded with "0" entries for the runtime-reserved slots.
    FunctionTable& ensureFunctionTable(const FunctionType *FT) {
      FunctionTable &Table = FunctionTables[getFunctionSignature(FT)];
      unsigned MinSize = ReservedFunctionPointers ? 2*(ReservedFunctionPointers+1) : 1; // each reserved slot must be 2-aligned
      while (Table.size() < MinSize) Table.push_back("0");
      return Table;
    }
    // Return F's index in its signature's function table, appending it (and
    // any needed "0" padding) on first use.
    unsigned getFunctionIndex(const Function *F) {
      const std::string &Name = getJSName(F);
      if (IndexedFunctions.find(Name) != IndexedFunctions.end()) return IndexedFunctions[Name];
      std::string Sig = getFunctionSignature(F->getFunctionType(), &Name);
      FunctionTable& Table = ensureFunctionTable(F->getFunctionType());
      if (NoAliasingFunctionPointers) {
        // Keep indices globally unique across all tables.
        while (Table.size() < NextFunctionIndex) Table.push_back("0");
      }
      // XXX this is wrong, it's always 1. but, that's fine in the ARM-like ABI
      // we have which allows unaligned func the one risk is if someone forces a
      // function to be aligned, and relies on that. Could do F->getAlignment()
      // instead.
      unsigned Alignment = 1;
      while (Table.size() % Alignment) Table.push_back("0");
      unsigned Index = Table.size();
      Table.push_back(Name);
      IndexedFunctions[Name] = Index;
      if (NoAliasingFunctionPointers) {
        NextFunctionIndex = Index+1;
      }

      // invoke the callHandler for this, if there is one. the function may only be indexed but never called directly, and we may need to do things in the handler
      CallHandlerMap::const_iterator CH = CallHandlers.find(Name);
      if (CH != CallHandlers.end()) {
        (this->*(CH->second))(NULL, Name, -1);
      }

      return Index;
    }

    // Return the numeric id used for indirect branches to BB, assigning ids
    // in order of first use within F.
    unsigned getBlockAddress(const Function *F, const BasicBlock *BB) {
      BlockIndexMap& Blocks = BlockAddresses[F];
      if (Blocks.find(BB) == Blocks.end()) {
        Blocks[BB] = Blocks.size(); // block addresses start from 0
      }
      return Blocks[BB];
    }

    unsigned getBlockAddress(const BlockAddress *BA) {
      return getBlockAddress(BA->getFunction(), BA->getBasicBlock());
    }

    // Strip aliases and constant expressions (e.g. bitcasts) until we reach
    // the underlying value.
    const Value *resolveFully(const Value *V) {
      bool More = true;
      while (More) {
        More = false;
        if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
          V = GA->getAliasee();
          More = true;
        }
        if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
          V = CE->getOperand(0); // ignore bitcasts
          More = true;
        }
      }
      return V;
    }

    // Return a constant we are about to write into a global as a numeric offset. If the
    // value is not known at compile time, emit a postSet to that location.
    // AbsoluteTarget is the absolute address being initialized, used for the
    // HEAP32 postSet store.
    unsigned getConstAsOffset(const Value *V, unsigned AbsoluteTarget) {
      V = resolveFully(V);
      if (const Function *F = dyn_cast<const Function>(V)) {
        return getFunctionIndex(F);
      } else if (const BlockAddress *BA = dyn_cast<const BlockAddress>(V)) {
        return getBlockAddress(BA);
      } else {
        if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
          if (!GV->hasInitializer()) {
            // We don't have a constant to emit here, so we must emit a postSet
            // All postsets are of external values, so they are pointers, hence 32-bit
            std::string Name = getOpName(V);
            Externals.insert(Name);
            PostSets += "HEAP32[" + utostr(AbsoluteTarget>>2) + "] = " + Name + ';';
            return 0; // emit zero in there for now, until the postSet
          }
        }
        return getGlobalAddress(V->getName().str());
      }
    }

    // Test whether the given value is known to be an absolute value or one we turn into an absolute value
    bool isAbsolute(const Value *P) {
      if (const IntToPtrInst *ITP = dyn_cast<IntToPtrInst>(P)) {
        return isa<ConstantInt>(ITP->getOperand(0));
      }
      if (isa<ConstantPointerNull>(P) || isa<UndefValue>(P)) {
        return true;
      }
      return false;
    }

    // Assert that T is a vector shape we can emit (<=128 bits, <=4 lanes of
    // i1/i32/f32) and record that the module uses SIMD.
    void checkVectorType(Type *T) {
      VectorType *VT = cast<VectorType>(T);
      // LLVM represents the results of vector comparison as vectors of i1. We
      // represent them as vectors of integers the size of the vector elements
      // of the compare that produced them.
      assert(VT->getElementType()->getPrimitiveSizeInBits() == 32 ||
             VT->getElementType()->getPrimitiveSizeInBits() == 1);
      assert(VT->getBitWidth() <= 128);
      assert(VT->getNumElements() <= 4);
      UsesSIMD = true;
    }

    // Apply a cast to S only when the caller demanded one (ASM_MUST_CAST).
    std::string ensureCast(std::string S, Type *T, AsmCast sign) {
      if (sign & ASM_MUST_CAST) return getCast(S, T);
      return S;
    }

    // Render a floating-point constant as an asm.js literal.
    std::string ftostr(const ConstantFP *CFP, AsmCast sign) {
      const APFloat &flt = CFP->getValueAPF();

      // Emscripten has its own spellings for infinity and NaN.
      if (flt.getCategory() == APFloat::fcInfinity) return ensureCast(flt.isNegative() ? "-inf" : "inf", CFP->getType(), sign);
      else if (flt.getCategory() == APFloat::fcNaN) return ensureCast("nan", CFP->getType(), sign);

      // Request 9 or 17 digits, aka FLT_DECIMAL_DIG or DBL_DECIMAL_DIG (our
      // long double is the same as our double), to avoid rounding errors.
      SmallString<29> Str;
      flt.toString(Str, PreciseF32 && CFP->getType()->isFloatTy() ? 9 : 17);

      // asm.js considers literals to be floating-point literals when they contain a
      // dot, however our output may be processed by UglifyJS, which doesn't
      // currently preserve dots in all cases. Mark floating-point literals with
      // unary plus to force them to floating-point.
      if (APFloat(flt).roundToIntegral(APFloat::rmNearestTiesToEven) == APFloat::opOK) {
        return '+' + Str.str().str();
      }

      return Str.str().str();
    }

    std::string getPtrLoad(const Value* Ptr);
    std::string getHeapAccess(const std::string& Name, unsigned Bytes, bool Integer=true);
    std::string getPtrUse(const Value* Ptr);
    std::string getConstant(const Constant*, AsmCast sign=ASM_SIGNED);
    std::string getConstantVector(Type *ElementType, std::string x, std::string y, std::string z, std::string w);
    std::string getValueAsStr(const Value*, AsmCast sign=ASM_SIGNED);
    std::string getValueAsCastStr(const Value*, AsmCast sign=ASM_SIGNED);
    std::string getValueAsParenStr(const Value*);
    std::string getValueAsCastParenStr(const Value*, AsmCast sign=ASM_SIGNED);

    const std::string &getJSName(const Value* val);

    std::string getPhiCode(const BasicBlock *From, const BasicBlock *To);

    void printAttributes(const AttributeSet &PAL, const std::string &name);
    void printType(Type* Ty);
    void printTypes(const Module* M);

    std::string getAdHocAssign(const StringRef &, Type *);
    std::string getAssign(const Instruction *I);
    std::string getAssignIfNeeded(const Value *V);
    std::string getCast(const StringRef &, Type *, AsmCast sign=ASM_SIGNED);
    std::string getParenCast(const StringRef &, Type *, AsmCast sign=ASM_SIGNED);
    std::string getDoubleToInt(const StringRef &);
    std::string getIMul(const Value *, const Value *);
    std::string getLoad(const Instruction *I, const Value *P, Type *T, unsigned Alignment, char sep=';');
    std::string getStore(const Instruction *I, const Value *P, Type *T, const std::string& VS, unsigned Alignment, char sep=';');
    std::string getStackBump(unsigned Size);
    std::string getStackBump(const std::string &Size);

    void addBlock(const BasicBlock *BB, Relooper& R, LLVMToRelooperMap& LLVMToRelooper);
    void printFunctionBody(const Function *F);
    void generateInsertElementExpression(const InsertElementInst *III, raw_string_ostream& Code);
    void generateExtractElementExpression(const ExtractElementInst *EEI, raw_string_ostream& Code);
    void generateShuffleVectorExpression(const ShuffleVectorInst *SVI, raw_string_ostream& Code);
    void generateICmpExpression(const ICmpInst *I, raw_string_ostream& Code);
    void generateFCmpExpression(const FCmpInst *I, raw_string_ostream& Code);
    void generateShiftExpression(const BinaryOperator *I, raw_string_ostream& Code);
    void generateUnrolledExpression(const User *I, raw_string_ostream& Code);
    bool generateSIMDExpression(const User *I, raw_string_ostream& Code);
    void generateExpression(const User *I, raw_string_ostream& Code);

    std::string getOpName(const Value*);

    void processConstants();

    // nativization

    typedef std::set<const Value*> NativizedVarsMap;
    NativizedVarsMap NativizedVars;

    void calculateNativizedVars(const Function *F);

    // special analyses

    bool canReloop(const Function *F);

    // main entry point

    void printModuleBody();
  };
} // end anonymous namespace.
482 | |
// Emit a newline and return the stream for chaining. The delta parameter
// (nominally an indentation adjustment) is currently ignored.
raw_pwrite_stream &JSWriter::nl(raw_pwrite_stream &Out, int delta) {
  Out << '\n';
  return Out;
}
487 | |
// Convert a nibble (0-15) into its uppercase hexadecimal digit.
static inline char halfCharToHex(unsigned char half) {
  assert(half <= 15);
  static const char HexDigits[] = "0123456789ABCDEF";
  return HexDigits[half];
}
496 | |
// Mangle a global's name into a JS-safe identifier, in place.
// A "_" prefix keeps globals from colliding with names of things in plain
// JS; every character that is not [0-9A-Za-z_] (e.g. the '.' in llvm
// intrinsic names, or '$') is lowered to '_'. Collisions are theoretically
// possible but do not occur in practice.
// TODO: in debug mode, check for such collisions.
static inline void sanitizeGlobal(std::string& str) {
  str = "_" + str;

  for (size_t Pos = 1, Len = str.size(); Pos < Len; ++Pos) {
    const unsigned char Ch = str[Pos];
    if (!isalnum(Ch) && Ch != '_')
      str[Pos] = '_';
  }
}
513 | |
// Mangle a local's name into a JS-safe identifier, in place, without any
// possibility of collision between distinct input names.
static inline void sanitizeLocal(std::string& str) {
  // Local names are prefixed with "$" to prevent them from colliding with
  // global names.
  str = "$" + str;

  // We need to convert every string that is not a valid JS identifier into
  // a valid one, without collisions - we cannot turn "x.a" into "x_a" while
  // also leaving "x_a" as is, for example.
  //
  // We leave valid characters 0-9a-zA-Z and _ unchanged. Anything else
  // we replace with $ and append a hex representation of that value,
  // so for example x.a turns into x$a2e, x..a turns into x$$a2e2e.
  //
  // As an optimization, we replace . with $ without appending anything,
  // unless there is another illegal character. The reason is that . is
  // a common illegal character, and we want to avoid resizing strings
  // for perf reasons. If we later see we do need to append something, then
  // for . we just append Z (one character, instead of the hex code).
  //

  size_t OriginalSize = str.size();
  int Queued = 0; // dots replaced by '$' whose 'Z' suffix marker is still pending
  for (size_t i = 1; i < OriginalSize; ++i) {
    unsigned char c = str[i];
    if (!isalnum(c) && c != '_') {
      str[i] = '$';
      if (c == '.') {
        // Defer: only materialize a 'Z' per dot if a non-dot illegal
        // character forces us to append a suffix anyway.
        Queued++;
      } else {
        // Flush the queued dot markers, then append this character's two
        // hex digits. Note: s ends up past the 'Z's, so the hex digits
        // land after them.
        size_t s = str.size();
        str.resize(s+2+Queued);
        for (int i = 0; i < Queued; i++) { // NOTE: this i shadows the outer loop's i
          str[s++] = 'Z';
        }
        Queued = 0;
        str[s] = halfCharToHex(c >> 4);
        str[s+1] = halfCharToHex(c & 0xf);
      }
    }
  }
}
555 | |
// Wrap expression S in Math_fround(...) when precise-f32 mode is enabled and
// T is float, so the emitted expression keeps float32 (not double) semantics
// in asm.js; otherwise return S unchanged.
static inline std::string ensureFloat(const std::string &S, Type *T) {
  if (PreciseF32 && T->isFloatTy()) {
    return "Math_fround(" + S + ")";
  }
  return S;
}
562 | |
// Append a " //@line <line> \"<file>\"" comment to Code carrying I's debug
// location, which emscripten uses to map emitted JS back to the original
// source. Emits nothing when the instruction has no debug location; an
// unknown filename is written as "?".
static void emitDebugInfo(raw_ostream& Code, const Instruction *I) {
  auto &Loc = I->getDebugLoc();
  if (Loc) {
    unsigned Line = Loc.getLine();
    StringRef File = cast<MDLocation>(Loc.getScope())->getFilename();
    Code << " //@line " << utostr(Line) << " \"" << (File.size() > 0 ? File.str() : "?") << "\"";
  }
}
571 | |
// Abort compilation with a fatal error message. Never returns
// (declared LLVM_ATTRIBUTE_NORETURN in the class).
void JSWriter::error(const std::string& msg) {
  report_fatal_error(msg);
}
575 | |
// Generate the JS assignments that realize all phi nodes in To for the edge
// From -> To. Since phis are notionally simultaneous, assignments that read
// another phi's old value must be ordered before the write to it; genuine
// cycles are broken by copying a value into a "<name>$phi" temporary first
// (emitted in `pre`) and assigning from the temporary (in `post`).
std::string JSWriter::getPhiCode(const BasicBlock *From, const BasicBlock *To) {
  // FIXME this is all quite inefficient, and also done once per incoming to each phi

  // Find the phis, and generate assignments and dependencies
  std::set<std::string> PhiVars;
  for (BasicBlock::const_iterator I = To->begin(), E = To->end();
       I != E; ++I) {
    const PHINode* P = dyn_cast<PHINode>(I);
    if (!P) break; // phis are always at the start of the block
    PhiVars.insert(getJSName(P));
  }
  typedef std::map<std::string, std::string> StringMap;
  StringMap assigns; // variable -> assign statement
  std::map<std::string, const Value*> values; // variable -> Value
  StringMap deps; // variable -> dependency
  StringMap undeps; // reverse: dependency -> variable
  for (BasicBlock::const_iterator I = To->begin(), E = To->end();
       I != E; ++I) {
    const PHINode* P = dyn_cast<PHINode>(I);
    if (!P) break;
    int index = P->getBasicBlockIndex(From);
    if (index < 0) continue; // this phi has no incoming value on this edge
    // we found it
    const std::string &name = getJSName(P);
    assigns[name] = getAssign(P);
    // Get the operand, and strip pointer casts, since normal expression
    // translation also strips pointer casts, and we want to see the same
    // thing so that we can detect any resulting dependencies.
    const Value *V = P->getIncomingValue(index)->stripPointerCasts();
    values[name] = V;
    std::string vname = getValueAsStr(V);
    if (const Instruction *VI = dyn_cast<const Instruction>(V)) {
      // Only a sibling phi in the same block creates an ordering hazard.
      if (VI->getParent() == To && PhiVars.find(vname) != PhiVars.end()) {
        deps[name] = vname;
        undeps[vname] = name;
      }
    }
  }
  // Emit assignments+values, taking into account dependencies, and breaking cycles
  std::string pre = "", post = "";
  while (assigns.size() > 0) {
    bool emitted = false;
    for (StringMap::iterator I = assigns.begin(); I != assigns.end();) {
      StringMap::iterator last = I;
      std::string curr = last->first;
      const Value *V = values[curr];
      std::string CV = getValueAsStr(V);
      I++; // advance now, as we may erase
      // if we have no dependencies, or we found none to emit and are at the end (so there is a cycle), emit
      StringMap::const_iterator dep = deps.find(curr);
      if (dep == deps.end() || (!emitted && I == assigns.end())) {
        if (dep != deps.end()) {
          // break a cycle
          std::string depString = dep->second;
          std::string temp = curr + "$phi";
          pre += getAdHocAssign(temp, V->getType()) + CV + ';';
          CV = temp;
          deps.erase(curr);
          undeps.erase(depString);
        }
        post += assigns[curr] + CV + ';';
        assigns.erase(last);
        emitted = true;
      }
    }
  }
  return pre + post;
}
644 | |
// Return (and cache) the sanitized JS identifier for a value. Constants get
// the global "_" mangling, everything else the local "$" mangling; unnamed
// values are numbered from UniqueNum.
const std::string &JSWriter::getJSName(const Value* val) {
  ValueMap::const_iterator I = ValueNames.find(val);
  if (I != ValueNames.end() && I->first == val)
    return I->second;

  // If this is an alloca we've replaced with another, use the other name.
  if (const AllocaInst *AI = dyn_cast<AllocaInst>(val)) {
    if (AI->isStaticAlloca()) {
      const AllocaInst *Rep = Allocas.getRepresentative(AI);
      if (Rep != AI) {
        return getJSName(Rep);
      }
    }
  }

  std::string name;
  if (val->hasName()) {
    name = val->getName().str();
  } else {
    name = utostr(UniqueNum++);
  }

  if (isa<Constant>(val)) {
    sanitizeGlobal(name);
  } else {
    sanitizeLocal(name);
  }

  return ValueNames[val] = name;
}
675 | |
// Emit an assignment prefix ("name = ") for an arbitrary variable name,
// registering it in UsedVars so it gets a `var` declaration of type t.
std::string JSWriter::getAdHocAssign(const StringRef &s, Type *t) {
  UsedVars[s] = t;
  return (s + " = ").str();
}
680 | |
// Emit the assignment prefix for an instruction's result variable.
std::string JSWriter::getAssign(const Instruction *I) {
  return getAdHocAssign(getJSName(I), I->getType());
}
684 | |
// Like getAssign, but returns an empty string for values whose result is
// never used (so no variable needs to be written).
std::string JSWriter::getAssignIfNeeded(const Value *V) {
  if (const Instruction *I = dyn_cast<Instruction>(V)) {
    if (!I->use_empty()) return getAssign(I);
  }
  return std::string();
}
691 | |
// Wrap expression s in the asm.js coercion appropriate for type t and the
// requested sign/FFI flags: SIMD check calls for vectors, Math_fround/+ for
// floats, +(...) for doubles, and &mask / <<N>>N / |0 / >>>0 for integers
// and pointers.
std::string JSWriter::getCast(const StringRef &s, Type *t, AsmCast sign) {
  switch (t->getTypeID()) {
    default: {
      errs() << *t << "\n";
      assert(false && "Unsupported type");
    }
    case Type::VectorTyID:
      return (cast<VectorType>(t)->getElementType()->isIntegerTy() ?
              "SIMD_int32x4_check(" + s + ")" :
              "SIMD_float32x4_check(" + s + ")").str();
    case Type::FloatTyID: {
      if (PreciseF32 && !(sign & ASM_FFI_OUT)) {
        if (sign & ASM_FFI_IN) {
          // FFI results arrive as doubles; coerce to double first, then round.
          return ("Math_fround(+(" + s + "))").str();
        } else {
          return ("Math_fround(" + s + ")").str();
        }
      }
      // otherwise fall through to double
    }
    case Type::DoubleTyID: return ("+" + s).str();
    case Type::IntegerTyID: {
      // fall through to the end for nonspecific
      // (the missing breaks below are intentional: with ASM_NONSPECIFIC set,
      // every width falls through until the 32-bit "|0" case)
      switch (t->getIntegerBitWidth()) {
        case 1:  if (!(sign & ASM_NONSPECIFIC)) return sign == ASM_UNSIGNED ? (s + "&1").str()     : (s + "<<31>>31").str();
        case 8:  if (!(sign & ASM_NONSPECIFIC)) return sign == ASM_UNSIGNED ? (s + "&255").str()   : (s + "<<24>>24").str();
        case 16: if (!(sign & ASM_NONSPECIFIC)) return sign == ASM_UNSIGNED ? (s + "&65535").str() : (s + "<<16>>16").str();
        case 32: return (sign == ASM_SIGNED || (sign & ASM_NONSPECIFIC) ? s + "|0" : s + ">>>0").str();
        default: llvm_unreachable("Unsupported integer cast bitwidth");
      }
    }
    case Type::PointerTyID:
      return (sign == ASM_SIGNED || (sign & ASM_NONSPECIFIC) ? s + "|0" : s + ">>>0").str();
  }
}
727 | |
// getCast on a parenthesized copy of s, so the coercion binds to the whole
// expression rather than just its last operand.
std::string JSWriter::getParenCast(const StringRef &s, Type *t, AsmCast sign) {
  return getCast(("(" + s + ")").str(), t, sign);
}
731 | |
// Emit the asm.js double->signed-int truncation idiom: ~~(x).
std::string JSWriter::getDoubleToInt(const StringRef &s) {
  return ("~~(" + s + ")").str();
}
735 | |
// Emit a 32-bit integer multiply of V1*V2. When one operand is a constant we
// strength-reduce: 0 and 1 are trivial, powers of two become shifts, and
// small constants (< 2^20, so the 52-bit double mantissa cannot overflow)
// use a plain JS "*" followed by |0. Everything else uses Math_imul.
std::string JSWriter::getIMul(const Value *V1, const Value *V2) {
  const ConstantInt *CI = NULL;
  const Value *Other = NULL;
  if ((CI = dyn_cast<ConstantInt>(V1))) {
    Other = V2;
  } else if ((CI = dyn_cast<ConstantInt>(V2))) {
    Other = V1;
  }
  // we ignore optimizing the case of multiplying two constants - optimizer would have removed those
  if (CI) {
    std::string OtherStr = getValueAsStr(Other);
    unsigned C = CI->getZExtValue();
    if (C == 0) return "0";
    if (C == 1) return OtherStr;
    unsigned Orig = C, Shifts = 0;
    while (C) {
      if ((C & 1) && (C != 1)) break; // not power of 2
      C >>= 1;
      Shifts++;
      if (C == 0) return OtherStr + "<<" + utostr(Shifts-1); // power of 2, emit shift
    }
    if (Orig < (1<<20)) return "(" + OtherStr + "*" + utostr(Orig) + ")|0"; // small enough, avoid imul
  }
  return "Math_imul(" + getValueAsStr(V1) + ", " + getValueAsStr(V2) + ")|0"; // unknown or too large, emit imul
}
761 | |
// Emit asm.js for a load of type T through pointer P, honoring Alignment.
// 'sep' separates the multiple statements emitted for unaligned accesses.
// Unaligned 8-byte and float loads are reassembled piecewise through the
// tempDoublePtr scratch slot so the bytes can be re-read as a double/float.
std::string JSWriter::getLoad(const Instruction *I, const Value *P, Type *T, unsigned Alignment, char sep) {
  std::string Assign = getAssign(I);
  unsigned Bytes = DL->getTypeAllocSize(T);
  std::string text;
  if (Bytes <= Alignment || Alignment == 0) {
    // Naturally aligned (or alignment unspecified): a single typed-array read.
    text = Assign + getPtrLoad(P);
    if (isAbsolute(P)) {
      // loads from an absolute constants are either intentional segfaults (int x = *((int*)0)), or code problems
      text += "; abort() /* segfault, load from absolute addr */";
    }
  } else {
    // unaligned in some manner
    if (WarnOnUnaligned) {
      errs() << "emcc: warning: unaligned load in " << I->getParent()->getParent()->getName() << ":" << *I << " | ";
      emitDebugInfo(errs(), I);
      errs() << "\n";
    }
    std::string PS = getValueAsStr(P);
    switch (Bytes) {
      case 8: {
        // Copy the 8 bytes into the scratch slot in alignment-sized pieces,
        // then read them back as a double ('+' coerces to double in asm.js).
        switch (Alignment) {
          case 4: {
            text = "HEAP32[tempDoublePtr>>2]=HEAP32[" + PS + ">>2]" + sep +
                   "HEAP32[tempDoublePtr+4>>2]=HEAP32[" + PS + "+4>>2]";
            break;
          }
          case 2: {
            text = "HEAP16[tempDoublePtr>>1]=HEAP16[" + PS + ">>1]" + sep +
                   "HEAP16[tempDoublePtr+2>>1]=HEAP16[" + PS + "+2>>1]" + sep +
                   "HEAP16[tempDoublePtr+4>>1]=HEAP16[" + PS + "+4>>1]" + sep +
                   "HEAP16[tempDoublePtr+6>>1]=HEAP16[" + PS + "+6>>1]";
            break;
          }
          case 1: {
            text = "HEAP8[tempDoublePtr>>0]=HEAP8[" + PS + ">>0]" + sep +
                   "HEAP8[tempDoublePtr+1>>0]=HEAP8[" + PS + "+1>>0]" + sep +
                   "HEAP8[tempDoublePtr+2>>0]=HEAP8[" + PS + "+2>>0]" + sep +
                   "HEAP8[tempDoublePtr+3>>0]=HEAP8[" + PS + "+3>>0]" + sep +
                   "HEAP8[tempDoublePtr+4>>0]=HEAP8[" + PS + "+4>>0]" + sep +
                   "HEAP8[tempDoublePtr+5>>0]=HEAP8[" + PS + "+5>>0]" + sep +
                   "HEAP8[tempDoublePtr+6>>0]=HEAP8[" + PS + "+6>>0]" + sep +
                   "HEAP8[tempDoublePtr+7>>0]=HEAP8[" + PS + "+7>>0]";
            break;
          }
          default: assert(0 && "bad 8 store");
        }
        text += sep + Assign + "+HEAPF64[tempDoublePtr>>3]";
        break;
      }
      case 4: {
        if (T->isIntegerTy() || T->isPointerTy()) {
          // Integers can be reassembled directly with shifts and ors; no
          // scratch slot needed.
          switch (Alignment) {
            case 2: {
              text = Assign + "HEAPU16[" + PS + ">>1]|" +
                     "(HEAPU16[" + PS + "+2>>1]<<16)";
              break;
            }
            case 1: {
              text = Assign + "HEAPU8[" + PS + ">>0]|" +
                     "(HEAPU8[" + PS + "+1>>0]<<8)|" +
                     "(HEAPU8[" + PS + "+2>>0]<<16)|" +
                     "(HEAPU8[" + PS + "+3>>0]<<24)";
              break;
            }
            default: assert(0 && "bad 4i store");
          }
        } else { // float
          assert(T->isFloatingPointTy());
          // Floats must round-trip through the scratch slot so the bit
          // pattern can be reinterpreted via HEAPF32.
          switch (Alignment) {
            case 2: {
              text = "HEAP16[tempDoublePtr>>1]=HEAP16[" + PS + ">>1]" + sep +
                     "HEAP16[tempDoublePtr+2>>1]=HEAP16[" + PS + "+2>>1]";
              break;
            }
            case 1: {
              text = "HEAP8[tempDoublePtr>>0]=HEAP8[" + PS + ">>0]" + sep +
                     "HEAP8[tempDoublePtr+1>>0]=HEAP8[" + PS + "+1>>0]" + sep +
                     "HEAP8[tempDoublePtr+2>>0]=HEAP8[" + PS + "+2>>0]" + sep +
                     "HEAP8[tempDoublePtr+3>>0]=HEAP8[" + PS + "+3>>0]";
              break;
            }
            default: assert(0 && "bad 4f store");
          }
          text += sep + Assign + getCast("HEAPF32[tempDoublePtr>>2]", Type::getFloatTy(TheModule->getContext()));
        }
        break;
      }
      case 2: {
        // 2-byte loads only ever need byte-level reassembly here.
        text = Assign + "HEAPU8[" + PS + ">>0]|" +
               "(HEAPU8[" + PS + "+1>>0]<<8)";
        break;
      }
      default: assert(0 && "bad store");
    }
  }
  return text;
}
859 | |
// Emit asm.js for a store of VS (already-rendered value) of type T through
// pointer P, honoring Alignment. Mirrors getLoad: unaligned 8-byte and float
// stores stage the value in the tempDoublePtr scratch slot and copy it out
// in alignment-sized pieces.
std::string JSWriter::getStore(const Instruction *I, const Value *P, Type *T, const std::string& VS, unsigned Alignment, char sep) {
  assert(sep == ';'); // FIXME when we need that
  unsigned Bytes = DL->getTypeAllocSize(T);
  std::string text;
  if (Bytes <= Alignment || Alignment == 0) {
    // Naturally aligned (or alignment unspecified): a single typed-array write.
    text = getPtrUse(P) + " = " + VS;
    // 536870912 == 1 << 29, LLVM's maximum alignment; presumably used by the
    // caller as a marker for stores to absolute addresses -- TODO confirm.
    if (Alignment == 536870912) text += "; abort() /* segfault */";
  } else {
    // unaligned in some manner
    if (WarnOnUnaligned) {
      errs() << "emcc: warning: unaligned store in " << I->getParent()->getParent()->getName() << ":" << *I << " | ";
      emitDebugInfo(errs(), I);
      errs() << "\n";
    }
    std::string PS = getValueAsStr(P);
    switch (Bytes) {
      case 8: {
        // Stage the double in the scratch slot, then copy it to the target
        // address in alignment-sized pieces.
        text = "HEAPF64[tempDoublePtr>>3]=" + VS + ';';
        switch (Alignment) {
          case 4: {
            text += "HEAP32[" + PS + ">>2]=HEAP32[tempDoublePtr>>2];" +
                    "HEAP32[" + PS + "+4>>2]=HEAP32[tempDoublePtr+4>>2]";
            break;
          }
          case 2: {
            text += "HEAP16[" + PS + ">>1]=HEAP16[tempDoublePtr>>1];" +
                    "HEAP16[" + PS + "+2>>1]=HEAP16[tempDoublePtr+2>>1];" +
                    "HEAP16[" + PS + "+4>>1]=HEAP16[tempDoublePtr+4>>1];" +
                    "HEAP16[" + PS + "+6>>1]=HEAP16[tempDoublePtr+6>>1]";
            break;
          }
          case 1: {
            text += "HEAP8[" + PS + ">>0]=HEAP8[tempDoublePtr>>0];" +
                    "HEAP8[" + PS + "+1>>0]=HEAP8[tempDoublePtr+1>>0];" +
                    "HEAP8[" + PS + "+2>>0]=HEAP8[tempDoublePtr+2>>0];" +
                    "HEAP8[" + PS + "+3>>0]=HEAP8[tempDoublePtr+3>>0];" +
                    "HEAP8[" + PS + "+4>>0]=HEAP8[tempDoublePtr+4>>0];" +
                    "HEAP8[" + PS + "+5>>0]=HEAP8[tempDoublePtr+5>>0];" +
                    "HEAP8[" + PS + "+6>>0]=HEAP8[tempDoublePtr+6>>0];" +
                    "HEAP8[" + PS + "+7>>0]=HEAP8[tempDoublePtr+7>>0]";
            break;
          }
          default: assert(0 && "bad 8 store");
        }
        break;
      }
      case 4: {
        if (T->isIntegerTy() || T->isPointerTy()) {
          // Integers can be split with masks and shifts directly; no scratch
          // slot needed.
          switch (Alignment) {
            case 2: {
              text = "HEAP16[" + PS + ">>1]=" + VS + "&65535;" +
                     "HEAP16[" + PS + "+2>>1]=" + VS + ">>>16";
              break;
            }
            case 1: {
              text = "HEAP8[" + PS + ">>0]=" + VS + "&255;" +
                     "HEAP8[" + PS + "+1>>0]=(" + VS + ">>8)&255;" +
                     "HEAP8[" + PS + "+2>>0]=(" + VS + ">>16)&255;" +
                     "HEAP8[" + PS + "+3>>0]=" + VS + ">>24";
              break;
            }
            default: assert(0 && "bad 4i store");
          }
        } else { // float
          assert(T->isFloatingPointTy());
          // Floats round-trip through the scratch slot so the bit pattern
          // can be copied out via the integer views.
          text = "HEAPF32[tempDoublePtr>>2]=" + VS + ';';
          switch (Alignment) {
            case 2: {
              text += "HEAP16[" + PS + ">>1]=HEAP16[tempDoublePtr>>1];" +
                      "HEAP16[" + PS + "+2>>1]=HEAP16[tempDoublePtr+2>>1]";
              break;
            }
            case 1: {
              text += "HEAP8[" + PS + ">>0]=HEAP8[tempDoublePtr>>0];" +
                      "HEAP8[" + PS + "+1>>0]=HEAP8[tempDoublePtr+1>>0];" +
                      "HEAP8[" + PS + "+2>>0]=HEAP8[tempDoublePtr+2>>0];" +
                      "HEAP8[" + PS + "+3>>0]=HEAP8[tempDoublePtr+3>>0]";
              break;
            }
            default: assert(0 && "bad 4f store");
          }
        }
        break;
      }
      case 2: {
        text = "HEAP8[" + PS + ">>0]=" + VS + "&255;" +
               "HEAP8[" + PS + "+1>>0]=" + VS + ">>8";
        break;
      }
      default: assert(0 && "bad store");
    }
  }
  return text;
}
954 | |
955 std::string JSWriter::getStackBump(unsigned Size) { | |
956 return getStackBump(utostr(Size)); | |
957 } | |
958 | |
959 std::string JSWriter::getStackBump(const std::string &Size) { | |
960 std::string ret = "STACKTOP = STACKTOP + " + Size + "|0;"; | |
961 if (EmscriptenAssertions) { | |
962 ret += " if ((STACKTOP|0) >= (STACK_MAX|0)) abort();"; | |
963 } | |
964 return ret; | |
965 } | |
966 | |
// Legacy alias for getJSName; kept only until callers are migrated.
std::string JSWriter::getOpName(const Value* V) { // TODO: remove this
  return getJSName(V);
}
970 | |
971 std::string JSWriter::getPtrLoad(const Value* Ptr) { | |
972 Type *t = cast<PointerType>(Ptr->getType())->getElementType(); | |
973 return getCast(getPtrUse(Ptr), t, ASM_NONSPECIFIC); | |
974 } | |
975 | |
976 std::string JSWriter::getHeapAccess(const std::string& Name, unsigned Bytes, boo
l Integer) { | |
977 switch (Bytes) { | |
978 default: llvm_unreachable("Unsupported type"); | |
979 case 8: return "HEAPF64[" + Name + ">>3]"; | |
980 case 4: { | |
981 if (Integer) { | |
982 return "HEAP32[" + Name + ">>2]"; | |
983 } else { | |
984 return "HEAPF32[" + Name + ">>2]"; | |
985 } | |
986 } | |
987 case 2: return "HEAP16[" + Name + ">>1]"; | |
988 case 1: return "HEAP8[" + Name + ">>0]"; | |
989 } | |
990 } | |
991 | |
992 std::string JSWriter::getPtrUse(const Value* Ptr) { | |
993 Type *t = cast<PointerType>(Ptr->getType())->getElementType(); | |
994 unsigned Bytes = DL->getTypeAllocSize(t); | |
995 if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(Ptr)) { | |
996 std::string text = ""; | |
997 unsigned Addr = getGlobalAddress(GV->getName().str()); | |
998 switch (Bytes) { | |
999 default: llvm_unreachable("Unsupported type"); | |
1000 case 8: return "HEAPF64[" + utostr(Addr >> 3) + "]"; | |
1001 case 4: { | |
1002 if (t->isIntegerTy() || t->isPointerTy()) { | |
1003 return "HEAP32[" + utostr(Addr >> 2) + "]"; | |
1004 } else { | |
1005 assert(t->isFloatingPointTy()); | |
1006 return "HEAPF32[" + utostr(Addr >> 2) + "]"; | |
1007 } | |
1008 } | |
1009 case 2: return "HEAP16[" + utostr(Addr >> 1) + "]"; | |
1010 case 1: return "HEAP8[" + utostr(Addr) + "]"; | |
1011 } | |
1012 } else { | |
1013 return getHeapAccess(getValueAsStr(Ptr), Bytes, t->isIntegerTy() || t->isPoi
nterTy()); | |
1014 } | |
1015 } | |
1016 | |
// Render an LLVM constant as an asm.js expression. Functions become table
// indices, globals become absolute addresses, scalars become literals (with
// Math_fround coercion for precise f32), and vectors become SIMD.js
// constructors/splats.
std::string JSWriter::getConstant(const Constant* CV, AsmCast sign) {
  if (isa<ConstantPointerNull>(CV)) return "0";

  // Functions are referenced by their index in the function table.
  if (const Function *F = dyn_cast<Function>(CV)) {
    return utostr(getFunctionIndex(F));
  }

  if (const GlobalValue *GV = dyn_cast<GlobalValue>(CV)) {
    if (GV->isDeclaration()) {
      // External symbol: refer to it by name and record the dependency.
      std::string Name = getOpName(GV);
      Externals.insert(Name);
      return Name;
    }
    if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(CV)) {
      // Since we don't currently support linking of our output, we don't need
      // to worry about weak or other kinds of aliases.
      return getConstant(GA->getAliasee(), sign);
    }
    // Defined global: its address is a compile-time constant.
    return utostr(getGlobalAddress(GV->getName().str()));
  }

  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CV)) {
    std::string S = ftostr(CFP, sign);
    // Precise-f32 mode coerces float literals with Math_fround, except at
    // FFI boundaries where doubles are expected.
    if (PreciseF32 && CV->getType()->isFloatTy() && !(sign & ASM_FFI_OUT)) {
      S = "Math_fround(" + S + ")";
    }
    return S;
  } else if (const ConstantInt *CI = dyn_cast<ConstantInt>(CV)) {
    if (sign != ASM_UNSIGNED && CI->getValue().getBitWidth() == 1) {
      sign = ASM_UNSIGNED; // bools must always be unsigned: either 0 or 1
    }
    return CI->getValue().toString(10, sign != ASM_UNSIGNED);
  } else if (isa<UndefValue>(CV)) {
    // Undef lowers to zero of the appropriate type.
    std::string S;
    if (VectorType *VT = dyn_cast<VectorType>(CV->getType())) {
      checkVectorType(VT);
      if (VT->getElementType()->isIntegerTy()) {
        S = "SIMD_int32x4_splat(0)";
      } else {
        S = "SIMD_float32x4_splat(Math_fround(0))";
      }
    } else {
      S = CV->getType()->isFloatingPointTy() ? "+0" : "0"; // XXX refactor this
      if (PreciseF32 && CV->getType()->isFloatTy() && !(sign & ASM_FFI_OUT)) {
        S = "Math_fround(" + S + ")";
      }
    }
    return S;
  } else if (isa<ConstantAggregateZero>(CV)) {
    if (VectorType *VT = dyn_cast<VectorType>(CV->getType())) {
      checkVectorType(VT);
      if (VT->getElementType()->isIntegerTy()) {
        return "SIMD_int32x4_splat(0)";
      } else {
        return "SIMD_float32x4_splat(Math_fround(0))";
      }
    } else {
      // something like [0 x i8*] zeroinitializer, which clang can emit for landingpads
      return "0";
    }
  } else if (const ConstantDataVector *DV = dyn_cast<ConstantDataVector>(CV)) {
    // Packed constant vector: render up to four lanes, padding with undef.
    checkVectorType(DV->getType());
    unsigned NumElts = cast<VectorType>(DV->getType())->getNumElements();
    Type *EltTy = cast<VectorType>(DV->getType())->getElementType();
    Constant *Undef = UndefValue::get(EltTy);
    return getConstantVector(EltTy,
                             getConstant(NumElts > 0 ? DV->getElementAsConstant(0) : Undef),
                             getConstant(NumElts > 1 ? DV->getElementAsConstant(1) : Undef),
                             getConstant(NumElts > 2 ? DV->getElementAsConstant(2) : Undef),
                             getConstant(NumElts > 3 ? DV->getElementAsConstant(3) : Undef));
  } else if (const ConstantVector *V = dyn_cast<ConstantVector>(CV)) {
    // General constant vector: same treatment, but lanes are operands.
    checkVectorType(V->getType());
    unsigned NumElts = cast<VectorType>(CV->getType())->getNumElements();
    Type *EltTy = cast<VectorType>(CV->getType())->getElementType();
    Constant *Undef = UndefValue::get(EltTy);
    return getConstantVector(cast<VectorType>(V->getType())->getElementType(),
                             getConstant(NumElts > 0 ? V->getOperand(0) : Undef),
                             getConstant(NumElts > 1 ? V->getOperand(1) : Undef),
                             getConstant(NumElts > 2 ? V->getOperand(2) : Undef),
                             getConstant(NumElts > 3 ? V->getOperand(3) : Undef));
  } else if (const ConstantArray *CA = dyn_cast<const ConstantArray>(CV)) {
    // handle things like [i8* bitcast (<{ i32, i32, i32 }>* @_ZTISt9bad_alloc to i8*)] which clang can emit for landingpads
    assert(CA->getNumOperands() == 1);
    CV = CA->getOperand(0);
    const ConstantExpr *CE = cast<ConstantExpr>(CV);
    CV = CE->getOperand(0); // ignore bitcast
    return getConstant(CV);
  } else if (const BlockAddress *BA = dyn_cast<const BlockAddress>(CV)) {
    return utostr(getBlockAddress(BA));
  } else if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(CV)) {
    // Constant expressions are rendered by the general expression emitter,
    // parenthesized so they compose safely.
    std::string Code;
    raw_string_ostream CodeStream(Code);
    CodeStream << '(';
    generateExpression(CE, CodeStream);
    CodeStream << ')';
    return CodeStream.str();
  } else {
    CV->dump();
    llvm_unreachable("Unsupported constant kind");
  }
}
1118 | |
1119 std::string JSWriter::getConstantVector(Type *ElementType, std::string x, std::s
tring y, std::string z, std::string w) { | |
1120 // Check for a splat. | |
1121 if (x == y && x == z && x == w) { | |
1122 if (ElementType->isIntegerTy()) { | |
1123 return "SIMD_int32x4_splat(" + x + ')'; | |
1124 } else { | |
1125 return "SIMD_float32x4_splat(Math_fround(" + x + "))"; | |
1126 } | |
1127 } | |
1128 | |
1129 if (ElementType->isIntegerTy()) { | |
1130 return "SIMD_int32x4(" + x + ',' + y + ',' + z + ',' + w + ')'; | |
1131 } else { | |
1132 return "SIMD_float32x4(Math_fround(" + x + "),Math_fround(" + y + "),Math_fr
ound(" + z + "),Math_fround(" + w + "))"; | |
1133 } | |
1134 } | |
1135 | |
1136 std::string JSWriter::getValueAsStr(const Value* V, AsmCast sign) { | |
1137 // Skip past no-op bitcasts and zero-index geps. | |
1138 V = V->stripPointerCasts(); | |
1139 | |
1140 if (const Constant *CV = dyn_cast<Constant>(V)) { | |
1141 return getConstant(CV, sign); | |
1142 } else { | |
1143 return getJSName(V); | |
1144 } | |
1145 } | |
1146 | |
1147 std::string JSWriter::getValueAsCastStr(const Value* V, AsmCast sign) { | |
1148 // Skip past no-op bitcasts and zero-index geps. | |
1149 V = V->stripPointerCasts(); | |
1150 | |
1151 if (isa<ConstantInt>(V) || isa<ConstantFP>(V)) { | |
1152 return getConstant(cast<Constant>(V), sign); | |
1153 } else { | |
1154 return getCast(getValueAsStr(V), V->getType(), sign); | |
1155 } | |
1156 } | |
1157 | |
1158 std::string JSWriter::getValueAsParenStr(const Value* V) { | |
1159 // Skip past no-op bitcasts and zero-index geps. | |
1160 V = V->stripPointerCasts(); | |
1161 | |
1162 if (const Constant *CV = dyn_cast<Constant>(V)) { | |
1163 return getConstant(CV); | |
1164 } else { | |
1165 return "(" + getValueAsStr(V) + ")"; | |
1166 } | |
1167 } | |
1168 | |
1169 std::string JSWriter::getValueAsCastParenStr(const Value* V, AsmCast sign) { | |
1170 // Skip past no-op bitcasts and zero-index geps. | |
1171 V = V->stripPointerCasts(); | |
1172 | |
1173 if (isa<ConstantInt>(V) || isa<ConstantFP>(V) || isa<UndefValue>(V)) { | |
1174 return getConstant(cast<Constant>(V), sign); | |
1175 } else { | |
1176 return "(" + getCast(getValueAsStr(V), V->getType(), sign) + ")"; | |
1177 } | |
1178 } | |
1179 | |
1180 void JSWriter::generateInsertElementExpression(const InsertElementInst *III, raw
_string_ostream& Code) { | |
1181 // LLVM has no vector type constructor operator; it uses chains of | |
1182 // insertelement instructions instead. It also has no splat operator; it | |
1183 // uses an insertelement followed by a shuffle instead. If this insertelement | |
1184 // is part of either such sequence, skip it for now; we'll process it when we | |
1185 // reach the end. | |
1186 if (III->hasOneUse()) { | |
1187 const User *U = *III->user_begin(); | |
1188 if (isa<InsertElementInst>(U)) | |
1189 return; | |
1190 if (isa<ShuffleVectorInst>(U) && | |
1191 isa<ConstantAggregateZero>(cast<ShuffleVectorInst>(U)->getMask()) && | |
1192 !isa<InsertElementInst>(III->getOperand(0)) && | |
1193 isa<ConstantInt>(III->getOperand(2)) && | |
1194 cast<ConstantInt>(III->getOperand(2))->isZero()) | |
1195 { | |
1196 return; | |
1197 } | |
1198 } | |
1199 | |
1200 // This insertelement is at the base of a chain of single-user insertelement | |
1201 // instructions. Collect all the inserted elements so that we can categorize | |
1202 // the chain as either a splat, a constructor, or an actual series of inserts. | |
1203 VectorType *VT = III->getType(); | |
1204 unsigned NumElems = VT->getNumElements(); | |
1205 unsigned NumInserted = 0; | |
1206 SmallVector<const Value *, 8> Operands(NumElems, NULL); | |
1207 const Value *Splat = III->getOperand(1); | |
1208 const Value *Base = III; | |
1209 do { | |
1210 const InsertElementInst *BaseIII = cast<InsertElementInst>(Base); | |
1211 const ConstantInt *IndexInt = cast<ConstantInt>(BaseIII->getOperand(2)); | |
1212 unsigned Index = IndexInt->getZExtValue(); | |
1213 if (Operands[Index] == NULL) | |
1214 ++NumInserted; | |
1215 Value *Op = BaseIII->getOperand(1); | |
1216 if (Operands[Index] == NULL) { | |
1217 Operands[Index] = Op; | |
1218 if (Op != Splat) | |
1219 Splat = NULL; | |
1220 } | |
1221 Base = BaseIII->getOperand(0); | |
1222 } while (Base->hasOneUse() && isa<InsertElementInst>(Base)); | |
1223 | |
1224 // Emit code for the chain. | |
1225 Code << getAssignIfNeeded(III); | |
1226 if (NumInserted == NumElems) { | |
1227 if (Splat) { | |
1228 // Emit splat code. | |
1229 if (VT->getElementType()->isIntegerTy()) { | |
1230 Code << "SIMD_int32x4_splat(" << getValueAsStr(Splat) << ")"; | |
1231 } else { | |
1232 std::string operand = getValueAsStr(Splat); | |
1233 if (!PreciseF32) { | |
1234 // SIMD_float32x4_splat requires an actual float32 even if we're | |
1235 // otherwise not being precise about it. | |
1236 operand = "Math_fround(" + operand + ")"; | |
1237 } | |
1238 Code << "SIMD_float32x4_splat(" << operand << ")"; | |
1239 } | |
1240 } else { | |
1241 // Emit constructor code. | |
1242 if (VT->getElementType()->isIntegerTy()) { | |
1243 Code << "SIMD_int32x4("; | |
1244 } else { | |
1245 Code << "SIMD_float32x4("; | |
1246 } | |
1247 for (unsigned Index = 0; Index < NumElems; ++Index) { | |
1248 if (Index != 0) | |
1249 Code << ", "; | |
1250 std::string operand = getValueAsStr(Operands[Index]); | |
1251 if (!PreciseF32 && VT->getElementType()->isFloatTy()) { | |
1252 // SIMD_float32x4_splat requires an actual float32 even if we're | |
1253 // otherwise not being precise about it. | |
1254 operand = "Math_fround(" + operand + ")"; | |
1255 } | |
1256 Code << operand; | |
1257 } | |
1258 Code << ")"; | |
1259 } | |
1260 } else { | |
1261 // Emit a series of inserts. | |
1262 std::string Result = getValueAsStr(Base); | |
1263 for (unsigned Index = 0; Index < NumElems; ++Index) { | |
1264 std::string with; | |
1265 if (!Operands[Index]) | |
1266 continue; | |
1267 if (VT->getElementType()->isIntegerTy()) { | |
1268 with = "SIMD_int32x4_with"; | |
1269 } else { | |
1270 with = "SIMD_float32x4_with"; | |
1271 } | |
1272 std::string operand = getValueAsStr(Operands[Index]); | |
1273 if (!PreciseF32) { | |
1274 operand = "Math_fround(" + operand + ")"; | |
1275 } | |
1276 Result = with + SIMDLane[Index] + "(" + Result + ',' + operand + ')'; | |
1277 } | |
1278 Code << Result; | |
1279 } | |
1280 } | |
1281 | |
1282 void JSWriter::generateExtractElementExpression(const ExtractElementInst *EEI, r
aw_string_ostream& Code) { | |
1283 VectorType *VT = cast<VectorType>(EEI->getVectorOperand()->getType()); | |
1284 checkVectorType(VT); | |
1285 const ConstantInt *IndexInt = dyn_cast<const ConstantInt>(EEI->getIndexOperand
()); | |
1286 if (IndexInt) { | |
1287 unsigned Index = IndexInt->getZExtValue(); | |
1288 assert(Index <= 3); | |
1289 Code << getAssignIfNeeded(EEI); | |
1290 std::string OperandCode; | |
1291 raw_string_ostream CodeStream(OperandCode); | |
1292 CodeStream << getValueAsStr(EEI->getVectorOperand()) << '.' << simdLane[Inde
x]; | |
1293 Code << getCast(CodeStream.str(), EEI->getType()); | |
1294 return; | |
1295 } | |
1296 | |
1297 error("SIMD extract element with non-constant index not implemented yet"); | |
1298 } | |
1299 | |
// Emit a SIMD.js expression for a shufflevector: a splat when the shuffle is
// the second half of LLVM's insert+shuffle splat idiom, a single-input
// swizzle when all mask lanes come from one operand, or a fully general
// two-input shuffle otherwise.
void JSWriter::generateShuffleVectorExpression(const ShuffleVectorInst *SVI, raw_string_ostream& Code) {
  Code << getAssignIfNeeded(SVI);

  // LLVM has no splat operator, so it makes do by using an insert and a
  // shuffle. If that's what this shuffle is doing, the code in
  // generateInsertElementExpression will have also detected it and skipped
  // emitting the insert, so we can just emit a splat here.
  if (isa<ConstantAggregateZero>(SVI->getMask()) &&
      isa<InsertElementInst>(SVI->getOperand(0)))
  {
    InsertElementInst *IEI = cast<InsertElementInst>(SVI->getOperand(0));
    if (ConstantInt *CI = dyn_cast<ConstantInt>(IEI->getOperand(2))) {
      if (CI->isZero()) {
        std::string operand = getValueAsStr(IEI->getOperand(1));
        if (!PreciseF32) {
          // SIMD_float32x4_splat requires an actual float32 even if we're
          // otherwise not being precise about it.
          // NOTE(review): this wraps the operand in Math_fround even when the
          // int32x4 splat is chosen below -- looks unintended; confirm.
          operand = "Math_fround(" + operand + ")";
        }
        if (SVI->getType()->getElementType()->isIntegerTy()) {
          Code << "SIMD_int32x4_splat(";
        } else {
          Code << "SIMD_float32x4_splat(";
        }
        Code << operand << ")";
        return;
      }
    }
  }

  // Check whether can generate SIMD.js swizzle or shuffle.
  std::string A = getValueAsStr(SVI->getOperand(0));
  std::string B = getValueAsStr(SVI->getOperand(1));
  int OpNumElements = cast<VectorType>(SVI->getOperand(0)->getType())->getNumElements();
  int ResultNumElements = SVI->getType()->getNumElements();
  // getMaskValue returns -1 for undef lanes; missing lanes are treated the
  // same way.
  int Mask0 = ResultNumElements > 0 ? SVI->getMaskValue(0) : -1;
  int Mask1 = ResultNumElements > 1 ? SVI->getMaskValue(1) : -1;
  int Mask2 = ResultNumElements > 2 ? SVI->getMaskValue(2) : -1;
  int Mask3 = ResultNumElements > 3 ? SVI->getMaskValue(3) : -1;
  // swizzleA: every lane (incl. undef, which is < OpNumElements as -1) reads
  // from operand 0; swizzleB: every defined lane reads from operand 1.
  bool swizzleA = false;
  bool swizzleB = false;
  if ((Mask0 < OpNumElements) && (Mask1 < OpNumElements) &&
      (Mask2 < OpNumElements) && (Mask3 < OpNumElements)) {
    swizzleA = true;
  }
  if ((Mask0 < 0 || (Mask0 >= OpNumElements && Mask0 < OpNumElements * 2)) &&
      (Mask1 < 0 || (Mask1 >= OpNumElements && Mask1 < OpNumElements * 2)) &&
      (Mask2 < 0 || (Mask2 >= OpNumElements && Mask2 < OpNumElements * 2)) &&
      (Mask3 < 0 || (Mask3 >= OpNumElements && Mask3 < OpNumElements * 2))) {
    swizzleB = true;
  }
  assert(!(swizzleA && swizzleB));
  if (swizzleA || swizzleB) {
    // Single-input shuffle: emit a swizzle of whichever operand is used,
    // rebasing B-relative indices and padding undef/missing lanes with 0.
    std::string T = (swizzleA ? A : B);
    if (SVI->getType()->getElementType()->isIntegerTy()) {
      Code << "SIMD_int32x4_swizzle(" << T;
    } else {
      Code << "SIMD_float32x4_swizzle(" << T;
    }
    int i = 0;
    for (; i < ResultNumElements; ++i) {
      Code << ", ";
      int Mask = SVI->getMaskValue(i);
      if (Mask < 0) {
        Code << 0;
      } else if (Mask < OpNumElements) {
        Code << Mask;
      } else {
        assert(Mask < OpNumElements * 2);
        Code << (Mask-OpNumElements);
      }
    }
    for (; i < 4; ++i) {
      Code << ", 0";
    }
    Code << ")";
    return;
  }

  // Emit a fully-general shuffle.
  if (SVI->getType()->getElementType()->isIntegerTy()) {
    Code << "SIMD_int32x4_shuffle(";
  } else {
    Code << "SIMD_float32x4_shuffle(";
  }

  Code << A << ", " << B << ", ";

  // SIMD.js shuffle numbers the second operand's lanes 4-7, so rebase
  // B-relative indices; undef lanes become lane 0.
  SmallVector<int, 16> Indices;
  SVI->getShuffleMask(Indices);
  for (unsigned int i = 0; i < Indices.size(); ++i) {
    if (i != 0)
      Code << ", ";
    int Mask = Indices[i];
    if (Mask >= OpNumElements)
      Mask = Mask - OpNumElements + 4;
    if (Mask < 0)
      Code << 0;
    else
      Code << Mask;
  }

  Code << ")";
}
1404 | |
1405 void JSWriter::generateICmpExpression(const ICmpInst *I, raw_string_ostream& Cod
e) { | |
1406 bool Invert = false; | |
1407 const char *Name; | |
1408 switch (cast<ICmpInst>(I)->getPredicate()) { | |
1409 case ICmpInst::ICMP_EQ: Name = "equal"; break; | |
1410 case ICmpInst::ICMP_NE: Name = "equal"; Invert = true; break; | |
1411 case ICmpInst::ICMP_SLE: Name = "greaterThan"; Invert = true; break; | |
1412 case ICmpInst::ICMP_SGE: Name = "lessThan"; Invert = true; break; | |
1413 case ICmpInst::ICMP_ULE: Name = "unsignedLessThanOrEqual"; break; | |
1414 case ICmpInst::ICMP_UGE: Name = "unsignedGreaterThanOrEqual"; break; | |
1415 case ICmpInst::ICMP_ULT: Name = "unsignedLessThan"; break; | |
1416 case ICmpInst::ICMP_SLT: Name = "lessThan"; break; | |
1417 case ICmpInst::ICMP_UGT: Name = "unsignedGreaterThan"; break; | |
1418 case ICmpInst::ICMP_SGT: Name = "greaterThan"; break; | |
1419 default: I->dump(); error("invalid vector icmp"); break; | |
1420 } | |
1421 | |
1422 if (Invert) | |
1423 Code << "SIMD_int32x4_not("; | |
1424 | |
1425 Code << getAssignIfNeeded(I) << "SIMD_int32x4_" << Name << "(" | |
1426 << getValueAsStr(I->getOperand(0)) << ", " << getValueAsStr(I->getOperand
(1)) << ")"; | |
1427 | |
1428 if (Invert) | |
1429 Code << ")"; | |
1430 } | |
1431 | |
1432 void JSWriter::generateFCmpExpression(const FCmpInst *I, raw_string_ostream& Cod
e) { | |
1433 const char *Name; | |
1434 bool Invert = false; | |
1435 switch (cast<FCmpInst>(I)->getPredicate()) { | |
1436 case ICmpInst::FCMP_FALSE: | |
1437 Code << "SIMD_int32x4_splat(0)"; | |
1438 return; | |
1439 case ICmpInst::FCMP_TRUE: | |
1440 Code << "SIMD_int32x4_splat(-1)"; | |
1441 return; | |
1442 case ICmpInst::FCMP_ONE: | |
1443 Code << "SIMD_float32x4_and(SIMD_float32x4_and(" | |
1444 "SIMD_float32x4_equal(" << getValueAsStr(I->getOperand(0)) << ", " | |
1445 << getValueAsStr(I->getOperand(0)) << "),
" << | |
1446 "SIMD_float32x4_equal(" << getValueAsStr(I->getOperand(1)) << ", " | |
1447 << getValueAsStr(I->getOperand(1)) << ")),
" << | |
1448 "SIMD_float32x4_notEqual(" << getValueAsStr(I->getOperand(0)) << "
, " | |
1449 << getValueAsStr(I->getOperand(1)) << "
))"; | |
1450 return; | |
1451 case ICmpInst::FCMP_UEQ: | |
1452 Code << "SIMD_float32x4_or(SIMD_float32x4_or(" | |
1453 "SIMD_float32x4_notEqual(" << getValueAsStr(I->getOperand(0)) << "
, " | |
1454 << getValueAsStr(I->getOperand(0)) << "
), " << | |
1455 "SIMD_float32x4_notEqual(" << getValueAsStr(I->getOperand(1)) << "
, " | |
1456 << getValueAsStr(I->getOperand(1)) << "
)), " << | |
1457 "SIMD_float32x4_equal(" << getValueAsStr(I->getOperand(0)) << ", " | |
1458 << getValueAsStr(I->getOperand(1)) << "))"
; | |
1459 return; | |
1460 case FCmpInst::FCMP_ORD: | |
1461 Code << "SIMD_float32x4_and(" | |
1462 "SIMD_float32x4_equal(" << getValueAsStr(I->getOperand(0)) << ", "
<< getValueAsStr(I->getOperand(0)) << "), " << | |
1463 "SIMD_float32x4_equal(" << getValueAsStr(I->getOperand(1)) << ", "
<< getValueAsStr(I->getOperand(1)) << "))"; | |
1464 return; | |
1465 | |
1466 case FCmpInst::FCMP_UNO: | |
1467 Code << "SIMD_float32x4_or(" | |
1468 "SIMD_float32x4_notEqual(" << getValueAsStr(I->getOperand(0)) << "
, " << getValueAsStr(I->getOperand(0)) << "), " << | |
1469 "SIMD_float32x4_notEqual(" << getValueAsStr(I->getOperand(1)) << "
, " << getValueAsStr(I->getOperand(1)) << "))"; | |
1470 return; | |
1471 | |
1472 case ICmpInst::FCMP_OEQ: Name = "equal"; break; | |
1473 case ICmpInst::FCMP_OGT: Name = "greaterThan"; break; | |
1474 case ICmpInst::FCMP_OGE: Name = "greaterThanOrEqual"; break; | |
1475 case ICmpInst::FCMP_OLT: Name = "lessThan"; break; | |
1476 case ICmpInst::FCMP_OLE: Name = "lessThanOrEqual"; break; | |
1477 case ICmpInst::FCMP_UGT: Name = "lessThanOrEqual"; Invert = true; break; | |
1478 case ICmpInst::FCMP_UGE: Name = "lessThan"; Invert = true; break; | |
1479 case ICmpInst::FCMP_ULT: Name = "greaterThanOrEqual"; Invert = true; break; | |
1480 case ICmpInst::FCMP_ULE: Name = "greaterThan"; Invert = true; break; | |
1481 case ICmpInst::FCMP_UNE: Name = "notEqual"; break; | |
1482 default: I->dump(); error("invalid vector fcmp"); break; | |
1483 } | |
1484 | |
1485 if (Invert) | |
1486 Code << "SIMD_int32x4_not("; | |
1487 | |
1488 Code << getAssignIfNeeded(I) << "SIMD_float32x4_" << Name << "(" | |
1489 << getValueAsStr(I->getOperand(0)) << ", " << getValueAsStr(I->getOperand
(1)) << ")"; | |
1490 | |
1491 if (Invert) | |
1492 Code << ")"; | |
1493 } | |
1494 | |
1495 static const Value *getElement(const Value *V, unsigned i) { | |
1496 if (const InsertElementInst *II = dyn_cast<InsertElementInst>(V)) { | |
1497 if (ConstantInt *CI = dyn_cast<ConstantInt>(II->getOperand(2))) { | |
1498 if (CI->equalsInt(i)) | |
1499 return II->getOperand(1); | |
1500 } | |
1501 return getElement(II->getOperand(0), i); | |
1502 } | |
1503 return NULL; | |
1504 } | |
1505 | |
1506 static const Value *getSplatValue(const Value *V) { | |
1507 if (const Constant *C = dyn_cast<Constant>(V)) | |
1508 return C->getSplatValue(); | |
1509 | |
1510 VectorType *VTy = cast<VectorType>(V->getType()); | |
1511 const Value *Result = NULL; | |
1512 for (unsigned i = 0; i < VTy->getNumElements(); ++i) { | |
1513 const Value *E = getElement(V, i); | |
1514 if (!E) | |
1515 return NULL; | |
1516 if (!Result) | |
1517 Result = E; | |
1518 else if (Result != E) | |
1519 return NULL; | |
1520 } | |
1521 return Result; | |
1522 | |
1523 } | |
1524 | |
1525 void JSWriter::generateShiftExpression(const BinaryOperator *I, raw_string_ostre
am& Code) { | |
1526 // If we're shifting every lane by the same amount (shifting by a splat valu
e | |
1527 // then we can use a ByScalar shift. | |
1528 const Value *Count = I->getOperand(1); | |
1529 if (const Value *Splat = getSplatValue(Count)) { | |
1530 Code << getAssignIfNeeded(I) << "SIMD_int32x4_"; | |
1531 if (I->getOpcode() == Instruction::AShr) | |
1532 Code << "shiftRightArithmeticByScalar"; | |
1533 else if (I->getOpcode() == Instruction::LShr) | |
1534 Code << "shiftRightLogicalByScalar"; | |
1535 else | |
1536 Code << "shiftLeftByScalar"; | |
1537 Code << "(" << getValueAsStr(I->getOperand(0)) << ", " << getValueAsStr(
Splat) << ")"; | |
1538 return; | |
1539 } | |
1540 | |
1541 // SIMD.js does not currently have vector-vector shifts. | |
1542 generateUnrolledExpression(I, Code); | |
1543 } | |
1544 | |
1545 void JSWriter::generateUnrolledExpression(const User *I, raw_string_ostream& Cod
e) { | |
1546 VectorType *VT = cast<VectorType>(I->getType()); | |
1547 | |
1548 Code << getAssignIfNeeded(I); | |
1549 | |
1550 if (VT->getElementType()->isIntegerTy()) { | |
1551 Code << "SIMD_int32x4("; | |
1552 } else { | |
1553 Code << "SIMD_float32x4("; | |
1554 } | |
1555 | |
1556 for (unsigned Index = 0; Index < VT->getNumElements(); ++Index) { | |
1557 if (Index != 0) | |
1558 Code << ", "; | |
1559 if (!PreciseF32 && VT->getElementType()->isFloatTy()) { | |
1560 Code << "Math_fround("; | |
1561 } | |
1562 std::string Lane = VT->getNumElements() <= 4 ? | |
1563 std::string(".") + simdLane[Index] : | |
1564 ".s" + utostr(Index); | |
1565 switch (Operator::getOpcode(I)) { | |
1566 case Instruction::SDiv: | |
1567 Code << "(" << getValueAsStr(I->getOperand(0)) << Lane << "|0) / (" | |
1568 << getValueAsStr(I->getOperand(1)) << Lane << "|0)|0"; | |
1569 break; | |
1570 case Instruction::UDiv: | |
1571 Code << "(" << getValueAsStr(I->getOperand(0)) << Lane << ">>>0) / (" | |
1572 << getValueAsStr(I->getOperand(1)) << Lane << ">>>0)>>>0"; | |
1573 break; | |
1574 case Instruction::SRem: | |
1575 Code << "(" << getValueAsStr(I->getOperand(0)) << Lane << "|0) / (" | |
1576 << getValueAsStr(I->getOperand(1)) << Lane << "|0)|0"; | |
1577 break; | |
1578 case Instruction::URem: | |
1579 Code << "(" << getValueAsStr(I->getOperand(0)) << Lane << ">>>0) / (" | |
1580 << getValueAsStr(I->getOperand(1)) << Lane << ">>>0)>>>0"; | |
1581 break; | |
1582 case Instruction::AShr: | |
1583 Code << "(" << getValueAsStr(I->getOperand(0)) << Lane << "|0) >> (" | |
1584 << getValueAsStr(I->getOperand(1)) << Lane << "|0)|0"; | |
1585 break; | |
1586 case Instruction::LShr: | |
1587 Code << "(" << getValueAsStr(I->getOperand(0)) << Lane << "|0) >>> (" | |
1588 << getValueAsStr(I->getOperand(1)) << Lane << "|0)|0"; | |
1589 break; | |
1590 case Instruction::Shl: | |
1591 Code << "(" << getValueAsStr(I->getOperand(0)) << Lane << "|0) << (" | |
1592 << getValueAsStr(I->getOperand(1)) << Lane << "|0)|0"; | |
1593 break; | |
1594 default: I->dump(); error("invalid unrolled vector instr"); break; | |
1595 } | |
1596 if (!PreciseF32 && VT->getElementType()->isFloatTy()) { | |
1597 Code << ")"; | |
1598 } | |
1599 } | |
1600 | |
1601 Code << ")"; | |
1602 } | |
1603 | |
// Handle SIMD (vector-typed) instructions specially, emitting SIMD.js calls.
// Returns true if the instruction was handled here (or deliberately skipped,
// as for PHIs), false if the caller's scalar codegen should handle it.
bool JSWriter::generateSIMDExpression(const User *I, raw_string_ostream& Code) {
  VectorType *VT;
  if ((VT = dyn_cast<VectorType>(I->getType()))) {
    // vector-producing instructions
    checkVectorType(VT);

    switch (Operator::getOpcode(I)) {
      default: I->dump(); error("invalid vector instr"); break;
      case Instruction::Call: // return value is just a SIMD value, no special handling
        return false;
      case Instruction::PHI: // handled separately - we push them back into the relooper branchings
        break;
      case Instruction::ICmp:
        generateICmpExpression(cast<ICmpInst>(I), Code);
        break;
      case Instruction::FCmp:
        generateFCmpExpression(cast<FCmpInst>(I), Code);
        break;
      case Instruction::SExt:
        assert(cast<VectorType>(I->getOperand(0)->getType())->getElementType()->isIntegerTy(1) &&
               "sign-extension from vector of other than i1 not yet supported");
        // Since we represent vectors of i1 as vectors of sign extended wider integers,
        // sign extending them is a no-op.
        Code << getAssignIfNeeded(I) << getValueAsStr(I->getOperand(0));
        break;
      case Instruction::Select:
        // Since we represent vectors of i1 as vectors of sign extended wider integers,
        // selecting on them is just an elementwise select.
        // NOTE(review): the 'break's below exit the enclosing switch (the if
        // is not a loop), so the 'return true' after the if/else is dead code;
        // control reaches the common 'return true' after the switch instead.
        if (isa<VectorType>(I->getOperand(0)->getType())) {
          if (cast<VectorType>(I->getType())->getElementType()->isIntegerTy()) {
            Code << getAssignIfNeeded(I) << "SIMD_int32x4_select(" << getValueAsStr(I->getOperand(0)) << "," << getValueAsStr(I->getOperand(1)) << "," << getValueAsStr(I->getOperand(2)) << ")"; break;
          } else {
            Code << getAssignIfNeeded(I) << "SIMD_float32x4_select(" << getValueAsStr(I->getOperand(0)) << "," << getValueAsStr(I->getOperand(1)) << "," << getValueAsStr(I->getOperand(2)) << ")"; break;
          }
          return true;
        }
        // Otherwise we have a scalar condition, so it's a ?: operator.
        return false;
      case Instruction::FAdd: Code << getAssignIfNeeded(I) << "SIMD_float32x4_add(" << getValueAsStr(I->getOperand(0)) << "," << getValueAsStr(I->getOperand(1)) << ")"; break;
      case Instruction::FMul: Code << getAssignIfNeeded(I) << "SIMD_float32x4_mul(" << getValueAsStr(I->getOperand(0)) << "," << getValueAsStr(I->getOperand(1)) << ")"; break;
      case Instruction::FDiv: Code << getAssignIfNeeded(I) << "SIMD_float32x4_div(" << getValueAsStr(I->getOperand(0)) << "," << getValueAsStr(I->getOperand(1)) << ")"; break;
      case Instruction::Add: Code << getAssignIfNeeded(I) << "SIMD_int32x4_add(" << getValueAsStr(I->getOperand(0)) << "," << getValueAsStr(I->getOperand(1)) << ")"; break;
      case Instruction::Sub: Code << getAssignIfNeeded(I) << "SIMD_int32x4_sub(" << getValueAsStr(I->getOperand(0)) << "," << getValueAsStr(I->getOperand(1)) << ")"; break;
      case Instruction::Mul: Code << getAssignIfNeeded(I) << "SIMD_int32x4_mul(" << getValueAsStr(I->getOperand(0)) << "," << getValueAsStr(I->getOperand(1)) << ")"; break;
      case Instruction::And: Code << getAssignIfNeeded(I) << "SIMD_int32x4_and(" << getValueAsStr(I->getOperand(0)) << "," << getValueAsStr(I->getOperand(1)) << ")"; break;
      case Instruction::Or:  Code << getAssignIfNeeded(I) << "SIMD_int32x4_or(" << getValueAsStr(I->getOperand(0)) << "," << getValueAsStr(I->getOperand(1)) << ")"; break;
      case Instruction::Xor:
        // LLVM represents a not(x) as -1 ^ x
        Code << getAssignIfNeeded(I);
        if (BinaryOperator::isNot(I)) {
          Code << "SIMD_int32x4_not(" << getValueAsStr(BinaryOperator::getNotArgument(I)) << ")"; break;
        } else {
          Code << "SIMD_int32x4_xor(" << getValueAsStr(I->getOperand(0)) << "," << getValueAsStr(I->getOperand(1)) << ")"; break;
        }
        break;
      case Instruction::FSub:
        // LLVM represents an fneg(x) as -0.0 - x.
        Code << getAssignIfNeeded(I);
        if (BinaryOperator::isFNeg(I)) {
          Code << "SIMD_float32x4_neg(" << getValueAsStr(BinaryOperator::getFNegArgument(I)) << ")";
        } else {
          Code << "SIMD_float32x4_sub(" << getValueAsStr(I->getOperand(0)) << "," << getValueAsStr(I->getOperand(1)) << ")";
        }
        break;
      case Instruction::BitCast: {
        // Bitcasts between int and float vectors reinterpret the bits.
        Code << getAssignIfNeeded(I);
        if (cast<VectorType>(I->getType())->getElementType()->isIntegerTy()) {
          Code << "SIMD_int32x4_fromFloat32x4Bits(" << getValueAsStr(I->getOperand(0)) << ')';
        } else {
          Code << "SIMD_float32x4_fromInt32x4Bits(" << getValueAsStr(I->getOperand(0)) << ')';
        }
        break;
      }
      case Instruction::Load: {
        const LoadInst *LI = cast<LoadInst>(I);
        const Value *P = LI->getPointerOperand();
        std::string PS = getValueAsStr(P);

        // Determine if this is a partial load.
        // Fewer than 4 lanes use the partial loadX/loadXY/loadXYZ forms;
        // exactly 4 lanes uses the plain (empty-suffix) load.
        static const std::string partialAccess[4] = { "X", "XY", "XYZ", "" };
        if (VT->getNumElements() < 1 || VT->getNumElements() > 4) {
          error("invalid number of lanes in SIMD operation!");
          break;
        }
        const std::string &Part = partialAccess[VT->getNumElements() - 1];

        Code << getAssignIfNeeded(I);
        if (VT->getElementType()->isIntegerTy()) {
          Code << "SIMD_int32x4_load" << Part << "(HEAPU8, " << PS << ")";
        } else {
          Code << "SIMD_float32x4_load" << Part << "(HEAPU8, " << PS << ")";
        }
        break;
      }
      case Instruction::InsertElement:
        generateInsertElementExpression(cast<InsertElementInst>(I), Code);
        break;
      case Instruction::ShuffleVector:
        generateShuffleVectorExpression(cast<ShuffleVectorInst>(I), Code);
        break;
      case Instruction::SDiv:
      case Instruction::UDiv:
      case Instruction::SRem:
      case Instruction::URem:
        // The SIMD API does not currently support these operations directly.
        // Emulate them using scalar operations (which is essentially the same
        // as what would happen if the API did support them, since hardware
        // doesn't support them).
        generateUnrolledExpression(I, Code);
        break;
      case Instruction::AShr:
      case Instruction::LShr:
      case Instruction::Shl:
        generateShiftExpression(cast<BinaryOperator>(I), Code);
        break;
    }
    return true;
  } else {
    // vector-consuming instructions
    if (Operator::getOpcode(I) == Instruction::Store && (VT = dyn_cast<VectorType>(I->getOperand(0)->getType())) && VT->isVectorTy()) {
      checkVectorType(VT);
      const StoreInst *SI = cast<StoreInst>(I);
      const Value *P = SI->getPointerOperand();
      std::string PS = getOpName(P);
      std::string VS = getValueAsStr(SI->getValueOperand());
      // Assign the pointer to a temporary first, so the store expression
      // below can reference it by name.
      Code << getAdHocAssign(PS, P->getType()) << getValueAsStr(P) << ';';

      // Determine if this is a partial store.
      // Same suffix convention as the partial loads above.
      static const std::string partialAccess[4] = { "X", "XY", "XYZ", "" };
      if (VT->getNumElements() < 1 || VT->getNumElements() > 4) {
        error("invalid number of lanes in SIMD operation!");
        return false;
      }
      const std::string &Part = partialAccess[VT->getNumElements() - 1];

      if (VT->getElementType()->isIntegerTy()) {
        Code << "SIMD_int32x4_store" << Part << "(HEAPU8, " << PS << ", " << VS << ")";
      } else {
        Code << "SIMD_float32x4_store" << Part << "(HEAPU8, " << PS << ", " << VS << ")";
      }
      return true;
    } else if (Operator::getOpcode(I) == Instruction::ExtractElement) {
      generateExtractElementExpression(cast<ExtractElementInst>(I), Code);
      return true;
    }
  }
  return false;
}
1752 | |
// Return a mask with the numBits least-significant bits set. Widths of 64
// or more saturate to all-ones, since shifting a 64-bit value by >= 64 bits
// would be undefined behavior.
static uint64_t LSBMask(unsigned numBits) {
  if (numBits >= 64)
    return ~(uint64_t)0;
  return (1ULL << numBits) - 1;
}
1756 | |
// Generate code for an operator, either an Instruction or a ConstantExpr.
// Appends asm.js-style JavaScript for the single operation I to Code,
// dispatching on the opcode. Terminators handled by the relooper (br,
// switch, indirectbr) and PHIs emit nothing here.
void JSWriter::generateExpression(const User *I, raw_string_ostream& Code) {
  // To avoid emiting code and variables for the no-op pointer bitcasts
  // and all-zero-index geps that LLVM needs to satisfy its type system, we
  // call stripPointerCasts() on all values before translating them. This
  // includes bitcasts whose only use is lifetime marker intrinsics.
  assert(I == I->stripPointerCasts());

  // Integers wider than 32 bits should have been legalized away already.
  Type *T = I->getType();
  if (T->isIntegerTy() && T->getIntegerBitWidth() > 32) {
    errs() << *I << "\n";
    report_fatal_error("legalization problem");
  }

  // Vector operations are handled by the SIMD path; everything else falls
  // through to the scalar switch below.
  if (!generateSIMDExpression(I, Code)) switch (Operator::getOpcode(I)) {
  default: {
    I->dump();
    error("Invalid instruction");
    break;
  }
  case Instruction::Ret: {
    const ReturnInst* ret = cast<ReturnInst>(I);
    const Value *RV = ret->getReturnValue();
    // Restore the stack pointer if this function bumped it.
    if (StackBumped) {
      Code << "STACKTOP = sp;";
    }
    Code << "return";
    if (RV != NULL) {
      Code << " " << getValueAsCastParenStr(RV, ASM_NONSPECIFIC | ASM_MUST_CAST);
    }
    break;
  }
  case Instruction::Br:
  case Instruction::IndirectBr:
  case Instruction::Switch: return; // handled while relooping
  case Instruction::Unreachable: {
    // Typically there should be an abort right before these, so we don't emit any code // TODO: when ASSERTIONS are on, emit abort(0)
    Code << "// unreachable";
    break;
  }
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::FDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::FRem:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:{
    Code << getAssignIfNeeded(I);
    unsigned opcode = Operator::getOpcode(I);
    switch (opcode) {
      case Instruction::Add: Code << getParenCast(
                                        getValueAsParenStr(I->getOperand(0)) +
                                        " + " +
                                        getValueAsParenStr(I->getOperand(1)),
                                        I->getType()
                                      ); break;
      case Instruction::Sub: Code << getParenCast(
                                        getValueAsParenStr(I->getOperand(0)) +
                                        " - " +
                                        getValueAsParenStr(I->getOperand(1)),
                                        I->getType()
                                      ); break;
      case Instruction::Mul: Code << getIMul(I->getOperand(0), I->getOperand(1)); break;
      // Integer division/remainder: cast operands signed or unsigned as the
      // opcode requires, then coerce the JS result back to an integer.
      case Instruction::UDiv:
      case Instruction::SDiv:
      case Instruction::URem:
      case Instruction::SRem: Code << "(" <<
                                      getValueAsCastParenStr(I->getOperand(0), (opcode == Instruction::SDiv || opcode == Instruction::SRem) ? ASM_SIGNED : ASM_UNSIGNED) <<
                                      ((opcode == Instruction::UDiv || opcode == Instruction::SDiv) ? " / " : " % ") <<
                                      getValueAsCastParenStr(I->getOperand(1), (opcode == Instruction::SDiv || opcode == Instruction::SRem) ? ASM_SIGNED : ASM_UNSIGNED) <<
                                      ")&-1"; break;
      case Instruction::And: Code << getValueAsStr(I->getOperand(0)) << " & " << getValueAsStr(I->getOperand(1)); break;
      case Instruction::Or:  Code << getValueAsStr(I->getOperand(0)) << " | " << getValueAsStr(I->getOperand(1)); break;
      case Instruction::Xor: Code << getValueAsStr(I->getOperand(0)) << " ^ " << getValueAsStr(I->getOperand(1)); break;
      case Instruction::Shl: {
        std::string Shifted = getValueAsStr(I->getOperand(0)) + " << " + getValueAsStr(I->getOperand(1));
        if (I->getType()->getIntegerBitWidth() < 32) {
          Shifted = getParenCast(Shifted, I->getType(), ASM_UNSIGNED); // remove bits that are shifted beyond the size of this value
        }
        Code << Shifted;
        break;
      }
      case Instruction::AShr:
      case Instruction::LShr: {
        std::string Input = getValueAsStr(I->getOperand(0));
        if (I->getType()->getIntegerBitWidth() < 32) {
          Input = '(' + getCast(Input, I->getType(), opcode == Instruction::AShr ? ASM_SIGNED : ASM_UNSIGNED) + ')'; // fill in high bits, as shift needs those and is done in 32-bit
        }
        Code << Input << (opcode == Instruction::AShr ? " >> " : " >>> ") << getValueAsStr(I->getOperand(1));
        break;
      }

      // Floating-point ops: ensureFloat applies fround when the type is f32.
      case Instruction::FAdd: Code << ensureFloat(getValueAsStr(I->getOperand(0)) + " + " + getValueAsStr(I->getOperand(1)), I->getType()); break;
      case Instruction::FMul: Code << ensureFloat(getValueAsStr(I->getOperand(0)) + " * " + getValueAsStr(I->getOperand(1)), I->getType()); break;
      case Instruction::FDiv: Code << ensureFloat(getValueAsStr(I->getOperand(0)) + " / " + getValueAsStr(I->getOperand(1)), I->getType()); break;
      case Instruction::FRem: Code << ensureFloat(getValueAsStr(I->getOperand(0)) + " % " + getValueAsStr(I->getOperand(1)), I->getType()); break;
      case Instruction::FSub:
        // LLVM represents an fneg(x) as -0.0 - x.
        if (BinaryOperator::isFNeg(I)) {
          Code << ensureFloat("-" + getValueAsStr(BinaryOperator::getFNegArgument(I)), I->getType());
        } else {
          Code << ensureFloat(getValueAsStr(I->getOperand(0)) + " - " + getValueAsStr(I->getOperand(1)), I->getType());
        }
        break;
      default: error("bad binary opcode"); break;
    }
    break;
  }
  case Instruction::FCmp: {
    Code << getAssignIfNeeded(I);
    switch (cast<FCmpInst>(I)->getPredicate()) {
      // Comparisons which are simple JS operators.
      case FCmpInst::FCMP_OEQ: Code << getValueAsStr(I->getOperand(0)) << " == " << getValueAsStr(I->getOperand(1)); break;
      case FCmpInst::FCMP_UNE: Code << getValueAsStr(I->getOperand(0)) << " != " << getValueAsStr(I->getOperand(1)); break;
      case FCmpInst::FCMP_OGT: Code << getValueAsStr(I->getOperand(0)) << " > "  << getValueAsStr(I->getOperand(1)); break;
      case FCmpInst::FCMP_OGE: Code << getValueAsStr(I->getOperand(0)) << " >= " << getValueAsStr(I->getOperand(1)); break;
      case FCmpInst::FCMP_OLT: Code << getValueAsStr(I->getOperand(0)) << " < "  << getValueAsStr(I->getOperand(1)); break;
      case FCmpInst::FCMP_OLE: Code << getValueAsStr(I->getOperand(0)) << " <= " << getValueAsStr(I->getOperand(1)); break;

      // Comparisons which are inverses of JS operators.
      // (Unordered predicates are true on NaN; JS comparisons are false on
      // NaN, so negating the opposite ordered comparison gives the right
      // result.)
      case FCmpInst::FCMP_UGT:
        Code << "!(" << getValueAsStr(I->getOperand(0)) << " <= " << getValueAsStr(I->getOperand(1)) << ")";
        break;
      case FCmpInst::FCMP_UGE:
        Code << "!(" << getValueAsStr(I->getOperand(0)) << " < "  << getValueAsStr(I->getOperand(1)) << ")";
        break;
      case FCmpInst::FCMP_ULT:
        Code << "!(" << getValueAsStr(I->getOperand(0)) << " >= " << getValueAsStr(I->getOperand(1)) << ")";
        break;
      case FCmpInst::FCMP_ULE:
        Code << "!(" << getValueAsStr(I->getOperand(0)) << " > "  << getValueAsStr(I->getOperand(1)) << ")";
        break;

      // Comparisons which require explicit NaN checks (x != x detects NaN).
      case FCmpInst::FCMP_UEQ:
        Code << "(" << getValueAsStr(I->getOperand(0)) << " != " << getValueAsStr(I->getOperand(0)) << ") | " <<
                "(" << getValueAsStr(I->getOperand(1)) << " != " << getValueAsStr(I->getOperand(1)) << ") |" <<
                "(" << getValueAsStr(I->getOperand(0)) << " == " << getValueAsStr(I->getOperand(1)) << ")";
        break;
      case FCmpInst::FCMP_ONE:
        Code << "(" << getValueAsStr(I->getOperand(0)) << " == " << getValueAsStr(I->getOperand(0)) << ") & " <<
                "(" << getValueAsStr(I->getOperand(1)) << " == " << getValueAsStr(I->getOperand(1)) << ") &" <<
                "(" << getValueAsStr(I->getOperand(0)) << " != " << getValueAsStr(I->getOperand(1)) << ")";
        break;

      // Simple NaN checks.
      case FCmpInst::FCMP_ORD: Code << "(" << getValueAsStr(I->getOperand(0)) << " == " << getValueAsStr(I->getOperand(0)) << ") & " <<
                                       "(" << getValueAsStr(I->getOperand(1)) << " == " << getValueAsStr(I->getOperand(1)) << ")"; break;
      case FCmpInst::FCMP_UNO: Code << "(" << getValueAsStr(I->getOperand(0)) << " != " << getValueAsStr(I->getOperand(0)) << ") | " <<
                                       "(" << getValueAsStr(I->getOperand(1)) << " != " << getValueAsStr(I->getOperand(1)) << ")"; break;

      // Simple constants.
      case FCmpInst::FCMP_FALSE: Code << "0"; break;
      case FCmpInst::FCMP_TRUE : Code << "1"; break;

      default: error("bad fcmp"); break;
    }
    break;
  }
  case Instruction::ICmp: {
    // ConstantExprs carry the predicate themselves; instructions via the class.
    unsigned predicate = isa<ConstantExpr>(I) ?
                         cast<ConstantExpr>(I)->getPredicate() :
                         cast<ICmpInst>(I)->getPredicate();
    AsmCast sign = CmpInst::isUnsigned(predicate) ? ASM_UNSIGNED : ASM_SIGNED;
    Code << getAssignIfNeeded(I) << "(" <<
      getValueAsCastStr(I->getOperand(0), sign) <<
    ")";
    switch (predicate) {
    case ICmpInst::ICMP_EQ:  Code << "==";  break;
    case ICmpInst::ICMP_NE:  Code << "!=";  break;
    case ICmpInst::ICMP_ULE: Code << "<="; break;
    case ICmpInst::ICMP_SLE: Code << "<="; break;
    case ICmpInst::ICMP_UGE: Code << ">="; break;
    case ICmpInst::ICMP_SGE: Code << ">="; break;
    case ICmpInst::ICMP_ULT: Code << "<"; break;
    case ICmpInst::ICMP_SLT: Code << "<"; break;
    case ICmpInst::ICMP_UGT: Code << ">"; break;
    case ICmpInst::ICMP_SGT: Code << ">"; break;
    default: llvm_unreachable("Invalid ICmp predicate");
    }
    Code << "(" <<
      getValueAsCastStr(I->getOperand(1), sign) <<
    ")";
    break;
  }
  case Instruction::Alloca: {
    const AllocaInst* AI = cast<AllocaInst>(I);

    // We've done an alloca, so we'll have bumped the stack and will
    // need to restore it.
    // Yes, we shouldn't have to bump it for nativized vars, however
    // they are included in the frame offset, so the restore is still
    // needed until that is fixed.
    StackBumped = true;

    if (NativizedVars.count(AI)) {
      // nativized stack variable, we just need a 'var' definition
      UsedVars[getJSName(AI)] = AI->getType()->getElementType();
      return;
    }

    // Fixed-size entry-block allocations are allocated all at once in the
    // function prologue.
    if (AI->isStaticAlloca()) {
      uint64_t Offset;
      if (Allocas.getFrameOffset(AI, &Offset)) {
        Code << getAssign(AI);
        if (Allocas.getMaxAlignment() <= STACK_ALIGN) {
          Code << "sp";
        } else {
          Code << "sp_a"; // aligned base of stack is different, use that
        }
        if (Offset != 0) {
          Code << " + " << Offset << "|0";
        }
        break;
      }
      // Otherwise, this alloca is being represented by another alloca, so
      // there's nothing to print.
      return;
    }

    assert(AI->getAlignment() <= STACK_ALIGN); // TODO

    // Dynamic alloca: compute the (stack-aligned) size and bump STACKTOP.
    Type *T = AI->getAllocatedType();
    std::string Size;
    uint64_t BaseSize = DL->getTypeAllocSize(T);
    const Value *AS = AI->getArraySize();
    if (const ConstantInt *CI = dyn_cast<ConstantInt>(AS)) {
      Size = Twine(stackAlign(BaseSize * CI->getZExtValue())).str();
    } else {
      Size = stackAlignStr("((" + utostr(BaseSize) + '*' + getValueAsStr(AS) + ")|0)");
    }
    Code << getAssign(AI) << "STACKTOP; " << getStackBump(Size);
    break;
  }
  case Instruction::Load: {
    const LoadInst *LI = cast<LoadInst>(I);
    const Value *P = LI->getPointerOperand();
    unsigned Alignment = LI->getAlignment();
    if (NativizedVars.count(P)) {
      // Nativized variables live in JS locals, not the heap.
      Code << getAssign(LI) << getValueAsStr(P);
    } else {
      Code << getLoad(LI, P, LI->getType(), Alignment);
    }
    break;
  }
  case Instruction::Store: {
    const StoreInst *SI = cast<StoreInst>(I);
    const Value *P = SI->getPointerOperand();
    const Value *V = SI->getValueOperand();
    unsigned Alignment = SI->getAlignment();
    std::string VS = getValueAsStr(V);
    if (NativizedVars.count(P)) {
      // Nativized variables live in JS locals, not the heap.
      Code << getValueAsStr(P) << " = " << VS;
    } else {
      Code << getStore(SI, P, V->getType(), VS, Alignment);
    }

    // Stored values wider than 32 bits should have been legalized away.
    Type *T = V->getType();
    if (T->isIntegerTy() && T->getIntegerBitWidth() > 32) {
      errs() << *I << "\n";
      report_fatal_error("legalization problem");
    }
    break;
  }
  case Instruction::GetElementPtr: {
    // Lower the GEP to pointer arithmetic, folding all constant offsets into
    // a single added constant and emitting multiplies only for dynamic
    // indices.
    Code << getAssignIfNeeded(I);
    const GEPOperator *GEP = cast<GEPOperator>(I);
    gep_type_iterator GTI = gep_type_begin(GEP);
    int32_t ConstantOffset = 0;
    std::string text = getValueAsParenStr(GEP->getPointerOperand());

    // note: this iterator shadows the outer parameter I for the rest of
    // this case.
    GetElementPtrInst::const_op_iterator I = GEP->op_begin();
    I++;
    for (GetElementPtrInst::const_op_iterator E = GEP->op_end();
         I != E; ++I) {
      const Value *Index = *I;
      if (StructType *STy = dyn_cast<StructType>(*GTI++)) {
        // For a struct, add the member offset.
        unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
        uint32_t Offset = DL->getStructLayout(STy)->getElementOffset(FieldNo);
        ConstantOffset = (uint32_t)ConstantOffset + Offset;
      } else {
        // For an array, add the element offset, explicitly scaled.
        uint32_t ElementSize = DL->getTypeAllocSize(*GTI);
        if (const ConstantInt *CI = dyn_cast<ConstantInt>(Index)) {
          ConstantOffset = (uint32_t)ConstantOffset + (uint32_t)CI->getSExtValue() * ElementSize;
        } else {
          text = "(" + text + " + (" + getIMul(Index, ConstantInt::get(Type::getInt32Ty(GEP->getContext()), ElementSize)) + ")|0)";
        }
      }
    }
    if (ConstantOffset != 0) {
      text = "(" + text + " + " + itostr(ConstantOffset) + "|0)";
    }
    Code << text;
    break;
  }
  case Instruction::PHI: {
    // handled separately - we push them back into the relooper branchings
    return;
  }
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
    // Pointers are plain integers in asm.js, so these are no-ops.
    Code << getAssignIfNeeded(I) << getValueAsStr(I->getOperand(0));
    break;
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::UIToFP:
  case Instruction::SIToFP: {
    Code << getAssignIfNeeded(I);
    switch (Operator::getOpcode(I)) {
    case Instruction::Trunc: {
      //unsigned inBits = V->getType()->getIntegerBitWidth();
      unsigned outBits = I->getType()->getIntegerBitWidth();
      // Truncate by masking off the discarded high bits.
      Code << getValueAsStr(I->getOperand(0)) << "&" << utostr(LSBMask(outBits));
      break;
    }
    case Instruction::SExt: {
      // Sign-extend with a shift-left/shift-right-arithmetic pair.
      std::string bits = utostr(32 - I->getOperand(0)->getType()->getIntegerBitWidth());
      Code << getValueAsStr(I->getOperand(0)) << " << " << bits << " >> " << bits;
      break;
    }
    case Instruction::ZExt: {
      Code << getValueAsCastStr(I->getOperand(0), ASM_UNSIGNED);
      break;
    }
    case Instruction::FPExt: {
      // In precise-f32 mode, widening f32 -> f64 requires a + coercion.
      if (PreciseF32) {
        Code << "+" << getValueAsStr(I->getOperand(0)); break;
      } else {
        Code << getValueAsStr(I->getOperand(0)); break;
      }
      break;
    }
    case Instruction::FPTrunc: {
      Code << ensureFloat(getValueAsStr(I->getOperand(0)), I->getType());
      break;
    }
    case Instruction::SIToFP:   Code << '(' << getCast(getValueAsCastParenStr(I->getOperand(0), ASM_SIGNED),   I->getType()) << ')'; break;
    case Instruction::UIToFP:   Code << '(' << getCast(getValueAsCastParenStr(I->getOperand(0), ASM_UNSIGNED), I->getType()) << ')'; break;
    case Instruction::FPToSI:   Code << '(' << getDoubleToInt(getValueAsParenStr(I->getOperand(0))) << ')'; break;
    case Instruction::FPToUI:   Code << '(' << getCast(getDoubleToInt(getValueAsParenStr(I->getOperand(0))), I->getType(), ASM_UNSIGNED) << ')'; break;
    case Instruction::PtrToInt: Code << '(' << getValueAsStr(I->getOperand(0)) << ')'; break;
    case Instruction::IntToPtr: Code << '(' << getValueAsStr(I->getOperand(0)) << ')'; break;
    default: llvm_unreachable("Unreachable");
    }
    break;
  }
  case Instruction::BitCast: {
    Code << getAssignIfNeeded(I);
    // Most bitcasts are no-ops for us. However, the exception is int to float and float to int
    // reinterpretations, which round-trip through the tempDoublePtr scratch
    // slot on the heap to reinterpret the bits.
    Type *InType = I->getOperand(0)->getType();
    Type *OutType = I->getType();
    std::string V = getValueAsStr(I->getOperand(0));
    if (InType->isIntegerTy() && OutType->isFloatingPointTy()) {
      assert(InType->getIntegerBitWidth() == 32);
      Code << "(HEAP32[tempDoublePtr>>2]=" << V << "," << getCast("HEAPF32[tempDoublePtr>>2]", Type::getFloatTy(TheModule->getContext())) << ")";
    } else if (OutType->isIntegerTy() && InType->isFloatingPointTy()) {
      assert(OutType->getIntegerBitWidth() == 32);
      Code << "(HEAPF32[tempDoublePtr>>2]=" << V << "," "HEAP32[tempDoublePtr>>2]|0)";
    } else {
      Code << V;
    }
    break;
  }
  case Instruction::Call: {
    const CallInst *CI = cast<CallInst>(I);
    std::string Call = handleCall(CI);
    // An empty string means the call needed no code (e.g. it was dropped).
    if (Call.empty()) return;
    Code << Call;
    break;
  }
  case Instruction::Select: {
    Code << getAssignIfNeeded(I) << getValueAsStr(I->getOperand(0)) << " ? " <<
                                    getValueAsStr(I->getOperand(1)) << " : " <<
                                    getValueAsStr(I->getOperand(2));
    break;
  }
  case Instruction::AtomicRMW: {
    const AtomicRMWInst *rmwi = cast<AtomicRMWInst>(I);
    const Value *P = rmwi->getOperand(0);
    const Value *V = rmwi->getOperand(1);
    std::string VS = getValueAsStr(V);
    // Load the old value (the instruction's result), then store the updated
    // value. There are no threads here (see the Fence case), so the
    // load+store pair needs no further synchronization.
    Code << getLoad(rmwi, P, I->getType(), 0) << ';';
    switch (rmwi->getOperation()) {
      case AtomicRMWInst::Xchg: Code << getStore(rmwi, P, I->getType(), VS, 0); break;
      case AtomicRMWInst::Add:  Code << getStore(rmwi, P, I->getType(), "((" + getJSName(I) + '+' + VS + ")|0)", 0); break;
      case AtomicRMWInst::Sub:  Code << getStore(rmwi, P, I->getType(), "((" + getJSName(I) + '-' + VS + ")|0)", 0); break;
      case AtomicRMWInst::And:  Code << getStore(rmwi, P, I->getType(), "(" + getJSName(I) + '&' + VS + ")", 0); break;
      case AtomicRMWInst::Nand: Code << getStore(rmwi, P, I->getType(), "(~(" + getJSName(I) + '&' + VS + "))", 0); break;
      case AtomicRMWInst::Or:   Code << getStore(rmwi, P, I->getType(), "(" + getJSName(I) + '|' + VS + ")", 0); break;
      case AtomicRMWInst::Xor:  Code << getStore(rmwi, P, I->getType(), "(" + getJSName(I) + '^' + VS + ")", 0); break;
      case AtomicRMWInst::Max:
      case AtomicRMWInst::Min:
      case AtomicRMWInst::UMax:
      case AtomicRMWInst::UMin:
      case AtomicRMWInst::BAD_BINOP: llvm_unreachable("Bad atomic operation");
    }
    break;
  }
  case Instruction::Fence: // no threads, so nothing to do here
    Code << "/* fence */";
    break;
  }

  // Instructions (unlike ConstantExprs) end a statement: terminate it and
  // attach any debug info.
  if (const Instruction *Inst = dyn_cast<Instruction>(I)) {
    Code << ';';
    // append debug info
    emitDebugInfo(Code, Inst);
    Code << '\n';
  }
}
2188 | |
2189 // Checks whether to use a condition variable. We do so for switches and for ind
irectbrs | |
2190 static const Value *considerConditionVar(const Instruction *I) { | |
2191 if (const IndirectBrInst *IB = dyn_cast<const IndirectBrInst>(I)) { | |
2192 return IB->getAddress(); | |
2193 } | |
2194 const SwitchInst *SI = dyn_cast<SwitchInst>(I); | |
2195 if (!SI) return NULL; | |
2196 // use a switch if the range is not too big or sparse | |
2197 int64_t Minn = INT64_MAX, Maxx = INT64_MIN; | |
2198 for (SwitchInst::ConstCaseIt i = SI->case_begin(), e = SI->case_end(); i != e;
++i) { | |
2199 int64_t Curr = i.getCaseValue()->getSExtValue(); | |
2200 if (Curr < Minn) Minn = Curr; | |
2201 if (Curr > Maxx) Maxx = Curr; | |
2202 } | |
2203 int64_t Range = Maxx - Minn; | |
2204 int Num = SI->getNumCases(); | |
2205 return Num < 5 || Range > 10*1024 || (Range/Num) > 1024 ? NULL : SI->getCondit
ion(); // heuristics | |
2206 } | |
2207 | |
// Translates one LLVM basic block into a Relooper Block: renders every
// "real" instruction in the block to JS text, then registers the block
// (with its branch condition variable, if any) with the relooper and in
// the LLVM-block -> relooper-block map.
void JSWriter::addBlock(const BasicBlock *BB, Relooper& R, LLVMToRelooperMap& LLVMToRelooper) {
  std::string Code;
  raw_string_ostream CodeStream(Code);
  for (BasicBlock::const_iterator I = BB->begin(), E = BB->end();
       I != E; ++I) {
    // Skip instructions that are pure pointer casts of something else
    // (stripPointerCasts() returning a different value); they need no code.
    if (I->stripPointerCasts() == I) {
      generateExpression(I, CodeStream);
    }
  }
  CodeStream.flush();
  // NULL condition means the terminator is emitted as plain branches rather
  // than a JS switch (see considerConditionVar).
  const Value* Condition = considerConditionVar(BB->getTerminator());
  Block *Curr = new Block(Code.c_str(), Condition ? getValueAsCastStr(Condition).c_str() : NULL);
  LLVMToRelooper[BB] = Curr;
  R.AddBlock(Curr);
}
2223 | |
// Emits the body of one function: builds the relooper graph from the basic
// blocks, wires up all branches (with phi-copy code on the edges), runs the
// relooper, then emits local variable declarations, the stack-frame setup,
// the relooped code itself, and a final return if asm.js validation needs one.
void JSWriter::printFunctionBody(const Function *F) {
  assert(!F->isDeclaration());

  // Prepare relooper
  Relooper::MakeOutputBuffer(1024*1024);
  Relooper R;
  //if (!canReloop(F)) R.SetEmulate(true);
  if (F->getAttributes().hasAttribute(AttributeSet::FunctionIndex, Attribute::MinSize) ||
      F->getAttributes().hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize)) {
    R.SetMinSize(true);
  }
  R.SetAsmJSMode(1);
  Block *Entry = NULL;
  LLVMToRelooperMap LLVMToRelooper;

  // Create relooper blocks with their contents. TODO: We could optimize
  // indirectbr by emitting indexed blocks first, so their indexes
  // match up with the label index.
  for (Function::const_iterator BI = F->begin(), BE = F->end();
       BI != BE; ++BI) {
    InvokeState = 0; // each basic block begins in state 0; the previous may not have cleared it, if e.g. it had a throw in the middle and the rest of it was decapitated
    addBlock(BI, R, LLVMToRelooper);
    if (!Entry) Entry = LLVMToRelooper[BI];
  }
  assert(Entry);

  // Create branchings
  for (Function::const_iterator BI = F->begin(), BE = F->end();
       BI != BE; ++BI) {
    const TerminatorInst *TI = BI->getTerminator();
    switch (TI->getOpcode()) {
      default: {
        report_fatal_error("invalid branch instr " + Twine(TI->getOpcodeName()));
        break;
      }
      case Instruction::Br: {
        const BranchInst* br = cast<BranchInst>(TI);
        // Conditional branch has 3 operands (cond, true-dest, false-dest);
        // unconditional has 1.
        if (br->getNumOperands() == 3) {
          BasicBlock *S0 = br->getSuccessor(0);
          BasicBlock *S1 = br->getSuccessor(1);
          // Phi-copy code (if any) is attached to the edge, not the block.
          std::string P0 = getPhiCode(&*BI, S0);
          std::string P1 = getPhiCode(&*BI, S1);
          LLVMToRelooper[&*BI]->AddBranchTo(LLVMToRelooper[&*S0], getValueAsStr(TI->getOperand(0)).c_str(), P0.size() > 0 ? P0.c_str() : NULL);
          LLVMToRelooper[&*BI]->AddBranchTo(LLVMToRelooper[&*S1], NULL,                                     P1.size() > 0 ? P1.c_str() : NULL);
        } else if (br->getNumOperands() == 1) {
          BasicBlock *S = br->getSuccessor(0);
          std::string P = getPhiCode(&*BI, S);
          LLVMToRelooper[&*BI]->AddBranchTo(LLVMToRelooper[&*S], NULL, P.size() > 0 ? P.c_str() : NULL);
        } else {
          error("Branch with 2 operands?");
        }
        break;
      }
      case Instruction::IndirectBr: {
        const IndirectBrInst* br = cast<IndirectBrInst>(TI);
        unsigned Num = br->getNumDestinations();
        std::set<const BasicBlock*> Seen; // sadly llvm allows the same block to appear multiple times
        bool SetDefault = false; // pick the first and make it the default, llvm gives no reasonable default here
        for (unsigned i = 0; i < Num; i++) {
          const BasicBlock *S = br->getDestination(i);
          if (Seen.find(S) != Seen.end()) continue;
          Seen.insert(S);
          std::string P = getPhiCode(&*BI, S);
          std::string Target;
          if (!SetDefault) {
            SetDefault = true;
          } else {
            // Non-default destinations get a "case N:" label keyed on the
            // numeric block address used at the indirectbr's source.
            Target = "case " + utostr(getBlockAddress(F, S)) + ": ";
          }
          LLVMToRelooper[&*BI]->AddBranchTo(LLVMToRelooper[&*S], Target.size() > 0 ? Target.c_str() : NULL, P.size() > 0 ? P.c_str() : NULL);
        }
        break;
      }
      case Instruction::Switch: {
        const SwitchInst* SI = cast<SwitchInst>(TI);
        // UseSwitch: emit a JS switch; otherwise each case becomes an
        // explicit equality comparison.
        bool UseSwitch = !!considerConditionVar(SI);
        BasicBlock *DD = SI->getDefaultDest();
        std::string P = getPhiCode(&*BI, DD);
        LLVMToRelooper[&*BI]->AddBranchTo(LLVMToRelooper[&*DD], NULL, P.size() > 0 ? P.c_str() : NULL);
        typedef std::map<const BasicBlock*, std::string> BlockCondMap;
        BlockCondMap BlocksToConditions;
        for (SwitchInst::ConstCaseIt i = SI->case_begin(), e = SI->case_end(); i != e; ++i) {
          const BasicBlock *BB = i.getCaseSuccessor();
          std::string Curr = i.getCaseValue()->getValue().toString(10, true);
          std::string Condition;
          if (UseSwitch) {
            Condition = "case " + Curr + ": ";
          } else {
            Condition = "(" + getValueAsCastParenStr(SI->getCondition()) + " == " + Curr + ")";
          }
          // Multiple cases can share a successor; accumulate their conditions.
          // In non-switch mode they are joined with " | " (integer or, valid
          // in asm.js); in switch mode the "case N: " labels simply concatenate.
          BlocksToConditions[BB] = Condition + (!UseSwitch && BlocksToConditions[BB].size() > 0 ? " | " : "") + BlocksToConditions[BB];
        }
        for (BlockCondMap::const_iterator I = BlocksToConditions.begin(), E = BlocksToConditions.end(); I != E; ++I) {
          const BasicBlock *BB = I->first;
          if (BB == DD) continue; // ok to eliminate this, default dest will get there anyhow
          std::string P = getPhiCode(&*BI, BB);
          LLVMToRelooper[&*BI]->AddBranchTo(LLVMToRelooper[&*BB], I->second.c_str(), P.size() > 0 ? P.c_str() : NULL);
        }
        break;
      }
      case Instruction::Ret:
      case Instruction::Unreachable: break;
    }
  }

  // Calculate relooping and print
  R.Calculate(Entry);
  R.Render();

  // Emit local variables. "sp" (stack pointer) and "label" are always
  // declared; "sp_a" only when an over-aligned frame needs a separate
  // aligned pointer.
  UsedVars["sp"] = Type::getInt32Ty(F->getContext());
  unsigned MaxAlignment = Allocas.getMaxAlignment();
  if (MaxAlignment > STACK_ALIGN) {
    UsedVars["sp_a"] = Type::getInt32Ty(F->getContext());
  }
  UsedVars["label"] = Type::getInt32Ty(F->getContext());
  if (!UsedVars.empty()) {
    // Emit "var a = 0, b = +0, ...;" statements, at most 20 vars per statement.
    unsigned Count = 0;
    for (VarMap::const_iterator VI = UsedVars.begin(); VI != UsedVars.end(); ++VI) {
      if (Count == 20) {
        Out << ";\n";
        Count = 0;
      }
      if (Count == 0) Out << " var ";
      if (Count > 0) {
        Out << ", ";
      }
      Count++;
      Out << VI->first << " = ";
      // The initializer doubles as the asm.js type annotation for the local.
      switch (VI->second->getTypeID()) {
        default:
          llvm_unreachable("unsupported variable initializer type");
        case Type::PointerTyID:
        case Type::IntegerTyID:
          Out << "0";
          break;
        case Type::FloatTyID:
          if (PreciseF32) {
            Out << "Math_fround(0)";
            break;
          }
          // otherwise fall through to double
        case Type::DoubleTyID:
          Out << "+0";
          break;
        case Type::VectorTyID:
          if (cast<VectorType>(VI->second)->getElementType()->isIntegerTy()) {
            Out << "SIMD_int32x4(0,0,0,0)";
          } else {
            Out << "SIMD_float32x4(0,0,0,0)";
          }
          break;
      }
    }
    Out << ";";
    nl(Out);
  }

  {
    // One-time warning for unoptimized builds that produce huge var lists.
    static bool Warned = false;
    if (!Warned && OptLevel < 2 && UsedVars.size() > 2000) {
      prettyWarning() << "emitted code will contain very large numbers of local variables, which is bad for performance (build to JS with -O2 or above to avoid this - make sure to do so both on source files, and during 'linking')\n";
      Warned = true;
    }
  }

  // Emit stack entry
  Out << " " << getAdHocAssign("sp", Type::getInt32Ty(F->getContext())) << "STACKTOP;";
  if (uint64_t FrameSize = Allocas.getFrameSize()) {
    if (MaxAlignment > STACK_ALIGN) {
      // We must align this entire stack frame to something higher than the default
      Out << "\n ";
      Out << "sp_a = STACKTOP = (STACKTOP + " << utostr(MaxAlignment-1) << ")&-" << utostr(MaxAlignment) << ";";
    }
    Out << "\n ";
    Out << getStackBump(FrameSize);
  }

  // Emit (relooped) code
  char *buffer = Relooper::GetOutputBuffer();
  nl(Out) << buffer;

  // Ensure a final return if necessary: asm.js requires a typed return on
  // every path of a non-void function, so if the rendered output's last
  // closing brace is not followed by a "return ", append a dummy one.
  Type *RT = F->getFunctionType()->getReturnType();
  if (!RT->isVoidTy()) {
    char *LastCurly = strrchr(buffer, '}');
    if (!LastCurly) LastCurly = buffer;
    char *FinalReturn = strstr(LastCurly, "return ");
    if (!FinalReturn) {
      Out << " return " << getParenCast(getConstant(UndefValue::get(RT)), RT, ASM_NONSPECIFIC) << ";\n";
    }
  }
}
2417 | |
2418 void JSWriter::processConstants() { | |
2419 // First, calculate the address of each constant | |
2420 for (Module::const_global_iterator I = TheModule->global_begin(), | |
2421 E = TheModule->global_end(); I != E; ++I) { | |
2422 if (I->hasInitializer()) { | |
2423 parseConstant(I->getName().str(), I->getInitializer(), true); | |
2424 } | |
2425 } | |
2426 // Second, allocate their contents | |
2427 for (Module::const_global_iterator I = TheModule->global_begin(), | |
2428 E = TheModule->global_end(); I != E; ++I) { | |
2429 if (I->hasInitializer()) { | |
2430 parseConstant(I->getName().str(), I->getInitializer(), false); | |
2431 } | |
2432 } | |
2433 } | |
2434 | |
// Emits one complete JS function: resets per-function state, runs the
// nativization and alloca analyses, then prints the "function name(args) {"
// header, the asm.js argument-type coercions, the body, and the closing brace.
void JSWriter::printFunction(const Function *F) {
  ValueNames.clear();

  // Prepare and analyze function

  UsedVars.clear();
  UniqueNum = 0;

  // When optimizing, the regular optimizer (mem2reg, SROA, GVN, and others)
  // will have already taken all the opportunities for nativization.
  if (OptLevel == CodeGenOpt::None)
    calculateNativizedVars(F);

  // Do alloca coloring at -O1 and higher.
  Allocas.analyze(*F, *DL, OptLevel != CodeGenOpt::None);

  // Emit the function

  std::string Name = F->getName();
  sanitizeGlobal(Name);
  Out << "function " << Name << "(";
  for (Function::const_arg_iterator AI = F->arg_begin(), AE = F->arg_end();
       AI != AE; ++AI) {
    if (AI != F->arg_begin()) Out << ",";
    Out << getJSName(AI);
  }
  Out << ") {";
  nl(Out);
  // asm.js requires each parameter to be re-assigned through a type
  // coercion (e.g. "x = x|0;") at the top of the function.
  for (Function::const_arg_iterator AI = F->arg_begin(), AE = F->arg_end();
       AI != AE; ++AI) {
    std::string name = getJSName(AI);
    Out << " " << name << " = " << getCast(name, AI->getType(), ASM_NONSPECIFIC) << ";";
    nl(Out);
  }
  printFunctionBody(F);
  Out << "}";
  nl(Out);

  // Reset per-function analysis state for the next function.
  Allocas.clear();
  StackBumped = false;
}
2476 | |
// Emits the whole module: global constant layout, every function body, the
// memory initializer, and finally the JSON metadata block that the emcc
// driver parses (declares/redirects/externs/implementedFunctions/tables/
// initializers/exports/namedGlobals and assorted flags).
void JSWriter::printModuleBody() {
  processConstants();

  // Emit function bodies.
  nl(Out) << "// EMSCRIPTEN_START_FUNCTIONS"; nl(Out);
  for (Module::const_iterator I = TheModule->begin(), E = TheModule->end();
       I != E; ++I) {
    if (!I->isDeclaration()) printFunction(I);
  }
  // runPostSets holds assignments (accumulated in PostSets during emission)
  // that must run after the memory initializer is in place.
  Out << "function runPostSets() {\n";
  Out << " " << PostSets << "\n";
  Out << "}\n";
  PostSets = "";
  Out << "// EMSCRIPTEN_END_FUNCTIONS\n\n";

  assert(GlobalData32.size() == 0 && GlobalData8.size() == 0); // FIXME when we use optimal constant alignments

  // TODO fix commas
  Out << "/* memory initializer */ allocate([";
  printCommaSeparated(GlobalData64);
  if (GlobalData64.size() > 0 && GlobalData32.size() + GlobalData8.size() > 0) {
    Out << ",";
  }
  printCommaSeparated(GlobalData32);
  if (GlobalData32.size() > 0 && GlobalData8.size() > 0) {
    Out << ",";
  }
  printCommaSeparated(GlobalData8);
  Out << "], \"i8\", ALLOC_NONE, Runtime.GLOBAL_BASE);";

  // Emit metadata for emcc driver
  Out << "\n\n// EMSCRIPTEN_METADATA\n";
  Out << "{\n";

  // "declares": external functions the JS glue must provide.
  Out << "\"declares\": [";
  bool first = true;
  for (Module::const_iterator I = TheModule->begin(), E = TheModule->end();
       I != E; ++I) {
    if (I->isDeclaration() && !I->use_empty()) {
      // Ignore intrinsics that are always no-ops or expanded into other code
      // which doesn't require the intrinsic function itself to be declared.
      if (I->isIntrinsic()) {
        switch (I->getIntrinsicID()) {
          case Intrinsic::dbg_declare:
          case Intrinsic::dbg_value:
          case Intrinsic::lifetime_start:
          case Intrinsic::lifetime_end:
          case Intrinsic::invariant_start:
          case Intrinsic::invariant_end:
          case Intrinsic::prefetch:
          case Intrinsic::memcpy:
          case Intrinsic::memset:
          case Intrinsic::memmove:
          case Intrinsic::expect:
          case Intrinsic::flt_rounds:
            continue; // continues the enclosing for loop, skipping this one
        }
      }

      if (first) {
        first = false;
      } else {
        Out << ", ";
      }
      Out << "\"" << I->getName() << "\"";
    }
  }
  // Extra declares collected during code emission.
  for (NameSet::const_iterator I = Declares.begin(), E = Declares.end();
       I != E; ++I) {
    if (first) {
      first = false;
    } else {
      Out << ", ";
    }
    Out << "\"" << *I << "\"";
  }
  Out << "],";

  Out << "\"redirects\": {";
  first = true;
  for (StringMap::const_iterator I = Redirects.begin(), E = Redirects.end();
       I != E; ++I) {
    if (first) {
      first = false;
    } else {
      Out << ", ";
    }
    Out << "\"_" << I->first << "\": \"" << I->second << "\"";
  }
  Out << "},";

  Out << "\"externs\": [";
  first = true;
  for (NameSet::const_iterator I = Externals.begin(), E = Externals.end();
       I != E; ++I) {
    if (first) {
      first = false;
    } else {
      Out << ", ";
    }
    Out << "\"" << *I << "\"";
  }
  Out << "],";

  Out << "\"implementedFunctions\": [";
  first = true;
  for (Module::const_iterator I = TheModule->begin(), E = TheModule->end();
       I != E; ++I) {
    if (!I->isDeclaration()) {
      if (first) {
        first = false;
      } else {
        Out << ", ";
      }
      std::string name = I->getName();
      sanitizeGlobal(name);
      Out << "\"" << name << '"';
    }
  }
  Out << "],";

  // Function pointer tables, one per signature, each padded to a power of
  // two so that pointer masking works.
  Out << "\"tables\": {";
  unsigned Num = FunctionTables.size();
  for (FunctionTableMap::iterator I = FunctionTables.begin(), E = FunctionTables.end(); I != E; ++I) {
    Out << "  \"" << I->first << "\": \"var FUNCTION_TABLE_" << I->first << " = [";
    FunctionTable &Table = I->second;
    // ensure power of two
    unsigned Size = 1;
    while (Size < Table.size()) Size <<= 1;
    while (Table.size() < Size) Table.push_back("0");
    for (unsigned i = 0; i < Table.size(); i++) {
      Out << Table[i];
      if (i < Table.size()-1) Out << ",";
    }
    Out << "];\"";
    if (--Num > 0) Out << ",";
    Out << "\n";
  }
  Out << "},";

  Out << "\"initializers\": [";
  first = true;
  for (unsigned i = 0; i < GlobalInitializers.size(); i++) {
    if (first) {
      first = false;
    } else {
      Out << ", ";
    }
    Out << "\"" << GlobalInitializers[i] << "\"";
  }
  Out << "],";

  Out << "\"exports\": [";
  first = true;
  for (unsigned i = 0; i < Exports.size(); i++) {
    if (first) {
      first = false;
    } else {
      Out << ", ";
    }
    Out << "\"" << Exports[i] << "\"";
  }
  Out << "],";

  Out << "\"cantValidate\": \"" << CantValidate << "\",";

  Out << "\"simd\": ";
  Out << (UsesSIMD ? "1" : "0");
  Out << ",";

  Out << "\"namedGlobals\": {";
  first = true;
  for (NameIntMap::const_iterator I = NamedGlobals.begin(), E = NamedGlobals.end(); I != E; ++I) {
    if (first) {
      first = false;
    } else {
      Out << ", ";
    }
    Out << "\"_" << I->first << "\": \"" << utostr(I->second) << "\"";
  }
  Out << "}";

  Out << "\n}\n";
}
2661 | |
// Lays out one global initializer into the memory-initializer byte buffers.
// Called twice per global (see processConstants): with calculate=true it
// assigns an address and reserves/fills bytes that need no relocation; with
// calculate=false it patches in values that depend on other globals'
// addresses. Input is assumed to have been flattened by the PNaCl passes.
void JSWriter::parseConstant(const std::string& name, const Constant* CV, bool calculate) {
  if (isa<GlobalValue>(CV))
    return;
  //errs() << "parsing constant " << name << "\n";
  // TODO: we repeat some work in both calculate and emit phases here
  // FIXME: use the proper optimal alignments
  if (const ConstantDataSequential *CDS =
         dyn_cast<ConstantDataSequential>(CV)) {
    assert(CDS->isString());
    if (calculate) {
      HeapData *GlobalData = allocateAddress(name);
      StringRef Str = CDS->getAsString();
      for (unsigned int i = 0; i < Str.size(); i++) {
        GlobalData->push_back(Str.data()[i]);
      }
    }
  } else if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CV)) {
    APFloat APF = CFP->getValueAPF();
    if (CFP->getType() == Type::getFloatTy(CFP->getContext())) {
      if (calculate) {
        HeapData *GlobalData = allocateAddress(name);
        // Serialize via a union; assumes the compiler host is little endian
        // (same assumption as the integer case below).
        union flt { float f; unsigned char b[sizeof(float)]; } flt;
        flt.f = APF.convertToFloat();
        for (unsigned i = 0; i < sizeof(float); ++i) {
          GlobalData->push_back(flt.b[i]);
        }
      }
    } else if (CFP->getType() == Type::getDoubleTy(CFP->getContext())) {
      if (calculate) {
        HeapData *GlobalData = allocateAddress(name);
        union dbl { double d; unsigned char b[sizeof(double)]; } dbl;
        dbl.d = APF.convertToDouble();
        for (unsigned i = 0; i < sizeof(double); ++i) {
          GlobalData->push_back(dbl.b[i]);
        }
      }
    } else {
      assert(false && "Unsupported floating-point type");
    }
  } else if (const ConstantInt *CI = dyn_cast<ConstantInt>(CV)) {
    if (calculate) {
      union { uint64_t i; unsigned char b[sizeof(uint64_t)]; } integer;
      integer.i = *CI->getValue().getRawData();
      unsigned BitWidth = 64; // CI->getValue().getBitWidth();
      assert(BitWidth == 32 || BitWidth == 64);
      HeapData *GlobalData = allocateAddress(name);
      // assuming compiler is little endian
      for (unsigned i = 0; i < BitWidth / 8; ++i) {
        GlobalData->push_back(integer.b[i]);
      }
    }
  } else if (isa<ConstantPointerNull>(CV)) {
    assert(false && "Unlowered ConstantPointerNull");
  } else if (isa<ConstantAggregateZero>(CV)) {
    if (calculate) {
      unsigned Bytes = DL->getTypeStoreSize(CV->getType());
      HeapData *GlobalData = allocateAddress(name);
      for (unsigned i = 0; i < Bytes; ++i) {
        GlobalData->push_back(0);
      }
      // FIXME: create a zero section at the end, avoid filling meminit with zeros
    }
  } else if (const ConstantArray *CA = dyn_cast<ConstantArray>(CV)) {
    if (calculate) {
      // Constant arrays only survive here as the special llvm.used /
      // llvm.global.annotations lists; we recognize which by the user.
      for (Constant::const_user_iterator UI = CV->user_begin(), UE = CV->user_end(); UI != UE; ++UI) {
        if ((*UI)->getName() == "llvm.used") {
          // export the kept-alives
          for (unsigned i = 0; i < CA->getNumOperands(); i++) {
            const Constant *C = CA->getOperand(i);
            if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
              C = CE->getOperand(0); // ignore bitcasts
            }
            Exports.push_back(getJSName(C));
          }
        } else if ((*UI)->getName() == "llvm.global.annotations") {
          // llvm.global.annotations can be ignored.
        } else {
          llvm_unreachable("Unexpected constant array");
        }
        break; // we assume one use here
      }
    }
  } else if (const ConstantStruct *CS = dyn_cast<ConstantStruct>(CV)) {
    if (name == "__init_array_start") {
      // this is the global static initializer
      if (calculate) {
        unsigned Num = CS->getNumOperands();
        for (unsigned i = 0; i < Num; i++) {
          const Value* C = CS->getOperand(i);
          if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
            C = CE->getOperand(0); // ignore bitcasts
          }
          GlobalInitializers.push_back(getJSName(C));
        }
      }
    } else if (calculate) {
      // Phase 1: only reserve zeroed space; real contents are written in
      // phase 2 once all addresses are known.
      HeapData *GlobalData = allocateAddress(name);
      unsigned Bytes = DL->getTypeStoreSize(CV->getType());
      for (unsigned i = 0; i < Bytes; ++i) {
        GlobalData->push_back(0);
      }
    } else {
      // Per the PNaCl abi, this must be a packed struct of a very specific type
      // https://chromium.googlesource.com/native_client/pnacl-llvm/+/7287c45c13dc887cebe3db6abfa2f1080186bb97/lib/Transforms/NaCl/FlattenGlobals.cpp
      assert(CS->getType()->isPacked());
      // This is the only constant where we cannot just emit everything during the first phase, 'calculate', as we may refer to other globals
      unsigned Num = CS->getNumOperands();
      unsigned Offset = getRelativeGlobalAddress(name);
      unsigned OffsetStart = Offset;
      unsigned Absolute = getGlobalAddress(name);
      for (unsigned i = 0; i < Num; i++) {
        const Constant* C = CS->getOperand(i);
        if (isa<ConstantAggregateZero>(C)) {
          unsigned Bytes = DL->getTypeStoreSize(C->getType());
          Offset += Bytes; // zeros, so just skip
        } else if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
          // A pointer-sized field holding an address (ptrtoint) or an
          // address plus constant offset (add of ptrtoint).
          const Value *V = CE->getOperand(0);
          unsigned Data = 0;
          if (CE->getOpcode() == Instruction::PtrToInt) {
            Data = getConstAsOffset(V, Absolute + Offset - OffsetStart);
          } else if (CE->getOpcode() == Instruction::Add) {
            V = cast<ConstantExpr>(V)->getOperand(0);
            Data = getConstAsOffset(V, Absolute + Offset - OffsetStart);
            ConstantInt *CI = cast<ConstantInt>(CE->getOperand(1));
            Data += *CI->getValue().getRawData();
          } else {
            CE->dump();
            llvm_unreachable("Unexpected constant expr kind");
          }
          union { unsigned i; unsigned char b[sizeof(unsigned)]; } integer;
          integer.i = Data;
          assert(Offset+4 <= GlobalData64.size());
          for (unsigned i = 0; i < 4; ++i) {
            GlobalData64[Offset++] = integer.b[i];
          }
        } else if (const ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(C)) {
          assert(CDS->isString());
          StringRef Str = CDS->getAsString();
          assert(Offset+Str.size() <= GlobalData64.size());
          for (unsigned int i = 0; i < Str.size(); i++) {
            GlobalData64[Offset++] = Str.data()[i];
          }
        } else {
          C->dump();
          llvm_unreachable("Unexpected constant kind");
        }
      }
    }
  } else if (isa<ConstantVector>(CV)) {
    assert(false && "Unlowered ConstantVector");
  } else if (isa<BlockAddress>(CV)) {
    assert(false && "Unlowered BlockAddress");
  } else if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(CV)) {
    if (name == "__init_array_start") {
      // this is the global static initializer
      if (calculate) {
        const Value *V = CE->getOperand(0);
        GlobalInitializers.push_back(getJSName(V));
        // is the func
      }
    } else if (name == "__fini_array_start") {
      // nothing to do
    } else {
      // a global equal to a ptrtoint of some function, so a 32-bit integer for us
      if (calculate) {
        HeapData *GlobalData = allocateAddress(name);
        for (unsigned i = 0; i < 4; ++i) {
          GlobalData->push_back(0);
        }
      } else {
        unsigned Data = 0;

        // Deconstruct lowered getelementptrs.
        if (CE->getOpcode() == Instruction::Add) {
          Data = cast<ConstantInt>(CE->getOperand(1))->getZExtValue();
          CE = cast<ConstantExpr>(CE->getOperand(0));
        }
        const Value *V = CE;
        if (CE->getOpcode() == Instruction::PtrToInt) {
          V = CE->getOperand(0);
        }

        // Deconstruct getelementptrs.
        int64_t BaseOffset;
        V = GetPointerBaseWithConstantOffset(V, BaseOffset, *DL);
        Data += (uint64_t)BaseOffset;

        Data += getConstAsOffset(V, getGlobalAddress(name));
        union { unsigned i; unsigned char b[sizeof(unsigned)]; } integer;
        integer.i = Data;
        unsigned Offset = getRelativeGlobalAddress(name);
        assert(Offset+4 <= GlobalData64.size());
        for (unsigned i = 0; i < 4; ++i) {
          GlobalData64[Offset++] = integer.b[i];
        }
      }
    }
  } else if (isa<UndefValue>(CV)) {
    assert(false && "Unlowered UndefValue");
  } else {
    CV->dump();
    assert(false && "Unsupported constant kind");
  }
}
2866 | |
2867 // nativization | |
2868 | |
// Populates NativizedVars with the allocas of F that can live as plain JS
// locals instead of stack memory: scalar (non-vector, non-aggregate) allocas
// whose address is only ever loaded from or stored to, never taken or escaped.
void JSWriter::calculateNativizedVars(const Function *F) {
  NativizedVars.clear();

  for (Function::const_iterator BI = F->begin(), BE = F->end(); BI != BE; ++BI) {
    for (BasicBlock::const_iterator II = BI->begin(), E = BI->end(); II != E; ++II) {
      const Instruction *I = &*II;
      if (const AllocaInst *AI = dyn_cast<const AllocaInst>(I)) {
        if (AI->getAllocatedType()->isVectorTy()) continue; // we do not nativize vectors, we rely on the LLVM optimizer to avoid load/stores on them
        if (AI->getAllocatedType()->isAggregateType()) continue; // we do not nativize aggregates either
        // this is on the stack. if its address is never used nor escaped, we can nativize it
        bool Fail = false;
        for (Instruction::const_user_iterator UI = I->user_begin(), UE = I->user_end(); UI != UE && !Fail; ++UI) {
          const Instruction *U = dyn_cast<Instruction>(*UI);
          if (!U) { Fail = true; break; } // not an instruction, not cool
          switch (U->getOpcode()) {
            case Instruction::Load: break; // load is cool
            case Instruction::Store: {
              if (U->getOperand(0) == I) Fail = true; // store *of* it is not cool; store *to* it is fine
              break;
            }
            default: { Fail = true; break; } // anything that is "not" "cool", is "not cool"
          }
        }
        if (!Fail) NativizedVars.insert(I);
      }
    }
  }
}
2897 | |
2898 // special analyses | |
2899 | |
2900 bool JSWriter::canReloop(const Function *F) { | |
2901 return true; | |
2902 } | |
2903 | |
2904 // main entry | |
2905 | |
2906 void JSWriter::printCommaSeparated(const HeapData data) { | |
2907 for (HeapData::const_iterator I = data.begin(); | |
2908 I != data.end(); ++I) { | |
2909 if (I != data.begin()) { | |
2910 Out << ","; | |
2911 } | |
2912 Out << (int)*I; | |
2913 } | |
2914 } | |
2915 | |
// Entry point for emitting the whole program; simply delegates to
// printModule (both name parameters are passed through unused there).
void JSWriter::printProgram(const std::string& fname,
                            const std::string& mName) {
  printModule(fname,mName);
}
2920 | |
// Emits the module body; the filename/module-name parameters are currently
// ignored.
void JSWriter::printModule(const std::string& fname,
                           const std::string& mName) {
  printModuleBody();
}
2925 | |
// ModulePass entry point: records the module and its data layout, prepares
// the call-handler table, and emits the entire output. Returns false because
// the IR itself is never modified.
bool JSWriter::runOnModule(Module &M) {
  TheModule = &M;
  DL = &M.getDataLayout();

  setupCallHandlers();

  printProgram("", "");

  return false;
}
2936 | |
// Pass identification; LLVM's pass machinery keys on this member's address,
// not its value.
char JSWriter::ID = 0;
2938 | |
2939 class CheckTriple : public ModulePass { | |
2940 public: | |
2941 static char ID; | |
2942 CheckTriple() : ModulePass(ID) {} | |
2943 virtual bool runOnModule(Module &M) { | |
2944 if (M.getTargetTriple() != "asmjs-unknown-emscripten") { | |
2945 prettyWarning() << "incorrect target triple '" << M.getTargetTriple() << "
' (did you use emcc/em++ on all source files and not clang directly?)\n"; | |
2946 } | |
2947 return false; | |
2948 } | |
2949 }; | |
2950 | |
// Pass identification for CheckTriple (address-keyed, value irrelevant).
char CheckTriple::ID;
2952 | |
// Factory for the triple-checking pass; ownership passes to the PassManager.
Pass *createCheckTriplePass() {
  return new CheckTriple();
}
2956 | |
2957 //===----------------------------------------------------------------------===// | |
2958 // External Interface declaration | |
2959 //===----------------------------------------------------------------------===// | |
2960 | |
// Builds the JS backend's codegen pipeline: triple sanity check, lowering
// passes (insert/extractelement expansion, i64 legalization), an optional
// alloca simplification at -O0, and finally the JSWriter that emits the
// output. Pass order matters: JSWriter assumes the lowering passes ran.
bool JSTargetMachine::addPassesToEmitFile(PassManagerBase &PM,
                                          raw_pwrite_stream &o,
                                          CodeGenFileType FileType,
                                          bool DisableVerify,
                                          AnalysisID StartAfter,
                                          AnalysisID StopAfter) {
  // Only assembly (textual JS) output is supported by this backend.
  assert(FileType == TargetMachine::CGFT_AssemblyFile);

  PM.add(createCheckTriplePass());
  PM.add(createExpandInsertExtractElementPass());
  PM.add(createExpandI64Pass());

  CodeGenOpt::Level OptLevel = getOptLevel();

  // When optimizing, there shouldn't be any opportunities for SimplifyAllocas
  // because the regular optimizer should have taken them all (GVN, and possibly
  // also SROA).
  if (OptLevel == CodeGenOpt::None)
    PM.add(createEmscriptenSimplifyAllocasPass());

  PM.add(new JSWriter(o, OptLevel));

  return false;
}
OLD | NEW |