Chromium Code Reviews

Side by Side Diff: src/IceTargetLoweringX8632.cpp

Issue 1341423002: Reflow comments to use the full width. (Closed) Base URL: https://chromium.googlesource.com/native_client/pnacl-subzero.git@master
Patch Set: Fix spelling and rebase Created 5 years, 3 months ago
1 //===- subzero/src/IceTargetLoweringX8632.cpp - x86-32 lowering -----------===// 1 //===- subzero/src/IceTargetLoweringX8632.cpp - x86-32 lowering -----------===//
2 // 2 //
3 // The Subzero Code Generator 3 // The Subzero Code Generator
4 // 4 //
5 // This file is distributed under the University of Illinois Open Source 5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details. 6 // License. See LICENSE.TXT for details.
7 // 7 //
8 //===----------------------------------------------------------------------===// 8 //===----------------------------------------------------------------------===//
9 /// 9 ///
10 /// \file 10 /// \file
11 /// This file implements the TargetLoweringX8632 class, which 11 /// This file implements the TargetLoweringX8632 class, which consists almost
12 /// consists almost entirely of the lowering sequence for each 12 /// entirely of the lowering sequence for each high-level instruction.
13 /// high-level instruction.
14 /// 13 ///
15 //===----------------------------------------------------------------------===// 14 //===----------------------------------------------------------------------===//
16 15
17 #include "IceTargetLoweringX8632.h" 16 #include "IceTargetLoweringX8632.h"
18 17
19 #include "IceTargetLoweringX8632Traits.h" 18 #include "IceTargetLoweringX8632Traits.h"
20 #include "IceTargetLoweringX86Base.h" 19 #include "IceTargetLoweringX86Base.h"
21 20
22 namespace Ice { 21 namespace Ice {
23 22
(...skipping 69 matching lines...)
93 // __ ______ __ __ ______ ______ __ __ __ ______ 92 // __ ______ __ __ ______ ______ __ __ __ ______
94 // /\ \ /\ __ \/\ \ _ \ \/\ ___\/\ == \/\ \/\ "-.\ \/\ ___\ 93 // /\ \ /\ __ \/\ \ _ \ \/\ ___\/\ == \/\ \/\ "-.\ \/\ ___\
95 // \ \ \___\ \ \/\ \ \ \/ ".\ \ \ __\\ \ __<\ \ \ \ \-. \ \ \__ \ 94 // \ \ \___\ \ \/\ \ \ \/ ".\ \ \ __\\ \ __<\ \ \ \ \-. \ \ \__ \
96 // \ \_____\ \_____\ \__/".~\_\ \_____\ \_\ \_\ \_\ \_\\"\_\ \_____\ 95 // \ \_____\ \_____\ \__/".~\_\ \_____\ \_\ \_\ \_\ \_\\"\_\ \_____\
97 // \/_____/\/_____/\/_/ \/_/\/_____/\/_/ /_/\/_/\/_/ \/_/\/_____/ 96 // \/_____/\/_____/\/_/ \/_/\/_____/\/_/ /_/\/_/\/_/ \/_/\/_____/
98 // 97 //
99 //------------------------------------------------------------------------------ 98 //------------------------------------------------------------------------------
100 void TargetX8632::lowerCall(const InstCall *Instr) { 99 void TargetX8632::lowerCall(const InstCall *Instr) {
101 // x86-32 calling convention: 100 // x86-32 calling convention:
102 // 101 //
103 // * At the point before the call, the stack must be aligned to 16 102 // * At the point before the call, the stack must be aligned to 16 bytes.
104 // bytes.
105 // 103 //
106 // * The first four arguments of vector type, regardless of their 104 // * The first four arguments of vector type, regardless of their position
107 // position relative to the other arguments in the argument list, are 105 // relative to the other arguments in the argument list, are placed in
108 // placed in registers xmm0 - xmm3. 106 // registers xmm0 - xmm3.
109 // 107 //
110 // * Other arguments are pushed onto the stack in right-to-left order, 108 // * Other arguments are pushed onto the stack in right-to-left order, such
111 // such that the left-most argument ends up on the top of the stack at 109 // that the left-most argument ends up on the top of the stack at the lowest
112 // the lowest memory address. 110 // memory address.
113 // 111 //
114 // * Stack arguments of vector type are aligned to start at the next 112 // * Stack arguments of vector type are aligned to start at the next highest
115 // highest multiple of 16 bytes. Other stack arguments are aligned to 113 // multiple of 16 bytes. Other stack arguments are aligned to 4 bytes.
116 // 4 bytes.
117 // 114 //
118 // This intends to match the section "IA-32 Function Calling 115 // This intends to match the section "IA-32 Function Calling Convention" of
119 // Convention" of the document "OS X ABI Function Call Guide" by 116 // the document "OS X ABI Function Call Guide" by Apple.
120 // Apple.
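The four rules above fully determine where each argument lands. As a hedged, standalone sketch (the Arg struct, the helper, and the hard-coded limit of four xmm registers are illustrative, not the Subzero API), the classification could look like:

```cpp
#include <cstdint>
#include <vector>

struct Arg {
  bool IsVector;      // only vector-typed args are eligible for xmm0-xmm3
  uint32_t SizeBytes; // width the argument occupies on the stack
};

static uint32_t alignUp(uint32_t Pos, uint32_t Align) {
  return (Pos + Align - 1) & ~(Align - 1);
}

// Walks the argument list left to right, assigning the first four vector
// args to xmm registers and giving everything else an esp-relative slot.
// Returns the total parameter-area size in bytes.
uint32_t classifyArgs(const std::vector<Arg> &Args) {
  uint32_t NumXmmArgs = 0;
  uint32_t ParameterAreaSizeBytes = 0;
  for (const Arg &A : Args) {
    if (A.IsVector && NumXmmArgs < 4) {
      ++NumXmmArgs; // passed in register xmm<NumXmmArgs-1>
      continue;
    }
    // Stack argument: vectors start at the next 16-byte boundary,
    // everything else is 4-byte aligned.
    ParameterAreaSizeBytes =
        alignUp(ParameterAreaSizeBytes, A.IsVector ? 16 : 4);
    // The argument would be stored at [esp + ParameterAreaSizeBytes].
    ParameterAreaSizeBytes += A.SizeBytes;
  }
  return ParameterAreaSizeBytes;
}
```

Because offsets grow with the argument index, the left-most stack argument receives the lowest address, matching the right-to-left push order described above.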
121 NeedsStackAlignment = true; 117 NeedsStackAlignment = true;
122 118
123 using OperandList = std::vector<Operand *>; 119 using OperandList = std::vector<Operand *>;
124 OperandList XmmArgs; 120 OperandList XmmArgs;
125 OperandList StackArgs, StackArgLocations; 121 OperandList StackArgs, StackArgLocations;
126 uint32_t ParameterAreaSizeBytes = 0; 122 uint32_t ParameterAreaSizeBytes = 0;
127 123
128 // Classify each argument operand according to the location where the 124 // Classify each argument operand according to the location where the
129 // argument is passed. 125 // argument is passed.
130 for (SizeT i = 0, NumArgs = Instr->getNumArgs(); i < NumArgs; ++i) { 126 for (SizeT i = 0, NumArgs = Instr->getNumArgs(); i < NumArgs; ++i) {
(...skipping 11 matching lines...)
142 } 138 }
143 Variable *esp = 139 Variable *esp =
144 Func->getTarget()->getPhysicalRegister(Traits::RegisterSet::Reg_esp); 140 Func->getTarget()->getPhysicalRegister(Traits::RegisterSet::Reg_esp);
145 Constant *Loc = Ctx->getConstantInt32(ParameterAreaSizeBytes); 141 Constant *Loc = Ctx->getConstantInt32(ParameterAreaSizeBytes);
146 StackArgLocations.push_back( 142 StackArgLocations.push_back(
147 Traits::X86OperandMem::create(Func, Ty, esp, Loc)); 143 Traits::X86OperandMem::create(Func, Ty, esp, Loc));
148 ParameterAreaSizeBytes += typeWidthInBytesOnStack(Arg->getType()); 144 ParameterAreaSizeBytes += typeWidthInBytesOnStack(Arg->getType());
149 } 145 }
150 } 146 }
151 147
152 // Adjust the parameter area so that the stack is aligned. It is 148 // Adjust the parameter area so that the stack is aligned. It is assumed that
153 // assumed that the stack is already aligned at the start of the 149 // the stack is already aligned at the start of the calling sequence.
154 // calling sequence.
155 ParameterAreaSizeBytes = Traits::applyStackAlignment(ParameterAreaSizeBytes); 150 ParameterAreaSizeBytes = Traits::applyStackAlignment(ParameterAreaSizeBytes);
156 151
157 // Subtract the appropriate amount for the argument area. This also 152 // Subtract the appropriate amount for the argument area. This also takes
158 // takes care of setting the stack adjustment during emission. 153 // care of setting the stack adjustment during emission.
159 // 154 //
160 // TODO: If for some reason the call instruction gets dead-code 155 // TODO: If for some reason the call instruction gets dead-code eliminated
161 // eliminated after lowering, we would need to ensure that the 156 // after lowering, we would need to ensure that the pre-call and the
162 // pre-call and the post-call esp adjustment get eliminated as well. 157 // post-call esp adjustment get eliminated as well.
163 if (ParameterAreaSizeBytes) { 158 if (ParameterAreaSizeBytes) {
164 _adjust_stack(ParameterAreaSizeBytes); 159 _adjust_stack(ParameterAreaSizeBytes);
165 } 160 }
166 161
167 // Copy arguments that are passed on the stack to the appropriate 162 // Copy arguments that are passed on the stack to the appropriate stack
168 // stack locations. 163 // locations.
169 for (SizeT i = 0, e = StackArgs.size(); i < e; ++i) { 164 for (SizeT i = 0, e = StackArgs.size(); i < e; ++i) {
170 lowerStore(InstStore::create(Func, StackArgs[i], StackArgLocations[i])); 165 lowerStore(InstStore::create(Func, StackArgs[i], StackArgLocations[i]));
171 } 166 }
172 167
173 // Copy arguments to be passed in registers to the appropriate 168 // Copy arguments to be passed in registers to the appropriate registers.
174 // registers. 169 // TODO: Investigate the impact of lowering arguments passed in registers
175 // TODO: Investigate the impact of lowering arguments passed in 170 // after lowering stack arguments as opposed to the other way around.
176 // registers after lowering stack arguments as opposed to the other 171 // Lowering register arguments after stack arguments may reduce register
177 // way around. Lowering register arguments after stack arguments may 172 // pressure. On the other hand, lowering register arguments first (before
178 // reduce register pressure. On the other hand, lowering register 173 // stack arguments) may result in more compact code, as the memory operand
179 // arguments first (before stack arguments) may result in more compact 174 // displacements may end up being smaller before any stack adjustment is
180 // code, as the memory operand displacements may end up being smaller 175 // done.
181 // before any stack adjustment is done.
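The encoding effect mentioned in the TODO is concrete: an x86 memory operand whose displacement fits in a signed byte is encoded with a 1-byte disp8 field, while anything larger needs a 4-byte disp32. A small sketch of that rule (ignoring the ebp special case that forces a zero disp8):

```cpp
#include <cstdint>

// Bytes consumed by the displacement field of an x86 ModRM memory operand.
int dispFieldSize(int32_t Disp) {
  if (Disp == 0)
    return 0; // no displacement byte at all
  if (Disp >= -128 && Disp <= 127)
    return 1; // disp8
  return 4;   // disp32
}
```

Lowering register arguments before the esp adjustment keeps stack-slot displacements small, so more of them fit in the short disp8 form.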
182 for (SizeT i = 0, NumXmmArgs = XmmArgs.size(); i < NumXmmArgs; ++i) { 176 for (SizeT i = 0, NumXmmArgs = XmmArgs.size(); i < NumXmmArgs; ++i) {
183 Variable *Reg = 177 Variable *Reg =
184 legalizeToReg(XmmArgs[i], Traits::RegisterSet::Reg_xmm0 + i); 178 legalizeToReg(XmmArgs[i], Traits::RegisterSet::Reg_xmm0 + i);
185 // Generate a FakeUse of register arguments so that they do not get 179 // Generate a FakeUse of register arguments so that they do not get dead
186 // dead code eliminated as a result of the FakeKill of scratch 180 // code eliminated as a result of the FakeKill of scratch registers after
187 // registers after the call. 181 // the call.
188 Context.insert(InstFakeUse::create(Func, Reg)); 182 Context.insert(InstFakeUse::create(Func, Reg));
189 } 183 }
190 // Generate the call instruction. Assign its result to a temporary 184 // Generate the call instruction. Assign its result to a temporary with high
191 // with high register allocation weight. 185 // register allocation weight.
192 Variable *Dest = Instr->getDest(); 186 Variable *Dest = Instr->getDest();
193 // ReturnReg doubles as ReturnRegLo as necessary. 187 // ReturnReg doubles as ReturnRegLo as necessary.
194 Variable *ReturnReg = nullptr; 188 Variable *ReturnReg = nullptr;
195 Variable *ReturnRegHi = nullptr; 189 Variable *ReturnRegHi = nullptr;
196 if (Dest) { 190 if (Dest) {
197 switch (Dest->getType()) { 191 switch (Dest->getType()) {
198 case IceType_NUM: 192 case IceType_NUM:
199 case IceType_void: 193 case IceType_void:
200 llvm::report_fatal_error("Invalid Call dest type"); 194 llvm::report_fatal_error("Invalid Call dest type");
201 break; 195 break;
202 case IceType_i1: 196 case IceType_i1:
203 case IceType_i8: 197 case IceType_i8:
204 case IceType_i16: 198 case IceType_i16:
205 case IceType_i32: 199 case IceType_i32:
206 ReturnReg = makeReg(Dest->getType(), Traits::RegisterSet::Reg_eax); 200 ReturnReg = makeReg(Dest->getType(), Traits::RegisterSet::Reg_eax);
207 break; 201 break;
208 case IceType_i64: 202 case IceType_i64:
209 ReturnReg = makeReg(IceType_i32, Traits::RegisterSet::Reg_eax); 203 ReturnReg = makeReg(IceType_i32, Traits::RegisterSet::Reg_eax);
210 ReturnRegHi = makeReg(IceType_i32, Traits::RegisterSet::Reg_edx); 204 ReturnRegHi = makeReg(IceType_i32, Traits::RegisterSet::Reg_edx);
211 break; 205 break;
212 case IceType_f32: 206 case IceType_f32:
213 case IceType_f64: 207 case IceType_f64:
214 // Leave ReturnReg==ReturnRegHi==nullptr, and capture the result with 208 // Leave ReturnReg==ReturnRegHi==nullptr, and capture the result with the
215 // the fstp instruction. 209 // fstp instruction.
216 break; 210 break;
217 case IceType_v4i1: 211 case IceType_v4i1:
218 case IceType_v8i1: 212 case IceType_v8i1:
219 case IceType_v16i1: 213 case IceType_v16i1:
220 case IceType_v16i8: 214 case IceType_v16i8:
221 case IceType_v8i16: 215 case IceType_v8i16:
222 case IceType_v4i32: 216 case IceType_v4i32:
223 case IceType_v4f32: 217 case IceType_v4f32:
224 ReturnReg = makeReg(Dest->getType(), Traits::RegisterSet::Reg_xmm0); 218 ReturnReg = makeReg(Dest->getType(), Traits::RegisterSet::Reg_xmm0);
225 break; 219 break;
(...skipping 14 matching lines...)
240 CallTarget = CallTargetVar; 234 CallTarget = CallTargetVar;
241 } 235 }
242 } 236 }
243 Inst *NewCall = Traits::Insts::Call::create(Func, ReturnReg, CallTarget); 237 Inst *NewCall = Traits::Insts::Call::create(Func, ReturnReg, CallTarget);
244 Context.insert(NewCall); 238 Context.insert(NewCall);
245 if (NeedSandboxing) 239 if (NeedSandboxing)
246 _bundle_unlock(); 240 _bundle_unlock();
247 if (ReturnRegHi) 241 if (ReturnRegHi)
248 Context.insert(InstFakeDef::create(Func, ReturnRegHi)); 242 Context.insert(InstFakeDef::create(Func, ReturnRegHi));
249 243
250 // Add the appropriate offset to esp. The call instruction takes care 244 // Add the appropriate offset to esp. The call instruction takes care of
251 // of resetting the stack offset during emission. 245 // resetting the stack offset during emission.
252 if (ParameterAreaSizeBytes) { 246 if (ParameterAreaSizeBytes) {
253 Variable *esp = 247 Variable *esp =
254 Func->getTarget()->getPhysicalRegister(Traits::RegisterSet::Reg_esp); 248 Func->getTarget()->getPhysicalRegister(Traits::RegisterSet::Reg_esp);
255 _add(esp, Ctx->getConstantInt32(ParameterAreaSizeBytes)); 249 _add(esp, Ctx->getConstantInt32(ParameterAreaSizeBytes));
256 } 250 }
257 251
258 // Insert a register-kill pseudo instruction. 252 // Insert a register-kill pseudo instruction.
259 Context.insert(InstFakeKill::create(Func, NewCall)); 253 Context.insert(InstFakeKill::create(Func, NewCall));
260 254
261 // Generate a FakeUse to keep the call live if necessary. 255 // Generate a FakeUse to keep the call live if necessary.
(...skipping 18 matching lines...)
280 assert(Dest->getType() == IceType_i32 || Dest->getType() == IceType_i16 || 274 assert(Dest->getType() == IceType_i32 || Dest->getType() == IceType_i16 ||
281 Dest->getType() == IceType_i8 || Dest->getType() == IceType_i1 || 275 Dest->getType() == IceType_i8 || Dest->getType() == IceType_i1 ||
282 isVectorType(Dest->getType())); 276 isVectorType(Dest->getType()));
283 if (isVectorType(Dest->getType())) { 277 if (isVectorType(Dest->getType())) {
284 _movp(Dest, ReturnReg); 278 _movp(Dest, ReturnReg);
285 } else { 279 } else {
286 _mov(Dest, ReturnReg); 280 _mov(Dest, ReturnReg);
287 } 281 }
288 } 282 }
289 } else if (isScalarFloatingType(Dest->getType())) { 283 } else if (isScalarFloatingType(Dest->getType())) {
290 // Special treatment for an FP function which returns its result in 284 // Special treatment for an FP function which returns its result in st(0).
291 // st(0). 285 // If Dest ends up being a physical xmm register, the fstp emit code will
292 // If Dest ends up being a physical xmm register, the fstp emit code 286 // route st(0) through a temporary stack slot.
293 // will route st(0) through a temporary stack slot.
294 _fstp(Dest); 287 _fstp(Dest);
295 // Create a fake use of Dest in case it actually isn't used, 288 // Create a fake use of Dest in case it actually isn't used, because st(0)
296 // because st(0) still needs to be popped. 289 // still needs to be popped.
297 Context.insert(InstFakeUse::create(Func, Dest)); 290 Context.insert(InstFakeUse::create(Func, Dest));
298 } 291 }
299 } 292 }
300 293
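For the IceType_i64 case in the switch above, the callee returns the low 32 bits in eax and the high 32 bits in edx, which is exactly what the ReturnReg/ReturnRegHi pair models. A hedged sketch of how the two halves form the 64-bit value:

```cpp
#include <cstdint>

// Reassemble an i64 return value from the eax/edx pair described above.
uint64_t combineI64Return(uint32_t Eax /*low*/, uint32_t Edx /*high*/) {
  return (static_cast<uint64_t>(Edx) << 32) | Eax;
}
// combineI64Return(0xDDCCBBAA, 0x11223344) == 0x11223344DDCCBBAAull
```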
301 void TargetX8632::lowerArguments() { 294 void TargetX8632::lowerArguments() {
302 VarList &Args = Func->getArgs(); 295 VarList &Args = Func->getArgs();
303 // The first four arguments of vector type, regardless of their 296 // The first four arguments of vector type, regardless of their position
304 // position relative to the other arguments in the argument list, are 297 // relative to the other arguments in the argument list, are passed in
305 // passed in registers xmm0 - xmm3. 298 // registers xmm0 - xmm3.
306 unsigned NumXmmArgs = 0; 299 unsigned NumXmmArgs = 0;
307 300
308 Context.init(Func->getEntryNode()); 301 Context.init(Func->getEntryNode());
309 Context.setInsertPoint(Context.getCur()); 302 Context.setInsertPoint(Context.getCur());
310 303
311 for (SizeT I = 0, E = Args.size(); 304 for (SizeT I = 0, E = Args.size();
312 I < E && NumXmmArgs < Traits::X86_MAX_XMM_ARGS; ++I) { 305 I < E && NumXmmArgs < Traits::X86_MAX_XMM_ARGS; ++I) {
313 Variable *Arg = Args[I]; 306 Variable *Arg = Args[I];
314 Type Ty = Arg->getType(); 307 Type Ty = Arg->getType();
315 if (!isVectorType(Ty)) 308 if (!isVectorType(Ty))
316 continue; 309 continue;
317 // Replace Arg in the argument list with the home register. Then 310 // Replace Arg in the argument list with the home register. Then generate
318 // generate an instruction in the prolog to copy the home register 311 // an instruction in the prolog to copy the home register to the assigned
319 // to the assigned location of Arg. 312 // location of Arg.
320 int32_t RegNum = Traits::RegisterSet::Reg_xmm0 + NumXmmArgs; 313 int32_t RegNum = Traits::RegisterSet::Reg_xmm0 + NumXmmArgs;
321 ++NumXmmArgs; 314 ++NumXmmArgs;
322 Variable *RegisterArg = Func->makeVariable(Ty); 315 Variable *RegisterArg = Func->makeVariable(Ty);
323 if (BuildDefs::dump()) 316 if (BuildDefs::dump())
324 RegisterArg->setName(Func, "home_reg:" + Arg->getName(Func)); 317 RegisterArg->setName(Func, "home_reg:" + Arg->getName(Func));
325 RegisterArg->setRegNum(RegNum); 318 RegisterArg->setRegNum(RegNum);
326 RegisterArg->setIsArg(); 319 RegisterArg->setIsArg();
327 Arg->setIsArg(false); 320 Arg->setIsArg(false);
328 321
329 Args[I] = RegisterArg; 322 Args[I] = RegisterArg;
(...skipping 14 matching lines...)
344 Reg = eax; 337 Reg = eax;
345 Context.insert(InstFakeUse::create(Func, edx)); 338 Context.insert(InstFakeUse::create(Func, edx));
346 } else if (isScalarFloatingType(Src0->getType())) { 339 } else if (isScalarFloatingType(Src0->getType())) {
347 _fld(Src0); 340 _fld(Src0);
348 } else if (isVectorType(Src0->getType())) { 341 } else if (isVectorType(Src0->getType())) {
349 Reg = legalizeToReg(Src0, Traits::RegisterSet::Reg_xmm0); 342 Reg = legalizeToReg(Src0, Traits::RegisterSet::Reg_xmm0);
350 } else { 343 } else {
351 _mov(Reg, Src0, Traits::RegisterSet::Reg_eax); 344 _mov(Reg, Src0, Traits::RegisterSet::Reg_eax);
352 } 345 }
353 } 346 }
354 // Add a ret instruction even if sandboxing is enabled, because 347 // Add a ret instruction even if sandboxing is enabled, because addEpilog
355 // addEpilog explicitly looks for a ret instruction as a marker for 348 // explicitly looks for a ret instruction as a marker for where to insert the
356 // where to insert the frame removal instructions. 349 // frame removal instructions.
357 _ret(Reg); 350 _ret(Reg);
358 // Add a fake use of esp to make sure esp stays alive for the entire 351 // Add a fake use of esp to make sure esp stays alive for the entire
359 // function. Otherwise post-call esp adjustments get dead-code 352 // function. Otherwise post-call esp adjustments get dead-code eliminated.
360 // eliminated. TODO: Are there more places where the fake use 353 // TODO: Are there more places where the fake use should be inserted? E.g.
361 // should be inserted? E.g. "void f(int n){while(1) g(n);}" may not 354 // "void f(int n){while(1) g(n);}" may not have a ret instruction.
362 // have a ret instruction.
363 Variable *esp = 355 Variable *esp =
364 Func->getTarget()->getPhysicalRegister(Traits::RegisterSet::Reg_esp); 356 Func->getTarget()->getPhysicalRegister(Traits::RegisterSet::Reg_esp);
365 Context.insert(InstFakeUse::create(Func, esp)); 357 Context.insert(InstFakeUse::create(Func, esp));
366 } 358 }
367 359
368 void TargetX8632::addProlog(CfgNode *Node) { 360 void TargetX8632::addProlog(CfgNode *Node) {
369 // Stack frame layout: 361 // Stack frame layout:
370 // 362 //
371 // +------------------------+ 363 // +------------------------+
372 // | 1. return address | 364 // | 1. return address |
(...skipping 15 matching lines...)
388 // 380 //
389 // The following variables record the size in bytes of the given areas: 381 // The following variables record the size in bytes of the given areas:
390 // * X86_RET_IP_SIZE_BYTES: area 1 382 // * X86_RET_IP_SIZE_BYTES: area 1
391 // * PreservedRegsSizeBytes: area 2 383 // * PreservedRegsSizeBytes: area 2
392 // * SpillAreaPaddingBytes: area 3 384 // * SpillAreaPaddingBytes: area 3
393 // * GlobalsSize: area 4 385 // * GlobalsSize: area 4
394 // * GlobalsAndSubsequentPaddingSize: areas 4 - 5 386 // * GlobalsAndSubsequentPaddingSize: areas 4 - 5
395 // * LocalsSpillAreaSize: area 6 387 // * LocalsSpillAreaSize: area 6
396 // * SpillAreaSizeBytes: areas 3 - 7 388 // * SpillAreaSizeBytes: areas 3 - 7
397 389
398 // Determine stack frame offsets for each Variable without a 390 // Determine stack frame offsets for each Variable without a register
399 // register assignment. This can be done as one variable per stack 391 // assignment. This can be done as one variable per stack slot. Or, do
400 // slot. Or, do coalescing by running the register allocator again 392 // coalescing by running the register allocator again with an infinite set of
401 // with an infinite set of registers (as a side effect, this gives 393 // registers (as a side effect, this gives variables a second chance at
402 // variables a second chance at physical register assignment). 394 // physical register assignment).
403 // 395 //
404 // A middle ground approach is to leverage sparsity and allocate one 396 // A middle ground approach is to leverage sparsity and allocate one block of
405 // block of space on the frame for globals (variables with 397 // space on the frame for globals (variables with multi-block lifetime), and
406 // multi-block lifetime), and one block to share for locals 398 // one block to share for locals (single-block lifetime).
407 // (single-block lifetime).
408 399
409 Context.init(Node); 400 Context.init(Node);
410 Context.setInsertPoint(Context.getCur()); 401 Context.setInsertPoint(Context.getCur());
411 402
412 llvm::SmallBitVector CalleeSaves = 403 llvm::SmallBitVector CalleeSaves =
413 getRegisterSet(RegSet_CalleeSave, RegSet_None); 404 getRegisterSet(RegSet_CalleeSave, RegSet_None);
414 RegsUsed = llvm::SmallBitVector(CalleeSaves.size()); 405 RegsUsed = llvm::SmallBitVector(CalleeSaves.size());
415 VarList SortedSpilledVariables, VariablesLinkedToSpillSlots; 406 VarList SortedSpilledVariables, VariablesLinkedToSpillSlots;
416 size_t GlobalsSize = 0; 407 size_t GlobalsSize = 0;
417 // If there is a separate locals area, this represents that area. 408 // If there is a separate locals area, this represents that area. Otherwise
418 // Otherwise it counts any variable not counted by GlobalsSize. 409 // it counts any variable not counted by GlobalsSize.
419 SpillAreaSizeBytes = 0; 410 SpillAreaSizeBytes = 0;
420 // If there is a separate locals area, this specifies the alignment 411 // If there is a separate locals area, this specifies the alignment for it.
421 // for it.
422 uint32_t LocalsSlotsAlignmentBytes = 0; 412 uint32_t LocalsSlotsAlignmentBytes = 0;
423 // The entire spill locations area gets aligned to largest natural 413 // The entire spill locations area gets aligned to largest natural alignment
424 // alignment of the variables that have a spill slot. 414 // of the variables that have a spill slot.
425 uint32_t SpillAreaAlignmentBytes = 0; 415 uint32_t SpillAreaAlignmentBytes = 0;
426 // A spill slot linked to a variable with a stack slot should reuse 416 // A spill slot linked to a variable with a stack slot should reuse that
427 // that stack slot. 417 // stack slot.
428 std::function<bool(Variable *)> TargetVarHook = 418 std::function<bool(Variable *)> TargetVarHook =
429 [&VariablesLinkedToSpillSlots](Variable *Var) { 419 [&VariablesLinkedToSpillSlots](Variable *Var) {
430 if (auto *SpillVar = 420 if (auto *SpillVar =
431 llvm::dyn_cast<typename Traits::SpillVariable>(Var)) { 421 llvm::dyn_cast<typename Traits::SpillVariable>(Var)) {
432 assert(Var->mustNotHaveReg()); 422 assert(Var->mustNotHaveReg());
433 if (SpillVar->getLinkedTo() && !SpillVar->getLinkedTo()->hasReg()) { 423 if (SpillVar->getLinkedTo() && !SpillVar->getLinkedTo()->hasReg()) {
434 VariablesLinkedToSpillSlots.push_back(Var); 424 VariablesLinkedToSpillSlots.push_back(Var);
435 return true; 425 return true;
436 } 426 }
437 } 427 }
(...skipping 21 matching lines...)
459 449
460 // Generate "push ebp; mov ebp, esp" 450 // Generate "push ebp; mov ebp, esp"
461 if (IsEbpBasedFrame) { 451 if (IsEbpBasedFrame) {
462 assert((RegsUsed & getRegisterSet(RegSet_FramePointer, RegSet_None)) 452 assert((RegsUsed & getRegisterSet(RegSet_FramePointer, RegSet_None))
463 .count() == 0); 453 .count() == 0);
464 PreservedRegsSizeBytes += typeWidthInBytes(IceType_i32); 454 PreservedRegsSizeBytes += typeWidthInBytes(IceType_i32);
465 Variable *ebp = getPhysicalRegister(Traits::RegisterSet::Reg_ebp); 455 Variable *ebp = getPhysicalRegister(Traits::RegisterSet::Reg_ebp);
466 Variable *esp = getPhysicalRegister(Traits::RegisterSet::Reg_esp); 456 Variable *esp = getPhysicalRegister(Traits::RegisterSet::Reg_esp);
467 _push(ebp); 457 _push(ebp);
468 _mov(ebp, esp); 458 _mov(ebp, esp);
469 // Keep ebp live for late-stage liveness analysis 459 // Keep ebp live for late-stage liveness analysis (e.g. asm-verbose mode).
470 // (e.g. asm-verbose mode).
471 Context.insert(InstFakeUse::create(Func, ebp)); 460 Context.insert(InstFakeUse::create(Func, ebp));
472 } 461 }
473 462
474 // Align the variables area. SpillAreaPaddingBytes is the size of 463 // Align the variables area. SpillAreaPaddingBytes is the size of the region
475 // the region after the preserved registers and before the spill areas. 464 // after the preserved registers and before the spill areas.
476 // LocalsSlotsPaddingBytes is the amount of padding between the globals 465 // LocalsSlotsPaddingBytes is the amount of padding between the globals and
477 // and locals area if they are separate. 466 // locals area if they are separate.
478 assert(SpillAreaAlignmentBytes <= Traits::X86_STACK_ALIGNMENT_BYTES); 467 assert(SpillAreaAlignmentBytes <= Traits::X86_STACK_ALIGNMENT_BYTES);
479 assert(LocalsSlotsAlignmentBytes <= SpillAreaAlignmentBytes); 468 assert(LocalsSlotsAlignmentBytes <= SpillAreaAlignmentBytes);
480 uint32_t SpillAreaPaddingBytes = 0; 469 uint32_t SpillAreaPaddingBytes = 0;
481 uint32_t LocalsSlotsPaddingBytes = 0; 470 uint32_t LocalsSlotsPaddingBytes = 0;
482 alignStackSpillAreas(Traits::X86_RET_IP_SIZE_BYTES + PreservedRegsSizeBytes, 471 alignStackSpillAreas(Traits::X86_RET_IP_SIZE_BYTES + PreservedRegsSizeBytes,
483 SpillAreaAlignmentBytes, GlobalsSize, 472 SpillAreaAlignmentBytes, GlobalsSize,
484 LocalsSlotsAlignmentBytes, &SpillAreaPaddingBytes, 473 LocalsSlotsAlignmentBytes, &SpillAreaPaddingBytes,
485 &LocalsSlotsPaddingBytes); 474 &LocalsSlotsPaddingBytes);
486 SpillAreaSizeBytes += SpillAreaPaddingBytes + LocalsSlotsPaddingBytes; 475 SpillAreaSizeBytes += SpillAreaPaddingBytes + LocalsSlotsPaddingBytes;
487 uint32_t GlobalsAndSubsequentPaddingSize = 476 uint32_t GlobalsAndSubsequentPaddingSize =
488 GlobalsSize + LocalsSlotsPaddingBytes; 477 GlobalsSize + LocalsSlotsPaddingBytes;
489 478
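A minimal sketch of the two padding computations described above, assuming plain align-up semantics (the real logic lives in alignStackSpillAreas; this signature and helper are illustrative only):

```cpp
#include <cstdint>

static uint32_t alignUp(uint32_t Pos, uint32_t Align) {
  return (Pos + Align - 1) & ~(Align - 1);
}

// SpillAreaStart is the offset past the return address and preserved
// registers (areas 1 and 2 in the layout comment above).
void computeSpillPadding(uint32_t SpillAreaStart, uint32_t SpillAreaAlign,
                         uint32_t GlobalsSize, uint32_t LocalsAlign,
                         uint32_t *SpillAreaPaddingBytes,
                         uint32_t *LocalsSlotsPaddingBytes) {
  uint32_t AlignedStart =
      SpillAreaAlign ? alignUp(SpillAreaStart, SpillAreaAlign) : SpillAreaStart;
  *SpillAreaPaddingBytes = AlignedStart - SpillAreaStart; // area 3
  uint32_t GlobalsEnd = AlignedStart + GlobalsSize;       // end of area 4
  uint32_t AlignedLocals =
      LocalsAlign ? alignUp(GlobalsEnd, LocalsAlign) : GlobalsEnd;
  *LocalsSlotsPaddingBytes = AlignedLocals - GlobalsEnd;  // area 5
}
```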
490 // Align esp if necessary. 479 // Align esp if necessary.
491 if (NeedsStackAlignment) { 480 if (NeedsStackAlignment) {
492 uint32_t StackOffset = 481 uint32_t StackOffset =
493 Traits::X86_RET_IP_SIZE_BYTES + PreservedRegsSizeBytes; 482 Traits::X86_RET_IP_SIZE_BYTES + PreservedRegsSizeBytes;
494 uint32_t StackSize = 483 uint32_t StackSize =
495 Traits::applyStackAlignment(StackOffset + SpillAreaSizeBytes); 484 Traits::applyStackAlignment(StackOffset + SpillAreaSizeBytes);
496 SpillAreaSizeBytes = StackSize - StackOffset; 485 SpillAreaSizeBytes = StackSize - StackOffset;
497 } 486 }
498 487
499 // Generate "sub esp, SpillAreaSizeBytes" 488 // Generate "sub esp, SpillAreaSizeBytes"
500 if (SpillAreaSizeBytes) 489 if (SpillAreaSizeBytes)
501 _sub(getPhysicalRegister(Traits::RegisterSet::Reg_esp), 490 _sub(getPhysicalRegister(Traits::RegisterSet::Reg_esp),
502 Ctx->getConstantInt32(SpillAreaSizeBytes)); 491 Ctx->getConstantInt32(SpillAreaSizeBytes));
503 Ctx->statsUpdateFrameBytes(SpillAreaSizeBytes); 492 Ctx->statsUpdateFrameBytes(SpillAreaSizeBytes);
504 493
505 resetStackAdjustment(); 494 resetStackAdjustment();
506 495
507 // Fill in stack offsets for stack args, and copy args into registers 496 // Fill in stack offsets for stack args, and copy args into registers for
508 // for those that were register-allocated. Args are pushed right to 497 // those that were register-allocated. Args are pushed right to left, so
509 // left, so Arg[0] is closest to the stack/frame pointer. 498 // Arg[0] is closest to the stack/frame pointer.
510 Variable *FramePtr = getPhysicalRegister(getFrameOrStackReg()); 499 Variable *FramePtr = getPhysicalRegister(getFrameOrStackReg());
511 size_t BasicFrameOffset = 500 size_t BasicFrameOffset =
512 PreservedRegsSizeBytes + Traits::X86_RET_IP_SIZE_BYTES; 501 PreservedRegsSizeBytes + Traits::X86_RET_IP_SIZE_BYTES;
513 if (!IsEbpBasedFrame) 502 if (!IsEbpBasedFrame)
514 BasicFrameOffset += SpillAreaSizeBytes; 503 BasicFrameOffset += SpillAreaSizeBytes;
515 504
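The arithmetic above has a simple shape: an incoming stack argument sits above the return address and the preserved registers, and when the frame is esp-based the spill area must be skipped as well. A hedged restatement as a standalone helper (the parameter names mirror the surrounding code; the function itself is illustrative):

```cpp
#include <cstdint>

uint32_t inArgOffset(bool IsEbpBasedFrame, uint32_t PreservedRegsSizeBytes,
                     uint32_t RetIpSizeBytes, uint32_t SpillAreaSizeBytes,
                     uint32_t OffsetWithinInArgsArea) {
  uint32_t BasicFrameOffset = PreservedRegsSizeBytes + RetIpSizeBytes;
  if (!IsEbpBasedFrame)
    BasicFrameOffset += SpillAreaSizeBytes; // esp-based: skip the spill area
  return BasicFrameOffset + OffsetWithinInArgsArea;
}
```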
516 const VarList &Args = Func->getArgs(); 505 const VarList &Args = Func->getArgs();
517 size_t InArgsSizeBytes = 0; 506 size_t InArgsSizeBytes = 0;
518 unsigned NumXmmArgs = 0; 507 unsigned NumXmmArgs = 0;
519 for (Variable *Arg : Args) { 508 for (Variable *Arg : Args) {
(...skipping 49 matching lines...)
569 void TargetX8632::addEpilog(CfgNode *Node) { 558 void TargetX8632::addEpilog(CfgNode *Node) {
570 InstList &Insts = Node->getInsts(); 559 InstList &Insts = Node->getInsts();
571 InstList::reverse_iterator RI, E; 560 InstList::reverse_iterator RI, E;
572 for (RI = Insts.rbegin(), E = Insts.rend(); RI != E; ++RI) { 561 for (RI = Insts.rbegin(), E = Insts.rend(); RI != E; ++RI) {
573 if (llvm::isa<typename Traits::Insts::Ret>(*RI)) 562 if (llvm::isa<typename Traits::Insts::Ret>(*RI))
574 break; 563 break;
575 } 564 }
576 if (RI == E) 565 if (RI == E)
577 return; 566 return;
578 567
579 // Convert the reverse_iterator position into its corresponding 568 // Convert the reverse_iterator position into its corresponding (forward)
580 // (forward) iterator position. 569 // iterator position.
581 InstList::iterator InsertPoint = RI.base(); 570 InstList::iterator InsertPoint = RI.base();
582 --InsertPoint; 571 --InsertPoint;
583 Context.init(Node); 572 Context.init(Node);
584 Context.setInsertPoint(InsertPoint); 573 Context.setInsertPoint(InsertPoint);
585 574
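The base()-then-decrement step above relies on the standard guarantee that a reverse_iterator's base() refers to the position one past the element the reverse iterator denotes. A self-contained illustration:

```cpp
#include <cassert>
#include <list>

int main() {
  std::list<int> Insts = {1, 2, 3};
  auto RI = Insts.rbegin(); // refers to 3 (the "ret" found by the search)
  auto Fwd = RI.base();     // equals Insts.end(), one past that element
  --Fwd;                    // now refers to the same element as RI
  assert(&*Fwd == &*RI);
  return 0;
}
```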
586 Variable *esp = getPhysicalRegister(Traits::RegisterSet::Reg_esp); 575 Variable *esp = getPhysicalRegister(Traits::RegisterSet::Reg_esp);
587 if (IsEbpBasedFrame) { 576 if (IsEbpBasedFrame) {
588 Variable *ebp = getPhysicalRegister(Traits::RegisterSet::Reg_ebp); 577 Variable *ebp = getPhysicalRegister(Traits::RegisterSet::Reg_ebp);
589 // For late-stage liveness analysis (e.g. asm-verbose mode), 578 // For late-stage liveness analysis (e.g. asm-verbose mode), adding a fake
590 // adding a fake use of esp before the assignment of esp=ebp keeps 579 // use of esp before the assignment of esp=ebp keeps previous esp
591 // previous esp adjustments from being dead-code eliminated. 580 // adjustments from being dead-code eliminated.
592 Context.insert(InstFakeUse::create(Func, esp)); 581 Context.insert(InstFakeUse::create(Func, esp));
593 _mov(esp, ebp); 582 _mov(esp, ebp);
594 _pop(ebp); 583 _pop(ebp);
595 } else { 584 } else {
596 // add esp, SpillAreaSizeBytes 585 // add esp, SpillAreaSizeBytes
597 if (SpillAreaSizeBytes) 586 if (SpillAreaSizeBytes)
598 _add(esp, Ctx->getConstantInt32(SpillAreaSizeBytes)); 587 _add(esp, Ctx->getConstantInt32(SpillAreaSizeBytes));
599 } 588 }
600 589
601 // Add pop instructions for preserved registers. 590 // Add pop instructions for preserved registers.
(...skipping 138 matching lines...)
740 RPE_PooledConstantReordering, K); 729 RPE_PooledConstantReordering, K);
741 RandomShuffle(Pool.begin(), Pool.end(), 730 RandomShuffle(Pool.begin(), Pool.end(),
742 [&RNG](uint64_t N) { return (uint32_t)RNG.next(N); }); 731 [&RNG](uint64_t N) { return (uint32_t)RNG.next(N); });
743 } 732 }
744 733
745 for (Constant *C : Pool) { 734 for (Constant *C : Pool) {
746 if (!C->getShouldBePooled()) 735 if (!C->getShouldBePooled())
747 continue; 736 continue;
748 typename T::IceType *Const = llvm::cast<typename T::IceType>(C); 737 typename T::IceType *Const = llvm::cast<typename T::IceType>(C);
749 typename T::IceType::PrimType Value = Const->getValue(); 738 typename T::IceType::PrimType Value = Const->getValue();
750 // Use memcpy() to copy bits from Value into RawValue in a way 739 // Use memcpy() to copy bits from Value into RawValue in a way that avoids
751 // that avoids breaking strict-aliasing rules. 740 // breaking strict-aliasing rules.
752 typename T::PrimitiveIntType RawValue; 741 typename T::PrimitiveIntType RawValue;
753 memcpy(&RawValue, &Value, sizeof(Value)); 742 memcpy(&RawValue, &Value, sizeof(Value));
754 char buf[30]; 743 char buf[30];
755 int CharsPrinted = 744 int CharsPrinted =
756 snprintf(buf, llvm::array_lengthof(buf), T::PrintfString, RawValue); 745 snprintf(buf, llvm::array_lengthof(buf), T::PrintfString, RawValue);
757 assert(CharsPrinted >= 0 && 746 assert(CharsPrinted >= 0 &&
758 (size_t)CharsPrinted < llvm::array_lengthof(buf)); 747 (size_t)CharsPrinted < llvm::array_lengthof(buf));
759 (void)CharsPrinted; // avoid warnings if asserts are disabled 748 (void)CharsPrinted; // avoid warnings if asserts are disabled
760 Const->emitPoolLabel(Str); 749 Const->emitPoolLabel(Str);
761 Str << ":\n\t" << T::AsmTag << "\t" << buf << "\t# " << T::TypeName << " " 750 Str << ":\n\t" << T::AsmTag << "\t" << buf << "\t# " << T::TypeName << " "
762 << Value << "\n"; 751 << Value << "\n";
763 } 752 }
764 } 753 }
765 754
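The memcpy idiom in the loop above is the standard pre-std::bit_cast way to reinterpret an object's bits without breaking strict aliasing. A standalone sketch for the float case:

```cpp
#include <cstdint>
#include <cstring>

uint32_t floatBits(float Value) {
  uint32_t RawValue;
  static_assert(sizeof(RawValue) == sizeof(Value), "size mismatch");
  std::memcpy(&RawValue, &Value, sizeof(Value));
  return RawValue; // e.g. floatBits(1.0f) == 0x3F800000
}
```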
766 void TargetDataX8632::lowerConstants() { 755 void TargetDataX8632::lowerConstants() {
767 if (Ctx->getFlags().getDisableTranslation()) 756 if (Ctx->getFlags().getDisableTranslation())
768 return; 757 return;
769 // No need to emit constants from the int pool since (for x86) they 758 // No need to emit constants from the int pool since (for x86) they are
770 // are embedded as immediates in the instructions, just emit float/double. 759 // embedded as immediates in the instructions, just emit float/double.
771 switch (Ctx->getFlags().getOutFileType()) { 760 switch (Ctx->getFlags().getOutFileType()) {
772 case FT_Elf: { 761 case FT_Elf: {
773 ELFObjectWriter *Writer = Ctx->getObjectWriter(); 762 ELFObjectWriter *Writer = Ctx->getObjectWriter();
774 763
775 Writer->writeConstantPool<ConstantInteger32>(IceType_i8); 764 Writer->writeConstantPool<ConstantInteger32>(IceType_i8);
776 Writer->writeConstantPool<ConstantInteger32>(IceType_i16); 765 Writer->writeConstantPool<ConstantInteger32>(IceType_i16);
777 Writer->writeConstantPool<ConstantInteger32>(IceType_i32); 766 Writer->writeConstantPool<ConstantInteger32>(IceType_i32);
778 767
779 Writer->writeConstantPool<ConstantFloat>(IceType_f32); 768 Writer->writeConstantPool<ConstantFloat>(IceType_f32);
780 Writer->writeConstantPool<ConstantDouble>(IceType_f64); 769 Writer->writeConstantPool<ConstantDouble>(IceType_f64);
(...skipping 58 matching lines...)
839 emitGlobal(*Var, SectionSuffix); 828 emitGlobal(*Var, SectionSuffix);
840 } 829 }
841 } 830 }
842 } break; 831 } break;
843 } 832 }
844 } 833 }
845 834
846 TargetHeaderX8632::TargetHeaderX8632(GlobalContext *Ctx) 835 TargetHeaderX8632::TargetHeaderX8632(GlobalContext *Ctx)
847 : TargetHeaderLowering(Ctx) {} 836 : TargetHeaderLowering(Ctx) {}
848 837
849 // In some cases, there are x-macros tables for both high-level and 838 // In some cases, there are x-macros tables for both high-level and low-level
850 // low-level instructions/operands that use the same enum key value. 839 // instructions/operands that use the same enum key value. The tables are kept
851 // The tables are kept separate to maintain a proper separation 840 // separate to maintain a proper separation between abstraction layers. There
852 // between abstraction layers. There is a risk that the tables could 841 // is a risk that the tables could get out of sync if enum values are reordered
853 // get out of sync if enum values are reordered or if entries are 842 // or if entries are added or deleted. The following dummy namespaces use
854 // added or deleted. The following dummy namespaces use
855 // static_asserts to ensure everything is kept in sync. 843 // static_asserts to ensure everything is kept in sync.
856 844
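The cross-check pattern implemented by the dummy namespaces below can be shown in miniature. In this self-contained sketch (the table and enum names are invented for illustration), two x-macro tables are validated against each other with static_asserts, exactly as described above:

```cpp
#define COLORS_HI_TABLE X(Red) X(Green) X(Blue) // high-level table
#define COLORS_LO_TABLE X(Red) X(Green) X(Blue) // low-level table

enum HiColor {
#define X(tag) Hi_##tag,
  COLORS_HI_TABLE
#undef X
};

enum LoColor {
#define X(tag) Lo_##tag,
  COLORS_LO_TABLE
#undef X
};

// If either table is reordered, or an entry is added or deleted, one of
// these asserts fires at compile time.
#define X(tag)                                                                 \
  static_assert(static_cast<int>(Hi_##tag) == static_cast<int>(Lo_##tag),      \
                "COLORS_HI_TABLE and COLORS_LO_TABLE are out of sync");
COLORS_HI_TABLE
#undef X
```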
857 namespace { 845 namespace {
858 // Validate the enum values in FCMPX8632_TABLE. 846 // Validate the enum values in FCMPX8632_TABLE.
859 namespace dummy1 { 847 namespace dummy1 {
860 // Define a temporary set of enum values based on low-level table 848 // Define a temporary set of enum values based on low-level table entries.
861 // entries.
862 enum _tmp_enum { 849 enum _tmp_enum {
863 #define X(val, dflt, swapS, C1, C2, swapV, pred) _tmp_##val, 850 #define X(val, dflt, swapS, C1, C2, swapV, pred) _tmp_##val,
864 FCMPX8632_TABLE 851 FCMPX8632_TABLE
865 #undef X 852 #undef X
866 _num 853 _num
867 }; 854 };
868 // Define a set of constants based on high-level table entries. 855 // Define a set of constants based on high-level table entries.
869 #define X(tag, str) static const int _table1_##tag = InstFcmp::tag; 856 #define X(tag, str) static const int _table1_##tag = InstFcmp::tag;
870 ICEINSTFCMP_TABLE 857 ICEINSTFCMP_TABLE
871 #undef X 858 #undef X
872 // Define a set of constants based on low-level table entries, and 859 // Define a set of constants based on low-level table entries, and ensure the
873 // ensure the table entry keys are consistent. 860 // table entry keys are consistent.
874 #define X(val, dflt, swapS, C1, C2, swapV, pred) \ 861 #define X(val, dflt, swapS, C1, C2, swapV, pred) \
875 static const int _table2_##val = _tmp_##val; \ 862 static const int _table2_##val = _tmp_##val; \
876 static_assert( \ 863 static_assert( \
877 _table1_##val == _table2_##val, \ 864 _table1_##val == _table2_##val, \
878 "Inconsistency between FCMPX8632_TABLE and ICEINSTFCMP_TABLE"); 865 "Inconsistency between FCMPX8632_TABLE and ICEINSTFCMP_TABLE");
879 FCMPX8632_TABLE 866 FCMPX8632_TABLE
880 #undef X 867 #undef X
881 // Repeat the static asserts with respect to the high-level table 868 // Repeat the static asserts with respect to the high-level table entries in
882 // entries in case the high-level table has extra entries. 869 // case the high-level table has extra entries.
883 #define X(tag, str) \ 870 #define X(tag, str) \
884 static_assert( \ 871 static_assert( \
885 _table1_##tag == _table2_##tag, \ 872 _table1_##tag == _table2_##tag, \
886 "Inconsistency between FCMPX8632_TABLE and ICEINSTFCMP_TABLE"); 873 "Inconsistency between FCMPX8632_TABLE and ICEINSTFCMP_TABLE");
887 ICEINSTFCMP_TABLE 874 ICEINSTFCMP_TABLE
888 #undef X 875 #undef X
889 } // end of namespace dummy1 876 } // end of namespace dummy1
890 877
891 // Validate the enum values in ICMPX8632_TABLE. 878 // Validate the enum values in ICMPX8632_TABLE.
892 namespace dummy2 { 879 namespace dummy2 {
893 // Define a temporary set of enum values based on low-level table 880 // Define a temporary set of enum values based on low-level table entries.
894 // entries.
895 enum _tmp_enum { 881 enum _tmp_enum {
896 #define X(val, C_32, C1_64, C2_64, C3_64) _tmp_##val, 882 #define X(val, C_32, C1_64, C2_64, C3_64) _tmp_##val,
897 ICMPX8632_TABLE 883 ICMPX8632_TABLE
898 #undef X 884 #undef X
899 _num 885 _num
900 }; 886 };
901 // Define a set of constants based on high-level table entries. 887 // Define a set of constants based on high-level table entries.
902 #define X(tag, str) static const int _table1_##tag = InstIcmp::tag; 888 #define X(tag, str) static const int _table1_##tag = InstIcmp::tag;
903 ICEINSTICMP_TABLE 889 ICEINSTICMP_TABLE
904 #undef X 890 #undef X
905 // Define a set of constants based on low-level table entries, and 891 // Define a set of constants based on low-level table entries, and ensure the
906 // ensure the table entry keys are consistent. 892 // table entry keys are consistent.
907 #define X(val, C_32, C1_64, C2_64, C3_64) \ 893 #define X(val, C_32, C1_64, C2_64, C3_64) \
908 static const int _table2_##val = _tmp_##val; \ 894 static const int _table2_##val = _tmp_##val; \
909 static_assert( \ 895 static_assert( \
910 _table1_##val == _table2_##val, \ 896 _table1_##val == _table2_##val, \
911 "Inconsistency between ICMPX8632_TABLE and ICEINSTICMP_TABLE"); 897 "Inconsistency between ICMPX8632_TABLE and ICEINSTICMP_TABLE");
912 ICMPX8632_TABLE 898 ICMPX8632_TABLE
913 #undef X 899 #undef X
914 // Repeat the static asserts with respect to the high-level table 900 // Repeat the static asserts with respect to the high-level table entries in
915 // entries in case the high-level table has extra entries. 901 // case the high-level table has extra entries.
916 #define X(tag, str) \ 902 #define X(tag, str) \
917 static_assert( \ 903 static_assert( \
918 _table1_##tag == _table2_##tag, \ 904 _table1_##tag == _table2_##tag, \
919 "Inconsistency between ICMPX8632_TABLE and ICEINSTICMP_TABLE"); 905 "Inconsistency between ICMPX8632_TABLE and ICEINSTICMP_TABLE");
920 ICEINSTICMP_TABLE 906 ICEINSTICMP_TABLE
921 #undef X 907 #undef X
922 } // end of namespace dummy2 908 } // end of namespace dummy2
923 909
924 // Validate the enum values in ICETYPEX8632_TABLE. 910 // Validate the enum values in ICETYPEX8632_TABLE.
925 namespace dummy3 { 911 namespace dummy3 {
926 // Define a temporary set of enum values based on low-level table 912 // Define a temporary set of enum values based on low-level table entries.
927 // entries.
928 enum _tmp_enum { 913 enum _tmp_enum {
929 #define X(tag, elementty, cvt, sdss, pack, width, fld) _tmp_##tag, 914 #define X(tag, elementty, cvt, sdss, pack, width, fld) _tmp_##tag,
930 ICETYPEX8632_TABLE 915 ICETYPEX8632_TABLE
931 #undef X 916 #undef X
932 _num 917 _num
933 }; 918 };
934 // Define a set of constants based on high-level table entries. 919 // Define a set of constants based on high-level table entries.
935 #define X(tag, sizeLog2, align, elts, elty, str) \ 920 #define X(tag, sizeLog2, align, elts, elty, str) \
936 static const int _table1_##tag = tag; 921 static const int _table1_##tag = tag;
937 ICETYPE_TABLE 922 ICETYPE_TABLE
938 #undef X 923 #undef X
939 // Define a set of constants based on low-level table entries, and 924 // Define a set of constants based on low-level table entries, and ensure the
940 // ensure the table entry keys are consistent. 925 // table entry keys are consistent.
941 #define X(tag, elementty, cvt, sdss, pack, width, fld) \ 926 #define X(tag, elementty, cvt, sdss, pack, width, fld) \
942 static const int _table2_##tag = _tmp_##tag; \ 927 static const int _table2_##tag = _tmp_##tag; \
943 static_assert(_table1_##tag == _table2_##tag, \ 928 static_assert(_table1_##tag == _table2_##tag, \
944 "Inconsistency between ICETYPEX8632_TABLE and ICETYPE_TABLE"); 929 "Inconsistency between ICETYPEX8632_TABLE and ICETYPE_TABLE");
945 ICETYPEX8632_TABLE 930 ICETYPEX8632_TABLE
946 #undef X 931 #undef X
947 // Repeat the static asserts with respect to the high-level table 932 // Repeat the static asserts with respect to the high-level table entries in
948 // entries in case the high-level table has extra entries. 933 // case the high-level table has extra entries.
949 #define X(tag, sizeLog2, align, elts, elty, str) \ 934 #define X(tag, sizeLog2, align, elts, elty, str) \
950 static_assert(_table1_##tag == _table2_##tag, \ 935 static_assert(_table1_##tag == _table2_##tag, \
951 "Inconsistency between ICETYPEX8632_TABLE and ICETYPE_TABLE"); 936 "Inconsistency between ICETYPEX8632_TABLE and ICETYPE_TABLE");
952 ICETYPE_TABLE 937 ICETYPE_TABLE
953 #undef X 938 #undef X
954 } // end of namespace dummy3 939 } // end of namespace dummy3
955 } // end of anonymous namespace 940 } // end of anonymous namespace
956 941
957 } // end of namespace Ice 942 } // end of namespace Ice