Clang Project

clang_source_code/lib/CodeGen/CGStmtOpenMP.cpp
1//===--- CGStmtOpenMP.cpp - Emit LLVM Code from Statements ----------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit OpenMP nodes as LLVM code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CGCleanup.h"
14#include "CGOpenMPRuntime.h"
15#include "CodeGenFunction.h"
16#include "CodeGenModule.h"
17#include "TargetInfo.h"
18#include "clang/AST/Stmt.h"
19#include "clang/AST/StmtOpenMP.h"
20#include "clang/AST/DeclOpenMP.h"
21using namespace clang;
22using namespace CodeGen;
23
24namespace {
25/// Lexical scope for OpenMP executable constructs, that handles correct codegen
26/// for captured expressions.
27class OMPLexicalScope : public CodeGenFunction::LexicalScope {
28  void emitPreInitStmt(CodeGenFunction &CGF, const OMPExecutableDirective &S) {
29    for (const auto *C : S.clauses()) {
30      if (const auto *CPI = OMPClauseWithPreInit::get(C)) {
31        if (const auto *PreInit =
32                cast_or_null<DeclStmt>(CPI->getPreInitStmt())) {
33          for (const auto *I : PreInit->decls()) {
34            if (!I->hasAttr<OMPCaptureNoInitAttr>()) {
35              CGF.EmitVarDecl(cast<VarDecl>(*I));
36            } else {
37              CodeGenFunction::AutoVarEmission Emission =
38                  CGF.EmitAutoVarAlloca(cast<VarDecl>(*I));
39              CGF.EmitAutoVarCleanups(Emission);
40            }
41          }
42        }
43      }
44    }
45  }
46  CodeGenFunction::OMPPrivateScope InlinedShareds;
47
48  static bool isCapturedVar(CodeGenFunction &CGF, const VarDecl *VD) {
49    return CGF.LambdaCaptureFields.lookup(VD) ||
50           (CGF.CapturedStmtInfo && CGF.CapturedStmtInfo->lookup(VD)) ||
51           (CGF.CurCodeDecl && isa<BlockDecl>(CGF.CurCodeDecl));
52  }
53
54public:
55  OMPLexicalScope(
56      CodeGenFunction &CGF, const OMPExecutableDirective &S,
57      const llvm::Optional<OpenMPDirectiveKind> CapturedRegion = llvm::None,
58      const bool EmitPreInitStmt = true)
59      : CodeGenFunction::LexicalScope(CGF, S.getSourceRange()),
60        InlinedShareds(CGF) {
61    if (EmitPreInitStmt)
62      emitPreInitStmt(CGF, S);
63    if (!CapturedRegion.hasValue())
64      return;
65     (0) . __assert_fail ("S.hasAssociatedStmt() && \"Expected associated statement for inlined directive.\"", "/home/seafit/code_projects/clang_source/clang/lib/CodeGen/CGStmtOpenMP.cpp", 66, __PRETTY_FUNCTION__))" file_link="../../../include/assert.h.html#88" macro="true">assert(S.hasAssociatedStmt() &&
66 (0) . __assert_fail ("S.hasAssociatedStmt() && \"Expected associated statement for inlined directive.\"", "/home/seafit/code_projects/clang_source/clang/lib/CodeGen/CGStmtOpenMP.cpp", 66, __PRETTY_FUNCTION__))" file_link="../../../include/assert.h.html#88" macro="true">           "Expected associated statement for inlined directive.");
67    const CapturedStmt *CS = S.getCapturedStmt(*CapturedRegion);
68    for (const auto &C : CS->captures()) {
69      if (C.capturesVariable() || C.capturesVariableByCopy()) {
70        auto *VD = C.getCapturedVar();
71         (0) . __assert_fail ("VD == VD->getCanonicalDecl() && \"Canonical decl must be captured.\"", "/home/seafit/code_projects/clang_source/clang/lib/CodeGen/CGStmtOpenMP.cpp", 72, __PRETTY_FUNCTION__))" file_link="../../../include/assert.h.html#88" macro="true">assert(VD == VD->getCanonicalDecl() &&
72 (0) . __assert_fail ("VD == VD->getCanonicalDecl() && \"Canonical decl must be captured.\"", "/home/seafit/code_projects/clang_source/clang/lib/CodeGen/CGStmtOpenMP.cpp", 72, __PRETTY_FUNCTION__))" file_link="../../../include/assert.h.html#88" macro="true">               "Canonical decl must be captured.");
73        DeclRefExpr DRE(
74            CGF.getContext(), const_cast<VarDecl *>(VD),
75            isCapturedVar(CGF, VD) || (CGF.CapturedStmtInfo &&
76                                       InlinedShareds.isGlobalVarCaptured(VD)),
77            VD->getType().getNonReferenceType(), VK_LValue, C.getLocation());
78        InlinedShareds.addPrivate(VD, [&CGF, &DRE]() -> Address {
79          return CGF.EmitLValue(&DRE).getAddress();
80        });
81      }
82    }
83    (void)InlinedShareds.Privatize();
84  }
85};
86
87/// Lexical scope for OpenMP parallel construct, that handles correct codegen
88/// for captured expressions.
89class OMPParallelScope final : public OMPLexicalScope {
90  bool EmitPreInitStmt(const OMPExecutableDirective &S) {
91    OpenMPDirectiveKind Kind = S.getDirectiveKind();
92    return !(isOpenMPTargetExecutionDirective(Kind) ||
93             isOpenMPLoopBoundSharingDirective(Kind)) &&
94           isOpenMPParallelDirective(Kind);
95  }
96
97public:
98  OMPParallelScope(CodeGenFunction &CGF, const OMPExecutableDirective &S)
99      : OMPLexicalScope(CGF, S, /*CapturedRegion=*/llvm::None,
100                        EmitPreInitStmt(S)) {}
101};
102
103/// Lexical scope for OpenMP teams construct, that handles correct codegen
104/// for captured expressions.
105class OMPTeamsScope final : public OMPLexicalScope {
106  bool EmitPreInitStmt(const OMPExecutableDirective &S) {
107    OpenMPDirectiveKind Kind = S.getDirectiveKind();
108    return !isOpenMPTargetExecutionDirective(Kind) &&
109           isOpenMPTeamsDirective(Kind);
110  }
111
112public:
113  OMPTeamsScope(CodeGenFunction &CGF, const OMPExecutableDirective &S)
114      : OMPLexicalScope(CGF, S, /*CapturedRegion=*/llvm::None,
115                        EmitPreInitStmt(S)) {}
116};
117
118/// Private scope for OpenMP loop-based directives, that supports capturing
119/// of used expression from loop statement.
120class OMPLoopScope : public CodeGenFunction::RunCleanupsScope {
121  void emitPreInitStmt(CodeGenFunction &CGF, const OMPLoopDirective &S) {
122    CodeGenFunction::OMPMapVars PreCondVars;
123    for (const auto *E : S.counters()) {
124      const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
125      (void)PreCondVars.setVarAddr(
126          CGF, VD, CGF.CreateMemTemp(VD->getType().getNonReferenceType()));
127    }
128    (void)PreCondVars.apply(CGF);
129    if (const auto *PreInits = cast_or_null<DeclStmt>(S.getPreInits())) {
130      for (const auto *I : PreInits->decls())
131        CGF.EmitVarDecl(cast<VarDecl>(*I));
132    }
133    PreCondVars.restore(CGF);
134  }
135
136public:
137  OMPLoopScope(CodeGenFunction &CGF, const OMPLoopDirective &S)
138      : CodeGenFunction::RunCleanupsScope(CGF) {
139    emitPreInitStmt(CGF, S);
140  }
141};
142
143class OMPSimdLexicalScope : public CodeGenFunction::LexicalScope {
144  CodeGenFunction::OMPPrivateScope InlinedShareds;
145
146  static bool isCapturedVar(CodeGenFunction &CGF, const VarDecl *VD) {
147    return CGF.LambdaCaptureFields.lookup(VD) ||
148           (CGF.CapturedStmtInfo && CGF.CapturedStmtInfo->lookup(VD)) ||
149           (CGF.CurCodeDecl && isa<BlockDecl>(CGF.CurCodeDecl) &&
150            cast<BlockDecl>(CGF.CurCodeDecl)->capturesVariable(VD));
151  }
152
153public:
154  OMPSimdLexicalScope(CodeGenFunction &CGF, const OMPExecutableDirective &S)
155      : CodeGenFunction::LexicalScope(CGF, S.getSourceRange()),
156        InlinedShareds(CGF) {
157    for (const auto *C : S.clauses()) {
158      if (const auto *CPI = OMPClauseWithPreInit::get(C)) {
159        if (const auto *PreInit =
160                cast_or_null<DeclStmt>(CPI->getPreInitStmt())) {
161          for (const auto *I : PreInit->decls()) {
162            if (!I->hasAttr<OMPCaptureNoInitAttr>()) {
163              CGF.EmitVarDecl(cast<VarDecl>(*I));
164            } else {
165              CodeGenFunction::AutoVarEmission Emission =
166                  CGF.EmitAutoVarAlloca(cast<VarDecl>(*I));
167              CGF.EmitAutoVarCleanups(Emission);
168            }
169          }
170        }
171      } else if (const auto *UDP = dyn_cast<OMPUseDevicePtrClause>(C)) {
172        for (const Expr *E : UDP->varlists()) {
173          const Decl *D = cast<DeclRefExpr>(E)->getDecl();
174          if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(D))
175            CGF.EmitVarDecl(*OED);
176        }
177      }
178    }
179    if (!isOpenMPSimdDirective(S.getDirectiveKind()))
180      CGF.EmitOMPPrivateClause(S, InlinedShareds);
181    if (const auto *TG = dyn_cast<OMPTaskgroupDirective>(&S)) {
182      if (const Expr *E = TG->getReductionRef())
183        CGF.EmitVarDecl(*cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()));
184    }
185    const auto *CS = cast_or_null<CapturedStmt>(S.getAssociatedStmt());
186    while (CS) {
187      for (auto &C : CS->captures()) {
188        if (C.capturesVariable() || C.capturesVariableByCopy()) {
189          auto *VD = C.getCapturedVar();
190           (0) . __assert_fail ("VD == VD->getCanonicalDecl() && \"Canonical decl must be captured.\"", "/home/seafit/code_projects/clang_source/clang/lib/CodeGen/CGStmtOpenMP.cpp", 191, __PRETTY_FUNCTION__))" file_link="../../../include/assert.h.html#88" macro="true">assert(VD == VD->getCanonicalDecl() &&
191 (0) . __assert_fail ("VD == VD->getCanonicalDecl() && \"Canonical decl must be captured.\"", "/home/seafit/code_projects/clang_source/clang/lib/CodeGen/CGStmtOpenMP.cpp", 191, __PRETTY_FUNCTION__))" file_link="../../../include/assert.h.html#88" macro="true">                 "Canonical decl must be captured.");
192          DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(VD),
193                          isCapturedVar(CGF, VD) ||
194                              (CGF.CapturedStmtInfo &&
195                               InlinedShareds.isGlobalVarCaptured(VD)),
196                          VD->getType().getNonReferenceType(), VK_LValue,
197                          C.getLocation());
198          InlinedShareds.addPrivate(VD, [&CGF, &DRE]() -> Address {
199            return CGF.EmitLValue(&DRE).getAddress();
200          });
201        }
202      }
203      CS = dyn_cast<CapturedStmt>(CS->getCapturedStmt());
204    }
205    (void)InlinedShareds.Privatize();
206  }
207};
208
209} // namespace
210
211static void emitCommonOMPTargetDirective(CodeGenFunction &CGF,
212                                         const OMPExecutableDirective &S,
213                                         const RegionCodeGenTy &CodeGen);
214
215LValue CodeGenFunction::EmitOMPSharedLValue(const Expr *E) {
216  if (const auto *OrigDRE = dyn_cast<DeclRefExpr>(E)) {
217    if (const auto *OrigVD = dyn_cast<VarDecl>(OrigDRE->getDecl())) {
218      OrigVD = OrigVD->getCanonicalDecl();
219      bool IsCaptured =
220          LambdaCaptureFields.lookup(OrigVD) ||
221          (CapturedStmtInfo && CapturedStmtInfo->lookup(OrigVD)) ||
222          (CurCodeDecl && isa<BlockDecl>(CurCodeDecl));
223      DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD), IsCaptured,
224                      OrigDRE->getType(), VK_LValue, OrigDRE->getExprLoc());
225      return EmitLValue(&DRE);
226    }
227  }
228  return EmitLValue(E);
229}
230
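// getTypeSize() below computes a runtime byte size. For a VLA such as
// 'int a[n][m]', getTypeSizeInChars() reports zero, so the loop multiplies the
// runtime dimension counts (n * m) and then scales by the size of the
// remaining element type to obtain the total size in bytes.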
231llvm::Value *CodeGenFunction::getTypeSize(QualType Ty) {
232  ASTContext &C = getContext();
233  llvm::Value *Size = nullptr;
234  auto SizeInChars = C.getTypeSizeInChars(Ty);
235  if (SizeInChars.isZero()) {
236    // getTypeSizeInChars() returns 0 for a VLA.
237    while (const VariableArrayType *VAT = C.getAsVariableArrayType(Ty)) {
238      VlaSizePair VlaSize = getVLASize(VAT);
239      Ty = VlaSize.Type;
240      Size = Size ? Builder.CreateNUWMul(Size, VlaSize.NumElts)
241                  : VlaSize.NumElts;
242    }
243    SizeInChars = C.getTypeSizeInChars(Ty);
244    if (SizeInChars.isZero())
245      return llvm::ConstantInt::get(SizeTy, /*V=*/0);
246    return Builder.CreateNUWMul(Size, CGM.getSize(SizeInChars));
247  }
248  return CGM.getSize(SizeInChars);
249}
250
251void CodeGenFunction::GenerateOpenMPCapturedVars(
252    const CapturedStmt &S, SmallVectorImpl<llvm::Value *> &CapturedVars) {
253  const RecordDecl *RD = S.getCapturedRecordDecl();
254  auto CurField = RD->field_begin();
255  auto CurCap = S.captures().begin();
256  for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
257                                                 E = S.capture_init_end();
258       I != E; ++I, ++CurField, ++CurCap) {
259    if (CurField->hasCapturedVLAType()) {
260      const VariableArrayType *VAT = CurField->getCapturedVLAType();
261      llvm::Value *Val = VLASizeMap[VAT->getSizeExpr()];
262      CapturedVars.push_back(Val);
263    } else if (CurCap->capturesThis()) {
264      CapturedVars.push_back(CXXThisValue);
265    } else if (CurCap->capturesVariableByCopy()) {
266      llvm::Value *CV = EmitLoadOfScalar(EmitLValue(*I), CurCap->getLocation());
267
268      // If the field is not a pointer, we need to save the actual value
269      // and load it as a void pointer.
270      if (!CurField->getType()->isAnyPointerType()) {
271        ASTContext &Ctx = getContext();
272        Address DstAddr = CreateMemTemp(
273            Ctx.getUIntPtrType(),
274            Twine(CurCap->getCapturedVar()->getName(), ".casted"));
275        LValue DstLV = MakeAddrLValue(DstAddr, Ctx.getUIntPtrType());
276
277        llvm::Value *SrcAddrVal = EmitScalarConversion(
278            DstAddr.getPointer(), Ctx.getPointerType(Ctx.getUIntPtrType()),
279            Ctx.getPointerType(CurField->getType()), CurCap->getLocation());
280        LValue SrcLV =
281            MakeNaturalAlignAddrLValue(SrcAddrVal, CurField->getType());
282
283        // Store the value using the source type pointer.
284        EmitStoreThroughLValue(RValue::get(CV), SrcLV);
285
286        // Load the value using the destination type pointer.
287        CV = EmitLoadOfScalar(DstLV, CurCap->getLocation());
288      }
289      CapturedVars.push_back(CV);
290    } else {
291       (0) . __assert_fail ("CurCap->capturesVariable() && \"Expected capture by reference.\"", "/home/seafit/code_projects/clang_source/clang/lib/CodeGen/CGStmtOpenMP.cpp", 291, __PRETTY_FUNCTION__))" file_link="../../../include/assert.h.html#88" macro="true">assert(CurCap->capturesVariable() && "Expected capture by reference.");
292      CapturedVars.push_back(EmitLValue(*I).getAddress().getPointer());
293    }
294  }
295}
296
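// castValueFromUintptr() undoes the uintptr packing performed when the value
// was captured (see GenerateOpenMPCapturedVars above): it reinterprets the
// uintptr slot as a pointer to the expected destination type, and for
// reference types it additionally materializes a temporary holding that
// pointer so the address of the reference itself can be returned.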
297static Address castValueFromUintptr(CodeGenFunction &CGF, SourceLocation Loc,
298                                    QualType DstType, StringRef Name,
299                                    LValue AddrLV,
300                                    bool isReferenceType = false) {
301  ASTContext &Ctx = CGF.getContext();
302
303  llvm::Value *CastedPtr = CGF.EmitScalarConversion(
304      AddrLV.getAddress().getPointer(), Ctx.getUIntPtrType(),
305      Ctx.getPointerType(DstType), Loc);
306  Address TmpAddr =
307      CGF.MakeNaturalAlignAddrLValue(CastedPtr, Ctx.getPointerType(DstType))
308          .getAddress();
309
310  // If we are dealing with references we need to return the address of the
311  // reference instead of the reference of the value.
312  if (isReferenceType) {
313    QualType RefType = Ctx.getLValueReferenceType(DstType);
314    llvm::Value *RefVal = TmpAddr.getPointer();
315    TmpAddr = CGF.CreateMemTemp(RefType, Twine(Name, ".ref"));
316    LValue TmpLVal = CGF.MakeAddrLValue(TmpAddr, RefType);
317    CGF.EmitStoreThroughLValue(RValue::get(RefVal), TmpLVal, /*isInit=*/true);
318  }
319
320  return TmpAddr;
321}
322
323static QualType getCanonicalParamType(ASTContext &C, QualType T) {
324  if (T->isLValueReferenceType())
325    return C.getLValueReferenceType(
325        getCanonicalParamType(C, T.getNonReferenceType()),
327        /*SpelledAsLValue=*/false);
328  if (T->isPointerType())
329    return C.getPointerType(getCanonicalParamType(C, T->getPointeeType()));
330  if (const ArrayType *A = T->getAsArrayTypeUnsafe()) {
331    if (const auto *VLA = dyn_cast<VariableArrayType>(A))
332      return getCanonicalParamType(C, VLA->getElementType());
333    if (!A->isVariablyModifiedType())
334      return C.getCanonicalType(T);
335  }
336  return C.getCanonicalParamType(T);
337}
338
339namespace {
340  /// Contains required data for proper outlined function codegen.
341  struct FunctionOptions {
342    /// Captured statement for which the function is generated.
343    const CapturedStmt *S = nullptr;
344    /// true if cast to/from  UIntPtr is required for variables captured by
345    /// value.
346    const bool UIntPtrCastRequired = true;
347    /// true if only casted arguments must be registered as local args or VLA
348    /// sizes.
349    const bool RegisterCastedArgsOnly = false;
350    /// Name of the generated function.
351    const StringRef FunctionName;
352    explicit FunctionOptions(const CapturedStmt *S, bool UIntPtrCastRequired,
353                             bool RegisterCastedArgsOnly,
354                             StringRef FunctionName)
355        : S(S), UIntPtrCastRequired(UIntPtrCastRequired),
356          RegisterCastedArgsOnly(UIntPtrCastRequired && RegisterCastedArgsOnly),
357          FunctionName(FunctionName) {}
358  };
359}
360
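// emitOutlinedFunctionPrologue() builds the outlined function for a captured
// statement: it derives the parameter list from the fields of the captured
// record (passing non-pointer by-copy captures and VLA sizes as uintptr_t when
// FO.UIntPtrCastRequired), creates the llvm::Function, starts its body, and
// fills LocalAddrs/VLASizes with the mapping from each parameter back to the
// original captured variable or VLA size expression.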
361static llvm::Function *emitOutlinedFunctionPrologue(
362    CodeGenFunction &CGF, FunctionArgList &Args,
363    llvm::MapVector<const Decl *, std::pair<const VarDecl *, Address>>
364        &LocalAddrs,
365    llvm::DenseMap<const Decl *, std::pair<const Expr *, llvm::Value *>>
366        &VLASizes,
367    llvm::Value *&CXXThisValue, const FunctionOptions &FO) {
368  const CapturedDecl *CD = FO.S->getCapturedDecl();
369  const RecordDecl *RD = FO.S->getCapturedRecordDecl();
370   (0) . __assert_fail ("CD->hasBody() && \"missing CapturedDecl body\"", "/home/seafit/code_projects/clang_source/clang/lib/CodeGen/CGStmtOpenMP.cpp", 370, __PRETTY_FUNCTION__))" file_link="../../../include/assert.h.html#88" macro="true">assert(CD->hasBody() && "missing CapturedDecl body");
371
372  CXXThisValue = nullptr;
373  // Build the argument list.
374  CodeGenModule &CGM = CGF.CGM;
375  ASTContext &Ctx = CGM.getContext();
376  FunctionArgList TargetArgs;
377  Args.append(CD->param_begin(),
378              std::next(CD->param_begin(), CD->getContextParamPosition()));
379  TargetArgs.append(
380      CD->param_begin(),
381      std::next(CD->param_begin(), CD->getContextParamPosition()));
382  auto I = FO.S->captures().begin();
383  FunctionDecl *DebugFunctionDecl = nullptr;
384  if (!FO.UIntPtrCastRequired) {
385    FunctionProtoType::ExtProtoInfo EPI;
386    QualType FunctionTy = Ctx.getFunctionType(Ctx.VoidTy, llvm::None, EPI);
387    DebugFunctionDecl = FunctionDecl::Create(
388        Ctx, Ctx.getTranslationUnitDecl(), FO.S->getBeginLoc(),
389        SourceLocation(), DeclarationName(), FunctionTy,
390        Ctx.getTrivialTypeSourceInfo(FunctionTy), SC_Static,
391        /*isInlineSpecified=*/false, /*hasWrittenPrototype=*/false);
392  }
393  for (const FieldDecl *FD : RD->fields()) {
394    QualType ArgType = FD->getType();
395    IdentifierInfo *II = nullptr;
396    VarDecl *CapVar = nullptr;
397
398    // If this is a capture by copy and the type is not a pointer, the outlined
399    // function argument type should be uintptr and the value properly casted to
400    // uintptr. This is necessary given that the runtime library is only able to
401    // deal with pointers. We can pass in the same way the VLA type sizes to the
402    // outlined function.
403    if (FO.UIntPtrCastRequired &&
404        ((I->capturesVariableByCopy() && !ArgType->isAnyPointerType()) ||
405         I->capturesVariableArrayType()))
406      ArgType = Ctx.getUIntPtrType();
407
408    if (I->capturesVariable() || I->capturesVariableByCopy()) {
409      CapVar = I->getCapturedVar();
410      II = CapVar->getIdentifier();
411    } else if (I->capturesThis()) {
412      II = &Ctx.Idents.get("this");
413    } else {
414      assert(I->capturesVariableArrayType());
415      II = &Ctx.Idents.get("vla");
416    }
417    if (ArgType->isVariablyModifiedType())
418      ArgType = getCanonicalParamType(Ctx, ArgType);
419    VarDecl *Arg;
420    if (DebugFunctionDecl && (CapVar || I->capturesThis())) {
421      Arg = ParmVarDecl::Create(
422          Ctx, DebugFunctionDecl,
423          CapVar ? CapVar->getBeginLoc() : FD->getBeginLoc(),
424          CapVar ? CapVar->getLocation() : FD->getLocation(), II, ArgType,
425          /*TInfo=*/nullptr, SC_None, /*DefArg=*/nullptr);
426    } else {
427      Arg = ImplicitParamDecl::Create(Ctx, /*DC=*/nullptr, FD->getLocation(),
428                                      II, ArgType, ImplicitParamDecl::Other);
429    }
430    Args.emplace_back(Arg);
431    // Do not cast arguments if we emit function with non-original types.
432    TargetArgs.emplace_back(
433        FO.UIntPtrCastRequired
434            ? Arg
435            : CGM.getOpenMPRuntime().translateParameter(FD, Arg));
436    ++I;
437  }
438  Args.append(
439      std::next(CD->param_begin(), CD->getContextParamPosition() + 1),
440      CD->param_end());
441  TargetArgs.append(
442      std::next(CD->param_begin(), CD->getContextParamPosition() + 1),
443      CD->param_end());
444
445  // Create the function declaration.
446  const CGFunctionInfo &FuncInfo =
447      CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, TargetArgs);
448  llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);
449
450  auto *F =
451      llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
452                             FO.FunctionName, &CGM.getModule());
453  CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
454  if (CD->isNothrow())
455    F->setDoesNotThrow();
456  F->setDoesNotRecurse();
457
458  // Generate the function.
459  CGF.StartFunction(CD, Ctx.VoidTy, F, FuncInfo, TargetArgs,
460                    FO.S->getBeginLoc(), CD->getBody()->getBeginLoc());
461  unsigned Cnt = CD->getContextParamPosition();
462  I = FO.S->captures().begin();
463  for (const FieldDecl *FD : RD->fields()) {
464    // Do not map arguments if we emit function with non-original types.
465    Address LocalAddr(Address::invalid());
466    if (!FO.UIntPtrCastRequired && Args[Cnt] != TargetArgs[Cnt]) {
467      LocalAddr = CGM.getOpenMPRuntime().getParameterAddress(CGF, Args[Cnt],
468                                                             TargetArgs[Cnt]);
469    } else {
470      LocalAddr = CGF.GetAddrOfLocalVar(Args[Cnt]);
471    }
472    // If we are capturing a pointer by copy we don't need to do anything, just
473    // use the value that we get from the arguments.
474    if (I->capturesVariableByCopy() && FD->getType()->isAnyPointerType()) {
475      const VarDecl *CurVD = I->getCapturedVar();
476      // If the variable is a reference we need to materialize it here.
477      if (CurVD->getType()->isReferenceType()) {
478        Address RefAddr = CGF.CreateMemTemp(
479            CurVD->getType(), CGM.getPointerAlign(), ".materialized_ref");
480        CGF.EmitStoreOfScalar(LocalAddr.getPointer(), RefAddr,
481                              /*Volatile=*/false, CurVD->getType());
482        LocalAddr = RefAddr;
483      }
484      if (!FO.RegisterCastedArgsOnly)
485        LocalAddrs.insert({Args[Cnt], {CurVD, LocalAddr}});
486      ++Cnt;
487      ++I;
488      continue;
489    }
490
491    LValue ArgLVal = CGF.MakeAddrLValue(LocalAddr, Args[Cnt]->getType(),
492                                        AlignmentSource::Decl);
493    if (FD->hasCapturedVLAType()) {
494      if (FO.UIntPtrCastRequired) {
495        ArgLVal = CGF.MakeAddrLValue(
496            castValueFromUintptr(CGF, I->getLocation(), FD->getType(),
497                                 Args[Cnt]->getName(), ArgLVal),
498            FD->getType(), AlignmentSource::Decl);
499      }
500      llvm::Value *ExprArg = CGF.EmitLoadOfScalar(ArgLVal, I->getLocation());
501      const VariableArrayType *VAT = FD->getCapturedVLAType();
502      VLASizes.try_emplace(Args[Cnt], VAT->getSizeExpr(), ExprArg);
503    } else if (I->capturesVariable()) {
504      const VarDecl *Var = I->getCapturedVar();
505      QualType VarTy = Var->getType();
506      Address ArgAddr = ArgLVal.getAddress();
507      if (!VarTy->isReferenceType()) {
508        if (ArgLVal.getType()->isLValueReferenceType()) {
509          ArgAddr = CGF.EmitLoadOfReference(ArgLVal);
510        } else if (!VarTy->isVariablyModifiedType() ||
511                   !VarTy->isPointerType()) {
512          isPointerType()", "/home/seafit/code_projects/clang_source/clang/lib/CodeGen/CGStmtOpenMP.cpp", 512, __PRETTY_FUNCTION__))" file_link="../../../include/assert.h.html#88" macro="true">assert(ArgLVal.getType()->isPointerType());
513          ArgAddr = CGF.EmitLoadOfPointer(
514              ArgAddr, ArgLVal.getType()->castAs<PointerType>());
515        }
516      }
517      if (!FO.RegisterCastedArgsOnly) {
518        LocalAddrs.insert(
519            {Args[Cnt],
520             {Var, Address(ArgAddr.getPointer(), Ctx.getDeclAlign(Var))}});
521      }
522    } else if (I->capturesVariableByCopy()) {
523       (0) . __assert_fail ("!FD->getType()->isAnyPointerType() && \"Not expecting a captured pointer.\"", "/home/seafit/code_projects/clang_source/clang/lib/CodeGen/CGStmtOpenMP.cpp", 524, __PRETTY_FUNCTION__))" file_link="../../../include/assert.h.html#88" macro="true">assert(!FD->getType()->isAnyPointerType() &&
524 (0) . __assert_fail ("!FD->getType()->isAnyPointerType() && \"Not expecting a captured pointer.\"", "/home/seafit/code_projects/clang_source/clang/lib/CodeGen/CGStmtOpenMP.cpp", 524, __PRETTY_FUNCTION__))" file_link="../../../include/assert.h.html#88" macro="true">             "Not expecting a captured pointer.");
525      const VarDecl *Var = I->getCapturedVar();
526      QualType VarTy = Var->getType();
527      LocalAddrs.insert(
528          {Args[Cnt],
529           {Var, FO.UIntPtrCastRequired
530                     ? castValueFromUintptr(CGF, I->getLocation(),
531                                            FD->getType(), Args[Cnt]->getName(),
532                                            ArgLVal, VarTy->isReferenceType())
533                     : ArgLVal.getAddress()}});
534    } else {
535      // If 'this' is captured, load it into CXXThisValue.
536      capturesThis()", "/home/seafit/code_projects/clang_source/clang/lib/CodeGen/CGStmtOpenMP.cpp", 536, __PRETTY_FUNCTION__))" file_link="../../../include/assert.h.html#88" macro="true">assert(I->capturesThis());
537      CXXThisValue = CGF.EmitLoadOfScalar(ArgLVal, I->getLocation());
538      LocalAddrs.insert({Args[Cnt], {nullptr, ArgLVal.getAddress()}});
539    }
540    ++Cnt;
541    ++I;
542  }
543
544  return F;
545}
546
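// GenerateOpenMPCapturedStmtFunction(): when debug info is enabled, the body
// is emitted into a "_debug__"-suffixed helper that keeps the original
// parameter types, and a thin wrapper using the uintptr-based ABI is then
// emitted that loads its arguments and forwards them to that helper; without
// debug info a single function with the uintptr ABI is emitted directly.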
547llvm::Function *
548CodeGenFunction::GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S) {
549   (0) . __assert_fail ("CapturedStmtInfo && \"CapturedStmtInfo should be set when generating the captured function\"", "/home/seafit/code_projects/clang_source/clang/lib/CodeGen/CGStmtOpenMP.cpp", 551, __PRETTY_FUNCTION__))" file_link="../../../include/assert.h.html#88" macro="true">assert(
550 (0) . __assert_fail ("CapturedStmtInfo && \"CapturedStmtInfo should be set when generating the captured function\"", "/home/seafit/code_projects/clang_source/clang/lib/CodeGen/CGStmtOpenMP.cpp", 551, __PRETTY_FUNCTION__))" file_link="../../../include/assert.h.html#88" macro="true">      CapturedStmtInfo &&
551 (0) . __assert_fail ("CapturedStmtInfo && \"CapturedStmtInfo should be set when generating the captured function\"", "/home/seafit/code_projects/clang_source/clang/lib/CodeGen/CGStmtOpenMP.cpp", 551, __PRETTY_FUNCTION__))" file_link="../../../include/assert.h.html#88" macro="true">      "CapturedStmtInfo should be set when generating the captured function");
552  const CapturedDecl *CD = S.getCapturedDecl();
553  // Build the argument list.
554  bool NeedWrapperFunction =
555      getDebugInfo() &&
556      CGM.getCodeGenOpts().getDebugInfo() >= codegenoptions::LimitedDebugInfo;
557  FunctionArgList Args;
558  llvm::MapVector<const Decl *, std::pair<const VarDecl *, Address>> LocalAddrs;
559  llvm::DenseMap<const Decl *, std::pair<const Expr *, llvm::Value *>> VLASizes;
560  SmallString<256> Buffer;
561  llvm::raw_svector_ostream Out(Buffer);
562  Out << CapturedStmtInfo->getHelperName();
563  if (NeedWrapperFunction)
564    Out << "_debug__";
565  FunctionOptions FO(&S, !NeedWrapperFunction, /*RegisterCastedArgsOnly=*/false,
566                     Out.str());
567  llvm::Function *F = emitOutlinedFunctionPrologue(*this, Args, LocalAddrs,
568                                                   VLASizes, CXXThisValue, FO);
569  for (const auto &LocalAddrPair : LocalAddrs) {
570    if (LocalAddrPair.second.first) {
571      setAddrOfLocalVar(LocalAddrPair.second.first,
572                        LocalAddrPair.second.second);
573    }
574  }
575  for (const auto &VLASizePair : VLASizes)
576    VLASizeMap[VLASizePair.second.first] = VLASizePair.second.second;
577  PGO.assignRegionCounters(GlobalDecl(CD), F);
578  CapturedStmtInfo->EmitBody(*this, CD->getBody());
579  FinishFunction(CD->getBodyRBrace());
580  if (!NeedWrapperFunction)
581    return F;
582
583  FunctionOptions WrapperFO(&S, /*UIntPtrCastRequired=*/true,
584                            /*RegisterCastedArgsOnly=*/true,
585                            CapturedStmtInfo->getHelperName());
586  CodeGenFunction WrapperCGF(CGM, /*suppressNewContext=*/true);
587  WrapperCGF.CapturedStmtInfo = CapturedStmtInfo;
588  Args.clear();
589  LocalAddrs.clear();
590  VLASizes.clear();
591  llvm::Function *WrapperF =
592      emitOutlinedFunctionPrologue(WrapperCGF, Args, LocalAddrs, VLASizes,
593                                   WrapperCGF.CXXThisValue, WrapperFO);
594  llvm::SmallVector<llvm::Value *, 4> CallArgs;
595  for (const auto *Arg : Args) {
596    llvm::Value *CallArg;
597    auto I = LocalAddrs.find(Arg);
598    if (I != LocalAddrs.end()) {
599      LValue LV = WrapperCGF.MakeAddrLValue(
600          I->second.second,
601          I->second.first ? I->second.first->getType() : Arg->getType(),
602          AlignmentSource::Decl);
603      CallArg = WrapperCGF.EmitLoadOfScalar(LV, S.getBeginLoc());
604    } else {
605      auto EI = VLASizes.find(Arg);
606      if (EI != VLASizes.end()) {
607        CallArg = EI->second.second;
608      } else {
609        LValue LV = WrapperCGF.MakeAddrLValue(WrapperCGF.GetAddrOfLocalVar(Arg),
610                                              Arg->getType(),
611                                              AlignmentSource::Decl);
612        CallArg = WrapperCGF.EmitLoadOfScalar(LV, S.getBeginLoc());
613      }
614    }
615    CallArgs.emplace_back(WrapperCGF.EmitFromMemory(CallArg, Arg->getType()));
616  }
617  CGM.getOpenMPRuntime().emitOutlinedFunctionCall(WrapperCGF, S.getBeginLoc(),
618                                                  F, CallArgs);
619  WrapperCGF.FinishFunction();
620  return WrapperF;
621}
622
623//===----------------------------------------------------------------------===//
624//                              OpenMP Directive Emission
625//===----------------------------------------------------------------------===//
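// EmitOMPAggregateAssign() emits an element-by-element copy loop over two
// arrays of the same element type, invoking CopyGen once per element pair.
// The generated control flow is roughly:
//   if (dest.begin == dest.end) goto omp.arraycpy.done;
//   omp.arraycpy.body:   ; PHIs track the current src/dest element
//     CopyGen(dest.element, src.element)
//     advance both pointers; loop back until dest.end is reached
//   omp.arraycpy.done: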
626void CodeGenFunction::EmitOMPAggregateAssign(
627    Address DestAddr, Address SrcAddr, QualType OriginalType,
628    const llvm::function_ref<void(Address, Address)> CopyGen) {
629  // Perform element-by-element initialization.
630  QualType ElementTy;
631
632  // Drill down to the base element type on both arrays.
633  const ArrayType *ArrayTy = OriginalType->getAsArrayTypeUnsafe();
634  llvm::Value *NumElements = emitArrayLength(ArrayTy, ElementTy, DestAddr);
635  SrcAddr = Builder.CreateElementBitCast(SrcAddr, DestAddr.getElementType());
636
637  llvm::Value *SrcBegin = SrcAddr.getPointer();
638  llvm::Value *DestBegin = DestAddr.getPointer();
639  // Cast from pointer to array type to pointer to single element.
640  llvm::Value *DestEnd = Builder.CreateGEP(DestBegin, NumElements);
641  // The basic structure here is a while-do loop.
642  llvm::BasicBlock *BodyBB = createBasicBlock("omp.arraycpy.body");
643  llvm::BasicBlock *DoneBB = createBasicBlock("omp.arraycpy.done");
644  llvm::Value *IsEmpty =
645      Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arraycpy.isempty");
646  Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);
647
648  // Enter the loop body, making that address the current address.
649  llvm::BasicBlock *EntryBB = Builder.GetInsertBlock();
650  EmitBlock(BodyBB);
651
652  CharUnits ElementSize = getContext().getTypeSizeInChars(ElementTy);
653
654  llvm::PHINode *SrcElementPHI =
655    Builder.CreatePHI(SrcBegin->getType(), 2, "omp.arraycpy.srcElementPast");
656  SrcElementPHI->addIncoming(SrcBegin, EntryBB);
657  Address SrcElementCurrent =
658      Address(SrcElementPHI,
659              SrcAddr.getAlignment().alignmentOfArrayElement(ElementSize));
660
661  llvm::PHINode *DestElementPHI =
662    Builder.CreatePHI(DestBegin->getType(), 2, "omp.arraycpy.destElementPast");
663  DestElementPHI->addIncoming(DestBegin, EntryBB);
664  Address DestElementCurrent =
665    Address(DestElementPHI,
666            DestAddr.getAlignment().alignmentOfArrayElement(ElementSize));
667
668  // Emit copy.
669  CopyGen(DestElementCurrent, SrcElementCurrent);
670
671  // Shift the address forward by one element.
672  llvm::Value *DestElementNext = Builder.CreateConstGEP1_32(
673      DestElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
674  llvm::Value *SrcElementNext = Builder.CreateConstGEP1_32(
675      SrcElementPHI, /*Idx0=*/1, "omp.arraycpy.src.element");
676  // Check whether we've reached the end.
677  llvm::Value *Done =
678      Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
679  Builder.CreateCondBr(Done, DoneBB, BodyBB);
680  DestElementPHI->addIncoming(DestElementNext, Builder.GetInsertBlock());
681  SrcElementPHI->addIncoming(SrcElementNext, Builder.GetInsertBlock());
682
683  // Done.
684  EmitBlock(DoneBB, /*IsFinished=*/true);
685}
686
687void CodeGenFunction::EmitOMPCopy(QualType OriginalType, Address DestAddr,
688                                  Address SrcAddr, const VarDecl *DestVD,
689                                  const VarDecl *SrcVD, const Expr *Copy) {
690  if (OriginalType->isArrayType()) {
691    const auto *BO = dyn_cast<BinaryOperator>(Copy);
692    if (BO && BO->getOpcode() == BO_Assign) {
693      // Perform simple memcpy for simple copying.
694      LValue Dest = MakeAddrLValue(DestAddr, OriginalType);
695      LValue Src = MakeAddrLValue(SrcAddr, OriginalType);
696      EmitAggregateAssign(Dest, Src, OriginalType);
697    } else {
698      // For arrays with complex element types perform element by element
699      // copying.
700      EmitOMPAggregateAssign(
701          DestAddr, SrcAddr, OriginalType,
702          [this, Copy, SrcVD, DestVD](Address DestElement, Address SrcElement) {
703            // Working with the single array element, so have to remap
704            // destination and source variables to corresponding array
705            // elements.
706            CodeGenFunction::OMPPrivateScope Remap(*this);
707            Remap.addPrivate(DestVD, [DestElement]() { return DestElement; });
708            Remap.addPrivate(SrcVD, [SrcElement]() { return SrcElement; });
709            (void)Remap.Privatize();
710            EmitIgnoredExpr(Copy);
711          });
712    }
713  } else {
714    // Remap pseudo source variable to private copy.
715    CodeGenFunction::OMPPrivateScope Remap(*this);
716    Remap.addPrivate(SrcVD, [SrcAddr]() { return SrcAddr; });
717    Remap.addPrivate(DestVD, [DestAddr]() { return DestAddr; });
718    (void)Remap.Privatize();
719    // Emit copying of the whole variable.
720    EmitIgnoredExpr(Copy);
721  }
722}
723
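// EmitOMPFirstprivateClause() handles clauses such as in
//   #pragma omp parallel firstprivate(a)
// by emitting a local copy of each firstprivate variable initialized from the
// original value (a plain aggregate copy for trivially initializable arrays,
// element-wise or copy-constructor initialization otherwise). The return value
// indicates whether any emitted firstprivate variable also appears in a
// lastprivate clause on the same directive.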
724bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
725                                                OMPPrivateScope &PrivateScope) {
726  if (!HaveInsertPoint())
727    return false;
728  bool DeviceConstTarget =
729      getLangOpts().OpenMPIsDevice &&
730      isOpenMPTargetExecutionDirective(D.getDirectiveKind());
731  bool FirstprivateIsLastprivate = false;
732  llvm::DenseSet<const VarDecl *> Lastprivates;
733  for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
734    for (const auto *D : C->varlists())
735      Lastprivates.insert(
736          cast<VarDecl>(cast<DeclRefExpr>(D)->getDecl())->getCanonicalDecl());
737  }
738  llvm::DenseSet<const VarDecl *> EmittedAsFirstprivate;
739  llvm::SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
740  getOpenMPCaptureRegions(CaptureRegions, D.getDirectiveKind());
741  // Force emission of the firstprivate copy if the directive does not emit
742  // outlined function, like omp for, omp simd, omp distribute etc.
743  bool MustEmitFirstprivateCopy =
744      CaptureRegions.size() == 1 && CaptureRegions.back() == OMPD_unknown;
745  for (const auto *C : D.getClausesOfKind<OMPFirstprivateClause>()) {
746    auto IRef = C->varlist_begin();
747    auto InitsRef = C->inits().begin();
748    for (const Expr *IInit : C->private_copies()) {
749      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
750      bool ThisFirstprivateIsLastprivate =
751          Lastprivates.count(OrigVD->getCanonicalDecl()) > 0;
752      const FieldDecl *FD = CapturedStmtInfo->lookup(OrigVD);
753      if (!MustEmitFirstprivateCopy && !ThisFirstprivateIsLastprivate && FD &&
754          !FD->getType()->isReferenceType()) {
755        EmittedAsFirstprivate.insert(OrigVD->getCanonicalDecl());
756        ++IRef;
757        ++InitsRef;
758        continue;
759      }
760      // Do not emit copy for firstprivate constant variables in target regions,
761      // captured by reference.
762      if (DeviceConstTarget && OrigVD->getType().isConstant(getContext()) &&
763          FD && FD->getType()->isReferenceType()) {
764        (void)CGM.getOpenMPRuntime().registerTargetFirstprivateCopy(*this,
765                                                                    OrigVD);
766        ++IRef;
767        ++InitsRef;
768        continue;
769      }
770      FirstprivateIsLastprivate =
771          FirstprivateIsLastprivate || ThisFirstprivateIsLastprivate;
772      if (EmittedAsFirstprivate.insert(OrigVD->getCanonicalDecl()).second) {
773        const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
774        const auto *VDInit =
775            cast<VarDecl>(cast<DeclRefExpr>(*InitsRef)->getDecl());
776        bool IsRegistered;
777        DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD),
778                        /*RefersToEnclosingVariableOrCapture=*/FD != nullptr,
779                        (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
780        LValue OriginalLVal = EmitLValue(&DRE);
781        QualType Type = VD->getType();
782        if (Type->isArrayType()) {
783          // Emit VarDecl with copy init for arrays.
784          // Get the address of the original variable captured in current
785          // captured region.
786          IsRegistered = PrivateScope.addPrivate(
787              OrigVD, [this, VD, Type, OriginalLVal, VDInit]() {
788                AutoVarEmission Emission = EmitAutoVarAlloca(*VD);
789                const Expr *Init = VD->getInit();
790                if (!isa<CXXConstructExpr>(Init) ||
791                    isTrivialInitializer(Init)) {
792                  // Perform simple memcpy.
793                  LValue Dest =
794                      MakeAddrLValue(Emission.getAllocatedAddress(), Type);
795                  EmitAggregateAssign(Dest, OriginalLVal, Type);
796                } else {
797                  EmitOMPAggregateAssign(
798                      Emission.getAllocatedAddress(), OriginalLVal.getAddress(),
799                      Type,
800                      [this, VDInit, Init](Address DestElement,
801                                           Address SrcElement) {
802                        // Clean up any temporaries needed by the
803                        // initialization.
804                        RunCleanupsScope InitScope(*this);
805                        // Emit initialization for single element.
806                        setAddrOfLocalVar(VDInit, SrcElement);
807                        EmitAnyExprToMem(Init, DestElement,
808                                         Init->getType().getQualifiers(),
809                                         /*IsInitializer*/ false);
810                        LocalDeclMap.erase(VDInit);
811                      });
812                }
813                EmitAutoVarCleanups(Emission);
814                return Emission.getAllocatedAddress();
815              });
816        } else {
817          Address OriginalAddr = OriginalLVal.getAddress();
818          IsRegistered = PrivateScope.addPrivate(
819              OrigVD, [this, VDInit, OriginalAddr, VD]() {
820                // Emit private VarDecl with copy init.
821                // Remap temp VDInit variable to the address of the original
822                // variable (for proper handling of captured global variables).
823                setAddrOfLocalVar(VDInit, OriginalAddr);
824                EmitDecl(*VD);
825                LocalDeclMap.erase(VDInit);
826                return GetAddrOfLocalVar(VD);
827              });
828        }
829         (0) . __assert_fail ("IsRegistered && \"firstprivate var already registered as private\"", "/home/seafit/code_projects/clang_source/clang/lib/CodeGen/CGStmtOpenMP.cpp", 830, __PRETTY_FUNCTION__))" file_link="../../../include/assert.h.html#88" macro="true">assert(IsRegistered &&
830 (0) . __assert_fail ("IsRegistered && \"firstprivate var already registered as private\"", "/home/seafit/code_projects/clang_source/clang/lib/CodeGen/CGStmtOpenMP.cpp", 830, __PRETTY_FUNCTION__))" file_link="../../../include/assert.h.html#88" macro="true">               "firstprivate var already registered as private");
831        // Silence the warning about unused variable.
832        (void)IsRegistered;
833      }
834      ++IRef;
835      ++InitsRef;
836    }
837  }
838  return FirstprivateIsLastprivate && !EmittedAsFirstprivate.empty();
839}
840
841void CodeGenFunction::EmitOMPPrivateClause(
842    const OMPExecutableDirective &D,
843    CodeGenFunction::OMPPrivateScope &PrivateScope) {
844  if (!HaveInsertPoint())
845    return;
846  llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
847  for (const auto *C : D.getClausesOfKind<OMPPrivateClause>()) {
848    auto IRef = C->varlist_begin();
849    for (const Expr *IInit : C->private_copies()) {
850      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
851      if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
852        const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
853        bool IsRegistered = PrivateScope.addPrivate(OrigVD, [this, VD]() {
854          // Emit private VarDecl with copy init.
855          EmitDecl(*VD);
856          return GetAddrOfLocalVar(VD);
857        });
858         (0) . __assert_fail ("IsRegistered && \"private var already registered as private\"", "/home/seafit/code_projects/clang_source/clang/lib/CodeGen/CGStmtOpenMP.cpp", 858, __PRETTY_FUNCTION__))" file_link="../../../include/assert.h.html#88" macro="true">assert(IsRegistered && "private var already registered as private");
859        // Silence the warning about unused variable.
860        (void)IsRegistered;
861      }
862      ++IRef;
863    }
864  }
865}
866
867bool CodeGenFunction::EmitOMPCopyinClause(const OMPExecutableDirective &D) {
868  if (!HaveInsertPoint())
869    return false;
870  // threadprivate_var1 = master_threadprivate_var1;
871  // operator=(threadprivate_var2, master_threadprivate_var2);
872  // ...
873  // __kmpc_barrier(&loc, global_tid);
874  llvm::DenseSet<const VarDecl *> CopiedVars;
875  llvm::BasicBlock *CopyBegin = nullptr, *CopyEnd = nullptr;
876  for (const auto *C : D.getClausesOfKind<OMPCopyinClause>()) {
877    auto IRef = C->varlist_begin();
878    auto ISrcRef = C->source_exprs().begin();
879    auto IDestRef = C->destination_exprs().begin();
880    for (const Expr *AssignOp : C->assignment_ops()) {
881      const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
882      QualType Type = VD->getType();
883      if (CopiedVars.insert(VD->getCanonicalDecl()).second) {
884        // Get the address of the master variable. If we are emitting code with
885        // TLS support, the address is passed from the master as field in the
886        // captured declaration.
887        Address MasterAddr = Address::invalid();
888        if (getLangOpts().OpenMPUseTLS &&
889            getContext().getTargetInfo().isTLSSupported()) {
890           (0) . __assert_fail ("CapturedStmtInfo->lookup(VD) && \"Copyin threadprivates should have been captured!\"", "/home/seafit/code_projects/clang_source/clang/lib/CodeGen/CGStmtOpenMP.cpp", 891, __PRETTY_FUNCTION__))" file_link="../../../include/assert.h.html#88" macro="true">assert(CapturedStmtInfo->lookup(VD) &&
891 (0) . __assert_fail ("CapturedStmtInfo->lookup(VD) && \"Copyin threadprivates should have been captured!\"", "/home/seafit/code_projects/clang_source/clang/lib/CodeGen/CGStmtOpenMP.cpp", 891, __PRETTY_FUNCTION__))" file_link="../../../include/assert.h.html#88" macro="true">                 "Copyin threadprivates should have been captured!");
892          DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(VD), true,
893                          (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
894          MasterAddr = EmitLValue(&DRE).getAddress();
895          LocalDeclMap.erase(VD);
896        } else {
897          MasterAddr =
898            Address(VD->isStaticLocal() ? CGM.getStaticLocalDeclAddress(VD)
899                                        : CGM.GetAddrOfGlobal(VD),
900                    getContext().getDeclAlign(VD));
901        }
902        // Get the address of the threadprivate variable.
903        Address PrivateAddr = EmitLValue(*IRef).getAddress();
904        if (CopiedVars.size() == 1) {
905          // At first check if current thread is a master thread. If it is, no
906          // need to copy data.
907          CopyBegin = createBasicBlock("copyin.not.master");
908          CopyEnd = createBasicBlock("copyin.not.master.end");
909          Builder.CreateCondBr(
910              Builder.CreateICmpNE(
911                  Builder.CreatePtrToInt(MasterAddr.getPointer(), CGM.IntPtrTy),
912                  Builder.CreatePtrToInt(PrivateAddr.getPointer(),
913                                         CGM.IntPtrTy)),
914              CopyBegin, CopyEnd);
915          EmitBlock(CopyBegin);
916        }
917        const auto *SrcVD =
918            cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
919        const auto *DestVD =
920            cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
921        EmitOMPCopy(Type, PrivateAddr, MasterAddr, DestVD, SrcVD, AssignOp);
922      }
923      ++IRef;
924      ++ISrcRef;
925      ++IDestRef;
926    }
927  }
928  if (CopyEnd) {
929    // Exit out of copying procedure for non-master thread.
930    EmitBlock(CopyEnd, /*IsFinished=*/true);
931    return true;
932  }
933  return false;
934}
935
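// EmitOMPLastprivateClauseInit() prepares lastprivate variables: it records
// the address of each original (destination) variable for the copy-back done
// in EmitOMPLastprivateClauseFinal(), and emits a private copy unless the
// variable is a SIMD loop counter or its initialization is already handled by
// a firstprivate clause. Returns true if at least one lastprivate clause was
// present.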
936bool CodeGenFunction::EmitOMPLastprivateClauseInit(
937    const OMPExecutableDirective &D, OMPPrivateScope &PrivateScope) {
938  if (!HaveInsertPoint())
939    return false;
940  bool HasAtLeastOneLastprivate = false;
941  llvm::DenseSet<const VarDecl *> SIMDLCVs;
942  if (isOpenMPSimdDirective(D.getDirectiveKind())) {
943    const auto *LoopDirective = cast<OMPLoopDirective>(&D);
944    for (const Expr *C : LoopDirective->counters()) {
945      SIMDLCVs.insert(
946          cast<VarDecl>(cast<DeclRefExpr>(C)->getDecl())->getCanonicalDecl());
947    }
948  }
949  llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
950  for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
951    HasAtLeastOneLastprivate = true;
952    if (isOpenMPTaskLoopDirective(D.getDirectiveKind()) &&
953        !getLangOpts().OpenMPSimd)
954      break;
955    auto IRef = C->varlist_begin();
956    auto IDestRef = C->destination_exprs().begin();
957    for (const Expr *IInit : C->private_copies()) {
958      // Keep the address of the original variable for future update at the end
959      // of the loop.
960      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
961      // Taskloops do not require additional initialization, it is done in
962      // runtime support library.
963      if (AlreadyEmittedVars.insert(OrigVD->getCanonicalDecl()).second) {
964        const auto *DestVD =
965            cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
966        PrivateScope.addPrivate(DestVD, [this, OrigVD, IRef]() {
967          DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD),
968                          /*RefersToEnclosingVariableOrCapture=*/
969                              CapturedStmtInfo->lookup(OrigVD) != nullptr,
970                          (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
971          return EmitLValue(&DRE).getAddress();
972        });
973        // Check if the variable is also a firstprivate: in this case IInit is
974        // not generated. Initialization of this variable will happen in codegen
975        // for 'firstprivate' clause.
976        if (IInit && !SIMDLCVs.count(OrigVD->getCanonicalDecl())) {
977          const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
978          bool IsRegistered = PrivateScope.addPrivate(OrigVD, [this, VD]() {
979            // Emit private VarDecl with copy init.
980            EmitDecl(*VD);
981            return GetAddrOfLocalVar(VD);
982          });
983           (0) . __assert_fail ("IsRegistered && \"lastprivate var already registered as private\"", "/home/seafit/code_projects/clang_source/clang/lib/CodeGen/CGStmtOpenMP.cpp", 984, __PRETTY_FUNCTION__))" file_link="../../../include/assert.h.html#88" macro="true">assert(IsRegistered &&
984 (0) . __assert_fail ("IsRegistered && \"lastprivate var already registered as private\"", "/home/seafit/code_projects/clang_source/clang/lib/CodeGen/CGStmtOpenMP.cpp", 984, __PRETTY_FUNCTION__))" file_link="../../../include/assert.h.html#88" macro="true">                 "lastprivate var already registered as private");
985          (void)IsRegistered;
986        }
987      }
988      ++IRef;
989      ++IDestRef;
990    }
991  }
992  return HasAtLeastOneLastprivate;
993}
994
995void CodeGenFunction::EmitOMPLastprivateClauseFinal(
996    const OMPExecutableDirective &D, bool NoFinals,
997    llvm::Value *IsLastIterCond) {
998  if (!HaveInsertPoint())
999    return;
1000  // Emit following code:
1001  // if (<IsLastIterCond>) {
1002  //   orig_var1 = private_orig_var1;
1003  //   ...
1004  //   orig_varn = private_orig_varn;
1005  // }
1006  llvm::BasicBlock *ThenBB = nullptr;
1007  llvm::BasicBlock *DoneBB = nullptr;
1008  if (IsLastIterCond) {
1009    ThenBB = createBasicBlock(".omp.lastprivate.then");
1010    DoneBB = createBasicBlock(".omp.lastprivate.done");
1011    Builder.CreateCondBr(IsLastIterCond, ThenBB, DoneBB);
1012    EmitBlock(ThenBB);
1013  }
1014  llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
1015  llvm::DenseMap<const VarDecl *, const Expr *> LoopCountersAndUpdates;
1016  if (const auto *LoopDirective = dyn_cast<OMPLoopDirective>(&D)) {
1017    auto IC = LoopDirective->counters().begin();
1018    for (const Expr *F : LoopDirective->finals()) {
1019      const auto *D =
1020          cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl())->getCanonicalDecl();
1021      if (NoFinals)
1022        AlreadyEmittedVars.insert(D);
1023      else
1024        LoopCountersAndUpdates[D] = F;
1025      ++IC;
1026    }
1027  }
1028  for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
1029    auto IRef = C->varlist_begin();
1030    auto ISrcRef = C->source_exprs().begin();
1031    auto IDestRef = C->destination_exprs().begin();
1032    for (const Expr *AssignOp : C->assignment_ops()) {
1033      const auto *PrivateVD =
1034          cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
1035      QualType Type = PrivateVD->getType();
1036      const auto *CanonicalVD = PrivateVD->getCanonicalDecl();
1037      if (AlreadyEmittedVars.insert(CanonicalVD).second) {
1038        // If lastprivate variable is a loop control variable for loop-based
1039        // directive, update its value before copyin back to original
1040        // variable.
1041        if (const Expr *FinalExpr = LoopCountersAndUpdates.lookup(CanonicalVD))
1042          EmitIgnoredExpr(FinalExpr);
1043        const auto *SrcVD =
1044            cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
1045        const auto *DestVD =
1046            cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
1047        // Get the address of the original variable.
1048        Address OriginalAddr = GetAddrOfLocalVar(DestVD);
1049        // Get the address of the private variable.
1050        Address PrivateAddr = GetAddrOfLocalVar(PrivateVD);
1051        if (const auto *RefTy = PrivateVD->getType()->getAs<ReferenceType>())
1052          PrivateAddr =
1053              Address(Builder.CreateLoad(PrivateAddr),
1054                      getNaturalTypeAlignment(RefTy->getPointeeType()));
1055        EmitOMPCopy(Type, OriginalAddr, PrivateAddr, DestVD, SrcVD, AssignOp);
1056      }
1057      ++IRef;
1058      ++ISrcRef;
1059      ++IDestRef;
1060    }
1061    if (const Expr *PostUpdate = C->getPostUpdateExpr())
1062      EmitIgnoredExpr(PostUpdate);
1063  }
1064  if (IsLastIterCond)
1065    EmitBlock(DoneBB, /*IsFinished=*/true);
1066}
1067
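// EmitOMPReductionClauseInit() handles clauses such as in
//   #pragma omp parallel for reduction(+ : sum)
// It collects the shared variables, their private copies, and the reduction
// ops from every reduction clause, emits a privately initialized copy for each
// variable, and registers the LHS/RHS helper variables used by the combiner
// expressions (LHS bound to the original storage, RHS to the private copy).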
1068void CodeGenFunction::EmitOMPReductionClauseInit(
1069    const OMPExecutableDirective &D,
1070    CodeGenFunction::OMPPrivateScope &PrivateScope) {
1071  if (!HaveInsertPoint())
1072    return;
1073  SmallVector<const Expr *, 4> Shareds;
1074  SmallVector<const Expr *, 4> Privates;
1075  SmallVector<const Expr *, 4> ReductionOps;
1076  SmallVector<const Expr *, 4> LHSs;
1077  SmallVector<const Expr *, 4> RHSs;
1078  for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
1079    auto IPriv = C->privates().begin();
1080    auto IRed = C->reduction_ops().begin();
1081    auto ILHS = C->lhs_exprs().begin();
1082    auto IRHS = C->rhs_exprs().begin();
1083    for (const Expr *Ref : C->varlists()) {
1084      Shareds.emplace_back(Ref);
1085      Privates.emplace_back(*IPriv);
1086      ReductionOps.emplace_back(*IRed);
1087      LHSs.emplace_back(*ILHS);
1088      RHSs.emplace_back(*IRHS);
1089      std::advance(IPriv, 1);
1090      std::advance(IRed, 1);
1091      std::advance(ILHS, 1);
1092      std::advance(IRHS, 1);
1093    }
1094  }
1095  ReductionCodeGen RedCG(Shareds, Privates, ReductionOps);
1096  unsigned Count = 0;
1097  auto ILHS = LHSs.begin();
1098  auto IRHS = RHSs.begin();
1099  auto IPriv = Privates.begin();
1100  for (const Expr *IRef : Shareds) {
1101    const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*IPriv)->getDecl());
1102    // Emit private VarDecl with reduction init.
1103    RedCG.emitSharedLValue(*this, Count);
1104    RedCG.emitAggregateType(*this, Count);
1105    AutoVarEmission Emission = EmitAutoVarAlloca(*PrivateVD);
1106    RedCG.emitInitialization(*this, Count, Emission.getAllocatedAddress(),
1107                             RedCG.getSharedLValue(Count),
1108                             [&Emission](CodeGenFunction &CGF) {
1109                               CGF.EmitAutoVarInit(Emission);
1110                               return true;
1111                             });
1112    EmitAutoVarCleanups(Emission);
1113    Address BaseAddr = RedCG.adjustPrivateAddress(
1114        *this, Count, Emission.getAllocatedAddress());
1115    bool IsRegistered = PrivateScope.addPrivate(
1116        RedCG.getBaseDecl(Count), [BaseAddr]() { return BaseAddr; });
1117     (0) . __assert_fail ("IsRegistered && \"private var already registered as private\"", "/home/seafit/code_projects/clang_source/clang/lib/CodeGen/CGStmtOpenMP.cpp", 1117, __PRETTY_FUNCTION__))" file_link="../../../include/assert.h.html#88" macro="true">assert(IsRegistered && "private var already registered as private");
1118    // Silence the warning about unused variable.
1119    (void)IsRegistered;
1120
1121    const auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
1122    const auto *RHSVD = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
1123    QualType Type = PrivateVD->getType();
1124    bool isaOMPArraySectionExpr = isa<OMPArraySectionExpr>(IRef);
1125    if (isaOMPArraySectionExpr && Type->isVariablyModifiedType()) {
1126      // Store the address of the original variable associated with the LHS
1127      // implicit variable.
1128      PrivateScope.addPrivate(LHSVD, [&RedCG, Count]() {
1129        return RedCG.getSharedLValue(Count).getAddress();
1130      });
1131      PrivateScope.addPrivate(
1132          RHSVD, [this, PrivateVD]() { return GetAddrOfLocalVar(PrivateVD); });
1133    } else if ((isaOMPArraySectionExpr && Type->isScalarType()) ||
1134               isa<ArraySubscriptExpr>(IRef)) {
1135      // Store the address of the original variable associated with the LHS
1136      // implicit variable.
1137      PrivateScope.addPrivate(LHSVD, [&RedCG, Count]() {
1138        return RedCG.getSharedLValue(Count).getAddress();
1139      });
1140      PrivateScope.addPrivate(RHSVD, [this, PrivateVD, RHSVD]() {
1141        return Builder.CreateElementBitCast(GetAddrOfLocalVar(PrivateVD),
1142                                            ConvertTypeForMem(RHSVD->getType()),
1143                                            "rhs.begin");
1144      });
1145    } else {
1146      QualType Type = PrivateVD->getType();
1147      bool IsArray = getContext().getAsArrayType(Type) != nullptr;
1148      Address OriginalAddr = RedCG.getSharedLValue(Count).getAddress();
1149      // Store the address of the original variable associated with the LHS
1150      // implicit variable.
1151      if (IsArray) {
1152        OriginalAddr = Builder.CreateElementBitCast(
1153            OriginalAddr, ConvertTypeForMem(LHSVD->getType()), "lhs.begin");
1154      }
1155      PrivateScope.addPrivate(LHSVD, [OriginalAddr]() { return OriginalAddr; });
1156      PrivateScope.addPrivate(
1157          RHSVD, [this, PrivateVD, RHSVD, IsArray]() {
1158            return IsArray
1159                       ? Builder.CreateElementBitCast(
1160                             GetAddrOfLocalVar(PrivateVD),
1161                             ConvertTypeForMem(RHSVD->getType()), "rhs.begin")
1162                       : GetAddrOfLocalVar(PrivateVD);
1163          });
1164    }
1165    ++ILHS;
1166    ++IRHS;
1167    ++IPriv;
1168    ++Count;
1169  }
1170}
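// Illustrative sketch (hypothetical user code): for a clause such as
//   #pragma omp parallel for reduction(+:sum)
// the loop above allocates a private copy of 'sum' for each participant,
// initializes it via RedCG.emitInitialization (the identity of the reduction
// operation, 0 for '+'), and registers the private address in PrivateScope so
// the loop body updates the private copy rather than the shared variable; the
// combination into the original happens later in EmitOMPReductionClauseFinal.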
1171
1172void CodeGenFunction::EmitOMPReductionClauseFinal(
1173    const OMPExecutableDirective &D, const OpenMPDirectiveKind ReductionKind) {
1174  if (!HaveInsertPoint())
1175    return;
1176  llvm::SmallVector<const Expr *, 8> Privates;
1177  llvm::SmallVector<const Expr *, 8> LHSExprs;
1178  llvm::SmallVector<const Expr *, 8> RHSExprs;
1179  llvm::SmallVector<const Expr *, 8> ReductionOps;
1180  bool HasAtLeastOneReduction = false;
1181  for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
1182    HasAtLeastOneReduction = true;
1183    Privates.append(C->privates().begin(), C->privates().end());
1184    LHSExprs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
1185    RHSExprs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
1186    ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
1187  }
1188  if (HasAtLeastOneReduction) {
1189    bool WithNowait = D.getSingleClause<OMPNowaitClause>() ||
1190                      isOpenMPParallelDirective(D.getDirectiveKind()) ||
1191                      ReductionKind == OMPD_simd;
1192    bool SimpleReduction = ReductionKind == OMPD_simd;
1193    // Emit nowait reduction if nowait clause is present or directive is a
1194    // parallel directive (it always has implicit barrier).
1195    CGM.getOpenMPRuntime().emitReduction(
1196        *this, D.getEndLoc(), Privates, LHSExprs, RHSExprs, ReductionOps,
1197        {WithNowait, SimpleReduction, ReductionKind});
1198  }
1199}
1200
1201static void emitPostUpdateForReductionClause(
1202    CodeGenFunction &CGF, const OMPExecutableDirective &D,
1203    const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen) {
1204  if (!CGF.HaveInsertPoint())
1205    return;
1206  llvm::BasicBlock *DoneBB = nullptr;
1207  for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
1208    if (const Expr *PostUpdate = C->getPostUpdateExpr()) {
1209      if (!DoneBB) {
1210        if (llvm::Value *Cond = CondGen(CGF)) {
1211          // If the first post-update expression is found, emit conditional
1212          // block if it was requested.
1213          llvm::BasicBlock *ThenBB = CGF.createBasicBlock(".omp.reduction.pu");
1214          DoneBB = CGF.createBasicBlock(".omp.reduction.pu.done");
1215          CGF.Builder.CreateCondBr(Cond, ThenBB, DoneBB);
1216          CGF.EmitBlock(ThenBB);
1217        }
1218      }
1219      CGF.EmitIgnoredExpr(PostUpdate);
1220    }
1221  }
1222  if (DoneBB)
1223    CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
1224}
1225
1226namespace {
1227/// Codegen lambda for appending distribute lower and upper bounds to outlined
1228/// parallel function. This is necessary for combined constructs such as
1229/// 'distribute parallel for'
1230typedef llvm::function_ref<void(CodeGenFunction &,
1231                                const OMPExecutableDirective &,
1232                                llvm::SmallVectorImpl<llvm::Value *> &)>
1233    CodeGenBoundParametersTy;
1234} // anonymous namespace
1235
1236static void emitCommonOMPParallelDirective(
1237    CodeGenFunction &CGF, const OMPExecutableDirective &S,
1238    OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
1239    const CodeGenBoundParametersTy &CodeGenBoundParameters) {
1240  const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel);
1241  llvm::Function *OutlinedFn =
1242      CGF.CGM.getOpenMPRuntime().emitParallelOutlinedFunction(
1243          S, *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen);
1244  if (const auto *NumThreadsClause = S.getSingleClause<OMPNumThreadsClause>()) {
1245    CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF);
1246    llvm::Value *NumThreads =
1247        CGF.EmitScalarExpr(NumThreadsClause->getNumThreads(),
1248                           /*IgnoreResultAssign=*/true);
1249    CGF.CGM.getOpenMPRuntime().emitNumThreadsClause(
1250        CGF, NumThreads, NumThreadsClause->getBeginLoc());
1251  }
1252  if (const auto *ProcBindClause = S.getSingleClause<OMPProcBindClause>()) {
1253    CodeGenFunction::RunCleanupsScope ProcBindScope(CGF);
1254    CGF.CGM.getOpenMPRuntime().emitProcBindClause(
1255        CGF, ProcBindClause->getProcBindKind(), ProcBindClause->getBeginLoc());
1256  }
1257  const Expr *IfCond = nullptr;
1258  for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
1259    if (C->getNameModifier() == OMPD_unknown ||
1260        C->getNameModifier() == OMPD_parallel) {
1261      IfCond = C->getCondition();
1262      break;
1263    }
1264  }
1265
1266  OMPParallelScope Scope(CGF, S);
1267  llvm::SmallVector<llvm::Value *, 16> CapturedVars;
1268  // Combining 'distribute' with 'for' requires sharing each 'distribute' chunk
1269  // lower and upper bounds with the pragma 'for' chunking mechanism.
1270  // The following lambda takes care of appending the lower and upper bound
1271  // parameters when necessary
1272  CodeGenBoundParameters(CGF, S, CapturedVars);
1273  CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars);
1274  CGF.CGM.getOpenMPRuntime().emitParallelCall(CGF, S.getBeginLoc(), OutlinedFn,
1275                                              CapturedVars, IfCond);
1276}
1277
1278static void emitEmptyBoundParameters(CodeGenFunction &,
1279                                     const OMPExecutableDirective &,
1280                                     llvm::SmallVectorImpl<llvm::Value *> &) {}
1281
1282void CodeGenFunction::EmitOMPParallelDirective(const OMPParallelDirective &S) {
1283  // Emit parallel region as a standalone region.
1284  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
1285    Action.Enter(CGF);
1286    OMPPrivateScope PrivateScope(CGF);
1287    bool Copyins = CGF.EmitOMPCopyinClause(S);
1288    (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
1289    if (Copyins) {
1290      // Emit implicit barrier to synchronize threads and avoid data races on
1291      // propagation of the master thread's values of threadprivate variables
1292      // to the local instances of those variables in all other implicit threads.
1293      CGF.CGM.getOpenMPRuntime().emitBarrierCall(
1294          CGF, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false,
1295          /*ForceSimpleCall=*/true);
1296    }
1297    CGF.EmitOMPPrivateClause(S, PrivateScope);
1298    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
1299    (void)PrivateScope.Privatize();
1300    CGF.EmitStmt(S.getCapturedStmt(OMPD_parallel)->getCapturedStmt());
1301    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel);
1302  };
1303  emitCommonOMPParallelDirective(*this, S, OMPD_parallel, CodeGen,
1304                                 emitEmptyBoundParameters);
1305  emitPostUpdateForReductionClause(*this, S,
1306                                   [](CodeGenFunction &) { return nullptr; });
1307}
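// Illustrative sketch (hypothetical user code): a directive such as
//   #pragma omp parallel num_threads(4) if(use_par) copyin(tp) reduction(+:acc)
// ('use_par', 'tp' and 'acc' are placeholder names) is emitted by outlining
// the region body (the CodeGen lambda above), forwarding the num_threads value
// through emitNumThreadsClause, emitting a barrier after the copyin
// propagation, and finally calling emitParallelCall with the captured
// variables and the 'if' condition.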
1308
1309void CodeGenFunction::EmitOMPLoopBody(const OMPLoopDirective &D,
1310                                      JumpDest LoopExit) {
1311  RunCleanupsScope BodyScope(*this);
1312  // Update counters values on current iteration.
1313  for (const Expr *UE : D.updates())
1314    EmitIgnoredExpr(UE);
1315  // Update the linear variables.
1316  // In distribute directives only loop counters may be marked as linear, no
1317  // need to generate the code for them.
1318  if (!isOpenMPDistributeDirective(D.getDirectiveKind())) {
1319    for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
1320      for (const Expr *UE : C->updates())
1321        EmitIgnoredExpr(UE);
1322    }
1323  }
1324
1325  // On a continue in the body, jump to the end.
1326  JumpDest Continue = getJumpDestInCurrentScope("omp.body.continue");
1327  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
1328  // Emit loop body.
1329  EmitStmt(D.getBody());
1330  // The end (updates/cleanups).
1331  EmitBlock(Continue.getBlock());
1332  BreakContinueStack.pop_back();
1333}
1334
1335void CodeGenFunction::EmitOMPInnerLoop(
1336    const Stmt &S, bool RequiresCleanup, const Expr *LoopCond,
1337    const Expr *IncExpr,
1338    const llvm::function_ref<void(CodeGenFunction &)> BodyGen,
1339    const llvm::function_ref<void(CodeGenFunction &)> PostIncGen) {
1340  auto LoopExit = getJumpDestInCurrentScope("omp.inner.for.end");
1341
1342  // Start the loop with a block that tests the condition.
1343  auto CondBlock = createBasicBlock("omp.inner.for.cond");
1344  EmitBlock(CondBlock);
1345  const SourceRange R = S.getSourceRange();
1346  LoopStack.push(CondBlock, SourceLocToDebugLoc(R.getBegin()),
1347                 SourceLocToDebugLoc(R.getEnd()));
1348
1349  // If there are any cleanups between here and the loop-exit scope,
1350  // create a block to stage a loop exit along.
1351  llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
1352  if (RequiresCleanup)
1353    ExitBlock = createBasicBlock("omp.inner.for.cond.cleanup");
1354
1355  llvm::BasicBlock *LoopBody = createBasicBlock("omp.inner.for.body");
1356
1357  // Emit condition.
1358  EmitBranchOnBoolExpr(LoopCond, LoopBody, ExitBlock, getProfileCount(&S));
1359  if (ExitBlock != LoopExit.getBlock()) {
1360    EmitBlock(ExitBlock);
1361    EmitBranchThroughCleanup(LoopExit);
1362  }
1363
1364  EmitBlock(LoopBody);
1365  incrementProfileCounter(&S);
1366
1367  // Create a block for the increment.
1368  JumpDest Continue = getJumpDestInCurrentScope("omp.inner.for.inc");
1369  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
1370
1371  BodyGen(*this);
1372
1373  // Emit "IV = IV + 1" and a back-edge to the condition block.
1374  EmitBlock(Continue.getBlock());
1375  EmitIgnoredExpr(IncExpr);
1376  PostIncGen(*this);
1377  BreakContinueStack.pop_back();
1378  EmitBranch(CondBlock);
1379  LoopStack.pop();
1380  // Emit the fall-through block.
1381  EmitBlock(LoopExit.getBlock());
1382}
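// The control flow emitted above is, roughly:
//   omp.inner.for.cond:   branch on LoopCond to the body or to the exit
//                         (through omp.inner.for.cond.cleanup when cleanups
//                         are required)
//   omp.inner.for.body:   BodyGen(...)
//   omp.inner.for.inc:    IV = IV + 1; PostIncGen(...); branch back to cond
//   omp.inner.for.end:    fall-through block
// (a simplified sketch using the block names created in this function).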
1383
1384bool CodeGenFunction::EmitOMPLinearClauseInit(const OMPLoopDirective &D) {
1385  if (!HaveInsertPoint())
1386    return false;
1387  // Emit inits for the linear variables.
1388  bool HasLinears = false;
1389  for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
1390    for (const Expr *Init : C->inits()) {
1391      HasLinears = true;
1392      const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(Init)->getDecl());
1393      if (const auto *Ref =
1394              dyn_cast<DeclRefExpr>(VD->getInit()->IgnoreImpCasts())) {
1395        AutoVarEmission Emission = EmitAutoVarAlloca(*VD);
1396        const auto *OrigVD = cast<VarDecl>(Ref->getDecl());
1397        DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD),
1398                        CapturedStmtInfo->lookup(OrigVD) != nullptr,
1399                        VD->getInit()->getType(), VK_LValue,
1400                        VD->getInit()->getExprLoc());
1401        EmitExprAsInit(&DRE, VD, MakeAddrLValue(Emission.getAllocatedAddress(),
1402                                                VD->getType()),
1403                       /*capturedByInit=*/false);
1404        EmitAutoVarCleanups(Emission);
1405      } else {
1406        EmitVarDecl(*VD);
1407      }
1408    }
1409    // Emit the linear steps for the linear clauses.
1410    // If a step is not constant, it is pre-calculated before the loop.
1411    if (const auto *CS = cast_or_null<BinaryOperator>(C->getCalcStep()))
1412      if (const auto *SaveRef = cast<DeclRefExpr>(CS->getLHS())) {
1413        EmitVarDecl(*cast<VarDecl>(SaveRef->getDecl()));
1414        // Emit calculation of the linear step.
1415        EmitIgnoredExpr(CS);
1416      }
1417  }
1418  return HasLinears;
1419}
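// Illustrative sketch (hypothetical user code): for
//   #pragma omp simd linear(j:step)
// the code above emits a private copy of 'j' initialized from the original
// variable and, because 'step' is not a compile-time constant here, also
// emits a helper variable holding the pre-calculated linear step before the
// loop ('j' and 'step' are placeholder names).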
1420
1421void CodeGenFunction::EmitOMPLinearClauseFinal(
1422    const OMPLoopDirective &D,
1423    const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen) {
1424  if (!HaveInsertPoint())
1425    return;
1426  llvm::BasicBlock *DoneBB = nullptr;
1427  // Emit the final values of the linear variables.
1428  for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
1429    auto IC = C->varlist_begin();
1430    for (const Expr *F : C->finals()) {
1431      if (!DoneBB) {
1432        if (llvm::Value *Cond = CondGen(*this)) {
1433          // If the first post-update expression is found, emit conditional
1434          // block if it was requested.
1435          llvm::BasicBlock *ThenBB = createBasicBlock(".omp.linear.pu");
1436          DoneBB = createBasicBlock(".omp.linear.pu.done");
1437          Builder.CreateCondBr(Cond, ThenBB, DoneBB);
1438          EmitBlock(ThenBB);
1439        }
1440      }
1441      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl());
1442      DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD),
1443                      CapturedStmtInfo->lookup(OrigVD) != nullptr,
1444                      (*IC)->getType(), VK_LValue, (*IC)->getExprLoc());
1445      Address OrigAddr = EmitLValue(&DRE).getAddress();
1446      CodeGenFunction::OMPPrivateScope VarScope(*this);
1447      VarScope.addPrivate(OrigVD, [OrigAddr]() { return OrigAddr; });
1448      (void)VarScope.Privatize();
1449      EmitIgnoredExpr(F);
1450      ++IC;
1451    }
1452    if (const Expr *PostUpdate = C->getPostUpdateExpr())
1453      EmitIgnoredExpr(PostUpdate);
1454  }
1455  if (DoneBB)
1456    EmitBlock(DoneBB, /*IsFinished=*/true);
1457}
1458
1459static void emitAlignedClause(CodeGenFunction &CGF,
1460                              const OMPExecutableDirective &D) {
1461  if (!CGF.HaveInsertPoint())
1462    return;
1463  for (const auto *Clause : D.getClausesOfKind<OMPAlignedClause>()) {
1464    unsigned ClauseAlignment = 0;
1465    if (const Expr *AlignmentExpr = Clause->getAlignment()) {
1466      auto *AlignmentCI =
1467          cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AlignmentExpr));
1468      ClauseAlignment = static_cast<unsigned>(AlignmentCI->getZExtValue());
1469    }
1470    for (const Expr *E : Clause->varlists()) {
1471      unsigned Alignment = ClauseAlignment;
1472      if (Alignment == 0) {
1473        // OpenMP [2.8.1, Description]
1474        // If no optional parameter is specified, implementation-defined default
1475        // alignments for SIMD instructions on the target platforms are assumed.
1476        Alignment =
1477            CGF.getContext()
1478                .toCharUnitsFromBits(CGF.getContext().getOpenMPDefaultSimdAlign(
1479                    E->getType()->getPointeeType()))
1480                .getQuantity();
1481      }
1482      assert((Alignment == 0 || llvm::isPowerOf2_32(Alignment)) &&
1483             "alignment is not power of 2");
1484      if (Alignment != 0) {
1485        llvm::Value *PtrValue = CGF.EmitScalarExpr(E);
1486        CGF.EmitAlignmentAssumption(
1487            PtrValue, E, /*No second loc needed*/ SourceLocation(), Alignment);
1488      }
1489    }
1490  }
1491}
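// Illustrative sketch (hypothetical user code): for
//   #pragma omp simd aligned(p:64)
// the code above emits an alignment assumption (EmitAlignmentAssumption) that
// 'p' is 64-byte aligned; when no alignment argument is given, the target's
// default SIMD alignment for the pointee type is assumed instead ('p' is a
// placeholder name).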
1492
1493void CodeGenFunction::EmitOMPPrivateLoopCounters(
1494    const OMPLoopDirective &S, CodeGenFunction::OMPPrivateScope &LoopScope) {
1495  if (!HaveInsertPoint())
1496    return;
1497  auto I = S.private_counters().begin();
1498  for (const Expr *E : S.counters()) {
1499    const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
1500    const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl());
1501    // Emit var without initialization.
1502    AutoVarEmission VarEmission = EmitAutoVarAlloca(*PrivateVD);
1503    EmitAutoVarCleanups(VarEmission);
1504    LocalDeclMap.erase(PrivateVD);
1505    (void)LoopScope.addPrivate(VD, [&VarEmission]() {
1506      return VarEmission.getAllocatedAddress();
1507    });
1508    if (LocalDeclMap.count(VD) || CapturedStmtInfo->lookup(VD) ||
1509        VD->hasGlobalStorage()) {
1510      (void)LoopScope.addPrivate(PrivateVD, [this, VD, E]() {
1511        DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(VD),
1512                        LocalDeclMap.count(VD) || CapturedStmtInfo->lookup(VD),
1513                        E->getType(), VK_LValue, E->getExprLoc());
1514        return EmitLValue(&DRE).getAddress();
1515      });
1516    } else {
1517      (void)LoopScope.addPrivate(PrivateVD, [&VarEmission]() {
1518        return VarEmission.getAllocatedAddress();
1519      });
1520    }
1521    ++I;
1522  }
1523  // Privatize extra loop counters used in loops for ordered(n) clauses.
1524  for (const auto *C : S.getClausesOfKind<OMPOrderedClause>()) {
1525    if (!C->getNumForLoops())
1526      continue;
1527    for (unsigned I = S.getCollapsedNumber(),
1528                  E = C->getLoopNumIterations().size();
1529         I < E; ++I) {
1530      const auto *DRE = cast<DeclRefExpr>(C->getLoopCounter(I));
1531      const auto *VD = cast<VarDecl>(DRE->getDecl());
1532      // Override only those variables that can be captured to avoid re-emission
1533      // of the variables declared within the loops.
1534      if (DRE->refersToEnclosingVariableOrCapture()) {
1535        (void)LoopScope.addPrivate(VD, [this, DRE, VD]() {
1536          return CreateMemTemp(DRE->getType(), VD->getName());
1537        });
1538      }
1539    }
1540  }
1541}
1542
1543static void emitPreCond(CodeGenFunction &CGF, const OMPLoopDirective &S,
1544                        const Expr *Cond, llvm::BasicBlock *TrueBlock,
1545                        llvm::BasicBlock *FalseBlock, uint64_t TrueCount) {
1546  if (!CGF.HaveInsertPoint())
1547    return;
1548  {
1549    CodeGenFunction::OMPPrivateScope PreCondScope(CGF);
1550    CGF.EmitOMPPrivateLoopCounters(S, PreCondScope);
1551    (void)PreCondScope.Privatize();
1552    // Get initial values of real counters.
1553    for (const Expr *I : S.inits()) {
1554      CGF.EmitIgnoredExpr(I);
1555    }
1556  }
1557  // Check that loop is executed at least one time.
1558  CGF.EmitBranchOnBoolExpr(Cond, TrueBlock, FalseBlock, TrueCount);
1559}
1560
1561void CodeGenFunction::EmitOMPLinearClause(
1562    const OMPLoopDirective &D, CodeGenFunction::OMPPrivateScope &PrivateScope) {
1563  if (!HaveInsertPoint())
1564    return;
1565  llvm::DenseSet<const VarDecl *> SIMDLCVs;
1566  if (isOpenMPSimdDirective(D.getDirectiveKind())) {
1567    const auto *LoopDirective = cast<OMPLoopDirective>(&D);
1568    for (const Expr *C : LoopDirective->counters()) {
1569      SIMDLCVs.insert(
1570          cast<VarDecl>(cast<DeclRefExpr>(C)->getDecl())->getCanonicalDecl());
1571    }
1572  }
1573  for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
1574    auto CurPrivate = C->privates().begin();
1575    for (const Expr *E : C->varlists()) {
1576      const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
1577      const auto *PrivateVD =
1578          cast<VarDecl>(cast<DeclRefExpr>(*CurPrivate)->getDecl());
1579      if (!SIMDLCVs.count(VD->getCanonicalDecl())) {
1580        bool IsRegistered = PrivateScope.addPrivate(VD, [this, PrivateVD]() {
1581          // Emit private VarDecl with copy init.
1582          EmitVarDecl(*PrivateVD);
1583          return GetAddrOfLocalVar(PrivateVD);
1584        });
1585        assert(IsRegistered && "linear var already registered as private");
1586        // Silence the warning about unused variable.
1587        (void)IsRegistered;
1588      } else {
1589        EmitVarDecl(*PrivateVD);
1590      }
1591      ++CurPrivate;
1592    }
1593  }
1594}
1595
1596static void emitSimdlenSafelenClause(CodeGenFunction &CGF,
1597                                     const OMPExecutableDirective &D,
1598                                     bool IsMonotonic) {
1599  if (!CGF.HaveInsertPoint())
1600    return;
1601  if (const auto *C = D.getSingleClause<OMPSimdlenClause>()) {
1602    RValue Len = CGF.EmitAnyExpr(C->getSimdlen(), AggValueSlot::ignored(),
1603                                 /*ignoreResult=*/true);
1604    auto *Val = cast<llvm::ConstantInt>(Len.getScalarVal());
1605    CGF.LoopStack.setVectorizeWidth(Val->getZExtValue());
1606    // In presence of finite 'safelen', it may be unsafe to mark all
1607    // the memory instructions parallel, because loop-carried
1608    // dependences of 'safelen' iterations are possible.
1609    if (!IsMonotonic)
1610      CGF.LoopStack.setParallel(!D.getSingleClause<OMPSafelenClause>());
1611  } else if (const auto *C = D.getSingleClause<OMPSafelenClause>()) {
1612    RValue Len = CGF.EmitAnyExpr(C->getSafelen(), AggValueSlot::ignored(),
1613                                 /*ignoreResult=*/true);
1614    auto *Val = cast<llvm::ConstantInt>(Len.getScalarVal());
1615    CGF.LoopStack.setVectorizeWidth(Val->getZExtValue());
1616    // In presence of finite 'safelen', it may be unsafe to mark all
1617    // the memory instructions parallel, because loop-carried
1618    // dependences of 'safelen' iterations are possible.
1619    CGF.LoopStack.setParallel(/*Enable=*/false);
1620  }
1621}
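// Illustrative summary of the two cases handled above:
//   #pragma omp simd simdlen(8)  -> vectorize width 8; memory accesses may
//                                   still be marked parallel (unless a safelen
//                                   clause is also present or IsMonotonic).
//   #pragma omp simd safelen(8)  -> vectorize width 8; accesses are not marked
//                                   parallel, since loop-carried dependences up
//                                   to 8 iterations apart are allowed.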
1622
1623void CodeGenFunction::EmitOMPSimdInit(const OMPLoopDirective &D,
1624                                      bool IsMonotonic) {
1625  // Walk clauses and process safelen/lastprivate.
1626  LoopStack.setParallel(!IsMonotonic);
1627  LoopStack.setVectorizeEnable();
1628  emitSimdlenSafelenClause(*this, D, IsMonotonic);
1629}
1630
1631void CodeGenFunction::EmitOMPSimdFinal(
1632    const OMPLoopDirective &D,
1633    const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen) {
1634  if (!HaveInsertPoint())
1635    return;
1636  llvm::BasicBlock *DoneBB = nullptr;
1637  auto IC = D.counters().begin();
1638  auto IPC = D.private_counters().begin();
1639  for (const Expr *F : D.finals()) {
1640    const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>((*IC))->getDecl());
1641    const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>((*IPC))->getDecl());
1642    const auto *CED = dyn_cast<OMPCapturedExprDecl>(OrigVD);
1643    if (LocalDeclMap.count(OrigVD) || CapturedStmtInfo->lookup(OrigVD) ||
1644        OrigVD->hasGlobalStorage() || CED) {
1645      if (!DoneBB) {
1646        if (llvm::Value *Cond = CondGen(*this)) {
1647          // If the first post-update expression is found, emit conditional
1648          // block if it was requested.
1649          llvm::BasicBlock *ThenBB = createBasicBlock(".omp.final.then");
1650          DoneBB = createBasicBlock(".omp.final.done");
1651          Builder.CreateCondBr(Cond, ThenBB, DoneBB);
1652          EmitBlock(ThenBB);
1653        }
1654      }
1655      Address OrigAddr = Address::invalid();
1656      if (CED) {
1657        OrigAddr = EmitLValue(CED->getInit()->IgnoreImpCasts()).getAddress();
1658      } else {
1659        DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(PrivateVD),
1660                        /*RefersToEnclosingVariableOrCapture=*/false,
1661                        (*IPC)->getType(), VK_LValue, (*IPC)->getExprLoc());
1662        OrigAddr = EmitLValue(&DRE).getAddress();
1663      }
1664      OMPPrivateScope VarScope(*this);
1665      VarScope.addPrivate(OrigVD, [OrigAddr]() { return OrigAddr; });
1666      (void)VarScope.Privatize();
1667      EmitIgnoredExpr(F);
1668    }
1669    ++IC;
1670    ++IPC;
1671  }
1672  if (DoneBB)
1673    EmitBlock(DoneBB, /*IsFinished=*/true);
1674}
1675
1676static void emitOMPLoopBodyWithStopPoint(CodeGenFunction &CGF,
1677                                         const OMPLoopDirective &S,
1678                                         CodeGenFunction::JumpDest LoopExit) {
1679  CGF.EmitOMPLoopBody(S, LoopExit);
1680  CGF.EmitStopPoint(&S);
1681}
1682
1683/// Emit a helper variable and return corresponding lvalue.
1684static LValue EmitOMPHelperVar(CodeGenFunction &CGF,
1685                               const DeclRefExpr *Helper) {
1686  auto VDecl = cast<VarDecl>(Helper->getDecl());
1687  CGF.EmitVarDecl(*VDecl);
1688  return CGF.EmitLValue(Helper);
1689}
1690
1691static void emitOMPSimdRegion(CodeGenFunction &CGF, const OMPLoopDirective &S,
1692                              PrePostActionTy &Action) {
1693  Action.Enter(CGF);
1694  assert(isOpenMPSimdDirective(S.getDirectiveKind()) &&
1695         "Expected simd directive");
1696  OMPLoopScope PreInitScope(CGF, S);
1697  // if (PreCond) {
1698  //   for (IV in 0..LastIteration) BODY;
1699  //   <Final counter/linear vars updates>;
1700  // }
1701  //
1702  if (isOpenMPDistributeDirective(S.getDirectiveKind()) ||
1703      isOpenMPWorksharingDirective(S.getDirectiveKind()) ||
1704      isOpenMPTaskLoopDirective(S.getDirectiveKind())) {
1705    (void)EmitOMPHelperVar(CGF, cast<DeclRefExpr>(S.getLowerBoundVariable()));
1706    (void)EmitOMPHelperVar(CGF, cast<DeclRefExpr>(S.getUpperBoundVariable()));
1707  }
1708
1709  // Emit: if (PreCond) - begin.
1710  // If the condition constant folds and can be elided, avoid emitting the
1711  // whole loop.
1712  bool CondConstant;
1713  llvm::BasicBlock *ContBlock = nullptr;
1714  if (CGF.ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
1715    if (!CondConstant)
1716      return;
1717  } else {
1718    llvm::BasicBlock *ThenBlock = CGF.createBasicBlock("simd.if.then");
1719    ContBlock = CGF.createBasicBlock("simd.if.end");
1720    emitPreCond(CGF, S, S.getPreCond(), ThenBlock, ContBlock,
1721                CGF.getProfileCount(&S));
1722    CGF.EmitBlock(ThenBlock);
1723    CGF.incrementProfileCounter(&S);
1724  }
1725
1726  // Emit the loop iteration variable.
1727  const Expr *IVExpr = S.getIterationVariable();
1728  const auto *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl());
1729  CGF.EmitVarDecl(*IVDecl);
1730  CGF.EmitIgnoredExpr(S.getInit());
1731
1732  // Emit the iterations count variable.
1733  // If it is not a variable, Sema decided to calculate iterations count on
1734  // each iteration (e.g., it is foldable into a constant).
1735  if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
1736    CGF.EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
1737    // Emit calculation of the iterations count.
1738    CGF.EmitIgnoredExpr(S.getCalcLastIteration());
1739  }
1740
1741  CGF.EmitOMPSimdInit(S);
1742
1743  emitAlignedClause(CGF, S);
1744  (void)CGF.EmitOMPLinearClauseInit(S);
1745  {
1746    CodeGenFunction::OMPPrivateScope LoopScope(CGF);
1747    CGF.EmitOMPPrivateLoopCounters(S, LoopScope);
1748    CGF.EmitOMPLinearClause(S, LoopScope);
1749    CGF.EmitOMPPrivateClause(S, LoopScope);
1750    CGF.EmitOMPReductionClauseInit(S, LoopScope);
1751    bool HasLastprivateClause = CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
1752    (void)LoopScope.Privatize();
1753    if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
1754      CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S);
1755    CGF.EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(),
1756                         S.getInc(),
1757                         [&S](CodeGenFunction &CGF) {
1758                           CGF.EmitOMPLoopBody(S, CodeGenFunction::JumpDest());
1759                           CGF.EmitStopPoint(&S);
1760                         },
1761                         [](CodeGenFunction &) {});
1762    CGF.EmitOMPSimdFinal(S, [](CodeGenFunction &) { return nullptr; });
1763    // Emit final copy of the lastprivate variables at the end of loops.
1764    if (HasLastprivateClause)
1765      CGF.EmitOMPLastprivateClauseFinal(S, /*NoFinals=*/true);
1766    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_simd);
1767    emitPostUpdateForReductionClause(CGF, S,
1768                                     [](CodeGenFunction &) { return nullptr; });
1769  }
1770  CGF.EmitOMPLinearClauseFinal(S, [](CodeGenFunction &) { return nullptr; });
1771  // Emit: if (PreCond) - end.
1772  if (ContBlock) {
1773    CGF.EmitBranch(ContBlock);
1774    CGF.EmitBlock(ContBlock, true);
1775  }
1776}
1777
1778void CodeGenFunction::EmitOMPSimdDirective(const OMPSimdDirective &S) {
1779  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
1780    emitOMPSimdRegion(CGF, S, Action);
1781  };
1782  OMPLexicalScope Scope(*this, S, OMPD_unknown);
1783  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen);
1784}
1785
1786void CodeGenFunction::EmitOMPOuterLoop(
1787    bool DynamicOrOrdered, bool IsMonotonic, const OMPLoopDirective &S,
1788    CodeGenFunction::OMPPrivateScope &LoopScope,
1789    const CodeGenFunction::OMPLoopArguments &LoopArgs,
1790    const CodeGenFunction::CodeGenLoopTy &CodeGenLoop,
1791    const CodeGenFunction::CodeGenOrderedTy &CodeGenOrdered) {
1792  CGOpenMPRuntime &RT = CGM.getOpenMPRuntime();
1793
1794  const Expr *IVExpr = S.getIterationVariable();
1795  const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
1796  const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
1797
1798  JumpDest LoopExit = getJumpDestInCurrentScope("omp.dispatch.end");
1799
1800  // Start the loop with a block that tests the condition.
1801  llvm::BasicBlock *CondBlock = createBasicBlock("omp.dispatch.cond");
1802  EmitBlock(CondBlock);
1803  const SourceRange R = S.getSourceRange();
1804  LoopStack.push(CondBlock, SourceLocToDebugLoc(R.getBegin()),
1805                 SourceLocToDebugLoc(R.getEnd()));
1806
1807  llvm::Value *BoolCondVal = nullptr;
1808  if (!DynamicOrOrdered) {
1809    // UB = min(UB, GlobalUB) or
1810    // UB = min(UB, PrevUB) for combined loop sharing constructs (e.g.
1811    // 'distribute parallel for')
1812    EmitIgnoredExpr(LoopArgs.EUB);
1813    // IV = LB
1814    EmitIgnoredExpr(LoopArgs.Init);
1815    // IV < UB
1816    BoolCondVal = EvaluateExprAsBool(LoopArgs.Cond);
1817  } else {
1818    BoolCondVal =
1819        RT.emitForNext(*this, S.getBeginLoc(), IVSize, IVSigned, LoopArgs.IL,
1820                       LoopArgs.LB, LoopArgs.UB, LoopArgs.ST);
1821  }
1822
1823  // If there are any cleanups between here and the loop-exit scope,
1824  // create a block to stage a loop exit along.
1825  llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
1826  if (LoopScope.requiresCleanups())
1827    ExitBlock = createBasicBlock("omp.dispatch.cleanup");
1828
1829  llvm::BasicBlock *LoopBody = createBasicBlock("omp.dispatch.body");
1830  Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock);
1831  if (ExitBlock != LoopExit.getBlock()) {
1832    EmitBlock(ExitBlock);
1833    EmitBranchThroughCleanup(LoopExit);
1834  }
1835  EmitBlock(LoopBody);
1836
1837  // Emit "IV = LB" (in case of static schedule, we have already calculated new
1838  // LB for loop condition and emitted it above).
1839  if (DynamicOrOrdered)
1840    EmitIgnoredExpr(LoopArgs.Init);
1841
1842  // Create a block for the increment.
1843  JumpDest Continue = getJumpDestInCurrentScope("omp.dispatch.inc");
1844  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
1845
1846  // Generate !llvm.loop.parallel metadata for loads and stores for loops
1847  // with dynamic/guided scheduling and without ordered clause.
1848  if (!isOpenMPSimdDirective(S.getDirectiveKind()))
1849    LoopStack.setParallel(!IsMonotonic);
1850  else
1851    EmitOMPSimdInit(S, IsMonotonic);
1852
1853  SourceLocation Loc = S.getBeginLoc();
1854
1855  // when 'distribute' is not combined with a 'for':
1856  // while (idx <= UB) { BODY; ++idx; }
1857  // when 'distribute' is combined with a 'for'
1858  // (e.g. 'distribute parallel for')
1859  // while (idx <= UB) { <CodeGen rest of pragma>; idx += ST; }
1860  EmitOMPInnerLoop(
1861      S, LoopScope.requiresCleanups(), LoopArgs.Cond, LoopArgs.IncExpr,
1862      [&S, LoopExit, &CodeGenLoop](CodeGenFunction &CGF) {
1863        CodeGenLoop(CGF, S, LoopExit);
1864      },
1865      [IVSize, IVSigned, Loc, &CodeGenOrdered](CodeGenFunction &CGF) {
1866        CodeGenOrdered(CGF, Loc, IVSize, IVSigned);
1867      });
1868
1869  EmitBlock(Continue.getBlock());
1870  BreakContinueStack.pop_back();
1871  if (!DynamicOrOrdered) {
1872    // Emit "LB = LB + Stride", "UB = UB + Stride".
1873    EmitIgnoredExpr(LoopArgs.NextLB);
1874    EmitIgnoredExpr(LoopArgs.NextUB);
1875  }
1876
1877  EmitBranch(CondBlock);
1878  LoopStack.pop();
1879  // Emit the fall-through block.
1880  EmitBlock(LoopExit.getBlock());
1881
1882  // Tell the runtime we are done.
1883  auto &&CodeGen = [DynamicOrOrdered, &S](CodeGenFunction &CGF) {
1884    if (!DynamicOrOrdered)
1885      CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(),
1886                                                     S.getDirectiveKind());
1887  };
1888  OMPCancelStack.emitExit(*this, S.getDirectiveKind(), CodeGen);
1889}
1890
1891void CodeGenFunction::EmitOMPForOuterLoop(
1892    const OpenMPScheduleTy &ScheduleKind, bool IsMonotonic,
1893    const OMPLoopDirective &S, OMPPrivateScope &LoopScope, bool Ordered,
1894    const OMPLoopArguments &LoopArgs,
1895    const CodeGenDispatchBoundsTy &CGDispatchBounds) {
1896  CGOpenMPRuntime &RT = CGM.getOpenMPRuntime();
1897
1898  // Dynamic scheduling of the outer loop (dynamic, guided, auto, runtime).
1899  const bool DynamicOrOrdered =
1900      Ordered || RT.isDynamic(ScheduleKind.Schedule);
1901
1902  assert((Ordered ||
1903          !RT.isStaticNonchunked(ScheduleKind.Schedule,
1904                                 LoopArgs.Chunk != nullptr)) &&
1905         "static non-chunked schedule does not need outer loop");
1906
1907  // Emit outer loop.
1908  //
1909  // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
1910  // When schedule(dynamic,chunk_size) is specified, the iterations are
1911  // distributed to threads in the team in chunks as the threads request them.
1912  // Each thread executes a chunk of iterations, then requests another chunk,
1913  // until no chunks remain to be distributed. Each chunk contains chunk_size
1914  // iterations, except for the last chunk to be distributed, which may have
1915  // fewer iterations. When no chunk_size is specified, it defaults to 1.
1916  //
1917  // When schedule(guided,chunk_size) is specified, the iterations are assigned
1918  // to threads in the team in chunks as the executing threads request them.
1919  // Each thread executes a chunk of iterations, then requests another chunk,
1920  // until no chunks remain to be assigned. For a chunk_size of 1, the size of
1921  // each chunk is proportional to the number of unassigned iterations divided
1922  // by the number of threads in the team, decreasing to 1. For a chunk_size
1923  // with value k (greater than 1), the size of each chunk is determined in the
1924  // same way, with the restriction that the chunks do not contain fewer than k
1925  // iterations (except for the last chunk to be assigned, which may have fewer
1926  // than k iterations).
1927  //
1928  // When schedule(auto) is specified, the decision regarding scheduling is
1929  // delegated to the compiler and/or runtime system. The programmer gives the
1930  // implementation the freedom to choose any possible mapping of iterations to
1931  // threads in the team.
1932  //
1933  // When schedule(runtime) is specified, the decision regarding scheduling is
1934  // deferred until run time, and the schedule and chunk size are taken from the
1935  // run-sched-var ICV. If the ICV is set to auto, the schedule is
1936  // implementation defined
1937  //
1938  // while(__kmpc_dispatch_next(&LB, &UB)) {
1939  //   idx = LB;
1940  //   while (idx <= UB) { BODY; ++idx;
1941  //   __kmpc_dispatch_fini_(4|8)[u](); // For ordered loops only.
1942  //   } // inner loop
1943  // }
1944  //
1945  // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
1946  // When schedule(static, chunk_size) is specified, iterations are divided into
1947  // chunks of size chunk_size, and the chunks are assigned to the threads in
1948  // the team in a round-robin fashion in the order of the thread number.
1949  //
1950  // while(UB = min(UB, GlobalUB), idx = LB, idx < UB) {
1951  //   while (idx <= UB) { BODY; ++idx; } // inner loop
1952  //   LB = LB + ST;
1953  //   UB = UB + ST;
1954  // }
1955  //
1956
1957  const Expr *IVExpr = S.getIterationVariable();
1958  const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
1959  const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
1960
1961  if (DynamicOrOrdered) {
1962    const std::pair<llvm::Value *, llvm::Value *> DispatchBounds =
1963        CGDispatchBounds(*this, S, LoopArgs.LB, LoopArgs.UB);
1964    llvm::Value *LBVal = DispatchBounds.first;
1965    llvm::Value *UBVal = DispatchBounds.second;
1966    CGOpenMPRuntime::DispatchRTInput DipatchRTInputValues = {LBVal, UBVal,
1967                                                             LoopArgs.Chunk};
1968    RT.emitForDispatchInit(*this, S.getBeginLoc(), ScheduleKind, IVSize,
1969                           IVSigned, Ordered, DipatchRTInputValues);
1970  } else {
1971    CGOpenMPRuntime::StaticRTInput StaticInit(
1972        IVSize, IVSigned, Ordered, LoopArgs.IL, LoopArgs.LB, LoopArgs.UB,
1973        LoopArgs.ST, LoopArgs.Chunk);
1974    RT.emitForStaticInit(*this, S.getBeginLoc(), S.getDirectiveKind(),
1975                         ScheduleKind, StaticInit);
1976  }
1977
1978  auto &&CodeGenOrdered = [Ordered](CodeGenFunction &CGF, SourceLocation Loc,
1979                                    const unsigned IVSize,
1980                                    const bool IVSigned) {
1981    if (Ordered) {
1982      CGF.CGM.getOpenMPRuntime().emitForOrderedIterationEnd(CGF, Loc, IVSize,
1983                                                            IVSigned);
1984    }
1985  };
1986
1987  OMPLoopArguments OuterLoopArgs(LoopArgs.LB, LoopArgs.UB, LoopArgs.ST,
1988                                 LoopArgs.IL, LoopArgs.Chunk, LoopArgs.EUB);
1989  OuterLoopArgs.IncExpr = S.getInc();
1990  OuterLoopArgs.Init = S.getInit();
1991  OuterLoopArgs.Cond = S.getCond();
1992  OuterLoopArgs.NextLB = S.getNextLowerBound();
1993  OuterLoopArgs.NextUB = S.getNextUpperBound();
1994  EmitOMPOuterLoop(DynamicOrOrdered, IsMonotonic, S, LoopScope, OuterLoopArgs,
1995                   emitOMPLoopBodyWithStopPoint, CodeGenOrdered);
1996}
1997
1998static void emitEmptyOrdered(CodeGenFunction &, SourceLocation Loc,
1999                             const unsigned IVSize, const bool IVSigned) {}
2000
2001void CodeGenFunction::EmitOMPDistributeOuterLoop(
2002    OpenMPDistScheduleClauseKind ScheduleKind, const OMPLoopDirective &S,
2003    OMPPrivateScope &LoopScope, const OMPLoopArguments &LoopArgs,
2004    const CodeGenLoopTy &CodeGenLoopContent) {
2005
2006  CGOpenMPRuntime &RT = CGM.getOpenMPRuntime();
2007
2008  // Emit outer loop.
2009  // Same behavior as an OMPForOuterLoop, except that the schedule cannot be
2010  // dynamic.
2011  //
2012
2013  const Expr *IVExpr = S.getIterationVariable();
2014  const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
2015  const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
2016
2017  CGOpenMPRuntime::StaticRTInput StaticInit(
2018      IVSize, IVSigned, /* Ordered = */ false, LoopArgs.IL, LoopArgs.LB,
2019      LoopArgs.UB, LoopArgs.ST, LoopArgs.Chunk);
2020  RT.emitDistributeStaticInit(*this, S.getBeginLoc(), ScheduleKind, StaticInit);
2021
2022  // for combined 'distribute' and 'for' the increment expression of distribute
2023  // is stored in DistInc. For 'distribute' alone, it is in Inc.
2024  Expr *IncExpr;
2025  if (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()))
2026    IncExpr = S.getDistInc();
2027  else
2028    IncExpr = S.getInc();
2029
2030  // this routine is shared by 'omp distribute parallel for' and
2031  // 'omp distribute': select the right EUB expression depending on the
2032  // directive
2033  OMPLoopArguments OuterLoopArgs;
2034  OuterLoopArgs.LB = LoopArgs.LB;
2035  OuterLoopArgs.UB = LoopArgs.UB;
2036  OuterLoopArgs.ST = LoopArgs.ST;
2037  OuterLoopArgs.IL = LoopArgs.IL;
2038  OuterLoopArgs.Chunk = LoopArgs.Chunk;
2039  OuterLoopArgs.EUB = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
2040                          ? S.getCombinedEnsureUpperBound()
2041                          : S.getEnsureUpperBound();
2042  OuterLoopArgs.IncExpr = IncExpr;
2043  OuterLoopArgs.Init = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
2044                           ? S.getCombinedInit()
2045                           : S.getInit();
2046  OuterLoopArgs.Cond = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
2047                           ? S.getCombinedCond()
2048                           : S.getCond();
2049  OuterLoopArgs.NextLB = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
2050                             ? S.getCombinedNextLowerBound()
2051                             : S.getNextLowerBound();
2052  OuterLoopArgs.NextUB = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
2053                             ? S.getCombinedNextUpperBound()
2054                             : S.getNextUpperBound();
2055
2056  EmitOMPOuterLoop(/* DynamicOrOrdered = */ false, /* IsMonotonic = */ false, S,
2057                   LoopScope, OuterLoopArgs, CodeGenLoopContent,
2058                   emitEmptyOrdered);
2059}
2060
2061static std::pair<LValue, LValue>
2062emitDistributeParallelForInnerBounds(CodeGenFunction &CGF,
2063                                     const OMPExecutableDirective &S) {
2064  const OMPLoopDirective &LS = cast<OMPLoopDirective>(S);
2065  LValue LB =
2066      EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getLowerBoundVariable()));
2067  LValue UB =
2068      EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getUpperBoundVariable()));
2069
2070  // When composing 'distribute' with 'for' (e.g. as in 'distribute
2071  // parallel for') we need to use the 'distribute'
2072  // chunk lower and upper bounds rather than the whole loop iteration
2073  // space. These are parameters to the outlined function for 'parallel'
2074  // and we copy the bounds of the previous schedule into the
2075  // current ones.
2076  LValue PrevLB = CGF.EmitLValue(LS.getPrevLowerBoundVariable());
2077  LValue PrevUB = CGF.EmitLValue(LS.getPrevUpperBoundVariable());
2078  llvm::Value *PrevLBVal = CGF.EmitLoadOfScalar(
2079      PrevLB, LS.getPrevLowerBoundVariable()->getExprLoc());
2080  PrevLBVal = CGF.EmitScalarConversion(
2081      PrevLBVal, LS.getPrevLowerBoundVariable()->getType(),
2082      LS.getIterationVariable()->getType(),
2083      LS.getPrevLowerBoundVariable()->getExprLoc());
2084  llvm::Value *PrevUBVal = CGF.EmitLoadOfScalar(
2085      PrevUB, LS.getPrevUpperBoundVariable()->getExprLoc());
2086  PrevUBVal = CGF.EmitScalarConversion(
2087      PrevUBVal, LS.getPrevUpperBoundVariable()->getType(),
2088      LS.getIterationVariable()->getType(),
2089      LS.getPrevUpperBoundVariable()->getExprLoc());
2090
2091  CGF.EmitStoreOfScalar(PrevLBVal, LB);
2092  CGF.EmitStoreOfScalar(PrevUBVal, UB);
2093
2094  return {LB, UB};
2095}
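// Illustrative sketch: in a combined construct such as
//   #pragma omp distribute parallel for
// each team receives a 'distribute' chunk [PrevLB, PrevUB]; the helper above
// loads those values, converts them to the iteration-variable type, and
// stores them into the worksharing LB/UB, so the inner 'for' only schedules
// the iterations of its own team's chunk.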
2096
2097/// If the 'for' loop has a dispatch schedule (e.g. dynamic, guided), then
2098/// we need to use the LB and UB expressions generated by the worksharing
2099/// code generation support, whereas in non-combined situations we would
2100/// just emit 0 and the LastIteration expression.
2101/// This function is necessary due to the difference of the LB and UB
2102/// types for the RT emission routines 'for_static_init' and
2103/// 'for_dispatch_init'.
2104static std::pair<llvm::Value *, llvm::Value *>
2105emitDistributeParallelForDispatchBounds(CodeGenFunction &CGF,
2106                                        const OMPExecutableDirective &S,
2107                                        Address LB, Address UB) {
2108  const OMPLoopDirective &LS = cast<OMPLoopDirective>(S);
2109  const Expr *IVExpr = LS.getIterationVariable();
2110  // when implementing a dynamic schedule for a 'for' combined with a
2111  // 'distribute' (e.g. 'distribute parallel for'), the 'for' loop
2112  // is not normalized as each team only executes its own assigned
2113  // distribute chunk
2114  QualType IteratorTy = IVExpr->getType();
2115  llvm::Value *LBVal =
2116      CGF.EmitLoadOfScalar(LB, /*Volatile=*/false, IteratorTy, S.getBeginLoc());
2117  llvm::Value *UBVal =
2118      CGF.EmitLoadOfScalar(UB, /*Volatile=*/false, IteratorTy, S.getBeginLoc());
2119  return {LBVal, UBVal};
2120}
2121
2122static void emitDistributeParallelForDistributeInnerBoundParams(
2123    CodeGenFunction &CGF, const OMPExecutableDirective &S,
2124    llvm::SmallVectorImpl<llvm::Value *> &CapturedVars) {
2125  const auto &Dir = cast<OMPLoopDirective>(S);
2126  LValue LB =
2127      CGF.EmitLValue(cast<DeclRefExpr>(Dir.getCombinedLowerBoundVariable()));
2128  llvm::Value *LBCast = CGF.Builder.CreateIntCast(
2129      CGF.Builder.CreateLoad(LB.getAddress()), CGF.SizeTy, /*isSigned=*/false);
2130  CapturedVars.push_back(LBCast);
2131  LValue UB =
2132      CGF.EmitLValue(cast<DeclRefExpr>(Dir.getCombinedUpperBoundVariable()));
2133
2134  llvm::Value *UBCast = CGF.Builder.CreateIntCast(
2135      CGF.Builder.CreateLoad(UB.getAddress()), CGF.SizeTy, /*isSigned=*/false);
2136  CapturedVars.push_back(UBCast);
2137}
2138
2139static void
2140emitInnerParallelForWhenCombined(CodeGenFunction &CGF,
2141                                 const OMPLoopDirective &S,
2142                                 CodeGenFunction::JumpDest LoopExit) {
2143  auto &&CGInlinedWorksharingLoop = [&S](CodeGenFunction &CGF,
2144                                         PrePostActionTy &Action) {
2145    Action.Enter(CGF);
2146    bool HasCancel = false;
2147    if (!isOpenMPSimdDirective(S.getDirectiveKind())) {
2148      if (const auto *D = dyn_cast<OMPTeamsDistributeParallelForDirective>(&S))
2149        HasCancel = D->hasCancel();
2150      else if (const auto *D = dyn_cast<OMPDistributeParallelForDirective>(&S))
2151        HasCancel = D->hasCancel();
2152      else if (const auto *D =
2153                   dyn_cast<OMPTargetTeamsDistributeParallelForDirective>(&S))
2154        HasCancel = D->hasCancel();
2155    }
2156    CodeGenFunction::OMPCancelStackRAII CancelRegion(CGF, S.getDirectiveKind(),
2157                                                     HasCancel);
2158    CGF.EmitOMPWorksharingLoop(S, S.getPrevEnsureUpperBound(),
2159                               emitDistributeParallelForInnerBounds,
2160                               emitDistributeParallelForDispatchBounds);
2161  };
2162
2163  emitCommonOMPParallelDirective(
2164      CGF, S,
2165      isOpenMPSimdDirective(S.getDirectiveKind()) ? OMPD_for_simd : OMPD_for,
2166      CGInlinedWorksharingLoop,
2167      emitDistributeParallelForDistributeInnerBoundParams);
2168}
2169
2170void CodeGenFunction::EmitOMPDistributeParallelForDirective(
2171    const OMPDistributeParallelForDirective &S) {
2172  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2173    CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined,
2174                              S.getDistInc());
2175  };
2176  OMPLexicalScope Scope(*this, S, OMPD_parallel);
2177  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen);
2178}
2179
2180void CodeGenFunction::EmitOMPDistributeParallelForSimdDirective(
2181    const OMPDistributeParallelForSimdDirective &S) {
2182  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2183    CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined,
2184                              S.getDistInc());
2185  };
2186  OMPLexicalScope Scope(*this, S, OMPD_parallel);
2187  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen);
2188}
2189
2190void CodeGenFunction::EmitOMPDistributeSimdDirective(
2191    const OMPDistributeSimdDirective &S) {
2192  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2193    CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc());
2194  };
2195  OMPLexicalScope Scope(*this, S, OMPD_unknown);
2196  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen);
2197}
2198
2199void CodeGenFunction::EmitOMPTargetSimdDeviceFunction(
2200    CodeGenModule &CGM, StringRef ParentName, const OMPTargetSimdDirective &S) {
2201  // Emit SPMD target parallel for region as a standalone region.
2202  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
2203    emitOMPSimdRegion(CGF, S, Action);
2204  };
2205  llvm::Function *Fn;
2206  llvm::Constant *Addr;
2207  // Emit target region as a standalone region.
2208  CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
2209      S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
2210  assert(Fn && Addr && "Target device function emission failed.");
2211}
2212
2213void CodeGenFunction::EmitOMPTargetSimdDirective(
2214    const OMPTargetSimdDirective &S) {
2215  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
2216    emitOMPSimdRegion(CGF, S, Action);
2217  };
2218  emitCommonOMPTargetDirective(*this, S, CodeGen);
2219}
2220
2221namespace {
2222  struct ScheduleKindModifiersTy {
2223    OpenMPScheduleClauseKind Kind;
2224    OpenMPScheduleClauseModifier M1;
2225    OpenMPScheduleClauseModifier M2;
2226    ScheduleKindModifiersTy(OpenMPScheduleClauseKind Kind,
2227                            OpenMPScheduleClauseModifier M1,
2228                            OpenMPScheduleClauseModifier M2)
2229        : Kind(Kind), M1(M1), M2(M2) {}
2230  };
2231} // namespace
2232
2233bool CodeGenFunction::EmitOMPWorksharingLoop(
2234    const OMPLoopDirective &S, Expr *EUB,
2235    const CodeGenLoopBoundsTy &CodeGenLoopBounds,
2236    const CodeGenDispatchBoundsTy &CGDispatchBounds) {
2237  // Emit the loop iteration variable.
2238  const auto *IVExpr = cast<DeclRefExpr>(S.getIterationVariable());
2239  const auto *IVDecl = cast<VarDecl>(IVExpr->getDecl());
2240  EmitVarDecl(*IVDecl);
2241
2242  // Emit the iterations count variable.
2243  // If it is not a variable, Sema decided to calculate iterations count on each
2244  // iteration (e.g., it is foldable into a constant).
2245  if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
2246    EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
2247    // Emit calculation of the iterations count.
2248    EmitIgnoredExpr(S.getCalcLastIteration());
2249  }
2250
2251  CGOpenMPRuntime &RT = CGM.getOpenMPRuntime();
2252
2253  bool HasLastprivateClause;
2254  // Check pre-condition.
2255  {
2256    OMPLoopScope PreInitScope(*this, S);
2257    // Skip the entire loop if we don't meet the precondition.
2258    // If the condition constant folds and can be elided, avoid emitting the
2259    // whole loop.
2260    bool CondConstant;
2261    llvm::BasicBlock *ContBlock = nullptr;
2262    if (ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
2263      if (!CondConstant)
2264        return false;
2265    } else {
2266      llvm::BasicBlock *ThenBlock = createBasicBlock("omp.precond.then");
2267      ContBlock = createBasicBlock("omp.precond.end");
2268      emitPreCond(*this, S, S.getPreCond(), ThenBlock, ContBlock,
2269                  getProfileCount(&S));
2270      EmitBlock(ThenBlock);
2271      incrementProfileCounter(&S);
2272    }
2273
2274    RunCleanupsScope DoacrossCleanupScope(*this);
2275    bool Ordered = false;
2276    if (const auto *OrderedClause = S.getSingleClause<OMPOrderedClause>()) {
2277      if (OrderedClause->getNumForLoops())
2278        RT.emitDoacrossInit(*this, S, OrderedClause->getLoopNumIterations());
2279      else
2280        Ordered = true;
2281    }
2282
2283    llvm::DenseSet<const Expr *> EmittedFinals;
2284    emitAlignedClause(*this, S);
2285    bool HasLinears = EmitOMPLinearClauseInit(S);
2286    // Emit helper vars inits.
2287
2288    std::pair<LValue, LValue> Bounds = CodeGenLoopBounds(*this, S);
2289    LValue LB = Bounds.first;
2290    LValue UB = Bounds.second;
2291    LValue ST =
2292        EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getStrideVariable()));
2293    LValue IL =
2294        EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getIsLastIterVariable()));
2295
2296    // Emit 'then' code.
2297    {
2298      OMPPrivateScope LoopScope(*this);
2299      if (EmitOMPFirstprivateClause(S, LoopScope) || HasLinears) {
2300        // Emit implicit barrier to synchronize threads and avoid data races on
2301        // initialization of firstprivate variables and post-update of
2302        // lastprivate variables.
2303        CGM.getOpenMPRuntime().emitBarrierCall(
2304            *this, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false,
2305            /*ForceSimpleCall=*/true);
2306      }
2307      EmitOMPPrivateClause(S, LoopScope);
2308      HasLastprivateClause = EmitOMPLastprivateClauseInit(S, LoopScope);
2309      EmitOMPReductionClauseInit(S, LoopScope);
2310      EmitOMPPrivateLoopCounters(S, LoopScope);
2311      EmitOMPLinearClause(S, LoopScope);
2312      (void)LoopScope.Privatize();
2313      if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
2314        CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(*this, S);
2315
2316      // Detect the loop schedule kind and chunk.
2317      const Expr *ChunkExpr = nullptr;
2318      OpenMPScheduleTy ScheduleKind;
2319      if (const auto *C = S.getSingleClause<OMPScheduleClause>()) {
2320        ScheduleKind.Schedule = C->getScheduleKind();
2321        ScheduleKind.M1 = C->getFirstScheduleModifier();
2322        ScheduleKind.M2 = C->getSecondScheduleModifier();
2323        ChunkExpr = C->getChunkSize();
2324      } else {
2325        // Default behaviour for schedule clause.
2326        CGM.getOpenMPRuntime().getDefaultScheduleAndChunk(
2327            *this, S, ScheduleKind.Schedule, ChunkExpr);
2328      }
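      // Illustrative example (not from the original source): with
      //   #pragma omp for schedule(dynamic, 4)
      // ScheduleKind.Schedule is OMPC_SCHEDULE_dynamic and ChunkExpr is the
      // expression '4'; with no schedule clause the runtime default chosen
      // above is used instead.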
2329      bool HasChunkSizeOne = false;
2330      llvm::Value *Chunk = nullptr;
2331      if (ChunkExpr) {
2332        Chunk = EmitScalarExpr(ChunkExpr);
2333        Chunk = EmitScalarConversion(Chunk, ChunkExpr->getType(),
2334                                     S.getIterationVariable()->getType(),
2335                                     S.getBeginLoc());
2336        Expr::EvalResult Result;
2337        if (ChunkExpr->EvaluateAsInt(Result, getContext())) {
2338          llvm::APSInt EvaluatedChunk = Result.Val.getInt();
2339          HasChunkSizeOne = (EvaluatedChunk.getLimitedValue() == 1);
2340        }
2341      }
2342      const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
2343      const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
2344      // OpenMP 4.5, 2.7.1 Loop Construct, Description.
2345      // If the static schedule kind is specified or if the ordered clause is
2346      // specified, and if no monotonic modifier is specified, the effect will
2347      // be as if the monotonic modifier was specified.
2348      bool StaticChunkedOne = RT.isStaticChunked(ScheduleKind.Schedule,
2349          /* Chunked */ Chunk != nullptr) && HasChunkSizeOne &&
2350          isOpenMPLoopBoundSharingDirective(S.getDirectiveKind());
2351      if ((RT.isStaticNonchunked(ScheduleKind.Schedule,
2352                                 /* Chunked */ Chunk != nullptr) ||
2353           StaticChunkedOne) &&
2354          !Ordered) {
2355        if (isOpenMPSimdDirective(S.getDirectiveKind()))
2356          EmitOMPSimdInit(S, /*IsMonotonic=*/true);
2357        // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
2358        // When no chunk_size is specified, the iteration space is divided into
2359        // chunks that are approximately equal in size, and at most one chunk is
2360        // distributed to each thread. Note that the size of the chunks is
2361        // unspecified in this case.
2362        CGOpenMPRuntime::StaticRTInput StaticInit(
2363            IVSize, IVSigned, Ordered, IL.getAddress(), LB.getAddress(),
2364            UB.getAddress(), ST.getAddress(),
2365            StaticChunkedOne ? Chunk : nullptr);
2366        RT.emitForStaticInit(*this, S.getBeginLoc(), S.getDirectiveKind(),
2367                             ScheduleKind, StaticInit);
2368        JumpDest LoopExit =
2369            getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit"));
2370        // UB = min(UB, GlobalUB);
2371        if (!StaticChunkedOne)
2372          EmitIgnoredExpr(S.getEnsureUpperBound());
2373        // IV = LB;
2374        EmitIgnoredExpr(S.getInit());
2375        // For unchunked static schedule generate:
2376        //
2377        // while (idx <= UB) {
2378        //   BODY;
2379        //   ++idx;
2380        // }
2381        //
2382        // For static schedule with chunk one:
2383        //
2384        // while (IV <= PrevUB) {
2385        //   BODY;
2386        //   IV += ST;
2387        // }
2388        EmitOMPInnerLoop(S, LoopScope.requiresCleanups(),
2389            StaticChunkedOne ? S.getCombinedParForInDistCond() : S.getCond(),
2390            StaticChunkedOne ? S.getDistInc() : S.getInc(),
2391            [&S, LoopExit](CodeGenFunction &CGF) {
2392             CGF.EmitOMPLoopBody(S, LoopExit);
2393             CGF.EmitStopPoint(&S);
2394            },
2395            [](CodeGenFunction &) {});
2396        EmitBlock(LoopExit.getBlock());
2397        // Tell the runtime we are done.
2398        auto &&CodeGen = [&S](CodeGenFunction &CGF) {
2399          CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(),
2400                                                         S.getDirectiveKind());
2401        };
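        // Note: with the default host OpenMP runtime, emitForStaticFinish is
        // expected to lower to a __kmpc_for_static_fini call; emitExit below
        // runs this callback when leaving the (possibly cancellable) region.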
2402        OMPCancelStack.emitExit(*this, S.getDirectiveKind(), CodeGen);
2403      } else {
2404        const bool IsMonotonic =
2405            Ordered || ScheduleKind.Schedule == OMPC_SCHEDULE_static ||
2406            ScheduleKind.Schedule == OMPC_SCHEDULE_unknown ||
2407            ScheduleKind.M1 == OMPC_SCHEDULE_MODIFIER_monotonic ||
2408            ScheduleKind.M2 == OMPC_SCHEDULE_MODIFIER_monotonic;
2409        // Emit the outer loop, which requests its work chunk [LB..UB] from
2410        // runtime and runs the inner loop to process it.
2411        const OMPLoopArguments LoopArguments(LB.getAddress(), UB.getAddress(),
2412                                             ST.getAddress(), IL.getAddress(),
2413                                             Chunk, EUB);
2414        EmitOMPForOuterLoop(ScheduleKind, IsMonotonic, S, LoopScope, Ordered,
2415                            LoopArguments, CGDispatchBounds);
2416      }
2417      if (isOpenMPSimdDirective(S.getDirectiveKind())) {
2418        EmitOMPSimdFinal(S, [IL, &S](CodeGenFunction &CGF) {
2419          return CGF.Builder.CreateIsNotNull(
2420              CGF.EmitLoadOfScalar(IL, S.getBeginLoc()));
2421        });
2422      }
2423      EmitOMPReductionClauseFinal(
2424          S, /*ReductionKind=*/isOpenMPSimdDirective(S.getDirectiveKind())
2425                 ? /*Parallel and Simd*/ OMPD_parallel_for_simd
2426                 : /*Parallel only*/ OMPD_parallel);
2427      // Emit post-update of the reduction variables if IsLastIter != 0.
2428      emitPostUpdateForReductionClause(
2429          *this, S, [IL, &S](CodeGenFunction &CGF) {
2430            return CGF.Builder.CreateIsNotNull(
2431                CGF.EmitLoadOfScalar(IL, S.getBeginLoc()));
2432          });
2433      // Emit final copy of the lastprivate variables if IsLastIter != 0.
2434      if (HasLastprivateClause)
2435        EmitOMPLastprivateClauseFinal(
2436            S, isOpenMPSimdDirective(S.getDirectiveKind()),
2437            Builder.CreateIsNotNull(EmitLoadOfScalar(IL, S.getBeginLoc())));
2438    }
2439    EmitOMPLinearClauseFinal(S, [IL, &S](CodeGenFunction &CGF) {
2440      return CGF.Builder.CreateIsNotNull(
2441          CGF.EmitLoadOfScalar(IL, S.getBeginLoc()));
2442    });
2443    DoacrossCleanupScope.ForceCleanup();
2444    // We're now done with the loop, so jump to the continuation block.
2445    if (ContBlock) {
2446      EmitBranch(ContBlock);
2447      EmitBlock(ContBlock, /*IsFinished=*/true);
2448    }
2449  }
2450  return HasLastprivateClause;
2451}
2452
2453/// The following two functions generate expressions for the loop lower
2454/// and upper bounds in case of static and dynamic (dispatch) schedule
2455/// of the associated 'for' or 'distribute' loop.
2456static std::pair<LValue, LValue>
2457emitForLoopBounds(CodeGenFunction &CGF, const OMPExecutableDirective &S) {
2458  const auto &LS = cast<OMPLoopDirective>(S);
2459  LValue LB =
2460      EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getLowerBoundVariable()));
2461  LValue UB =
2462      EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getUpperBoundVariable()));
2463  return {LB, UB};
2464}
2465
2466/// When dealing with dispatch schedules (e.g. dynamic, guided) we do not
2467/// consider the lower and upper bound expressions generated by the
2468/// worksharing loop support, but we use 0 and the iteration space size as
2469/// constants
2470static std::pair<llvm::Value *, llvm::Value *>
2471emitDispatchForLoopBounds(CodeGenFunction &CGF, const OMPExecutableDirective &S,
2472                          Address LB, Address UB) {
2473  const auto &LS = cast<OMPLoopDirective>(S);
2474  const Expr *IVExpr = LS.getIterationVariable();
2475  const unsigned IVSize = CGF.getContext().getTypeSize(IVExpr->getType());
2476  llvm::Value *LBVal = CGF.Builder.getIntN(IVSize, 0);
2477  llvm::Value *UBVal = CGF.EmitScalarExpr(LS.getLastIteration());
2478  return {LBVal, UBVal};
2479}
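// Consequently, for dispatch schedules the runtime hands out chunks from the
// normalized space [0, iteration count) rather than from the LB/UB helper
// variables produced by emitForLoopBounds above.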
2480
2481void CodeGenFunction::EmitOMPForDirective(const OMPForDirective &S) {
2482  bool HasLastprivates = false;
2483  auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF,
2484                                          PrePostActionTy &) {
2485    OMPCancelStackRAII CancelRegion(CGF, OMPD_for, S.hasCancel());
2486    HasLastprivates = CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(),
2487                                                 emitForLoopBounds,
2488                                                 emitDispatchForLoopBounds);
2489  };
2490  {
2491    OMPLexicalScope Scope(*this, S, OMPD_unknown);
2492    CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_for, CodeGen,
2493                                                S.hasCancel());
2494  }
2495
2496  // Emit an implicit barrier at the end.
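  // Note that the barrier is kept even under 'nowait' when a lastprivate
  // clause is present, so the lastprivate assignments are complete before any
  // thread leaves the construct.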
2497  if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates)
2498    CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), OMPD_for);
2499}
2500
2501void CodeGenFunction::EmitOMPForSimdDirective(const OMPForSimdDirective &S) {
2502  bool HasLastprivates = false;
2503  auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF,
2504                                          PrePostActionTy &) {
2505    HasLastprivates = CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(),
2506                                                 emitForLoopBounds,
2507                                                 emitDispatchForLoopBounds);
2508  };
2509  {
2510    OMPLexicalScope Scope(*this, S, OMPD_unknown);
2511    CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen);
2512  }
2513
2514  // Emit an implicit barrier at the end.
2515  if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates)
2516    CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), OMPD_for);
2517}
2518
2519static LValue createSectionLVal(CodeGenFunction &CGF, QualType Ty,
2520                                const Twine &Name,
2521                                llvm::Value *Init = nullptr) {
2522  LValue LVal = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty, Name), Ty);
2523  if (Init)
2524    CGF.EmitStoreThroughLValue(RValue::get(Init), LVal, /*isInit*/ true);
2525  return LVal;
2526}
2527
2528void CodeGenFunction::EmitSections(const OMPExecutableDirective &S) {
2529  const Stmt *CapturedStmt = S.getInnermostCapturedStmt()->getCapturedStmt();
2530  const auto *CS = dyn_cast<CompoundStmt>(CapturedStmt);
2531  bool HasLastprivates = false;
2532  auto &&CodeGen = [&S, CapturedStmt, CS,
2533                    &HasLastprivates](CodeGenFunction &CGF, PrePostActionTy &) {
2534    ASTContext &C = CGF.getContext();
2535    QualType KmpInt32Ty =
2536        C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
2537    // Emit helper vars inits.
2538    LValue LB = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.lb.",
2539                                  CGF.Builder.getInt32(0));
2540    llvm::ConstantInt *GlobalUBVal = CS != nullptr
2541                                         ? CGF.Builder.getInt32(CS->size() - 1)
2542                                         : CGF.Builder.getInt32(0);
2543    LValue UB =
2544        createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.ub.", GlobalUBVal);
2545    LValue ST = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.st.",
2546                                  CGF.Builder.getInt32(1));
2547    LValue IL = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.il.",
2548                                  CGF.Builder.getInt32(0));
2549    // Loop counter.
2550    LValue IV = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.iv.");
2551    OpaqueValueExpr IVRefExpr(S.getBeginLoc(), KmpInt32Ty, VK_LValue);
2552    CodeGenFunction::OpaqueValueMapping OpaqueIV(CGF, &IVRefExpr, IV);
2553    OpaqueValueExpr UBRefExpr(S.getBeginLoc(), KmpInt32Ty, VK_LValue);
2554    CodeGenFunction::OpaqueValueMapping OpaqueUB(CGF, &UBRefExpr, UB);
2555    // Generate condition for loop.
2556    BinaryOperator Cond(&IVRefExpr, &UBRefExpr, BO_LE, C.BoolTy, VK_RValue,
2557                        OK_Ordinary, S.getBeginLoc(), FPOptions());
2558    // Increment for loop counter.
2559    UnaryOperator Inc(&IVRefExpr, UO_PreInc, KmpInt32Ty, VK_RValue, OK_Ordinary,
2560                      S.getBeginLoc(), true);
2561    auto &&BodyGen = [CapturedStmt, CS, &S, &IV](CodeGenFunction &CGF) {
2562      // Iterate through all sections and emit a switch construct:
2563      // switch (IV) {
2564      //   case 0:
2565      //     <SectionStmt[0]>;
2566      //     break;
2567      // ...
2568      //   case <NumSection> - 1:
2569      //     <SectionStmt[<NumSection> - 1]>;
2570      //     break;
2571      // }
2572      // .omp.sections.exit:
2573      llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".omp.sections.exit");
2574      llvm::SwitchInst *SwitchStmt =
2575          CGF.Builder.CreateSwitch(CGF.EmitLoadOfScalar(IV, S.getBeginLoc()),
2576                                   ExitBB, CS == nullptr ? 1 : CS->size());
2577      if (CS) {
2578        unsigned CaseNumber = 0;
2579        for (const Stmt *SubStmt : CS->children()) {
2580          auto CaseBB = CGF.createBasicBlock(".omp.sections.case");
2581          CGF.EmitBlock(CaseBB);
2582          SwitchStmt->addCase(CGF.Builder.getInt32(CaseNumber), CaseBB);
2583          CGF.EmitStmt(SubStmt);
2584          CGF.EmitBranch(ExitBB);
2585          ++CaseNumber;
2586        }
2587      } else {
2588        llvm::BasicBlock *CaseBB = CGF.createBasicBlock(".omp.sections.case");
2589        CGF.EmitBlock(CaseBB);
2590        SwitchStmt->addCase(CGF.Builder.getInt32(0), CaseBB);
2591        CGF.EmitStmt(CapturedStmt);
2592        CGF.EmitBranch(ExitBB);
2593      }
2594      CGF.EmitBlock(ExitBB/*IsFinished=*/true);
2595    };
2596
2597    CodeGenFunction::OMPPrivateScope LoopScope(CGF);
2598    if (CGF.EmitOMPFirstprivateClause(S, LoopScope)) {
2599      // Emit implicit barrier to synchronize threads and avoid data races on
2600      // initialization of firstprivate variables and post-update of lastprivate
2601      // variables.
2602      CGF.CGM.getOpenMPRuntime().emitBarrierCall(
2603          CGF, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false,
2604          /*ForceSimpleCall=*/true);
2605    }
2606    CGF.EmitOMPPrivateClause(S, LoopScope);
2607    HasLastprivates = CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
2608    CGF.EmitOMPReductionClauseInit(S, LoopScope);
2609    (void)LoopScope.Privatize();
2610    if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
2611      CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S);
2612
2613    // Emit static non-chunked loop.
2614    OpenMPScheduleTy ScheduleKind;
2615    ScheduleKind.Schedule = OMPC_SCHEDULE_static;
2616    CGOpenMPRuntime::StaticRTInput StaticInit(
2617        /*IVSize=*/32, /*IVSigned=*/true, /*Ordered=*/false, IL.getAddress(),
2618        LB.getAddress(), UB.getAddress(), ST.getAddress());
2619    CGF.CGM.getOpenMPRuntime().emitForStaticInit(
2620        CGF, S.getBeginLoc(), S.getDirectiveKind(), ScheduleKind, StaticInit);
2621    // UB = min(UB, GlobalUB);
2622    llvm::Value *UBVal = CGF.EmitLoadOfScalar(UB, S.getBeginLoc());
2623    llvm::Value *MinUBGlobalUB = CGF.Builder.CreateSelect(
2624        CGF.Builder.CreateICmpSLT(UBVal, GlobalUBVal), UBVal, GlobalUBVal);
2625    CGF.EmitStoreOfScalar(MinUBGlobalUB, UB);
2626    // IV = LB;
2627    CGF.EmitStoreOfScalar(CGF.EmitLoadOfScalar(LB, S.getBeginLoc()), IV);
2628    // while (idx <= UB) { BODY; ++idx; }
2629    CGF.EmitOMPInnerLoop(S, /*RequiresCleanup=*/false, &Cond, &Inc, BodyGen,
2630                         [](CodeGenFunction &) {});
2631    // Tell the runtime we are done.
2632    auto &&CodeGen = [&S](CodeGenFunction &CGF) {
2633      CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(),
2634                                                     S.getDirectiveKind());
2635    };
2636    CGF.OMPCancelStack.emitExit(CGF, S.getDirectiveKind(), CodeGen);
2637    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel);
2638    // Emit post-update of the reduction variables if IsLastIter != 0.
2639    emitPostUpdateForReductionClause(CGF, S, [IL, &S](CodeGenFunction &CGF) {
2640      return CGF.Builder.CreateIsNotNull(
2641          CGF.EmitLoadOfScalar(IL, S.getBeginLoc()));
2642    });
2643
2644    // Emit final copy of the lastprivate variables if IsLastIter != 0.
2645    if (HasLastprivates)
2646      CGF.EmitOMPLastprivateClauseFinal(
2647          S, /*NoFinals=*/false,
2648          CGF.Builder.CreateIsNotNull(
2649              CGF.EmitLoadOfScalar(IL, S.getBeginLoc())));
2650  };
2651
2652  bool HasCancel = false;
2653  if (auto *OSD = dyn_cast<OMPSectionsDirective>(&S))
2654    HasCancel = OSD->hasCancel();
2655  else if (auto *OPSD = dyn_cast<OMPParallelSectionsDirective>(&S))
2656    HasCancel = OPSD->hasCancel();
2657  OMPCancelStackRAII CancelRegion(*this, S.getDirectiveKind(), HasCancel);
2658  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_sections, CodeGen,
2659                                              HasCancel);
2660  // Emit barrier for lastprivates only if 'sections' directive has 'nowait'
2661  // clause. Otherwise the barrier will be generated by the codegen for the
2662  // directive.
2663  if (HasLastprivates && S.getSingleClause<OMPNowaitClause>()) {
2664    // Emit implicit barrier to synchronize threads and avoid data races on
2665    // initialization of firstprivate variables.
2666    CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(),
2667                                           OMPD_unknown);
2668  }
2669}
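// EmitSections is shared by the 'sections' and 'parallel sections' directives
// (see EmitOMPSectionsDirective and EmitOMPParallelSectionsDirective below);
// the lowering is a static worksharing loop over section indices driving the
// switch emitted in BodyGen above.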
2670
2671void CodeGenFunction::EmitOMPSectionsDirective(const OMPSectionsDirective &S) {
2672  {
2673    OMPLexicalScope Scope(*this, S, OMPD_unknown);
2674    EmitSections(S);
2675  }
2676  // Emit an implicit barrier at the end.
2677  if (!S.getSingleClause<OMPNowaitClause>()) {
2678    CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(),
2679                                           OMPD_sections);
2680  }
2681}
2682
2683void CodeGenFunction::EmitOMPSectionDirective(const OMPSectionDirective &S) {
2684  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2685    CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
2686  };
2687  OMPLexicalScope Scope(*this, S, OMPD_unknown);
2688  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_section, CodeGen,
2689                                              S.hasCancel());
2690}
2691
2692void CodeGenFunction::EmitOMPSingleDirective(const OMPSingleDirective &S) {
2693  llvm::SmallVector<const Expr *, 8> CopyprivateVars;
2694  llvm::SmallVector<const Expr *, 8> DestExprs;
2695  llvm::SmallVector<const Expr *, 8> SrcExprs;
2696  llvm::SmallVector<const Expr *, 8> AssignmentOps;
2697  // Check if there are any 'copyprivate' clauses associated with this
2698  // 'single' construct.
2699  // Build a list of copyprivate variables along with helper expressions
2700  // (<source>, <destination>, <destination>=<source> expressions)
2701  for (const auto *C : S.getClausesOfKind<OMPCopyprivateClause>()) {
2702    CopyprivateVars.append(C->varlists().begin(), C->varlists().end());
2703    DestExprs.append(C->destination_exprs().begin(),
2704                     C->destination_exprs().end());
2705    SrcExprs.append(C->source_exprs().begin(), C->source_exprs().end());
2706    AssignmentOps.append(C->assignment_ops().begin(),
2707                         C->assignment_ops().end());
2708  }
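  // Illustrative example (not from the original source):
  //   #pragma omp single copyprivate(x)
  // uses the helper expressions collected above to broadcast the executing
  // thread's value of 'x' to the corresponding variables of the other threads.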
2709  // Emit code for 'single' region along with 'copyprivate' clauses
2710  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
2711    Action.Enter(CGF);
2712    OMPPrivateScope SingleScope(CGF);
2713    (void)CGF.EmitOMPFirstprivateClause(S, SingleScope);
2714    CGF.EmitOMPPrivateClause(S, SingleScope);
2715    (void)SingleScope.Privatize();
2716    CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
2717  };
2718  {
2719    OMPLexicalScope Scope(*this, S, OMPD_unknown);
2720    CGM.getOpenMPRuntime().emitSingleRegion(*this, CodeGen, S.getBeginLoc(),
2721                                            CopyprivateVars, DestExprs,
2722                                            SrcExprs, AssignmentOps);
2723  }
2724  // Emit an implicit barrier at the end (to avoid data race on firstprivate
2725  // init or if no 'nowait' clause was specified and no 'copyprivate' clause).
2726  if (!S.getSingleClause<OMPNowaitClause>() && CopyprivateVars.empty()) {
2727    CGM.getOpenMPRuntime().emitBarrierCall(
2728        *this, S.getBeginLoc(),
2729        S.getSingleClause<OMPNowaitClause>() ? OMPD_unknown : OMPD_single);
2730  }
2731}
2732
2733void CodeGenFunction::EmitOMPMasterDirective(const OMPMasterDirective &S) {
2734  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
2735    Action.Enter(CGF);
2736    CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
2737  };
2738  OMPLexicalScope Scope(*this, S, OMPD_unknown);
2739  CGM.getOpenMPRuntime().emitMasterRegion(*this, CodeGen, S.getBeginLoc());
2740}
2741
2742void CodeGenFunction::EmitOMPCriticalDirective(const OMPCriticalDirective &S) {
2743  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
2744    Action.Enter(CGF);
2745    CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
2746  };
2747  const Expr *Hint = nullptr;
2748  if (const auto *HintClause = S.getSingleClause<OMPHintClause>())
2749    Hint = HintClause->getHint();
2750  OMPLexicalScope Scope(*this, S, OMPD_unknown);
2751  CGM.getOpenMPRuntime().emitCriticalRegion(*this,
2752                                            S.getDirectiveName().getAsString(),
2753                                            CodeGen, S.getBeginLoc(), Hint);
2754}
2755
2756void CodeGenFunction::EmitOMPParallelForDirective(
2757    const OMPParallelForDirective &S) {
2758  // Emit directive as a combined directive that consists of two implicit
2759  // directives: 'parallel' with 'for' directive.
2760  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
2761    Action.Enter(CGF);
2762    OMPCancelStackRAII CancelRegion(CGF, OMPD_parallel_for, S.hasCancel());
2763    CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), emitForLoopBounds,
2764                               emitDispatchForLoopBounds);
2765  };
2766  emitCommonOMPParallelDirective(*this, S, OMPD_for, CodeGen,
2767                                 emitEmptyBoundParameters);
2768}
2769
2770void CodeGenFunction::EmitOMPParallelForSimdDirective(
2771    const OMPParallelForSimdDirective &S) {
2772  // Emit directive as a combined directive that consists of two implicit
2773  // directives: 'parallel' with 'for' directive.
2774  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
2775    Action.Enter(CGF);
2776    CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), emitForLoopBounds,
2777                               emitDispatchForLoopBounds);
2778  };
2779  emitCommonOMPParallelDirective(*this, S, OMPD_simd, CodeGen,
2780                                 emitEmptyBoundParameters);
2781}
2782
2783void CodeGenFunction::EmitOMPParallelSectionsDirective(
2784    const OMPParallelSectionsDirective &S) {
2785  // Emit directive as a combined directive that consists of two implicit
2786  // directives: 'parallel' with 'sections' directive.
2787  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
2788    Action.Enter(CGF);
2789    CGF.EmitSections(S);
2790  };
2791  emitCommonOMPParallelDirective(*this, S, OMPD_sections, CodeGen,
2792                                 emitEmptyBoundParameters);
2793}
2794
2795void CodeGenFunction::EmitOMPTaskBasedDirective(
2796    const OMPExecutableDirective &S, const OpenMPDirectiveKind CapturedRegion,
2797    const RegionCodeGenTy &BodyGen, const TaskGenTy &TaskGen,
2798    OMPTaskDataTy &Data) {
2799  // Emit outlined function for task construct.
2800  const CapturedStmt *CS = S.getCapturedStmt(CapturedRegion);
2801  auto I = CS->getCapturedDecl()->param_begin();
2802  auto PartId = std::next(I);
2803  auto TaskT = std::next(I, 4);
2804  // Check if the task is final
2805  if (const auto *Clause = S.getSingleClause<OMPFinalClause>()) {
2806    // If the condition constant folds and can be elided, try to avoid emitting
2807    // the condition and the dead arm of the if/else.
2808    const Expr *Cond = Clause->getCondition();
2809    bool CondConstant;
2810    if (ConstantFoldsToSimpleInteger(Cond, CondConstant))
2811      Data.Final.setInt(CondConstant);
2812    else
2813      Data.Final.setPointer(EvaluateExprAsBool(Cond));
2814  } else {
2815    // By default the task is not final.
2816    Data.Final.setInt(/*IntVal=*/false);
2817  }
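  // For example (illustrative), '#pragma omp task final(n > 100)' either
  // constant-folds the condition into Data.Final or stores the emitted boolean
  // value for the runtime to inspect.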
2818  // Check if the task has 'priority' clause.
2819  if (const auto *Clause = S.getSingleClause<OMPPriorityClause>()) {
2820    const Expr *Prio = Clause->getPriority();
2821    Data.Priority.setInt(/*IntVal=*/true);
2822    Data.Priority.setPointer(EmitScalarConversion(
2823        EmitScalarExpr(Prio), Prio->getType(),
2824        getContext().getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1),
2825        Prio->getExprLoc()));
2826  }
2827  // The first function argument for tasks is a thread id, the second one is a
2828  // part id (0 for tied tasks, >=0 for untied task).
2829  llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
2830  // Get list of private variables.
2831  for (const auto *C : S.getClausesOfKind<OMPPrivateClause>()) {
2832    auto IRef = C->varlist_begin();
2833    for (const Expr *IInit : C->private_copies()) {
2834      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
2835      if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
2836        Data.PrivateVars.push_back(*IRef);
2837        Data.PrivateCopies.push_back(IInit);
2838      }
2839      ++IRef;
2840    }
2841  }
2842  EmittedAsPrivate.clear();
2843  // Get list of firstprivate variables.
2844  for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) {
2845    auto IRef = C->varlist_begin();
2846    auto IElemInitRef = C->inits().begin();
2847    for (const Expr *IInit : C->private_copies()) {
2848      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
2849      if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
2850        Data.FirstprivateVars.push_back(*IRef);
2851        Data.FirstprivateCopies.push_back(IInit);
2852        Data.FirstprivateInits.push_back(*IElemInitRef);
2853      }
2854      ++IRef;
2855      ++IElemInitRef;
2856    }
2857  }
2858  // Get list of lastprivate variables (for taskloops).
2859  llvm::DenseMap<const VarDecl *, const DeclRefExpr *> LastprivateDstsOrigs;
2860  for (const auto *C : S.getClausesOfKind<OMPLastprivateClause>()) {
2861    auto IRef = C->varlist_begin();
2862    auto ID = C->destination_exprs().begin();
2863    for (const Expr *IInit : C->private_copies()) {
2864      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
2865      if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
2866        Data.LastprivateVars.push_back(*IRef);
2867        Data.LastprivateCopies.push_back(IInit);
2868      }
2869      LastprivateDstsOrigs.insert(
2870          {cast<VarDecl>(cast<DeclRefExpr>(*ID)->getDecl()),
2871           cast<DeclRefExpr>(*IRef)});
2872      ++IRef;
2873      ++ID;
2874    }
2875  }
2876  SmallVector<const Expr *, 4> LHSs;
2877  SmallVector<const Expr *, 4> RHSs;
2878  for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) {
2879    auto IPriv = C->privates().begin();
2880    auto IRed = C->reduction_ops().begin();
2881    auto ILHS = C->lhs_exprs().begin();
2882    auto IRHS = C->rhs_exprs().begin();
2883    for (const Expr *Ref : C->varlists()) {
2884      Data.ReductionVars.emplace_back(Ref);
2885      Data.ReductionCopies.emplace_back(*IPriv);
2886      Data.ReductionOps.emplace_back(*IRed);
2887      LHSs.emplace_back(*ILHS);
2888      RHSs.emplace_back(*IRHS);
2889      std::advance(IPriv, 1);
2890      std::advance(IRed, 1);
2891      std::advance(ILHS, 1);
2892      std::advance(IRHS, 1);
2893    }
2894  }
2895  Data.Reductions = CGM.getOpenMPRuntime().emitTaskReductionInit(
2896      *this, S.getBeginLoc(), LHSs, RHSs, Data);
2897  // Build list of dependences.
2898  for (const auto *C : S.getClausesOfKind<OMPDependClause>())
2899    for (const Expr *IRef : C->varlists())
2900      Data.Dependences.emplace_back(C->getDependencyKind(), IRef);
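  // Illustrative example: '#pragma omp task depend(in: a) depend(out: b)'
  // records one (kind, expression) pair per list item, which the runtime later
  // uses to order sibling tasks.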
2901  auto &&CodeGen = [&Data, &S, CS, &BodyGen, &LastprivateDstsOrigs,
2902                    CapturedRegion](CodeGenFunction &CGF,
2903                                    PrePostActionTy &Action) {
2904    // Set proper addresses for generated private copies.
2905    OMPPrivateScope Scope(CGF);
2906    if (!Data.PrivateVars.empty() || !Data.FirstprivateVars.empty() ||
2907        !Data.LastprivateVars.empty()) {
2908      llvm::FunctionType *CopyFnTy = llvm::FunctionType::get(
2909          CGF.Builder.getVoidTy(), {CGF.Builder.getInt8PtrTy()}, true);
2910      enum { PrivatesParam = 2, CopyFnParam = 3 };
2911      llvm::Value *CopyFn = CGF.Builder.CreateLoad(
2912          CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(CopyFnParam)));
2913      llvm::Value *PrivatesPtr = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(
2914          CS->getCapturedDecl()->getParam(PrivatesParam)));
2915      // Map privates.
2916      llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> PrivatePtrs;
2917      llvm::SmallVector<llvm::Value *, 16> CallArgs;
2918      CallArgs.push_back(PrivatesPtr);
2919      for (const Expr *E : Data.PrivateVars) {
2920        const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
2921        Address PrivatePtr = CGF.CreateMemTemp(
2922            CGF.getContext().getPointerType(E->getType()), ".priv.ptr.addr");
2923        PrivatePtrs.emplace_back(VD, PrivatePtr);
2924        CallArgs.push_back(PrivatePtr.getPointer());
2925      }
2926      for (const Expr *E : Data.FirstprivateVars) {
2927        const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
2928        Address PrivatePtr =
2929            CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()),
2930                              ".firstpriv.ptr.addr");
2931        PrivatePtrs.emplace_back(VD, PrivatePtr);
2932        CallArgs.push_back(PrivatePtr.getPointer());
2933      }
2934      for (const Expr *E : Data.LastprivateVars) {
2935        const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
2936        Address PrivatePtr =
2937            CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()),
2938                              ".lastpriv.ptr.addr");
2939        PrivatePtrs.emplace_back(VD, PrivatePtr);
2940        CallArgs.push_back(PrivatePtr.getPointer());
2941      }
2942      CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
2943          CGF, S.getBeginLoc(), {CopyFnTy, CopyFn}, CallArgs);
2944      for (const auto &Pair : LastprivateDstsOrigs) {
2945        const auto *OrigVD = cast<VarDecl>(Pair.second->getDecl());
2946        DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(OrigVD),
2947                        /*RefersToEnclosingVariableOrCapture=*/
2948                            CGF.CapturedStmtInfo->lookup(OrigVD) != nullptr,
2949                        Pair.second->getType(), VK_LValue,
2950                        Pair.second->getExprLoc());
2951        Scope.addPrivate(Pair.first, [&CGF, &DRE]() {
2952          return CGF.EmitLValue(&DRE).getAddress();
2953        });
2954      }
2955      for (const auto &Pair : PrivatePtrs) {
2956        Address Replacement(CGF.Builder.CreateLoad(Pair.second),
2957                            CGF.getContext().getDeclAlign(Pair.first));
2958        Scope.addPrivate(Pair.first, [Replacement]() { return Replacement; });
2959      }
2960    }
2961    if (Data.Reductions) {
2962      OMPLexicalScope LexScope(CGF, S, CapturedRegion);
2963      ReductionCodeGen RedCG(Data.ReductionVars, Data.ReductionCopies,
2964                             Data.ReductionOps);
2965      llvm::Value *ReductionsPtr = CGF.Builder.CreateLoad(
2966          CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(9)));
2967      for (unsigned Cnt = 0, E = Data.ReductionVars.size(); Cnt < E; ++Cnt) {
2968        RedCG.emitSharedLValue(CGF, Cnt);
2969        RedCG.emitAggregateType(CGF, Cnt);
2970        // FIXME: This must be removed once the runtime library is fixed.
2971        // Emit required threadprivate variables for
2972        // initializer/combiner/finalizer.
2973        CGF.CGM.getOpenMPRuntime().emitTaskReductionFixups(CGF, S.getBeginLoc(),
2974                                                           RedCG, Cnt);
2975        Address Replacement = CGF.CGM.getOpenMPRuntime().getTaskReductionItem(
2976            CGF, S.getBeginLoc(), ReductionsPtr, RedCG.getSharedLValue(Cnt));
2977        Replacement =
2978            Address(CGF.EmitScalarConversion(
2979                        Replacement.getPointer(), CGF.getContext().VoidPtrTy,
2980                        CGF.getContext().getPointerType(
2981                            Data.ReductionCopies[Cnt]->getType()),
2982                        Data.ReductionCopies[Cnt]->getExprLoc()),
2983                    Replacement.getAlignment());
2984        Replacement = RedCG.adjustPrivateAddress(CGF, Cnt, Replacement);
2985        Scope.addPrivate(RedCG.getBaseDecl(Cnt),
2986                         [Replacement]() { return Replacement; });
2987      }
2988    }
2989    // Privatize all private variables except for in_reduction items.
2990    (void)Scope.Privatize();
2991    SmallVector<const Expr *, 4> InRedVars;
2992    SmallVector<const Expr *, 4> InRedPrivs;
2993    SmallVector<const Expr *, 4> InRedOps;
2994    SmallVector<const Expr *, 4> TaskgroupDescriptors;
2995    for (const auto *C : S.getClausesOfKind<OMPInReductionClause>()) {
2996      auto IPriv = C->privates().begin();
2997      auto IRed = C->reduction_ops().begin();
2998      auto ITD = C->taskgroup_descriptors().begin();
2999      for (const Expr *Ref : C->varlists()) {
3000        InRedVars.emplace_back(Ref);
3001        InRedPrivs.emplace_back(*IPriv);
3002        InRedOps.emplace_back(*IRed);
3003        TaskgroupDescriptors.emplace_back(*ITD);
3004        std::advance(IPriv, 1);
3005        std::advance(IRed, 1);
3006        std::advance(ITD, 1);
3007      }
3008    }
3009    // Privatize in_reduction items here, because taskgroup descriptors must be
3010    // privatized earlier.
3011    OMPPrivateScope InRedScope(CGF);
3012    if (!InRedVars.empty()) {
3013      ReductionCodeGen RedCG(InRedVars, InRedPrivs, InRedOps);
3014      for (unsigned Cnt = 0, E = InRedVars.size(); Cnt < E; ++Cnt) {
3015        RedCG.emitSharedLValue(CGF, Cnt);
3016        RedCG.emitAggregateType(CGF, Cnt);
3017        // The taskgroup descriptor variable is always implicit firstprivate and
3018        // privatized already during processing of the firstprivates.
3019        // FIXME: This must be removed once the runtime library is fixed.
3020        // Emit required threadprivate variables for
3021        // initializer/combiner/finalizer.
3022        CGF.CGM.getOpenMPRuntime().emitTaskReductionFixups(CGF, S.getBeginLoc(),
3023                                                           RedCG, Cnt);
3024        llvm::Value *ReductionsPtr =
3025            CGF.EmitLoadOfScalar(CGF.EmitLValue(TaskgroupDescriptors[Cnt]),
3026                                 TaskgroupDescriptors[Cnt]->getExprLoc());
3027        Address Replacement = CGF.CGM.getOpenMPRuntime().getTaskReductionItem(
3028            CGF, S.getBeginLoc(), ReductionsPtr, RedCG.getSharedLValue(Cnt));
3029        Replacement = Address(
3030            CGF.EmitScalarConversion(
3031                Replacement.getPointer(), CGF.getContext().VoidPtrTy,
3032                CGF.getContext().getPointerType(InRedPrivs[Cnt]->getType()),
3033                InRedPrivs[Cnt]->getExprLoc()),
3034            Replacement.getAlignment());
3035        Replacement = RedCG.adjustPrivateAddress(CGF, Cnt, Replacement);
3036        InRedScope.addPrivate(RedCG.getBaseDecl(Cnt),
3037                              [Replacement]() { return Replacement; });
3038      }
3039    }
3040    (void)InRedScope.Privatize();
3041
3042    Action.Enter(CGF);
3043    BodyGen(CGF);
3044  };
3045  llvm::Function *OutlinedFn = CGM.getOpenMPRuntime().emitTaskOutlinedFunction(
3046      S, *I, *PartId, *TaskT, S.getDirectiveKind(), CodeGen, Data.Tied,
3047      Data.NumberOfParts);
3048  OMPLexicalScope Scope(*this, S);
3049  TaskGen(*this, OutlinedFn, Data);
3050}
3051
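/// Helper used by the target-task codegen below: it synthesizes an original
/// declaration, a private copy whose initializer reads an element-typed init
/// reference, and records the triple in Data as an implicit firstprivate.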
3052static ImplicitParamDecl *
3053createImplicitFirstprivateForType(ASTContext &C, OMPTaskDataTy &Data,
3054                                  QualType Ty, CapturedDecl *CD,
3055                                  SourceLocation Loc) {
3056  auto *OrigVD = ImplicitParamDecl::Create(C, CD, Loc, /*Id=*/nullptr, Ty,
3057                                           ImplicitParamDecl::Other);
3058  auto *OrigRef = DeclRefExpr::Create(
3059      C, NestedNameSpecifierLoc(), SourceLocation(), OrigVD,
3060      /*RefersToEnclosingVariableOrCapture=*/false, Loc, Ty, VK_LValue);
3061  auto *PrivateVD = ImplicitParamDecl::Create(C, CD, Loc, /*Id=*/nullptr, Ty,
3062                                              ImplicitParamDecl::Other);
3063  auto *PrivateRef = DeclRefExpr::Create(
3064      C, NestedNameSpecifierLoc(), SourceLocation(), PrivateVD,
3065      /*RefersToEnclosingVariableOrCapture=*/false, Loc, Ty, VK_LValue);
3066  QualType ElemType = C.getBaseElementType(Ty);
3067  auto *InitVD = ImplicitParamDecl::Create(C, CD, Loc, /*Id=*/nullptr, ElemType,
3068                                           ImplicitParamDecl::Other);
3069  auto *InitRef = DeclRefExpr::Create(
3070      C, NestedNameSpecifierLoc(), SourceLocation(), InitVD,
3071      /*RefersToEnclosingVariableOrCapture=*/false, Loc, ElemType, VK_LValue);
3072  PrivateVD->setInitStyle(VarDecl::CInit);
3073  PrivateVD->setInit(ImplicitCastExpr::Create(C, ElemType, CK_LValueToRValue,
3074                                              InitRef, /*BasePath=*/nullptr,
3075                                              VK_RValue));
3076  Data.FirstprivateVars.emplace_back(OrigRef);
3077  Data.FirstprivateCopies.emplace_back(PrivateRef);
3078  Data.FirstprivateInits.emplace_back(InitRef);
3079  return OrigVD;
3080}
3081
3082void CodeGenFunction::EmitOMPTargetTaskBasedDirective(
3083    const OMPExecutableDirective &S, const RegionCodeGenTy &BodyGen,
3084    OMPTargetDataInfo &InputInfo) {
3085  // Emit outlined function for task construct.
3086  const CapturedStmt *CS = S.getCapturedStmt(OMPD_task);
3087  Address CapturedStruct = GenerateCapturedStmtArgument(*CS);
3088  QualType SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
3089  auto I = CS->getCapturedDecl()->param_begin();
3090  auto PartId = std::next(I);
3091  auto TaskT = std::next(I, 4);
3092  OMPTaskDataTy Data;
3093  // The task is not final.
3094  Data.Final.setInt(/*IntVal=*/false);
3095  // Get list of firstprivate variables.
3096  for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) {
3097    auto IRef = C->varlist_begin();
3098    auto IElemInitRef = C->inits().begin();
3099    for (auto *IInit : C->private_copies()) {
3100      Data.FirstprivateVars.push_back(*IRef);
3101      Data.FirstprivateCopies.push_back(IInit);
3102      Data.FirstprivateInits.push_back(*IElemInitRef);
3103      ++IRef;
3104      ++IElemInitRef;
3105    }
3106  }
3107  OMPPrivateScope TargetScope(*this);
3108  VarDecl *BPVD = nullptr;
3109  VarDecl *PVD = nullptr;
3110  VarDecl *SVD = nullptr;
3111  if (InputInfo.NumberOfTargetItems > 0) {
3112    auto *CD = CapturedDecl::Create(
3113        getContext(), getContext().getTranslationUnitDecl(), /*NumParams=*/0);
3114    llvm::APInt ArrSize(/*numBits=*/32, InputInfo.NumberOfTargetItems);
3115    QualType BaseAndPointersType = getContext().getConstantArrayType(
3116        getContext().VoidPtrTy, ArrSize, ArrayType::Normal,
3117        /*IndexTypeQuals=*/0);
3118    BPVD = createImplicitFirstprivateForType(
3119        getContext(), Data, BaseAndPointersType, CD, S.getBeginLoc());
3120    PVD = createImplicitFirstprivateForType(
3121        getContext(), Data, BaseAndPointersType, CD, S.getBeginLoc());
3122    QualType SizesType = getContext().getConstantArrayType(
3123        getContext().getSizeType(), ArrSize, ArrayType::Normal,
3124        /*IndexTypeQuals=*/0);
3125    SVD = createImplicitFirstprivateForType(getContext(), Data, SizesType, CD,
3126                                            S.getBeginLoc());
3127    TargetScope.addPrivate(
3128        BPVD, [&InputInfo]() { return InputInfo.BasePointersArray; });
3129    TargetScope.addPrivate(PVD,
3130                           [&InputInfo]() { return InputInfo.PointersArray; });
3131    TargetScope.addPrivate(SVD,
3132                           [&InputInfo]() { return InputInfo.SizesArray; });
3133  }
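  // The three implicit firstprivates above capture the offloading base
  // pointers, pointers and sizes arrays, so the generated task operates on its
  // own copy of the mapping information.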
3134  (void)TargetScope.Privatize();
3135  // Build list of dependences.
3136  for (const auto *C : S.getClausesOfKind<OMPDependClause>())
3137    for (const Expr *IRef : C->varlists())
3138      Data.Dependences.emplace_back(C->getDependencyKind(), IRef);
3139  auto &&CodeGen = [&Data, &S, CS, &BodyGen, BPVD, PVD, SVD,
3140                    &InputInfo](CodeGenFunction &CGF, PrePostActionTy &Action) {
3141    // Set proper addresses for generated private copies.
3142    OMPPrivateScope Scope(CGF);
3143    if (!Data.FirstprivateVars.empty()) {
3144      llvm::FunctionType *CopyFnTy = llvm::FunctionType::get(
3145          CGF.Builder.getVoidTy(), {CGF.Builder.getInt8PtrTy()}, true);
3146      enum { PrivatesParam = 2, CopyFnParam = 3 };
3147      llvm::Value *CopyFn = CGF.Builder.CreateLoad(
3148          CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(CopyFnParam)));
3149      llvm::Value *PrivatesPtr = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(
3150          CS->getCapturedDecl()->getParam(PrivatesParam)));
3151      // Map privates.
3152      llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> PrivatePtrs;
3153      llvm::SmallVector<llvm::Value *, 16> CallArgs;
3154      CallArgs.push_back(PrivatesPtr);
3155      for (const Expr *E : Data.FirstprivateVars) {
3156        const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
3157        Address PrivatePtr =
3158            CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()),
3159                              ".firstpriv.ptr.addr");
3160        PrivatePtrs.emplace_back(VD, PrivatePtr);
3161        CallArgs.push_back(PrivatePtr.getPointer());
3162      }
3163      CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
3164          CGF, S.getBeginLoc(), {CopyFnTy, CopyFn}, CallArgs);
3165      for (const auto &Pair : PrivatePtrs) {
3166        Address Replacement(CGF.Builder.CreateLoad(Pair.second),
3167                            CGF.getContext().getDeclAlign(Pair.first));
3168        Scope.addPrivate(Pair.first, [Replacement]() { return Replacement; });
3169      }
3170    }
3171    // Privatize all private variables except for in_reduction items.
3172    (void)Scope.Privatize();
3173    if (InputInfo.NumberOfTargetItems > 0) {
3174      InputInfo.BasePointersArray = CGF.Builder.CreateConstArrayGEP(
3175          CGF.GetAddrOfLocalVar(BPVD), /*Index=*/0);
3176      InputInfo.PointersArray = CGF.Builder.CreateConstArrayGEP(
3177          CGF.GetAddrOfLocalVar(PVD), /*Index=*/0);
3178      InputInfo.SizesArray = CGF.Builder.CreateConstArrayGEP(
3179          CGF.GetAddrOfLocalVar(SVD), /*Index=*/0);
3180    }
3181
3182    Action.Enter(CGF);
3183    OMPLexicalScope LexScope(CGF, S, OMPD_task, /*EmitPreInitStmt=*/false);
3184    BodyGen(CGF);
3185  };
3186  llvm::Function *OutlinedFn = CGM.getOpenMPRuntime().emitTaskOutlinedFunction(
3187      S, *I, *PartId, *TaskT, S.getDirectiveKind(), CodeGen, /*Tied=*/true,
3188      Data.NumberOfParts);
3189  llvm::APInt TrueOrFalse(32, S.hasClausesOfKind<OMPNowaitClause>() ? 1 : 0);
3190  IntegerLiteral IfCond(getContext(), TrueOrFalse,
3191                        getContext().getIntTypeForBitwidth(32, /*Signed=*/0),
3192                        SourceLocation());
3193
3194  CGM.getOpenMPRuntime().emitTaskCall(*this, S.getBeginLoc(), S, OutlinedFn,
3195                                      SharedsTy, CapturedStruct, &IfCond, Data);
3196}
3197
3198void CodeGenFunction::EmitOMPTaskDirective(const OMPTaskDirective &S) {
3199  // Emit outlined function for task construct.
3200  const CapturedStmt *CS = S.getCapturedStmt(OMPD_task);
3201  Address CapturedStruct = GenerateCapturedStmtArgument(*CS);
3202  QualType SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
3203  const Expr *IfCond = nullptr;
3204  for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
3205    if (C->getNameModifier() == OMPD_unknown ||
3206        C->getNameModifier() == OMPD_task) {
3207      IfCond = C->getCondition();
3208      break;
3209    }
3210  }
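  // Illustrative example: '#pragma omp task if(task: cond)' (or plain
  // 'if(cond)') selects the condition passed to emitTaskCall, which decides
  // between deferred and undeferred execution of the task.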
3211
3212  OMPTaskDataTy Data;
3213  // Check if we should emit tied or untied task.
3214  Data.Tied = !S.getSingleClause<OMPUntiedClause>();
3215  auto &&BodyGen = [CS](CodeGenFunction &CGF, PrePostActionTy &) {
3216    CGF.EmitStmt(CS->getCapturedStmt());
3217  };
3218  auto &&TaskGen = [&S, SharedsTy, CapturedStruct,
3219                    IfCond](CodeGenFunction &CGF, llvm::Function *OutlinedFn,
3220                            const OMPTaskDataTy &Data) {
3221    CGF.CGM.getOpenMPRuntime().emitTaskCall(CGF, S.getBeginLoc(), S, OutlinedFn,
3222                                            SharedsTy, CapturedStruct, IfCond,
3223                                            Data);
3224  };
3225  EmitOMPTaskBasedDirective(S, OMPD_task, BodyGen, TaskGen, Data);
3226}
3227
3228void CodeGenFunction::EmitOMPTaskyieldDirective(
3229    const OMPTaskyieldDirective &S) {
3230  CGM.getOpenMPRuntime().emitTaskyieldCall(*this, S.getBeginLoc());
3231}
3232
3233void CodeGenFunction::EmitOMPBarrierDirective(const OMPBarrierDirective &S) {
3234  CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), OMPD_barrier);
3235}
3236
3237void CodeGenFunction::EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S) {
3238  CGM.getOpenMPRuntime().emitTaskwaitCall(*this, S.getBeginLoc());
3239}
3240
3241void CodeGenFunction::EmitOMPTaskgroupDirective(
3242    const OMPTaskgroupDirective &S) {
3243  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
3244    Action.Enter(CGF);
3245    if (const Expr *E = S.getReductionRef()) {
3246      SmallVector<const Expr *, 4> LHSs;
3247      SmallVector<const Expr *, 4> RHSs;
3248      OMPTaskDataTy Data;
3249      for (const auto *C : S.getClausesOfKind<OMPTaskReductionClause>()) {
3250        auto IPriv = C->privates().begin();
3251        auto IRed = C->reduction_ops().begin();
3252        auto ILHS = C->lhs_exprs().begin();
3253        auto IRHS = C->rhs_exprs().begin();
3254        for (const Expr *Ref : C->varlists()) {
3255          Data.ReductionVars.emplace_back(Ref);
3256          Data.ReductionCopies.emplace_back(*IPriv);
3257          Data.ReductionOps.emplace_back(*IRed);
3258          LHSs.emplace_back(*ILHS);
3259          RHSs.emplace_back(*IRHS);
3260          std::advance(IPriv, 1);
3261          std::advance(IRed, 1);
3262          std::advance(ILHS, 1);
3263          std::advance(IRHS, 1);
3264        }
3265      }
3266      llvm::Value *ReductionDesc =
3267          CGF.CGM.getOpenMPRuntime().emitTaskReductionInit(CGF, S.getBeginLoc(),
3268                                                           LHSs, RHSs, Data);
3269      const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
3270      CGF.EmitVarDecl(*VD);
3271      CGF.EmitStoreOfScalar(ReductionDesc, CGF.GetAddrOfLocalVar(VD),
3272                            /*Volatile=*/false, E->getType());
3273    }
3274    CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
3275  };
3276  OMPLexicalScope Scope(*this, S, OMPD_unknown);
3277  CGM.getOpenMPRuntime().emitTaskgroupRegion(*this, CodeGen, S.getBeginLoc());
3278}
3279
3280void CodeGenFunction::EmitOMPFlushDirective(const OMPFlushDirective &S) {
3281  CGM.getOpenMPRuntime().emitFlush(
3282      *this,
3283      [&S]() -> ArrayRef<const Expr *> {
3284        if (const auto *FlushClause = S.getSingleClause<OMPFlushClause>())
3285          return llvm::makeArrayRef(FlushClause->varlist_begin(),
3286                                    FlushClause->varlist_end());
3287        return llvm::None;
3288      }(),
3289      S.getBeginLoc());
3290}
3291
3292void CodeGenFunction::EmitOMPDistributeLoop(const OMPLoopDirective &S,
3293                                            const CodeGenLoopTy &CodeGenLoop,
3294                                            Expr *IncExpr) {
3295  // Emit the loop iteration variable.
3296  const auto *IVExpr = cast<DeclRefExpr>(S.getIterationVariable());
3297  const auto *IVDecl = cast<VarDecl>(IVExpr->getDecl());
3298  EmitVarDecl(*IVDecl);
3299
3300  // Emit the iterations count variable.
3301  // If it is not a variable, Sema decided to calculate iterations count on each
3302  // iteration (e.g., it is foldable into a constant).
3303  if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
3304    EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
3305    // Emit calculation of the iterations count.
3306    EmitIgnoredExpr(S.getCalcLastIteration());
3307  }
3308
3309  CGOpenMPRuntime &RT = CGM.getOpenMPRuntime();
3310
3311  bool HasLastprivateClause = false;
3312  // Check pre-condition.
3313  {
3314    OMPLoopScope PreInitScope(*this, S);
3315    // Skip the entire loop if we don't meet the precondition.
3316    // If the condition constant folds and can be elided, avoid emitting the
3317    // whole loop.
3318    bool CondConstant;
3319    llvm::BasicBlock *ContBlock = nullptr;
3320    if (ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
3321      if (!CondConstant)
3322        return;
3323    } else {
3324      llvm::BasicBlock *ThenBlock = createBasicBlock("omp.precond.then");
3325      ContBlock = createBasicBlock("omp.precond.end");
3326      emitPreCond(*this, S, S.getPreCond(), ThenBlock, ContBlock,
3327                  getProfileCount(&S));
3328      EmitBlock(ThenBlock);
3329      incrementProfileCounter(&S);
3330    }
3331
3332    emitAlignedClause(*this, S);
3333    // Emit 'then' code.
3334    {
3335      // Emit helper vars inits.
3336
3337      LValue LB = EmitOMPHelperVar(
3338          *this, cast<DeclRefExpr>(
3339                     (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
3340                          ? S.getCombinedLowerBoundVariable()
3341                          : S.getLowerBoundVariable())));
3342      LValue UB = EmitOMPHelperVar(
3343          *this, cast<DeclRefExpr>(
3344                     (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
3345                          ? S.getCombinedUpperBoundVariable()
3346                          : S.getUpperBoundVariable())));
3347      LValue ST =
3348          EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getStrideVariable()));
3349      LValue IL =
3350          EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getIsLastIterVariable()));
3351
3352      OMPPrivateScope LoopScope(*this);
3353      if (EmitOMPFirstprivateClause(S, LoopScope)) {
3354        // Emit implicit barrier to synchronize threads and avoid data races
3355        // on initialization of firstprivate variables and post-update of
3356        // lastprivate variables.
3357        CGM.getOpenMPRuntime().emitBarrierCall(
3358            *this, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false,
3359            /*ForceSimpleCall=*/true);
3360      }
3361      EmitOMPPrivateClause(S, LoopScope);
3362      if (isOpenMPSimdDirective(S.getDirectiveKind()) &&
3363          !isOpenMPParallelDirective(S.getDirectiveKind()) &&
3364          !isOpenMPTeamsDirective(S.getDirectiveKind()))
3365        EmitOMPReductionClauseInit(S, LoopScope);
3366      HasLastprivateClause = EmitOMPLastprivateClauseInit(S, LoopScope);
3367      EmitOMPPrivateLoopCounters(S, LoopScope);
3368      (void)LoopScope.Privatize();
3369      if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
3370        CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(*this, S);
3371
3372      // Detect the distribute schedule kind and chunk.
3373      llvm::Value *Chunk = nullptr;
3374      OpenMPDistScheduleClauseKind ScheduleKind = OMPC_DIST_SCHEDULE_unknown;
3375      if (const auto *C = S.getSingleClause<OMPDistScheduleClause>()) {
3376        ScheduleKind = C->getDistScheduleKind();
3377        if (const Expr *Ch = C->getChunkSize()) {
3378          Chunk = EmitScalarExpr(Ch);
3379          Chunk = EmitScalarConversion(Chunk, Ch->getType(),
3380                                       S.getIterationVariable()->getType(),
3381                                       S.getBeginLoc());
3382        }
3383      } else {
3384        // Default behaviour for dist_schedule clause.
3385        CGM.getOpenMPRuntime().getDefaultDistScheduleAndChunk(
3386            *this, S, ScheduleKind, Chunk);
3387      }
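      // Illustrative example (not from the original source): with
      //   #pragma omp distribute dist_schedule(static, 8)
      // ScheduleKind is OMPC_DIST_SCHEDULE_static and Chunk evaluates to 8;
      // without the clause the runtime default chosen above is used.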
3388      const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
3389      const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
3390
3391      // OpenMP [2.10.8, distribute Construct, Description]
3392      // If dist_schedule is specified, kind must be static. If specified,
3393      // iterations are divided into chunks of size chunk_size, chunks are
3394      // assigned to the teams of the league in a round-robin fashion in the
3395      // order of the team number. When no chunk_size is specified, the
3396      // iteration space is divided into chunks that are approximately equal
3397      // in size, and at most one chunk is distributed to each team of the
3398      // league. The size of the chunks is unspecified in this case.
3399      bool StaticChunked = RT.isStaticChunked(
3400          ScheduleKind, /* Chunked */ Chunk != nullptr) &&
3401          isOpenMPLoopBoundSharingDirective(S.getDirectiveKind());
3402      if (RT.isStaticNonchunked(ScheduleKind,
3403                                /* Chunked */ Chunk != nullptr) ||
3404          StaticChunked) {
3405        if (isOpenMPSimdDirective(S.getDirectiveKind()))
3406          EmitOMPSimdInit(S, /*IsMonotonic=*/true);
3407        CGOpenMPRuntime::StaticRTInput StaticInit(
3408            IVSize, IVSigned, /* Ordered = */ false, IL.getAddress(),
3409            LB.getAddress(), UB.getAddress(), ST.getAddress(),
3410            StaticChunked ? Chunk : nullptr);
3411        RT.emitDistributeStaticInit(*this, S.getBeginLoc(), ScheduleKind,
3412                                    StaticInit);
3413        JumpDest LoopExit =
3414            getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit"));
3415        // UB = min(UB, GlobalUB);
3416        EmitIgnoredExpr(isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
3417                            ? S.getCombinedEnsureUpperBound()
3418                            : S.getEnsureUpperBound());
3419        // IV = LB;
3420        EmitIgnoredExpr(isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
3421                            ? S.getCombinedInit()
3422                            : S.getInit());
3423
3424        const Expr *Cond =
3425            isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
3426                ? S.getCombinedCond()
3427                : S.getCond();
3428
3429        if (StaticChunked)
3430          Cond = S.getCombinedDistCond();
3431
3432        // For static unchunked schedules generate:
3433        //
3434        //  1. For distribute alone, codegen
3435        //    while (idx <= UB) {
3436        //      BODY;
3437        //      ++idx;
3438        //    }
3439        //
3440        //  2. When combined with 'for' (e.g. as in 'distribute parallel for')
3441        //    while (idx <= UB) {
3442        //      <CodeGen rest of pragma>(LB, UB);
3443        //      idx += ST;
3444        //    }
3445        //
3446        // For static chunk one schedule generate:
3447        //
3448        // while (IV <= GlobalUB) {
3449        //   <CodeGen rest of pragma>(LB, UB);
3450        //   LB += ST;
3451        //   UB += ST;
3452        //   UB = min(UB, GlobalUB);
3453        //   IV = LB;
3454        // }
3455        //
3456        EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), Cond, IncExpr,
3457                         [&S, LoopExit, &CodeGenLoop](CodeGenFunction &CGF) {
3458                           CodeGenLoop(CGF, S, LoopExit);
3459                         },
3460                         [&S, StaticChunked](CodeGenFunction &CGF) {
3461                           if (StaticChunked) {
3462                             CGF.EmitIgnoredExpr(S.getCombinedNextLowerBound());
3463                             CGF.EmitIgnoredExpr(S.getCombinedNextUpperBound());
3464                             CGF.EmitIgnoredExpr(S.getCombinedEnsureUpperBound());
3465                             CGF.EmitIgnoredExpr(S.getCombinedInit());
3466                           }
3467                         });
3468        EmitBlock(LoopExit.getBlock());
3469        // Tell the runtime we are done.
3470        RT.emitForStaticFinish(*this, S.getBeginLoc(), S.getDirectiveKind());
3471      } else {
3472        // Emit the outer loop, which requests its work chunk [LB..UB] from
3473        // runtime and runs the inner loop to process it.
3474        const OMPLoopArguments LoopArguments = {
3475            LB.getAddress(), UB.getAddress(), ST.getAddress(), IL.getAddress(),
3476            Chunk};
3477        EmitOMPDistributeOuterLoop(ScheduleKind, S, LoopScope, LoopArguments,
3478                                   CodeGenLoop);
3479      }
3480      if (isOpenMPSimdDirective(S.getDirectiveKind())) {
3481        EmitOMPSimdFinal(S, [IL, &S](CodeGenFunction &CGF) {
3482          return CGF.Builder.CreateIsNotNull(
3483              CGF.EmitLoadOfScalar(IL, S.getBeginLoc()));
3484        });
3485      }
3486      if (isOpenMPSimdDirective(S.getDirectiveKind()) &&
3487          !isOpenMPParallelDirective(S.getDirectiveKind()) &&
3488          !isOpenMPTeamsDirective(S.getDirectiveKind())) {
3489        EmitOMPReductionClauseFinal(S, OMPD_simd);
3490        // Emit post-update of the reduction variables if IsLastIter != 0.
3491        emitPostUpdateForReductionClause(
3492            *this, S, [IL, &S](CodeGenFunction &CGF) {
3493              return CGF.Builder.CreateIsNotNull(
3494                  CGF.EmitLoadOfScalar(IL, S.getBeginLoc()));
3495            });
3496      }
3497      // Emit final copy of the lastprivate variables if IsLastIter != 0.
3498      if (HasLastprivateClause) {
3499        EmitOMPLastprivateClauseFinal(
3500            S, /*NoFinals=*/false,
3501            Builder.CreateIsNotNull(EmitLoadOfScalar(IL, S.getBeginLoc())));
3502      }
3503    }
3504
3505    // We're now done with the loop, so jump to the continuation block.
3506    if (ContBlock) {
3507      EmitBranch(ContBlock);
3508      EmitBlock(ContBlock, true);
3509    }
3510  }
3511}
3512
3513void CodeGenFunction::EmitOMPDistributeDirective(
3514    const OMPDistributeDirective &S) {
3515  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
3516    CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc());
3517  };
3518  OMPLexicalScope Scope(*this, S, OMPD_unknown);
3519  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen);
3520}
3521
3522static llvm::Function *emitOutlinedOrderedFunction(CodeGenModule &CGM,
3523                                                   const CapturedStmt *S) {
3524  CodeGenFunction CGF(CGM, /*suppressNewContext=*/true);
3525  CodeGenFunction::CGCapturedStmtInfo CapStmtInfo;
3526  CGF.CapturedStmtInfo = &CapStmtInfo;
3527  llvm::Function *Fn = CGF.GenerateOpenMPCapturedStmtFunction(*S);
3528  Fn->setDoesNotRecurse();
3529  return Fn;
3530}
3531
3532void CodeGenFunction::EmitOMPOrderedDirective(const OMPOrderedDirective &S) {
3533  if (S.hasClausesOfKind<OMPDependClause>()) {
3534    assert(!S.getAssociatedStmt() &&
3535           "No associated statement must be in ordered depend construct.");
3536    for (const auto *DC : S.getClausesOfKind<OMPDependClause>())
3537      CGM.getOpenMPRuntime().emitDoacrossOrdered(*this, DC);
3538    return;
3539  }
3540  const auto *C = S.getSingleClause<OMPSIMDClause>();
3541  auto &&CodeGen = [&S, C, this](CodeGenFunction &CGF,
3542                                 PrePostActionTy &Action) {
3543    const CapturedStmt *CS = S.getInnermostCapturedStmt();
3544    if (C) {
3545      llvm::SmallVector<llvm::Value *, 16> CapturedVars;
3546      CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars);
3547      llvm::Function *OutlinedFn = emitOutlinedOrderedFunction(CGM, CS);
3548      CGM.getOpenMPRuntime().emitOutlinedFunctionCall(CGF, S.getBeginLoc(),
3549                                                      OutlinedFn, CapturedVars);
3550    } else {
3551      Action.Enter(CGF);
3552      CGF.EmitStmt(CS->getCapturedStmt());
3553    }
3554  };
3555  OMPLexicalScope Scope(*this, S, OMPD_unknown);
3556  CGM.getOpenMPRuntime().emitOrderedRegion(*this, CodeGen, S.getBeginLoc(), !C);
3557}
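// Illustrative source forms dispatched above:
//
//   #pragma omp ordered depend(source)      // depend clauses only: stand-alone
//   #pragma omp ordered depend(sink: i-1)   // directive, lowered through
//                                           // emitDoacrossOrdered.
//   #pragma omp ordered simd                // simd clause: the body is outlined
//     { ... }                               // and called through the outlined
//                                           // function.
//   #pragma omp ordered                     // no clause: the body is emitted
//     { ... }                               // inside a threads ordered region.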
3558
3559static llvm::Value *convertToScalarValue(CodeGenFunction &CGF, RValue Val,
3560                                         QualType SrcType, QualType DestType,
3561                                         SourceLocation Loc) {
3562  assert(CGF.hasScalarEvaluationKind(DestType) &&
3563         "DestType must have scalar evaluation kind.");
3564  assert(!Val.isAggregate() && "Must be a scalar or complex.");
3565  return Val.isScalar() ? CGF.EmitScalarConversion(Val.getScalarVal(), SrcType,
3566                                                   DestType, Loc)
3567                        : CGF.EmitComplexToScalarConversion(
3568                              Val.getComplexVal(), SrcType, DestType, Loc);
3569}
3570
3571static CodeGenFunction::ComplexPairTy
3572convertToComplexValue(CodeGenFunction &CGF, RValue Val, QualType SrcType,
3573                      QualType DestType, SourceLocation Loc) {
3574  assert(CGF.getEvaluationKind(DestType) == TEK_Complex &&
3575         "DestType must have complex evaluation kind.");
3576  CodeGenFunction::ComplexPairTy ComplexVal;
3577  if (Val.isScalar()) {
3578    // Convert the input element to the element type of the complex.
3579    QualType DestElementType =
3580        DestType->castAs<ComplexType>()->getElementType();
3581    llvm::Value *ScalarVal = CGF.EmitScalarConversion(
3582        Val.getScalarVal(), SrcType, DestElementType, Loc);
3583    ComplexVal = CodeGenFunction::ComplexPairTy(
3584        ScalarVal, llvm::Constant::getNullValue(ScalarVal->getType()));
3585  } else {
3586    assert(Val.isComplex() && "Must be a scalar or complex.");
3587    QualType SrcElementType = SrcType->castAs<ComplexType>()->getElementType();
3588    QualType DestElementType =
3589        DestType->castAs<ComplexType>()->getElementType();
3590    ComplexVal.first = CGF.EmitScalarConversion(
3591        Val.getComplexVal().first, SrcElementType, DestElementType, Loc);
3592    ComplexVal.second = CGF.EmitScalarConversion(
3593        Val.getComplexVal().second, SrcElementType, DestElementType, Loc);
3594  }
3595  return ComplexVal;
3596}
3597
3598static void emitSimpleAtomicStore(CodeGenFunction &CGF, bool IsSeqCst,
3599                                  LValue LVal, RValue RVal) {
3600  if (LVal.isGlobalReg()) {
3601    CGF.EmitStoreThroughGlobalRegLValue(RVal, LVal);
3602  } else {
3603    CGF.EmitAtomicStore(RVal, LVal,
3604                        IsSeqCst ? llvm::AtomicOrdering::SequentiallyConsistent
3605                                 : llvm::AtomicOrdering::Monotonic,
3606                        LVal.isVolatile(), /*IsInit=*/false);
3607  }
3608}
3609
3610void CodeGenFunction::emitOMPSimpleStore(LValue LVal, RValue RVal,
3611                                         QualType RValTy, SourceLocation Loc) {
3612  switch (getEvaluationKind(LVal.getType())) {
3613  case TEK_Scalar:
3614    EmitStoreThroughLValue(RValue::get(convertToScalarValue(
3615                               *this, RVal, RValTy, LVal.getType(), Loc)),
3616                           LVal);
3617    break;
3618  case TEK_Complex:
3619    EmitStoreOfComplex(
3620        convertToComplexValue(*this, RVal, RValTy, LVal.getType(), Loc), LVal,
3621        /*isInit=*/false);
3622    break;
3623  case TEK_Aggregate:
3624    llvm_unreachable("Must be a scalar or complex.");
3625  }
3626}
3627
3628static void emitOMPAtomicReadExpr(CodeGenFunction &CGF, bool IsSeqCst,
3629                                  const Expr *X, const Expr *V,
3630                                  SourceLocation Loc) {
3631  // v = x;
3632  assert(V->isLValue() && "V of 'omp atomic read' is not lvalue");
3633  assert(X->isLValue() && "X of 'omp atomic read' is not lvalue");
3634  LValue XLValue = CGF.EmitLValue(X);
3635  LValue VLValue = CGF.EmitLValue(V);
3636  RValue Res = XLValue.isGlobalReg()
3637                   ? CGF.EmitLoadOfLValue(XLValue, Loc)
3638                   : CGF.EmitAtomicLoad(
3639                         XLValue, Loc,
3640                         IsSeqCst ? llvm::AtomicOrdering::SequentiallyConsistent
3641                                  : llvm::AtomicOrdering::Monotonic,
3642                         XLValue.isVolatile());
3643  // OpenMP, 2.12.6, atomic Construct
3644  // Any atomic construct with a seq_cst clause forces the atomically
3645  // performed operation to include an implicit flush operation without a
3646  // list.
3647  if (IsSeqCst)
3648    CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
3649  CGF.emitOMPSimpleStore(VLValue, Res, X->getType().getNonReferenceType(), Loc);
3650}
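// Illustrative input for the read form handled above:
//
//   #pragma omp atomic read seq_cst
//     v = x;
//
// 'x' is loaded atomically (unless it lives in a global register), the result
// is stored to 'v', and seq_cst additionally emits a flush.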
3651
3652static void emitOMPAtomicWriteExpr(CodeGenFunction &CGF, bool IsSeqCst,
3653                                   const Expr *X, const Expr *E,
3654                                   SourceLocation Loc) {
3655  // x = expr;
3656  assert(X->isLValue() && "X of 'omp atomic write' is not lvalue");
3657  emitSimpleAtomicStore(CGF, IsSeqCst, CGF.EmitLValue(X), CGF.EmitAnyExpr(E));
3658  // OpenMP, 2.12.6, atomic Construct
3659  // Any atomic construct with a seq_cst clause forces the atomically
3660  // performed operation to include an implicit flush operation without a
3661  // list.
3662  if (IsSeqCst)
3663    CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
3664}
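// Illustrative input for the write form handled above:
//
//   #pragma omp atomic write
//     x = expr;
//
// 'expr' is evaluated non-atomically and then stored to 'x' atomically.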
3665
3666static std::pair<bool, RValue> emitOMPAtomicRMW(CodeGenFunction &CGF, LValue X,
3667                                                RValue Update,
3668                                                BinaryOperatorKind BO,
3669                                                llvm::AtomicOrdering AO,
3670                                                bool IsXLHSInRHSPart) {
3671  ASTContext &Context = CGF.getContext();
3672  // Allow atomicrmw only if 'x' and 'update' are integer values, lvalue for 'x'
3673  // expression is simple and atomic is allowed for the given type for the
3674  // target platform.
3675  if (BO == BO_Comma || !Update.isScalar() ||
3676      !Update.getScalarVal()->getType()->isIntegerTy() ||
3677      !X.isSimple() || (!isa<llvm::ConstantInt>(Update.getScalarVal()) &&
3678                        (Update.getScalarVal()->getType() !=
3679                         X.getAddress().getElementType())) ||
3680      !X.getAddress().getElementType()->isIntegerTy() ||
3681      !Context.getTargetInfo().hasBuiltinAtomic(
3682          Context.getTypeSize(X.getType()), Context.toBits(X.getAlignment())))
3683    return std::make_pair(false, RValue::get(nullptr));
3684
3685  llvm::AtomicRMWInst::BinOp RMWOp;
3686  switch (BO) {
3687  case BO_Add:
3688    RMWOp = llvm::AtomicRMWInst::Add;
3689    break;
3690  case BO_Sub:
3691    if (!IsXLHSInRHSPart)
3692      return std::make_pair(false, RValue::get(nullptr));
3693    RMWOp = llvm::AtomicRMWInst::Sub;
3694    break;
3695  case BO_And:
3696    RMWOp = llvm::AtomicRMWInst::And;
3697    break;
3698  case BO_Or:
3699    RMWOp = llvm::AtomicRMWInst::Or;
3700    break;
3701  case BO_Xor:
3702    RMWOp = llvm::AtomicRMWInst::Xor;
3703    break;
3704  case BO_LT:
3705    RMWOp = X.getType()->hasSignedIntegerRepresentation()
3706                ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Min
3707                                   : llvm::AtomicRMWInst::Max)
3708                : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMin
3709                                   : llvm::AtomicRMWInst::UMax);
3710    break;
3711  case BO_GT:
3712    RMWOp = X.getType()->hasSignedIntegerRepresentation()
3713                ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Max
3714                                   : llvm::AtomicRMWInst::Min)
3715                : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMax
3716                                   : llvm::AtomicRMWInst::UMin);
3717    break;
3718  case BO_Assign:
3719    RMWOp = llvm::AtomicRMWInst::Xchg;
3720    break;
3721  case BO_Mul:
3722  case BO_Div:
3723  case BO_Rem:
3724  case BO_Shl:
3725  case BO_Shr:
3726  case BO_LAnd:
3727  case BO_LOr:
3728    return std::make_pair(false, RValue::get(nullptr));
3729  case BO_PtrMemD:
3730  case BO_PtrMemI:
3731  case BO_LE:
3732  case BO_GE:
3733  case BO_EQ:
3734  case BO_NE:
3735  case BO_Cmp:
3736  case BO_AddAssign:
3737  case BO_SubAssign:
3738  case BO_AndAssign:
3739  case BO_OrAssign:
3740  case BO_XorAssign:
3741  case BO_MulAssign:
3742  case BO_DivAssign:
3743  case BO_RemAssign:
3744  case BO_ShlAssign:
3745  case BO_ShrAssign:
3746  case BO_Comma:
3747    llvm_unreachable("Unsupported atomic update operation");
3748  }
3749  llvm::Value *UpdateVal = Update.getScalarVal();
3750  if (auto *IC = dyn_cast<llvm::ConstantInt>(UpdateVal)) {
3751    UpdateVal = CGF.Builder.CreateIntCast(
3752        IC, X.getAddress().getElementType(),
3753        X.getType()->hasSignedIntegerRepresentation());
3754  }
3755  llvm::Value *Res =
3756      CGF.Builder.CreateAtomicRMW(RMWOp, X.getPointer(), UpdateVal, AO);
3757  return std::make_pair(true, RValue::get(Res));
3758}
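// As a sketch of when this fast path applies: for an integer 'x',
//
//   #pragma omp atomic update
//     x += 1;
//
// can typically be emitted as a single 'atomicrmw add' instruction, whereas
// e.g. 'x = expr - x' or a floating-point 'x' returns 'false' here and falls
// back to the generic update path in EmitOMPAtomicSimpleUpdateExpr below.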
3759
3760std::pair<bool, RValue> CodeGenFunction::EmitOMPAtomicSimpleUpdateExpr(
3761    LValue X, RValue E, BinaryOperatorKind BO, bool IsXLHSInRHSPart,
3762    llvm::AtomicOrdering AO, SourceLocation Loc,
3763    const llvm::function_ref<RValue(RValue)> CommonGen) {
3764  // Update expressions are allowed to have the following forms:
3765  // x binop= expr; -> xrval + expr;
3766  // x++, ++x -> xrval + 1;
3767  // x--, --x -> xrval - 1;
3768  // x = x binop expr; -> xrval binop expr
3769  // x = expr Op x; - > expr binop xrval;
3770  auto Res = emitOMPAtomicRMW(*this, X, E, BO, AO, IsXLHSInRHSPart);
3771  if (!Res.first) {
3772    if (X.isGlobalReg()) {
3773      // Emit an update expression: 'xrval' binop 'expr' or 'expr' binop
3774      // 'xrval'.
3775      EmitStoreThroughLValue(CommonGen(EmitLoadOfLValue(X, Loc)), X);
3776    } else {
3777      // Perform compare-and-swap procedure.
3778      EmitAtomicUpdate(X, AO, CommonGen, X.getType().isVolatileQualified());
3779    }
3780  }
3781  return Res;
3782}
3783
3784static void emitOMPAtomicUpdateExpr(CodeGenFunction &CGF, bool IsSeqCst,
3785                                    const Expr *X, const Expr *E,
3786                                    const Expr *UE, bool IsXLHSInRHSPart,
3787                                    SourceLocation Loc) {
3788  assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) &&
3789         "Update expr in 'atomic update' must be a binary operator.");
3790  const auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts());
3791  // Update expressions are allowed to have the following forms:
3792  // x binop= expr; -> xrval + expr;
3793  // x++, ++x -> xrval + 1;
3794  // x--, --x -> xrval - 1;
3795  // x = x binop expr; -> xrval binop expr
3796  // x = expr Op x; - > expr binop xrval;
3797  assert(X->isLValue() && "X of 'omp atomic update' is not lvalue");
3798  LValue XLValue = CGF.EmitLValue(X);
3799  RValue ExprRValue = CGF.EmitAnyExpr(E);
3800  llvm::AtomicOrdering AO = IsSeqCst
3801                                ? llvm::AtomicOrdering::SequentiallyConsistent
3802                                : llvm::AtomicOrdering::Monotonic;
3803  const auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts());
3804  const auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts());
3805  const OpaqueValueExpr *XRValExpr = IsXLHSInRHSPart ? LHS : RHS;
3806  const OpaqueValueExpr *ERValExpr = IsXLHSInRHSPart ? RHS : LHS;
3807  auto &&Gen = [&CGF, UE, ExprRValue, XRValExpr, ERValExpr](RValue XRValue) {
3808    CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
3809    CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue);
3810    return CGF.EmitAnyExpr(UE);
3811  };
3812  (void)CGF.EmitOMPAtomicSimpleUpdateExpr(
3813      XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen);
3814  // OpenMP, 2.12.6, atomic Construct
3815  // Any atomic construct with a seq_cst clause forces the atomically
3816  // performed operation to include an implicit flush operation without a
3817  // list.
3818  if (IsSeqCst)
3819    CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
3820}
3821
3822static RValue convertToType(CodeGenFunction &CGF, RValue Value,
3823                            QualType SourceType, QualType ResType,
3824                            SourceLocation Loc) {
3825  switch (CGF.getEvaluationKind(ResType)) {
3826  case TEK_Scalar:
3827    return RValue::get(
3828        convertToScalarValue(CGF, Value, SourceType, ResType, Loc));
3829  case TEK_Complex: {
3830    auto Res = convertToComplexValue(CGF, Value, SourceType, ResType, Loc);
3831    return RValue::getComplex(Res.first, Res.second);
3832  }
3833  case TEK_Aggregate:
3834    break;
3835  }
3836  llvm_unreachable("Must be a scalar or complex.");
3837}
3838
3839static void emitOMPAtomicCaptureExpr(CodeGenFunction &CGF, bool IsSeqCst,
3840                                     bool IsPostfixUpdate, const Expr *V,
3841                                     const Expr *X, const Expr *E,
3842                                     const Expr *UE, bool IsXLHSInRHSPart,
3843                                     SourceLocation Loc) {
3844  assert(X->isLValue() && "X of 'omp atomic capture' is not lvalue");
3845  assert(V->isLValue() && "V of 'omp atomic capture' is not lvalue");
3846  RValue NewVVal;
3847  LValue VLValue = CGF.EmitLValue(V);
3848  LValue XLValue = CGF.EmitLValue(X);
3849  RValue ExprRValue = CGF.EmitAnyExpr(E);
3850  llvm::AtomicOrdering AO = IsSeqCst
3851                                ? llvm::AtomicOrdering::SequentiallyConsistent
3852                                : llvm::AtomicOrdering::Monotonic;
3853  QualType NewVValType;
3854  if (UE) {
3855    // 'x' is updated with some additional value.
3856    assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) &&
3857           "Update expr in 'atomic capture' must be a binary operator.");
3858    const auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts());
3859    // Update expressions are allowed to have the following forms:
3860    // x binop= expr; -> xrval + expr;
3861    // x++, ++x -> xrval + 1;
3862    // x--, --x -> xrval - 1;
3863    // x = x binop expr; -> xrval binop expr
3864    // x = expr Op x; - > expr binop xrval;
3865    const auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts());
3866    const auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts());
3867    const OpaqueValueExpr *XRValExpr = IsXLHSInRHSPart ? LHS : RHS;
3868    NewVValType = XRValExpr->getType();
3869    const OpaqueValueExpr *ERValExpr = IsXLHSInRHSPart ? RHS : LHS;
3870    auto &&Gen = [&CGF, &NewVVal, UE, ExprRValue, XRValExpr, ERValExpr,
3871                  IsPostfixUpdate](RValue XRValue) {
3872      CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
3873      CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue);
3874      RValue Res = CGF.EmitAnyExpr(UE);
3875      NewVVal = IsPostfixUpdate ? XRValue : Res;
3876      return Res;
3877    };
3878    auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr(
3879        XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen);
3880    if (Res.first) {
3881      // 'atomicrmw' instruction was generated.
3882      if (IsPostfixUpdate) {
3883        // Use old value from 'atomicrmw'.
3884        NewVVal = Res.second;
3885      } else {
3886        // 'atomicrmw' does not provide new value, so evaluate it using old
3887        // value of 'x'.
3888        CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
3889        CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, Res.second);
3890        NewVVal = CGF.EmitAnyExpr(UE);
3891      }
3892    }
3893  } else {
3894    // 'x' is simply rewritten with some 'expr'.
3895    NewVValType = X->getType().getNonReferenceType();
3896    ExprRValue = convertToType(CGF, ExprRValue, E->getType(),
3897                               X->getType().getNonReferenceType(), Loc);
3898    auto &&Gen = [&NewVVal, ExprRValue](RValue XRValue) {
3899      NewVVal = XRValue;
3900      return ExprRValue;
3901    };
3902    // Try to perform atomicrmw xchg, otherwise simple exchange.
3903    auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr(
3904        XLValue, ExprRValue, /*BO=*/BO_Assign, /*IsXLHSInRHSPart=*/false, AO,
3905        Loc, Gen);
3906    if (Res.first) {
3907      // 'atomicrmw' instruction was generated.
3908      NewVVal = IsPostfixUpdate ? Res.second : ExprRValue;
3909    }
3910  }
3911  // Emit post-update store to 'v' of old/new 'x' value.
3912  CGF.emitOMPSimpleStore(VLValue, NewVVal, NewVValType, Loc);
3913  // OpenMP, 2.12.6, atomic Construct
3914  // Any atomic construct with a seq_cst clause forces the atomically
3915  // performed operation to include an implicit flush operation without a
3916  // list.
3917  if (IsSeqCst)
3918    CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
3919}
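// Illustrative inputs for the capture form handled above:
//
//   #pragma omp atomic capture        // prefix form: 'v' receives the new
//     v = ++x;                        // value of 'x'
//
//   #pragma omp atomic capture        // postfix form (IsPostfixUpdate): 'v'
//     { v = x; x += expr; }           // receives the old value of 'x'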
3920
3921static void emitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind,
3922                              bool IsSeqCst, bool IsPostfixUpdate,
3923                              const Expr *X, const Expr *V, const Expr *E,
3924                              const Expr *UE, bool IsXLHSInRHSPart,
3925                              SourceLocation Loc) {
3926  switch (Kind) {
3927  case OMPC_read:
3928    emitOMPAtomicReadExpr(CGF, IsSeqCst, X, V, Loc);
3929    break;
3930  case OMPC_write:
3931    emitOMPAtomicWriteExpr(CGF, IsSeqCst, X, E, Loc);
3932    break;
3933  case OMPC_unknown:
3934  case OMPC_update:
3935    emitOMPAtomicUpdateExpr(CGF, IsSeqCst, X, E, UE, IsXLHSInRHSPart, Loc);
3936    break;
3937  case OMPC_capture:
3938    emitOMPAtomicCaptureExpr(CGF, IsSeqCst, IsPostfixUpdate, V, X, E, UE,
3939                             IsXLHSInRHSPart, Loc);
3940    break;
3941  case OMPC_if:
3942  case OMPC_final:
3943  case OMPC_num_threads:
3944  case OMPC_private:
3945  case OMPC_firstprivate:
3946  case OMPC_lastprivate:
3947  case OMPC_reduction:
3948  case OMPC_task_reduction:
3949  case OMPC_in_reduction:
3950  case OMPC_safelen:
3951  case OMPC_simdlen:
3952  case OMPC_allocator:
3953  case OMPC_allocate:
3954  case OMPC_collapse:
3955  case OMPC_default:
3956  case OMPC_seq_cst:
3957  case OMPC_shared:
3958  case OMPC_linear:
3959  case OMPC_aligned:
3960  case OMPC_copyin:
3961  case OMPC_copyprivate:
3962  case OMPC_flush:
3963  case OMPC_proc_bind:
3964  case OMPC_schedule:
3965  case OMPC_ordered:
3966  case OMPC_nowait:
3967  case OMPC_untied:
3968  case OMPC_threadprivate:
3969  case OMPC_depend:
3970  case OMPC_mergeable:
3971  case OMPC_device:
3972  case OMPC_threads:
3973  case OMPC_simd:
3974  case OMPC_map:
3975  case OMPC_num_teams:
3976  case OMPC_thread_limit:
3977  case OMPC_priority:
3978  case OMPC_grainsize:
3979  case OMPC_nogroup:
3980  case OMPC_num_tasks:
3981  case OMPC_hint:
3982  case OMPC_dist_schedule:
3983  case OMPC_defaultmap:
3984  case OMPC_uniform:
3985  case OMPC_to:
3986  case OMPC_from:
3987  case OMPC_use_device_ptr:
3988  case OMPC_is_device_ptr:
3989  case OMPC_unified_address:
3990  case OMPC_unified_shared_memory:
3991  case OMPC_reverse_offload:
3992  case OMPC_dynamic_allocators:
3993  case OMPC_atomic_default_mem_order:
3994    llvm_unreachable("Clause is not allowed in 'omp atomic'.");
3995  }
3996}
3997
3998void CodeGenFunction::EmitOMPAtomicDirective(const OMPAtomicDirective &S) {
3999  bool IsSeqCst = S.getSingleClause<OMPSeqCstClause>();
4000  OpenMPClauseKind Kind = OMPC_unknown;
4001  for (const OMPClause *C : S.clauses()) {
4002    // Find first clause (skip seq_cst clause, if it is first).
4003    if (C->getClauseKind() != OMPC_seq_cst) {
4004      Kind = C->getClauseKind();
4005      break;
4006    }
4007  }
4008
4009  const Stmt *CS = S.getInnermostCapturedStmt()->IgnoreContainers();
4010  if (const auto *FE = dyn_cast<FullExpr>(CS))
4011    enterFullExpression(FE);
4012  // Processing for statements under 'atomic capture'.
4013  if (const auto *Compound = dyn_cast<CompoundStmt>(CS)) {
4014    for (const Stmt *C : Compound->body()) {
4015      if (const auto *FE = dyn_cast<FullExpr>(C))
4016        enterFullExpression(FE);
4017    }
4018  }
4019
4020  auto &&CodeGen = [&S, Kind, IsSeqCst, CS](CodeGenFunction &CGF,
4021                                            PrePostActionTy &) {
4022    CGF.EmitStopPoint(CS);
4023    emitOMPAtomicExpr(CGF, Kind, IsSeqCst, S.isPostfixUpdate(), S.getX(),
4024                      S.getV(), S.getExpr(), S.getUpdateExpr(),
4025                      S.isXLHSInRHSPart(), S.getBeginLoc());
4026  };
4027  OMPLexicalScope Scope(*this, S, OMPD_unknown);
4028  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_atomic, CodeGen);
4029}
4030
4031static void emitCommonOMPTargetDirective(CodeGenFunction &CGF,
4032                                         const OMPExecutableDirective &S,
4033                                         const RegionCodeGenTy &CodeGen) {
4034  assert(isOpenMPTargetExecutionDirective(S.getDirectiveKind()));
4035  CodeGenModule &CGM = CGF.CGM;
4036
4037  // On device emit this construct as inlined code.
4038  if (CGM.getLangOpts().OpenMPIsDevice) {
4039    OMPLexicalScope Scope(CGF, S, OMPD_target);
4040    CGM.getOpenMPRuntime().emitInlinedDirective(
4041        CGF, OMPD_target, [&S](CodeGenFunction &CGF, PrePostActionTy &) {
4042          CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
4043        });
4044    return;
4045  }
4046
4047  llvm::Function *Fn = nullptr;
4048  llvm::Constant *FnID = nullptr;
4049
4050  const Expr *IfCond = nullptr;
4051  // Check for the at most one if clause associated with the target region.
4052  for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
4053    if (C->getNameModifier() == OMPD_unknown ||
4054        C->getNameModifier() == OMPD_target) {
4055      IfCond = C->getCondition();
4056      break;
4057    }
4058  }
4059
4060  // Check if we have any device clause associated with the directive.
4061  const Expr *Device = nullptr;
4062  if (auto *C = S.getSingleClause<OMPDeviceClause>())
4063    Device = C->getDevice();
4064
4065  // Check if we have an if clause whose conditional always evaluates to false
4066  // or if we do not have any targets specified. If so the target region is not
4067  // an offload entry point.
4068  bool IsOffloadEntry = true;
4069  if (IfCond) {
4070    bool Val;
4071    if (CGF.ConstantFoldsToSimpleInteger(IfCond, Val) && !Val)
4072      IsOffloadEntry = false;
4073  }
4074  if (CGM.getLangOpts().OMPTargetTriples.empty())
4075    IsOffloadEntry = false;
4076
4077  assert(CGF.CurFuncDecl && "No parent declaration for target region!");
4078  StringRef ParentName;
4079  // In case we have Ctors/Dtors we use the complete type variant to produce
4080  // the mangling of the device outlined kernel.
4081  if (const auto *D = dyn_cast<CXXConstructorDecl>(CGF.CurFuncDecl))
4082    ParentName = CGM.getMangledName(GlobalDecl(D, Ctor_Complete));
4083  else if (const auto *D = dyn_cast<CXXDestructorDecl>(CGF.CurFuncDecl))
4084    ParentName = CGM.getMangledName(GlobalDecl(D, Dtor_Complete));
4085  else
4086    ParentName =
4087        CGM.getMangledName(GlobalDecl(cast<FunctionDecl>(CGF.CurFuncDecl)));
4088
4089  // Emit target region as a standalone region.
4090  CGM.getOpenMPRuntime().emitTargetOutlinedFunction(S, ParentName, Fn, FnID,
4091                                                    IsOffloadEntry, CodeGen);
4092  OMPLexicalScope Scope(CGF, S, OMPD_task);
4093  auto &&SizeEmitter = [](CodeGenFunction &CGF, const OMPLoopDirective &D) {
4094    OMPLoopScope(CGF, D);
4095    // Emit calculation of the iterations count.
4096    llvm::Value *NumIterations = CGF.EmitScalarExpr(D.getNumIterations());
4097    NumIterations = CGF.Builder.CreateIntCast(NumIterations, CGF.Int64Ty,
4098                                              /*IsSigned=*/false);
4099    return NumIterations;
4100  };
4101  if (IsOffloadEntry)
4102    CGM.getOpenMPRuntime().emitTargetNumIterationsCall(CGF, S, Device,
4103                                                       SizeEmitter);
4104  CGM.getOpenMPRuntime().emitTargetCall(CGF, S, Fn, FnID, IfCond, Device);
4105}
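// A minimal illustrative use of the directives funneled through here:
//
//   #pragma omp target if(target: N > 1024) device(dev_id)
//     { ... }
//
// The if and device clauses are forwarded to emitTargetCall; a constant-false
// if-condition or an empty OMPTargetTriples list clears IsOffloadEntry above.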
4106
4107static void emitTargetRegion(CodeGenFunction &CGF, const OMPTargetDirective &S,
4108                             PrePostActionTy &Action) {
4109  Action.Enter(CGF);
4110  CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
4111  (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
4112  CGF.EmitOMPPrivateClause(S, PrivateScope);
4113  (void)PrivateScope.Privatize();
4114  if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
4115    CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S);
4116
4117  CGF.EmitStmt(S.getCapturedStmt(OMPD_target)->getCapturedStmt());
4118}
4119
4120void CodeGenFunction::EmitOMPTargetDeviceFunction(CodeGenModule &CGM,
4121                                                  StringRef ParentName,
4122                                                  const OMPTargetDirective &S) {
4123  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
4124    emitTargetRegion(CGF, S, Action);
4125  };
4126  llvm::Function *Fn;
4127  llvm::Constant *Addr;
4128  // Emit target region as a standalone region.
4129  CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
4130      S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
4131  assert(Fn && Addr && "Target device function emission failed.");
4132}
4133
4134void CodeGenFunction::EmitOMPTargetDirective(const OMPTargetDirective &S) {
4135  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
4136    emitTargetRegion(CGF, S, Action);
4137  };
4138  emitCommonOMPTargetDirective(*this, S, CodeGen);
4139}
4140
4141static void emitCommonOMPTeamsDirective(CodeGenFunction &CGF,
4142                                        const OMPExecutableDirective &S,
4143                                        OpenMPDirectiveKind InnermostKind,
4144                                        const RegionCodeGenTy &CodeGen) {
4145  const CapturedStmt *CS = S.getCapturedStmt(OMPD_teams);
4146  llvm::Function *OutlinedFn =
4147      CGF.CGM.getOpenMPRuntime().emitTeamsOutlinedFunction(
4148          S, *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen);
4149
4150  const auto *NT = S.getSingleClause<OMPNumTeamsClause>();
4151  const auto *TL = S.getSingleClause<OMPThreadLimitClause>();
4152  if (NT || TL) {
4153    const Expr *NumTeams = NT ? NT->getNumTeams() : nullptr;
4154    const Expr *ThreadLimit = TL ? TL->getThreadLimit() : nullptr;
4155
4156    CGF.CGM.getOpenMPRuntime().emitNumTeamsClause(CGF, NumTeams, ThreadLimit,
4157                                                  S.getBeginLoc());
4158  }
4159
4160  OMPTeamsScope Scope(CGF, S);
4161  llvm::SmallVector<llvm::Value *, 16> CapturedVars;
4162  CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars);
4163  CGF.CGM.getOpenMPRuntime().emitTeamsCall(CGF, S, S.getBeginLoc(), OutlinedFn,
4164                                           CapturedVars);
4165}
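// Illustrative source for this common teams path:
//
//   #pragma omp target
//   #pragma omp teams num_teams(4) thread_limit(64)
//     { ... }
//
// num_teams/thread_limit, when present, are emitted via emitNumTeamsClause
// before the outlined teams region is launched through emitTeamsCall.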
4166
4167void CodeGenFunction::EmitOMPTeamsDirective(const OMPTeamsDirective &S) {
4168  // Emit teams region as a standalone region.
4169  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
4170    Action.Enter(CGF);
4171    OMPPrivateScope PrivateScope(CGF);
4172    (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
4173    CGF.EmitOMPPrivateClause(S, PrivateScope);
4174    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
4175    (void)PrivateScope.Privatize();
4176    CGF.EmitStmt(S.getCapturedStmt(OMPD_teams)->getCapturedStmt());
4177    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
4178  };
4179  emitCommonOMPTeamsDirective(*this, S, OMPD_distribute, CodeGen);
4180  emitPostUpdateForReductionClause(*this, S,
4181                                   [](CodeGenFunction &) { return nullptr; });
4182}
4183
4184static void emitTargetTeamsRegion(CodeGenFunction &CGF, PrePostActionTy &Action,
4185                                  const OMPTargetTeamsDirective &S) {
4186  auto *CS = S.getCapturedStmt(OMPD_teams);
4187  Action.Enter(CGF);
4188  // Emit teams region as a standalone region.
4189  auto &&CodeGen = [&S, CS](CodeGenFunction &CGF, PrePostActionTy &Action) {
4190    Action.Enter(CGF);
4191    CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
4192    (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
4193    CGF.EmitOMPPrivateClause(S, PrivateScope);
4194    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
4195    (void)PrivateScope.Privatize();
4196    if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
4197      CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S);
4198    CGF.EmitStmt(CS->getCapturedStmt());
4199    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
4200  };
4201  emitCommonOMPTeamsDirective(CGF, S, OMPD_teams, CodeGen);
4202  emitPostUpdateForReductionClause(CGF, S,
4203                                   [](CodeGenFunction &) { return nullptr; });
4204}
4205
4206void CodeGenFunction::EmitOMPTargetTeamsDeviceFunction(
4207    CodeGenModule &CGM, StringRef ParentName,
4208    const OMPTargetTeamsDirective &S) {
4209  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
4210    emitTargetTeamsRegion(CGF, Action, S);
4211  };
4212  llvm::Function *Fn;
4213  llvm::Constant *Addr;
4214  // Emit target region as a standalone region.
4215  CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
4216      S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
4217  assert(Fn && Addr && "Target device function emission failed.");
4218}
4219
4220void CodeGenFunction::EmitOMPTargetTeamsDirective(
4221    const OMPTargetTeamsDirective &S) {
4222  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
4223    emitTargetTeamsRegion(CGF, Action, S);
4224  };
4225  emitCommonOMPTargetDirective(*this, S, CodeGen);
4226}
4227
4228static void
4229emitTargetTeamsDistributeRegion(CodeGenFunction &CGF, PrePostActionTy &Action,
4230                                const OMPTargetTeamsDistributeDirective &S) {
4231  Action.Enter(CGF);
4232  auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
4233    CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc());
4234  };
4235
4236  // Emit teams region as a standalone region.
4237  auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF,
4238                                            PrePostActionTy &Action) {
4239    Action.Enter(CGF);
4240    CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
4241    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
4242    (void)PrivateScope.Privatize();
4243    CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute,
4244                                                    CodeGenDistribute);
4245    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
4246  };
4247  emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute, CodeGen);
4248  emitPostUpdateForReductionClause(CGF, S,
4249                                   [](CodeGenFunction &) { return nullptr; });
4250}
4251
4252void CodeGenFunction::EmitOMPTargetTeamsDistributeDeviceFunction(
4253    CodeGenModule &CGM, StringRef ParentName,
4254    const OMPTargetTeamsDistributeDirective &S) {
4255  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
4256    emitTargetTeamsDistributeRegion(CGF, Action, S);
4257  };
4258  llvm::Function *Fn;
4259  llvm::Constant *Addr;
4260  // Emit target region as a standalone region.
4261  CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
4262      S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
4263  assert(Fn && Addr && "Target device function emission failed.");
4264}
4265
4266void CodeGenFunction::EmitOMPTargetTeamsDistributeDirective(
4267    const OMPTargetTeamsDistributeDirective &S) {
4268  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
4269    emitTargetTeamsDistributeRegion(CGF, Action, S);
4270  };
4271  emitCommonOMPTargetDirective(*this, S, CodeGen);
4272}
4273
4274static void emitTargetTeamsDistributeSimdRegion(
4275    CodeGenFunction &CGF, PrePostActionTy &Action,
4276    const OMPTargetTeamsDistributeSimdDirective &S) {
4277  Action.Enter(CGF);
4278  auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
4279    CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc());
4280  };
4281
4282  // Emit teams region as a standalone region.
4283  auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF,
4284                                            PrePostActionTy &Action) {
4285    Action.Enter(CGF);
4286    CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
4287    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
4288    (void)PrivateScope.Privatize();
4289    CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute,
4290                                                    CodeGenDistribute);
4291    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
4292  };
4293  emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute_simd, CodeGen);
4294  emitPostUpdateForReductionClause(CGF, S,
4295                                   [](CodeGenFunction &) { return nullptr; });
4296}
4297
4298void CodeGenFunction::EmitOMPTargetTeamsDistributeSimdDeviceFunction(
4299    CodeGenModule &CGM, StringRef ParentName,
4300    const OMPTargetTeamsDistributeSimdDirective &S) {
4301  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
4302    emitTargetTeamsDistributeSimdRegion(CGF, Action, S);
4303  };
4304  llvm::Function *Fn;
4305  llvm::Constant *Addr;
4306  // Emit target region as a standalone region.
4307  CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
4308      S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
4309  assert(Fn && Addr && "Target device function emission failed.");
4310}
4311
4312void CodeGenFunction::EmitOMPTargetTeamsDistributeSimdDirective(
4313    const OMPTargetTeamsDistributeSimdDirective &S) {
4314  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
4315    emitTargetTeamsDistributeSimdRegion(CGF, Action, S);
4316  };
4317  emitCommonOMPTargetDirective(*this, S, CodeGen);
4318}
4319
4320void CodeGenFunction::EmitOMPTeamsDistributeDirective(
4321    const OMPTeamsDistributeDirective &S) {
4322
4323  auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
4324    CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc());
4325  };
4326
4327  // Emit teams region as a standalone region.
4328  auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF,
4329                                            PrePostActionTy &Action) {
4330    Action.Enter(CGF);
4331    OMPPrivateScope PrivateScope(CGF);
4332    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
4333    (void)PrivateScope.Privatize();
4334    CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute,
4335                                                    CodeGenDistribute);
4336    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
4337  };
4338  emitCommonOMPTeamsDirective(*this, S, OMPD_distribute, CodeGen);
4339  emitPostUpdateForReductionClause(*this, S,
4340                                   [](CodeGenFunction &) { return nullptr; });
4341}
4342
4343void CodeGenFunction::EmitOMPTeamsDistributeSimdDirective(
4344    const OMPTeamsDistributeSimdDirective &S) {
4345  auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
4346    CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc());
4347  };
4348
4349  // Emit teams region as a standalone region.
4350  auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF,
4351                                            PrePostActionTy &Action) {
4352    Action.Enter(CGF);
4353    OMPPrivateScope PrivateScope(CGF);
4354    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
4355    (void)PrivateScope.Privatize();
4356    CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_simd,
4357                                                    CodeGenDistribute);
4358    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
4359  };
4360  emitCommonOMPTeamsDirective(*this, S, OMPD_distribute_simd, CodeGen);
4361  emitPostUpdateForReductionClause(*this, S,
4362                                   [](CodeGenFunction &) { return nullptr; });
4363}
4364
4365void CodeGenFunction::EmitOMPTeamsDistributeParallelForDirective(
4366    const OMPTeamsDistributeParallelForDirective &S) {
4367  auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
4368    CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined,
4369                              S.getDistInc());
4370  };
4371
4372  // Emit teams region as a standalone region.
4373  auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF,
4374                                            PrePostActionTy &Action) {
4375    Action.Enter(CGF);
4376    OMPPrivateScope PrivateScope(CGF);
4377    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
4378    (void)PrivateScope.Privatize();
4379    CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute,
4380                                                    CodeGenDistribute);
4381    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
4382  };
4383  emitCommonOMPTeamsDirective(*this, S, OMPD_distribute_parallel_for, CodeGen);
4384  emitPostUpdateForReductionClause(*this, S,
4385                                   [](CodeGenFunction &) { return nullptr; });
4386}
4387
4388void CodeGenFunction::EmitOMPTeamsDistributeParallelForSimdDirective(
4389    const OMPTeamsDistributeParallelForSimdDirective &S) {
4390  auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
4391    CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined,
4392                              S.getDistInc());
4393  };
4394
4395  // Emit teams region as a standalone region.
4396  auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF,
4397                                            PrePostActionTy &Action) {
4398    Action.Enter(CGF);
4399    OMPPrivateScope PrivateScope(CGF);
4400    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
4401    (void)PrivateScope.Privatize();
4402    CGF.CGM.getOpenMPRuntime().emitInlinedDirective(
4403        CGF, OMPD_distribute, CodeGenDistribute, /*HasCancel=*/false);
4404    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
4405  };
4406  emitCommonOMPTeamsDirective(*this, S, OMPD_distribute_parallel_for, CodeGen);
4407  emitPostUpdateForReductionClause(*this, S,
4408                                   [](CodeGenFunction &) { return nullptr; });
4409}
4410
4411static void emitTargetTeamsDistributeParallelForRegion(
4412    CodeGenFunction &CGF, const OMPTargetTeamsDistributeParallelForDirective &S,
4413    PrePostActionTy &Action) {
4414  Action.Enter(CGF);
4415  auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
4416    CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined,
4417                              S.getDistInc());
4418  };
4419
4420  // Emit teams region as a standalone region.
4421  auto &&CodeGenTeams = [&S, &CodeGenDistribute](CodeGenFunction &CGF,
4422                                                 PrePostActionTy &Action) {
4423    Action.Enter(CGF);
4424    CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
4425    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
4426    (void)PrivateScope.Privatize();
4427    CGF.CGM.getOpenMPRuntime().emitInlinedDirective(
4428        CGF, OMPD_distribute, CodeGenDistribute, /*HasCancel=*/false);
4429    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
4430  };
4431
4432  emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute_parallel_for,
4433                              CodeGenTeams);
4434  emitPostUpdateForReductionClause(CGF, S,
4435                                   [](CodeGenFunction &) { return nullptr; });
4436}
4437
4438void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForDeviceFunction(
4439    CodeGenModule &CGM, StringRef ParentName,
4440    const OMPTargetTeamsDistributeParallelForDirective &S) {
4441  // Emit SPMD target teams distribute parallel for region as a standalone
4442  // region.
4443  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
4444    emitTargetTeamsDistributeParallelForRegion(CGF, S, Action);
4445  };
4446  llvm::Function *Fn;
4447  llvm::Constant *Addr;
4448  // Emit target region as a standalone region.
4449  CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
4450      S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
4451  assert(Fn && Addr && "Target device function emission failed.");
4452}
4453
4454void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForDirective(
4455    const OMPTargetTeamsDistributeParallelForDirective &S) {
4456  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
4457    emitTargetTeamsDistributeParallelForRegion(CGF, S, Action);
4458  };
4459  emitCommonOMPTargetDirective(*this, S, CodeGen);
4460}
4461
4462static void emitTargetTeamsDistributeParallelForSimdRegion(
4463    CodeGenFunction &CGF,
4464    const OMPTargetTeamsDistributeParallelForSimdDirective &S,
4465    PrePostActionTy &Action) {
4466  Action.Enter(CGF);
4467  auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
4468    CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined,
4469                              S.getDistInc());
4470  };
4471
4472  // Emit teams region as a standalone region.
4473  auto &&CodeGenTeams = [&S, &CodeGenDistribute](CodeGenFunction &CGF,
4474                                                 PrePostActionTy &Action) {
4475    Action.Enter(CGF);
4476    CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
4477    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
4478    (void)PrivateScope.Privatize();
4479    CGF.CGM.getOpenMPRuntime().emitInlinedDirective(
4480        CGF, OMPD_distribute, CodeGenDistribute, /*HasCancel=*/false);
4481    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
4482  };
4483
4484  emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute_parallel_for_simd,
4485                              CodeGenTeams);
4486  emitPostUpdateForReductionClause(CGF, S,
4487                                   [](CodeGenFunction &) { return nullptr; });
4488}
4489
4490void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForSimdDeviceFunction(
4491    CodeGenModule &CGM, StringRef ParentName,
4492    const OMPTargetTeamsDistributeParallelForSimdDirective &S) {
4493  // Emit SPMD target teams distribute parallel for simd region as a standalone
4494  // region.
4495  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
4496    emitTargetTeamsDistributeParallelForSimdRegion(CGF, S, Action);
4497  };
4498  llvm::Function *Fn;
4499  llvm::Constant *Addr;
4500  // Emit target region as a standalone region.
4501  CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
4502      S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
4503  assert(Fn && Addr && "Target device function emission failed.");
4504}
4505
4506void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForSimdDirective(
4507    const OMPTargetTeamsDistributeParallelForSimdDirective &S) {
4508  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
4509    emitTargetTeamsDistributeParallelForSimdRegion(CGF, S, Action);
4510  };
4511  emitCommonOMPTargetDirective(*this, S, CodeGen);
4512}
4513
4514void CodeGenFunction::EmitOMPCancellationPointDirective(
4515    const OMPCancellationPointDirective &S) {
4516  CGM.getOpenMPRuntime().emitCancellationPointCall(*this, S.getBeginLoc(),
4517                                                   S.getCancelRegion());
4518}
4519
4520void CodeGenFunction::EmitOMPCancelDirective(const OMPCancelDirective &S) {
4521  const Expr *IfCond = nullptr;
4522  for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
4523    if (C->getNameModifier() == OMPD_unknown ||
4524        C->getNameModifier() == OMPD_cancel) {
4525      IfCond = C->getCondition();
4526      break;
4527    }
4528  }
4529  CGM.getOpenMPRuntime().emitCancelCall(*this, S.getBeginLoc(), IfCond,
4530                                        S.getCancelRegion());
4531}
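// Illustrative sources for the two directives above:
//
//   #pragma omp cancellation point for
//   #pragma omp cancel for if(cancel: should_stop)
//
// Both lower to runtime calls; the optional if clause (with the 'cancel'
// directive-name-modifier) is passed through as IfCond.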
4532
4533CodeGenFunction::JumpDest
4534CodeGenFunction::getOMPCancelDestination(OpenMPDirectiveKind Kind) {
4535  if (Kind == OMPD_parallel || Kind == OMPD_task ||
4536      Kind == OMPD_target_parallel)
4537    return ReturnBlock;
4538  assert(Kind == OMPD_for || Kind == OMPD_section || Kind == OMPD_sections ||
4539         Kind == OMPD_parallel_sections || Kind == OMPD_parallel_for ||
4540         Kind == OMPD_distribute_parallel_for ||
4541         Kind == OMPD_target_parallel_for ||
4542         Kind == OMPD_teams_distribute_parallel_for ||
4543         Kind == OMPD_target_teams_distribute_parallel_for);
4544  return OMPCancelStack.getExitBlock();
4545}
4546
4547void CodeGenFunction::EmitOMPUseDevicePtrClause(
4548    const OMPClause &NC, OMPPrivateScope &PrivateScope,
4549    const llvm::DenseMap<const ValueDecl *, Address> &CaptureDeviceAddrMap) {
4550  const auto &C = cast<OMPUseDevicePtrClause>(NC);
4551  auto OrigVarIt = C.varlist_begin();
4552  auto InitIt = C.inits().begin();
4553  for (const Expr *PvtVarIt : C.private_copies()) {
4554    const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*OrigVarIt)->getDecl());
4555    const auto *InitVD = cast<VarDecl>(cast<DeclRefExpr>(*InitIt)->getDecl());
4556    const auto *PvtVD = cast<VarDecl>(cast<DeclRefExpr>(PvtVarIt)->getDecl());
4557
4558    // In order to identify the right initializer we need to match the
4559    // declaration used by the mapping logic. In some cases we may get an
4560    // OMPCapturedExprDecl that refers to the original declaration.
4561    const ValueDecl *MatchingVD = OrigVD;
4562    if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(MatchingVD)) {
4563      // OMPCapturedExprDecls are used to privatize fields of the current
4564      // structure.
4565      const auto *ME = cast<MemberExpr>(OED->getInit());
4566      assert(isa<CXXThisExpr>(ME->getBase()) &&
4567             "Base should be the current struct!");
4568      MatchingVD = ME->getMemberDecl();
4569    }
4570
4571    // If we don't have information about the current list item, move on to
4572    // the next one.
4573    auto InitAddrIt = CaptureDeviceAddrMap.find(MatchingVD);
4574    if (InitAddrIt == CaptureDeviceAddrMap.end())
4575      continue;
4576
4577    bool IsRegistered = PrivateScope.addPrivate(OrigVD, [this, OrigVD,
4578                                                         InitAddrIt, InitVD,
4579                                                         PvtVD]() {
4580      // Initialize the temporary initialization variable with the address we
4581      // get from the runtime library. We have to cast the source address
4582      // because it is always a void *. References are materialized in the
4583      // privatization scope, so the initialization here disregards the fact
4584      // the original variable is a reference.
4585      QualType AddrQTy =
4586          getContext().getPointerType(OrigVD->getType().getNonReferenceType());
4587      llvm::Type *AddrTy = ConvertTypeForMem(AddrQTy);
4588      Address InitAddr = Builder.CreateBitCast(InitAddrIt->second, AddrTy);
4589      setAddrOfLocalVar(InitVD, InitAddr);
4590
4591      // Emit the private declaration; it will be initialized by the
4592      // declaration we just added to the local declarations map.
4593      EmitDecl(*PvtVD);
4594
4595      // The initialization variable has served its purpose in the emission
4596      // of the previous declaration, so we don't need it anymore.
4597      LocalDeclMap.erase(InitVD);
4598
4599      // Return the address of the private variable.
4600      return GetAddrOfLocalVar(PvtVD);
4601    });
4602    assert(IsRegistered && "firstprivate var already registered as private");
4603    // Silence the warning about unused variable.
4604    (void)IsRegistered;
4605
4606    ++OrigVarIt;
4607    ++InitIt;
4608  }
4609}
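
// A hedged, hypothetical example of the clause handled above; inside the region
// the privatized 'a' evaluates to the corresponding device address:
//   #pragma omp target data map(tofrom: a[0:n]) use_device_ptr(a)
//   {
//     launch_on_device(a, n); // placeholder call, not part of this file
//   }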
4610
4611// Generate the instructions for '#pragma omp target data' directive.
4612void CodeGenFunction::EmitOMPTargetDataDirective(
4613    const OMPTargetDataDirective &S) {
4614  CGOpenMPRuntime::TargetDataInfo Info(/*RequiresDevicePointerInfo=*/true);
4615
4616  // Create a pre/post action to signal the privatization of the device pointer.
4617  // This action can be replaced by the OpenMP runtime code generation to
4618  // deactivate privatization.
4619  bool PrivatizeDevicePointers = false;
4620  class DevicePointerPrivActionTy : public PrePostActionTy {
4621    bool &PrivatizeDevicePointers;
4622
4623  public:
4624    explicit DevicePointerPrivActionTy(bool &PrivatizeDevicePointers)
4625        : PrePostActionTy(), PrivatizeDevicePointers(PrivatizeDevicePointers) {}
4626    void Enter(CodeGenFunction &CGF) override {
4627      PrivatizeDevicePointers = true;
4628    }
4629  };
4630  DevicePointerPrivActionTy PrivAction(PrivatizeDevicePointers);
4631
4632  auto &&CodeGen = [&S, &Info, &PrivatizeDevicePointers](
4633                       CodeGenFunction &CGF, PrePostActionTy &Action) {
4634    auto &&InnermostCodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
4635      CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
4636    };
4637
4638    // Codegen that selects whether to generate the privatization code or not.
4639    auto &&PrivCodeGen = [&S, &Info, &PrivatizeDevicePointers,
4640                          &InnermostCodeGen](CodeGenFunction &CGF,
4641                                             PrePostActionTy &Action) {
4642      RegionCodeGenTy RCG(InnermostCodeGen);
4643      PrivatizeDevicePointers = false;
4644
4645      // Call the pre-action to change the status of PrivatizeDevicePointers if
4646      // needed.
4647      Action.Enter(CGF);
4648
4649      if (PrivatizeDevicePointers) {
4650        OMPPrivateScope PrivateScope(CGF);
4651        // Emit all instances of the use_device_ptr clause.
4652        for (const auto *C : S.getClausesOfKind<OMPUseDevicePtrClause>())
4653          CGF.EmitOMPUseDevicePtrClause(*C, PrivateScope,
4654                                        Info.CaptureDeviceAddrMap);
4655        (void)PrivateScope.Privatize();
4656        RCG(CGF);
4657      } else {
4658        RCG(CGF);
4659      }
4660    };
4661
4662    // Forward the provided action to the privatization codegen.
4663    RegionCodeGenTy PrivRCG(PrivCodeGen);
4664    PrivRCG.setAction(Action);
4665
4666    // Although the body of the region is emitted as an inlined directive, we
4667    // don't use an inline scope, because changes to the references inside the
4668    // region are expected to be visible outside, so we do not privatize them.
4669    OMPLexicalScope Scope(CGF, S);
4670    CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_target_data,
4671                                                    PrivRCG);
4672  };
4673
4674  RegionCodeGenTy RCG(CodeGen);
4675
4676  // If we don't have target devices, don't bother emitting the data mapping
4677  // code.
4678  if (CGM.getLangOpts().OMPTargetTriples.empty()) {
4679    RCG(*this);
4680    return;
4681  }
4682
4683  // Check if we have any if clause associated with the directive.
4684  const Expr *IfCond = nullptr;
4685  if (const auto *C = S.getSingleClause<OMPIfClause>())
4686    IfCond = C->getCondition();
4687
4688  // Check if we have any device clause associated with the directive.
4689  const Expr *Device = nullptr;
4690  if (const auto *C = S.getSingleClause<OMPDeviceClause>())
4691    Device = C->getDevice();
4692
4693  // Set the action to signal privatization of device pointers.
4694  RCG.setAction(PrivAction);
4695
4696  // Emit region code.
4697  CGM.getOpenMPRuntime().emitTargetDataCalls(*this, S, IfCond, Device, RCG,
4698                                             Info);
4699}
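
// Hypothetical source form handled by EmitOMPTargetDataDirective; the optional
// if/device clauses correspond to the IfCond/Device expressions gathered above:
//   #pragma omp target data map(to: in[0:n]) map(from: out[0:n]) if(n > 128) device(0)
//   { /* structured block */ }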
4700
4701void CodeGenFunction::EmitOMPTargetEnterDataDirective(
4702    const OMPTargetEnterDataDirective &S) {
4703  // If we don't have target devices, don't bother emitting the data mapping
4704  // code.
4705  if (CGM.getLangOpts().OMPTargetTriples.empty())
4706    return;
4707
4708  // Check if we have any if clause associated with the directive.
4709  const Expr *IfCond = nullptr;
4710  if (const auto *C = S.getSingleClause<OMPIfClause>())
4711    IfCond = C->getCondition();
4712
4713  // Check if we have any device clause associated with the directive.
4714  const Expr *Device = nullptr;
4715  if (const auto *C = S.getSingleClause<OMPDeviceClause>())
4716    Device = C->getDevice();
4717
4718  OMPLexicalScope Scope(*this, S, OMPD_task);
4719  CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond, Device);
4720}
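
// Hypothetical stand-alone form lowered above (no associated structured block):
//   #pragma omp target enter data map(to: a[0:n]) if(n > 0) device(0)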
4721
4722void CodeGenFunction::EmitOMPTargetExitDataDirective(
4723    const OMPTargetExitDataDirective &S) {
4724  // If we don't have target devices, don't bother emitting the data mapping
4725  // code.
4726  if (CGM.getLangOpts().OMPTargetTriples.empty())
4727    return;
4728
4729  // Check if we have any if clause associated with the directive.
4730  const Expr *IfCond = nullptr;
4731  if (const auto *C = S.getSingleClause<OMPIfClause>())
4732    IfCond = C->getCondition();
4733
4734  // Check if we have any device clause associated with the directive.
4735  const Expr *Device = nullptr;
4736  if (const auto *C = S.getSingleClause<OMPDeviceClause>())
4737    Device = C->getDevice();
4738
4739  OMPLexicalScope Scope(*this, S, OMPD_task);
4740  CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond, Device);
4741}
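
// Hypothetical stand-alone form lowered above:
//   #pragma omp target exit data map(from: a[0:n]) if(n > 0)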
4742
4743static void emitTargetParallelRegion(CodeGenFunction &CGF,
4744                                     const OMPTargetParallelDirective &S,
4745                                     PrePostActionTy &Action) {
4746  // Get the captured statement associated with the 'parallel' region.
4747  const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel);
4748  Action.Enter(CGF);
4749  auto &&CodeGen = [&S, CS](CodeGenFunction &CGF, PrePostActionTy &Action) {
4750    Action.Enter(CGF);
4751    CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
4752    (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
4753    CGF.EmitOMPPrivateClause(S, PrivateScope);
4754    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
4755    (void)PrivateScope.Privatize();
4756    if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
4757      CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S);
4758    // TODO: Add support for clauses.
4759    CGF.EmitStmt(CS->getCapturedStmt());
4760    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel);
4761  };
4762  emitCommonOMPParallelDirective(CGF, S, OMPD_parallel, CodeGen,
4763                                 emitEmptyBoundParameters);
4764  emitPostUpdateForReductionClause(CGF, S,
4765                                   [](CodeGenFunction &) { return nullptr; });
4766}
4767
4768void CodeGenFunction::EmitOMPTargetParallelDeviceFunction(
4769    CodeGenModule &CGM, StringRef ParentName,
4770    const OMPTargetParallelDirective &S) {
4771  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
4772    emitTargetParallelRegion(CGF, S, Action);
4773  };
4774  llvm::Function *Fn;
4775  llvm::Constant *Addr;
4776  // Emit target region as a standalone region.
4777  CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
4778      S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
4779  assert(Fn && Addr && "Target device function emission failed.");
4780}
4781
4782void CodeGenFunction::EmitOMPTargetParallelDirective(
4783    const OMPTargetParallelDirective &S) {
4784  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
4785    emitTargetParallelRegion(CGF, S, Action);
4786  };
4787  emitCommonOMPTargetDirective(*this, S, CodeGen);
4788}
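
// Sketch of a hypothetical directive handled by the target parallel emitters
// above; the clauses correspond to the firstprivate/private/reduction handling
// in emitTargetParallelRegion ('work' and 'sum' are placeholders):
//   #pragma omp target parallel firstprivate(n) reduction(+: sum)
//   {
//     sum += work(n);
//   }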
4789
4790static void emitTargetParallelForRegion(CodeGenFunction &CGF,
4791                                        const OMPTargetParallelForDirective &S,
4792                                        PrePostActionTy &Action) {
4793  Action.Enter(CGF);
4794  // Emit the directive as a combined directive consisting of two implicit
4795  // directives: 'parallel' with 'for'.
4796  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
4797    Action.Enter(CGF);
4798    CodeGenFunction::OMPCancelStackRAII CancelRegion(
4799        CGF, OMPD_target_parallel_for, S.hasCancel());
4800    CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), emitForLoopBounds,
4801                               emitDispatchForLoopBounds);
4802  };
4803  emitCommonOMPParallelDirective(CGF, S, OMPD_for, CodeGen,
4804                                 emitEmptyBoundParameters);
4805}
4806
4807void CodeGenFunction::EmitOMPTargetParallelForDeviceFunction(
4808    CodeGenModule &CGM, StringRef ParentName,
4809    const OMPTargetParallelForDirective &S) {
4810  // Emit SPMD target parallel for region as a standalone region.
4811  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
4812    emitTargetParallelForRegion(CGF, S, Action);
4813  };
4814  llvm::Function *Fn;
4815  llvm::Constant *Addr;
4816  // Emit target region as a standalone region.
4817  CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
4818      S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
4819  assert(Fn && Addr && "Target device function emission failed.");
4820}
4821
4822void CodeGenFunction::EmitOMPTargetParallelForDirective(
4823    const OMPTargetParallelForDirective &S) {
4824  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
4825    emitTargetParallelForRegion(CGF, S, Action);
4826  };
4827  emitCommonOMPTargetDirective(*this, S, CodeGen);
4828}
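
// Hypothetical combined worksharing form lowered by the emitters above:
//   #pragma omp target parallel for map(tofrom: a[0:n]) schedule(static)
//   for (int i = 0; i < n; ++i)
//     a[i] *= 2;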
4829
4830static void
4831emitTargetParallelForSimdRegion(CodeGenFunction &CGF,
4832                                const OMPTargetParallelForSimdDirective &S,
4833                                PrePostActionTy &Action) {
4834  Action.Enter(CGF);
4835  // Emit the directive as a combined directive consisting of two implicit
4836  // directives: 'parallel' with 'for'.
4837  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
4838    Action.Enter(CGF);
4839    CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), emitForLoopBounds,
4840                               emitDispatchForLoopBounds);
4841  };
4842  emitCommonOMPParallelDirective(CGF, S, OMPD_simd, CodeGen,
4843                                 emitEmptyBoundParameters);
4844}
4845
4846void CodeGenFunction::EmitOMPTargetParallelForSimdDeviceFunction(
4847    CodeGenModule &CGM, StringRef ParentName,
4848    const OMPTargetParallelForSimdDirective &S) {
4849  // Emit SPMD target parallel for region as a standalone region.
4850  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
4851    emitTargetParallelForSimdRegion(CGF, S, Action);
4852  };
4853  llvm::Function *Fn;
4854  llvm::Constant *Addr;
4855  // Emit target region as a standalone region.
4856  CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
4857      S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
4858  assert(Fn && Addr && "Target device function emission failed.");
4859}
4860
4861void CodeGenFunction::EmitOMPTargetParallelForSimdDirective(
4862    const OMPTargetParallelForSimdDirective &S) {
4863  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
4864    emitTargetParallelForSimdRegion(CGF, S, Action);
4865  };
4866  emitCommonOMPTargetDirective(*this, S, CodeGen);
4867}
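
// Hypothetical simd-annotated variant lowered by the emitters above:
//   #pragma omp target parallel for simd simdlen(8)
//   for (int i = 0; i < n; ++i)
//     a[i] += b[i];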
4868
4869/// Emit a helper variable and return corresponding lvalue.
4870static void mapParam(CodeGenFunction &CGF, const DeclRefExpr *Helper,
4871                     const ImplicitParamDecl *PVD,
4872                     CodeGenFunction::OMPPrivateScope &Privates) {
4873  const auto *VDecl = cast<VarDecl>(Helper->getDecl());
4874  Privates.addPrivate(VDecl,
4875                      [&CGF, PVD]() { return CGF.GetAddrOfLocalVar(PVD); });
4876}
4877
4878void CodeGenFunction::EmitOMPTaskLoopBasedDirective(const OMPLoopDirective &S) {
4879  assert(isOpenMPTaskLoopDirective(S.getDirectiveKind()));
4880  // Emit outlined function for task construct.
4881  const CapturedStmt *CS = S.getCapturedStmt(OMPD_taskloop);
4882  Address CapturedStruct = GenerateCapturedStmtArgument(*CS);
4883  QualType SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
4884  const Expr *IfCond = nullptr;
4885  for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
4886    if (C->getNameModifier() == OMPD_unknown ||
4887        C->getNameModifier() == OMPD_taskloop) {
4888      IfCond = C->getCondition();
4889      break;
4890    }
4891  }
4892
4893  OMPTaskDataTy Data;
4894  // Check if taskloop must be emitted without taskgroup.
4895  Data.Nogroup = S.getSingleClause<OMPNogroupClause>();
4896  // TODO: Check if we should emit tied or untied task.
4897  Data.Tied = true;
4898  // Set scheduling for taskloop
4899  if (const auto *Clause = S.getSingleClause<OMPGrainsizeClause>()) {
4900    // grainsize clause
4901    Data.Schedule.setInt(/*IntVal=*/false);
4902    Data.Schedule.setPointer(EmitScalarExpr(Clause->getGrainsize()));
4903  } else if (const auto *Clause = S.getSingleClause<OMPNumTasksClause>()) {
4904    // num_tasks clause
4905    Data.Schedule.setInt(/*IntVal=*/true);
4906    Data.Schedule.setPointer(EmitScalarExpr(Clause->getNumTasks()));
4907  }
4908
4909  auto &&BodyGen = [CS, &S](CodeGenFunction &CGF, PrePostActionTy &) {
4910    // if (PreCond) {
4911    //   for (IV in 0..LastIteration) BODY;
4912    //   <Final counter/linear vars updates>;
4913    // }
4914    //
4915
4916    // Emit: if (PreCond) - begin.
4917    // If the condition constant folds and can be elided, avoid emitting the
4918    // whole loop.
4919    bool CondConstant;
4920    llvm::BasicBlock *ContBlock = nullptr;
4921    OMPLoopScope PreInitScope(CGF, S);
4922    if (CGF.ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
4923      if (!CondConstant)
4924        return;
4925    } else {
4926      llvm::BasicBlock *ThenBlock = CGF.createBasicBlock("taskloop.if.then");
4927      ContBlock = CGF.createBasicBlock("taskloop.if.end");
4928      emitPreCond(CGF, S, S.getPreCond(), ThenBlock, ContBlock,
4929                  CGF.getProfileCount(&S));
4930      CGF.EmitBlock(ThenBlock);
4931      CGF.incrementProfileCounter(&S);
4932    }
4933
4934    if (isOpenMPSimdDirective(S.getDirectiveKind()))
4935      CGF.EmitOMPSimdInit(S);
4936
4937    OMPPrivateScope LoopScope(CGF);
4938    // Emit helper vars inits.
4939    enum { LowerBound = 5, UpperBound, Stride, LastIter };
4940    auto *I = CS->getCapturedDecl()->param_begin();
4941    auto *LBP = std::next(I, LowerBound);
4942    auto *UBP = std::next(I, UpperBound);
4943    auto *STP = std::next(I, Stride);
4944    auto *LIP = std::next(I, LastIter);
4945    mapParam(CGF, cast<DeclRefExpr>(S.getLowerBoundVariable()), *LBP,
4946             LoopScope);
4947    mapParam(CGF, cast<DeclRefExpr>(S.getUpperBoundVariable()), *UBP,
4948             LoopScope);
4949    mapParam(CGF, cast<DeclRefExpr>(S.getStrideVariable()), *STP, LoopScope);
4950    mapParam(CGF, cast<DeclRefExpr>(S.getIsLastIterVariable()), *LIP,
4951             LoopScope);
4952    CGF.EmitOMPPrivateLoopCounters(S, LoopScope);
4953    bool HasLastprivateClause = CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
4954    (void)LoopScope.Privatize();
4955    // Emit the loop iteration variable.
4956    const Expr *IVExpr = S.getIterationVariable();
4957    const auto *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl());
4958    CGF.EmitVarDecl(*IVDecl);
4959    CGF.EmitIgnoredExpr(S.getInit());
4960
4961    // Emit the iterations count variable.
4962    // If it is not a variable, Sema decided to calculate iterations count on
4963    // each iteration (e.g., it is foldable into a constant).
4964    if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
4965      CGF.EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
4966      // Emit calculation of the iterations count.
4967      CGF.EmitIgnoredExpr(S.getCalcLastIteration());
4968    }
4969
4970    CGF.EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(),
4971                         S.getInc(),
4972                         [&S](CodeGenFunction &CGF) {
4973                           CGF.EmitOMPLoopBody(S, JumpDest());
4974                           CGF.EmitStopPoint(&S);
4975                         },
4976                         [](CodeGenFunction &) {});
4977    // Emit: if (PreCond) - end.
4978    if (ContBlock) {
4979      CGF.EmitBranch(ContBlock);
4980      CGF.EmitBlock(ContBlock, true);
4981    }
4982    // Emit final copy of the lastprivate variables if IsLastIter != 0.
4983    if (HasLastprivateClause) {
4984      CGF.EmitOMPLastprivateClauseFinal(
4985          S, isOpenMPSimdDirective(S.getDirectiveKind()),
4986          CGF.Builder.CreateIsNotNull(CGF.EmitLoadOfScalar(
4987              CGF.GetAddrOfLocalVar(*LIP), /*Volatile=*/false,
4988              (*LIP)->getType(), S.getBeginLoc())));
4989    }
4990  };
4991  auto &&TaskGen = [&S, SharedsTy, CapturedStruct,
4992                    IfCond](CodeGenFunction &CGF, llvm::Function *OutlinedFn,
4993                            const OMPTaskDataTy &Data) {
4994    auto &&CodeGen = [&S, OutlinedFn, SharedsTy, CapturedStruct, IfCond,
4995                      &Data](CodeGenFunction &CGF, PrePostActionTy &) {
4996      OMPLoopScope PreInitScope(CGF, S);
4997      CGF.CGM.getOpenMPRuntime().emitTaskLoopCall(CGF, S.getBeginLoc(), S,
4998                                                  OutlinedFn, SharedsTy,
4999                                                  CapturedStruct, IfCond, Data);
5000    };
5001    CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_taskloop,
5002                                                    CodeGen);
5003  };
5004  if (Data.Nogroup) {
5005    EmitOMPTaskBasedDirective(S, OMPD_taskloop, BodyGen, TaskGen, Data);
5006  } else {
5007    CGM.getOpenMPRuntime().emitTaskgroupRegion(
5008        *this,
5009        [&S, &BodyGen, &TaskGen, &Data](CodeGenFunction &CGF,
5010                                        PrePostActionTy &Action) {
5011          Action.Enter(CGF);
5012          CGF.EmitOMPTaskBasedDirective(S, OMPD_taskloop, BodyGen, TaskGen,
5013                                        Data);
5014        },
5015        S.getBeginLoc());
5016  }
5017}
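
// Hypothetical taskloop forms illustrating the scheduling and nogroup handling
// above; grainsize and num_tasks are mutually exclusive, hence two separate
// examples ('work' is a placeholder):
//   #pragma omp taskloop grainsize(64)
//   for (int i = 0; i < n; ++i) work(i);
//
//   #pragma omp taskloop num_tasks(8) nogroup
//   for (int i = 0; i < n; ++i) work(i);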
5018
5019void CodeGenFunction::EmitOMPTaskLoopDirective(const OMPTaskLoopDirective &S) {
5020  EmitOMPTaskLoopBasedDirective(S);
5021}
5022
5023void CodeGenFunction::EmitOMPTaskLoopSimdDirective(
5024    const OMPTaskLoopSimdDirective &S) {
5025  EmitOMPTaskLoopBasedDirective(S);
5026}
5027
5028// Generate the instructions for '#pragma omp target update' directive.
5029void CodeGenFunction::EmitOMPTargetUpdateDirective(
5030    const OMPTargetUpdateDirective &S) {
5031  // If we don't have target devices, don't bother emitting the data mapping
5032  // code.
5033  if (CGM.getLangOpts().OMPTargetTriples.empty())
5034    return;
5035
5036  // Check if we have any if clause associated with the directive.
5037  const Expr *IfCond = nullptr;
5038  if (const auto *C = S.getSingleClause<OMPIfClause>())
5039    IfCond = C->getCondition();
5040
5041  // Check if we have any device clause associated with the directive.
5042  const Expr *Device = nullptr;
5043  if (const auto *C = S.getSingleClause<OMPDeviceClause>())
5044    Device = C->getDevice();
5045
5046  OMPLexicalScope Scope(*this, S, OMPD_task);
5047  CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond, Device);
5048}
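
// Hypothetical stand-alone form lowered above:
//   #pragma omp target update from(a[0:n]) if(n > 0) device(0)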
5049
5050void CodeGenFunction::EmitSimpleOMPExecutableDirective(
5051    const OMPExecutableDirective &D) {
5052  if (!D.hasAssociatedStmt() || !D.getAssociatedStmt())
5053    return;
5054  auto &&CodeGen = [&D](CodeGenFunction &CGF, PrePostActionTy &Action) {
5055    if (isOpenMPSimdDirective(D.getDirectiveKind())) {
5056      emitOMPSimdRegion(CGF, cast<OMPLoopDirective>(D), Action);
5057    } else {
5058      OMPPrivateScope LoopGlobals(CGF);
5059      if (const auto *LD = dyn_cast<OMPLoopDirective>(&D)) {
5060        for (const Expr *E : LD->counters()) {
5061          const auto *VD = dyn_cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
5062          if (!VD->hasLocalStorage() && !CGF.LocalDeclMap.count(VD)) {
5063            LValue GlobLVal = CGF.EmitLValue(E);
5064            LoopGlobals.addPrivate(
5065                VD, [&GlobLVal]() { return GlobLVal.getAddress(); });
5066          }
5067          if (isa<OMPCapturedExprDecl>(VD)) {
5068            // Emit only those that were not explicitly referenced in clauses.
5069            if (!CGF.LocalDeclMap.count(VD))
5070              CGF.EmitVarDecl(*VD);
5071          }
5072        }
5073        for (const auto *C : D.getClausesOfKind<OMPOrderedClause>()) {
5074          if (!C->getNumForLoops())
5075            continue;
5076          for (unsigned I = LD->getCollapsedNumber(),
5077                        E = C->getLoopNumIterations().size();
5078               I < E; ++I) {
5079            if (const auto *VD = dyn_cast<OMPCapturedExprDecl>(
5080                    cast<DeclRefExpr>(C->getLoopCounter(I))->getDecl())) {
5081              // Emit only those that were not explicitly referenced in clauses.
5082              if (!CGF.LocalDeclMap.count(VD))
5083                CGF.EmitVarDecl(*VD);
5084            }
5085          }
5086        }
5087      }
5088      LoopGlobals.Privatize();
5089      CGF.EmitStmt(D.getInnermostCapturedStmt()->getCapturedStmt());
5090    }
5091  };
5092  OMPSimdLexicalScope Scope(*this, D);
5093  CGM.getOpenMPRuntime().emitInlinedDirective(
5094      *this,
5095      isOpenMPSimdDirective(D.getDirectiveKind()) ? OMPD_simd
5096                                                  : D.getDirectiveKind(),
5097      CodeGen);
5098}
5099