1 //===- Parsing, selection, and construction of pass pipelines --*- C++ -*--===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
10 /// Interfaces for registering analysis passes, producing common pass manager
11 /// configurations, and parsing of pass pipelines.
13 //===----------------------------------------------------------------------===//
15 #ifndef LLVM_PASSES_PASSBUILDER_H
16 #define LLVM_PASSES_PASSBUILDER_H
18 #include "llvm/ADT/Optional.h"
19 #include "llvm/Analysis/CGSCCPassManager.h"
20 #include "llvm/IR/PassManager.h"
21 #include "llvm/Support/Error.h"
22 #include "llvm/Transforms/Instrumentation.h"
23 #include "llvm/Transforms/Scalar/LoopPassManager.h"
30 class ModuleSummaryIndex;
32 /// A struct capturing PGO tunables.
34 enum PGOAction { NoAction, IRInstr, IRUse, SampleUse };
35 enum CSPGOAction { NoCSAction, CSIRInstr, CSIRUse };
// Constructor validates that the requested PGO / CSPGO action combination is
// self-consistent (see the asserts below). Note that SamplePGOSupport is
// forced on whenever Action == SampleUse.
36 PGOOptions(std::string ProfileFile = "", std::string CSProfileGenFile = "",
37 std::string ProfileRemappingFile = "", PGOAction Action = NoAction,
38 CSPGOAction CSAction = NoCSAction, bool SamplePGOSupport = false)
39 : ProfileFile(ProfileFile), CSProfileGenFile(CSProfileGenFile),
40 ProfileRemappingFile(ProfileRemappingFile), Action(Action),
42 SamplePGOSupport(SamplePGOSupport || Action == SampleUse) {
43 // Note: we do allow ProfileFile.empty() for Action=IRUse; LTO may call
44 // back with the IRUse action without a ProfileFile.
46 // If there is a CSAction, PGOAction cannot be IRInstr or SampleUse.
47 assert(this->CSAction == NoCSAction ||
48 (this->Action != IRInstr && this->Action != SampleUse));
50 // For CSIRInstr, CSProfileGenFile also needs to be nonempty.
51 assert(this->CSAction != CSIRInstr || !this->CSProfileGenFile.empty());
53 // If CSAction is CSIRUse, PGOAction needs to be IRUse as they share the
55 // same profile file.
55 assert(this->CSAction != CSIRUse || this->Action == IRUse);
57 // If neither Action nor CSAction is set, SamplePGOSupport needs to be true.
58 assert(this->Action != NoAction || this->CSAction != NoCSAction ||
59 this->SamplePGOSupport);
// The fields below mirror the constructor parameters; see the asserts above
// for the invariants relating them.
61 std::string ProfileFile;
62 std::string CSProfileGenFile;
63 std::string ProfileRemappingFile;
// True when sample-PGO passes should be enabled; implied by Action==SampleUse.
66 bool SamplePGOSupport;
69 /// Tunable parameters for passes in the default pipelines.
70 class PipelineTuningOptions {
72 /// Constructor sets pipeline tuning defaults based on cl::opts. Each option
73 /// can be set in the PassBuilder when using LLVM as a library.
74 PipelineTuningOptions();
76 /// Tuning option to set loop interleaving on/off. Its default value is that
77 /// of the flag: `-interleave-loops`.
78 bool LoopInterleaving;
80 /// Tuning option to enable/disable loop vectorization. Its default value is
81 /// that of the flag: `-vectorize-loops`.
82 bool LoopVectorization;
84 /// Tuning option to enable/disable SLP vectorization. Its default value
85 /// is that of the flag: `-vectorize-slp`.
86 bool SLPVectorization;
88 /// Tuning option to cap the number of calls to retrieve clobbering accesses
89 /// in MemorySSA, in LICM.
90 unsigned LicmMssaOptCap;
92 /// Tuning option to disable promotion to scalars in LICM with MemorySSA, if
93 /// the number of accesses is too large.
94 unsigned LicmMssaNoAccForPromotionCap;
97 /// This class provides access to building LLVM's passes.
99 /// Its members provide the baseline state available to passes during their
100 /// construction. The \c PassRegistry.def file specifies how to construct all
101 /// of the built-in passes, and those may reference these members during
105 PipelineTuningOptions PTO;
106 Optional<PGOOptions> PGOOpt;
107 PassInstrumentationCallbacks *PIC;
110 /// A struct to capture parsed pass pipeline names.
112 /// A pipeline is defined as a series of names, each of which may in itself
113 /// recursively contain a nested pipeline. A name is either the name of a pass
114 /// (e.g. "instcombine") or the name of a pipeline type (e.g. "cgscc"). If the
115 /// name is the name of a pass, the InnerPipeline is empty, since passes
116 /// cannot contain inner pipelines. See parsePassPipeline() for a more
117 /// detailed description of the textual pipeline format.
118 struct PipelineElement {
120 std::vector<PipelineElement> InnerPipeline;
125 /// This enumerates the LLVM ThinLTO optimization phases.
126 enum class ThinLTOPhase {
127 /// No ThinLTO behavior needed.
129 /// ThinLTO prelink (summary) phase.
131 /// ThinLTO postlink (backend compile) phase.
135 /// LLVM-provided high-level optimization levels.
137 /// This enumerates the LLVM-provided high-level optimization levels. Each
138 /// level has a specific goal and rationale.
139 enum OptimizationLevel {
140 /// Disable as many optimizations as possible. This doesn't completely
141 /// disable the optimizer in all cases, for example always_inline functions
142 /// can be required to be inlined for correctness.
145 /// Optimize quickly without destroying debuggability.
147 /// FIXME: The current and historical behavior of this level does *not*
148 /// agree with this goal, but we would like to move toward this goal in the
151 /// This level is tuned to produce a result from the optimizer as quickly
152 /// as possible and to avoid destroying debuggability. This tends to result
153 /// in a very good development mode where the compiled code will be
154 /// immediately executed as part of testing. As a consequence, where
155 /// possible, we would like to produce efficient-to-execute code, but not
156 /// if it significantly slows down compilation or would prevent even basic
157 /// debugging of the resulting binary.
159 /// As an example, complex loop transformations such as versioning,
160 /// vectorization, or fusion might not make sense here due to the degree to
161 /// which the executed code would differ from the source code, and the
162 /// potential compile time cost.
165 /// Optimize for fast execution as much as possible without triggering
166 /// significant incremental compile time or code size growth.
168 /// The key idea is that optimizations at this level should "pay for
169 /// themselves". So if an optimization increases compile time by 5% or
170 /// increases code size by 5% for a particular benchmark, that benchmark
171 /// should also be one which sees a 5% runtime improvement. If the compile
172 /// time or code size penalties happen on average across a diverse range of
173 /// LLVM users' benchmarks, then the improvements should as well.
175 /// And no matter what, the compile time needs to not grow superlinearly
176 /// with the size of input to LLVM so that users can control the runtime of
177 /// the optimizer in this mode.
179 /// This is expected to be a good default optimization level for the vast
180 /// majority of users.
183 /// Optimize for fast execution as much as possible.
185 /// This mode is significantly more aggressive in trading off compile time
186 /// and code size to get execution time improvements. The core idea is that
187 /// this mode should include any optimization that helps execution time on
188 /// balance across a diverse collection of benchmarks, even if it increases
189 /// code size or compile time for some benchmarks without corresponding
190 /// improvements to execution time.
192 /// Despite being willing to trade more compile time off to get improved
193 /// execution time, this mode still tries to avoid superlinear growth in
194 /// order to make even significantly slower compile times at least scale
195 /// reasonably. This does not preclude very substantial constant factor
199 /// Similar to \c O2 but tries to optimize for small code size instead of
200 /// fast execution without triggering significant incremental execution
203 /// The logic here is exactly the same as \c O2, but with code size and
204 /// execution time metrics swapped.
206 /// A consequence of the different core goal is that this should in general
207 /// produce substantially smaller executables that still run in
208 /// a reasonable amount of time.
211 /// A very specialized mode that will optimize for code size at any and all
214 /// This is useful primarily when there are absolute size limitations and
215 /// any effort taken to reduce the size is worth it regardless of the
216 /// execution time impact. You should expect this level to produce rather
217 /// slow, but very small, code.
/// Construct a PassBuilder.
///
/// All parameters are optional: a null TargetMachine, default-constructed
/// tuning options, no PGO configuration, and no instrumentation callbacks
/// are all valid. The arguments are simply captured into the corresponding
/// members (\c TM, \c PTO, \c PGOOpt, \c PIC).
221 explicit PassBuilder(TargetMachine *TM = nullptr,
222 PipelineTuningOptions PTO = PipelineTuningOptions(),
223 Optional<PGOOptions> PGOOpt = None,
224 PassInstrumentationCallbacks *PIC = nullptr)
225 : TM(TM), PTO(PTO), PGOOpt(PGOOpt), PIC(PIC) {}
227 /// Cross register the analysis managers through their proxies.
229 /// This is an interface that can be used to cross register each
230 /// AnalysisManager with all the others analysis managers.
231 void crossRegisterProxies(LoopAnalysisManager &LAM,
232 FunctionAnalysisManager &FAM,
233 CGSCCAnalysisManager &CGAM,
234 ModuleAnalysisManager &MAM);
236 /// Registers all available module analysis passes.
238 /// This is an interface that can be used to populate a \c
239 /// ModuleAnalysisManager with all registered module analyses. Callers can
240 /// still manually register any additional analyses. Callers can also
241 /// pre-register analyses and this will not override those.
242 void registerModuleAnalyses(ModuleAnalysisManager &MAM);
244 /// Registers all available CGSCC analysis passes.
246 /// This is an interface that can be used to populate a \c CGSCCAnalysisManager
247 /// with all registered CGSCC analyses. Callers can still manually register any
248 /// additional analyses. Callers can also pre-register analyses and this will
249 /// not override those.
250 void registerCGSCCAnalyses(CGSCCAnalysisManager &CGAM);
252 /// Registers all available function analysis passes.
254 /// This is an interface that can be used to populate a \c
255 /// FunctionAnalysisManager with all registered function analyses. Callers can
256 /// still manually register any additional analyses. Callers can also
257 /// pre-register analyses and this will not override those.
258 void registerFunctionAnalyses(FunctionAnalysisManager &FAM);
260 /// Registers all available loop analysis passes.
262 /// This is an interface that can be used to populate a \c LoopAnalysisManager
263 /// with all registered loop analyses. Callers can still manually register any
264 /// additional analyses.
265 void registerLoopAnalyses(LoopAnalysisManager &LAM);
267 /// Construct the core LLVM function canonicalization and simplification
270 /// This is a long pipeline and uses most of the per-function optimization
271 /// passes in LLVM to canonicalize and simplify the IR. It is suitable to run
272 /// repeatedly over the IR and is not expected to destroy important
273 /// information about the semantics of the IR.
275 /// Note that \p Level cannot be `O0` here. The pipelines produced are
276 /// only intended for use when attempting to optimize code. If frontends
277 /// require some transformations for semantic reasons, they should explicitly
280 /// \p Phase indicates the current ThinLTO phase.
282 buildFunctionSimplificationPipeline(OptimizationLevel Level,
284 bool DebugLogging = false);
286 /// Construct the core LLVM module canonicalization and simplification
289 /// This pipeline focuses on canonicalizing and simplifying the entire module
290 /// of IR. Much like the function simplification pipeline above, it is
291 /// suitable to run repeatedly over the IR and is not expected to destroy
292 /// important information. It does, however, perform inlining and other
293 /// heuristic based simplifications that are not strictly reversible.
295 /// Note that \p Level cannot be `O0` here. The pipelines produced are
296 /// only intended for use when attempting to optimize code. If frontends
297 /// require some transformations for semantic reasons, they should explicitly
300 /// \p Phase indicates the current ThinLTO phase.
302 buildModuleSimplificationPipeline(OptimizationLevel Level,
304 bool DebugLogging = false);
306 /// Construct the core LLVM module optimization pipeline.
308 /// This pipeline focuses on optimizing the execution speed of the IR. It
309 /// uses cost modeling and thresholds to balance code growth against runtime
310 /// improvements. It includes vectorization and other information destroying
311 /// transformations. It also cannot generally be run repeatedly on a module
312 /// without potentially seriously regressing either runtime performance of
313 /// the code or serious code size growth.
315 /// Note that \p Level cannot be `O0` here. The pipelines produced are
316 /// only intended for use when attempting to optimize code. If frontends
317 /// require some transformations for semantic reasons, they should explicitly
319 ModulePassManager buildModuleOptimizationPipeline(OptimizationLevel Level,
320 bool DebugLogging = false,
321 bool LTOPreLink = false);
323 /// Build a per-module default optimization pipeline.
325 /// This provides a good default optimization pipeline for per-module
326 /// optimization and code generation without any link-time optimization. It
327 /// typically correspond to frontend "-O[123]" options for optimization
328 /// levels \c O1, \c O2 and \c O3 resp.
330 /// Note that \p Level cannot be `O0` here. The pipelines produced are
331 /// only intended for use when attempting to optimize code. If frontends
332 /// require some transformations for semantic reasons, they should explicitly
334 ModulePassManager buildPerModuleDefaultPipeline(OptimizationLevel Level,
335 bool DebugLogging = false,
336 bool LTOPreLink = false);
338 /// Build a pre-link, ThinLTO-targeting default optimization pipeline to
341 /// This adds the pre-link optimizations tuned to prepare a module for
342 /// a ThinLTO run. It works to minimize the IR which needs to be analyzed
343 /// without making irreversible decisions which could be made better during
346 /// Note that \p Level cannot be `O0` here. The pipelines produced are
347 /// only intended for use when attempting to optimize code. If frontends
348 /// require some transformations for semantic reasons, they should explicitly
351 buildThinLTOPreLinkDefaultPipeline(OptimizationLevel Level,
352 bool DebugLogging = false);
354 /// Build a ThinLTO default optimization pipeline to a pass manager.
356 /// This provides a good default optimization pipeline for link-time
357 /// optimization and code generation. It is particularly tuned to fit well
358 /// when IR coming into the LTO phase was first run through \c
359 /// addPreLinkLTODefaultPipeline, and the two coordinate closely.
361 /// Note that \p Level cannot be `O0` here. The pipelines produced are
362 /// only intended for use when attempting to optimize code. If frontends
363 /// require some transformations for semantic reasons, they should explicitly
366 buildThinLTODefaultPipeline(OptimizationLevel Level, bool DebugLogging,
367 const ModuleSummaryIndex *ImportSummary);
369 /// Build a pre-link, LTO-targeting default optimization pipeline to a pass
372 /// This adds the pre-link optimizations tuned to work well with a later LTO
373 /// run. It works to minimize the IR which needs to be analyzed without
374 /// making irreversible decisions which could be made better during the LTO
377 /// Note that \p Level cannot be `O0` here. The pipelines produced are
378 /// only intended for use when attempting to optimize code. If frontends
379 /// require some transformations for semantic reasons, they should explicitly
381 ModulePassManager buildLTOPreLinkDefaultPipeline(OptimizationLevel Level,
382 bool DebugLogging = false);
384 /// Build an LTO default optimization pipeline to a pass manager.
386 /// This provides a good default optimization pipeline for link-time
387 /// optimization and code generation. It is particularly tuned to fit well
388 /// when IR coming into the LTO phase was first run through \c
389 /// addPreLinkLTODefaultPipeline, and the two coordinate closely.
391 /// Note that \p Level cannot be `O0` here. The pipelines produced are
392 /// only intended for use when attempting to optimize code. If frontends
393 /// require some transformations for semantic reasons, they should explicitly
395 ModulePassManager buildLTODefaultPipeline(OptimizationLevel Level,
397 ModuleSummaryIndex *ExportSummary);
399 /// Build the default `AAManager` with the default alias analysis pipeline
401 AAManager buildDefaultAAPipeline();
403 /// Parse a textual pass pipeline description into a \c
404 /// ModulePassManager.
406 /// The format of the textual pass pipeline description looks something like:
408 /// module(function(instcombine,sroa),dce,cgscc(inliner,function(...)),...)
410 /// Pass managers have ()s describing the nest structure of passes. All passes
411 /// are comma separated. As a special shortcut, if the very first pass is not
412 /// a module pass (as a module pass manager is), this will automatically form
413 /// the shortest stack of pass managers that allow inserting that first pass.
414 /// So, assuming function passes 'fpassN', CGSCC passes 'cgpassN', and loop
415 /// passes 'lpassN', all of these are valid:
417 /// fpass1,fpass2,fpass3
418 /// cgpass1,cgpass2,cgpass3
419 /// lpass1,lpass2,lpass3
421 /// And they are equivalent to the following (resp.):
423 /// module(function(fpass1,fpass2,fpass3))
424 /// module(cgscc(cgpass1,cgpass2,cgpass3))
425 /// module(function(loop(lpass1,lpass2,lpass3)))
427 /// This shortcut is especially useful for debugging and testing small pass
428 /// combinations. Note that these shortcuts don't introduce any other magic.
429 /// If the sequence of passes aren't all the exact same kind of pass, it will
430 /// be an error. You cannot mix different levels implicitly, you must
431 /// explicitly form a pass manager in which to nest passes.
432 Error parsePassPipeline(ModulePassManager &MPM, StringRef PipelineText,
433 bool VerifyEachPass = true,
434 bool DebugLogging = false);
436 /// {{@ Parse a textual pass pipeline description into a specific PassManager
438 /// Automatic deduction of an appropriate pass manager stack is not supported.
439 /// For example, to insert a loop pass 'lpass' into a FunctionPassManager,
440 /// this is the valid pipeline text:
443 Error parsePassPipeline(CGSCCPassManager &CGPM, StringRef PipelineText,
444 bool VerifyEachPass = true,
445 bool DebugLogging = false);
446 Error parsePassPipeline(FunctionPassManager &FPM, StringRef PipelineText,
447 bool VerifyEachPass = true,
448 bool DebugLogging = false);
449 Error parsePassPipeline(LoopPassManager &LPM, StringRef PipelineText,
450 bool VerifyEachPass = true,
451 bool DebugLogging = false);
454 /// Parse a textual alias analysis pipeline into the provided AA manager.
456 /// The format of the textual AA pipeline is a comma separated list of AA
459 /// basic-aa,globals-aa,...
461 /// The AA manager is set up such that the provided alias analyses are tried
462 /// in the order specified. See the \c AAManager documentation for details
463 /// about the logic used. This routine just provides the textual mapping
464 /// between AA names and the analyses to register with the manager.
466 /// Returns false if the text cannot be parsed cleanly. The specific state of
467 /// the \p AA manager is unspecified if such an error is encountered and this
469 Error parseAAPipeline(AAManager &AA, StringRef PipelineText);
471 /// Register a callback for a default optimizer pipeline extension
474 /// This extension point allows adding passes that perform peephole
475 /// optimizations similar to the instruction combiner. These passes will be
476 /// inserted after each instance of the instruction combiner pass.
477 void registerPeepholeEPCallback(
478 const std::function<void(FunctionPassManager &, OptimizationLevel)> &C) {
479 PeepholeEPCallbacks.push_back(C);
482 /// Register a callback for a default optimizer pipeline extension
485 /// This extension point allows adding late loop canonicalization and
486 /// simplification passes. This is the last point in the loop optimization
487 /// pipeline before loop deletion. Each pass added
488 /// here must be an instance of LoopPass.
489 /// This is the place to add passes that can remove loops, such as target-
490 /// specific loop idiom recognition.
491 void registerLateLoopOptimizationsEPCallback(
492 const std::function<void(LoopPassManager &, OptimizationLevel)> &C) {
493 LateLoopOptimizationsEPCallbacks.push_back(C);
496 /// Register a callback for a default optimizer pipeline extension
499 /// This extension point allows adding loop passes to the end of the loop
501 void registerLoopOptimizerEndEPCallback(
502 const std::function<void(LoopPassManager &, OptimizationLevel)> &C) {
503 LoopOptimizerEndEPCallbacks.push_back(C);
506 /// Register a callback for a default optimizer pipeline extension
509 /// This extension point allows adding optimization passes after most of the
510 /// main optimizations, but before the last cleanup-ish optimizations.
511 void registerScalarOptimizerLateEPCallback(
512 const std::function<void(FunctionPassManager &, OptimizationLevel)> &C) {
513 ScalarOptimizerLateEPCallbacks.push_back(C);
516 /// Register a callback for a default optimizer pipeline extension
519 /// This extension point allows adding CallGraphSCC passes at the end of the
520 /// main CallGraphSCC passes and before any function simplification passes run
521 /// by CGPassManager.
522 void registerCGSCCOptimizerLateEPCallback(
523 const std::function<void(CGSCCPassManager &, OptimizationLevel)> &C) {
524 CGSCCOptimizerLateEPCallbacks.push_back(C);
527 /// Register a callback for a default optimizer pipeline extension
530 /// This extension point allows adding optimization passes before the
531 /// vectorizer and other highly target specific optimization passes are
533 void registerVectorizerStartEPCallback(
534 const std::function<void(FunctionPassManager &, OptimizationLevel)> &C) {
535 VectorizerStartEPCallbacks.push_back(C);
538 /// Register a callback for a default optimizer pipeline extension point.
540 /// This extension point allows adding optimization once at the start of the
541 /// pipeline. This does not apply to 'backend' compiles (LTO and ThinLTO
542 /// link-time pipelines).
543 void registerPipelineStartEPCallback(
544 const std::function<void(ModulePassManager &)> &C) {
545 PipelineStartEPCallbacks.push_back(C);
548 /// Register a callback for a default optimizer pipeline extension point
550 /// This extension point allows adding optimizations at the very end of the
551 /// function optimization pipeline. A key difference between this and the
552 /// legacy PassManager's OptimizerLast callback is that this extension point
553 /// is not triggered at O0. Extensions to the O0 pipeline should append their
554 /// passes to the end of the overall pipeline.
555 void registerOptimizerLastEPCallback(
556 const std::function<void(FunctionPassManager &, OptimizationLevel)> &C) {
557 OptimizerLastEPCallbacks.push_back(C);
560 /// Register a callback for parsing an AliasAnalysis Name to populate
561 /// the given AAManager \p AA
562 void registerParseAACallback(
563 const std::function<bool(StringRef Name, AAManager &AA)> &C) {
564 AAParsingCallbacks.push_back(C);
567 /// {{@ Register callbacks for analysis registration with this PassBuilder
569 /// Callees register their analyses with the given AnalysisManager objects.
570 void registerAnalysisRegistrationCallback(
571 const std::function<void(CGSCCAnalysisManager &)> &C) {
572 CGSCCAnalysisRegistrationCallbacks.push_back(C);
574 void registerAnalysisRegistrationCallback(
575 const std::function<void(FunctionAnalysisManager &)> &C) {
576 FunctionAnalysisRegistrationCallbacks.push_back(C);
578 void registerAnalysisRegistrationCallback(
579 const std::function<void(LoopAnalysisManager &)> &C) {
580 LoopAnalysisRegistrationCallbacks.push_back(C);
582 void registerAnalysisRegistrationCallback(
583 const std::function<void(ModuleAnalysisManager &)> &C) {
584 ModuleAnalysisRegistrationCallbacks.push_back(C);
588 /// {{@ Register pipeline parsing callbacks with this pass builder instance.
589 /// Using these callbacks, callers can parse both a single pass name, as well
590 /// as entire sub-pipelines, and populate the PassManager instance
592 void registerPipelineParsingCallback(
593 const std::function<bool(StringRef Name, CGSCCPassManager &,
594 ArrayRef<PipelineElement>)> &C) {
595 CGSCCPipelineParsingCallbacks.push_back(C);
597 void registerPipelineParsingCallback(
598 const std::function<bool(StringRef Name, FunctionPassManager &,
599 ArrayRef<PipelineElement>)> &C) {
600 FunctionPipelineParsingCallbacks.push_back(C);
602 void registerPipelineParsingCallback(
603 const std::function<bool(StringRef Name, LoopPassManager &,
604 ArrayRef<PipelineElement>)> &C) {
605 LoopPipelineParsingCallbacks.push_back(C);
607 void registerPipelineParsingCallback(
608 const std::function<bool(StringRef Name, ModulePassManager &,
609 ArrayRef<PipelineElement>)> &C) {
610 ModulePipelineParsingCallbacks.push_back(C);
614 /// Register a callback for a top-level pipeline entry.
616 /// If the PassManager type is not given at the top level of the pipeline
617 /// text, this Callback should be used to determine the appropriate stack of
618 /// PassManagers and populate the passed ModulePassManager.
619 void registerParseTopLevelPipelineCallback(
620 const std::function<bool(ModulePassManager &, ArrayRef<PipelineElement>,
621 bool VerifyEachPass, bool DebugLogging)> &C) {
622 TopLevelPipelineParsingCallbacks.push_back(C);
626 static Optional<std::vector<PipelineElement>>
627 parsePipelineText(StringRef Text);
629 Error parseModulePass(ModulePassManager &MPM, const PipelineElement &E,
630 bool VerifyEachPass, bool DebugLogging);
631 Error parseCGSCCPass(CGSCCPassManager &CGPM, const PipelineElement &E,
632 bool VerifyEachPass, bool DebugLogging);
633 Error parseFunctionPass(FunctionPassManager &FPM, const PipelineElement &E,
634 bool VerifyEachPass, bool DebugLogging);
635 Error parseLoopPass(LoopPassManager &LPM, const PipelineElement &E,
636 bool VerifyEachPass, bool DebugLogging);
637 bool parseAAPassName(AAManager &AA, StringRef Name);
639 Error parseLoopPassPipeline(LoopPassManager &LPM,
640 ArrayRef<PipelineElement> Pipeline,
641 bool VerifyEachPass, bool DebugLogging);
642 Error parseFunctionPassPipeline(FunctionPassManager &FPM,
643 ArrayRef<PipelineElement> Pipeline,
644 bool VerifyEachPass, bool DebugLogging);
645 Error parseCGSCCPassPipeline(CGSCCPassManager &CGPM,
646 ArrayRef<PipelineElement> Pipeline,
647 bool VerifyEachPass, bool DebugLogging);
648 Error parseModulePassPipeline(ModulePassManager &MPM,
649 ArrayRef<PipelineElement> Pipeline,
650 bool VerifyEachPass, bool DebugLogging);
652 void addPGOInstrPasses(ModulePassManager &MPM, bool DebugLogging,
653 OptimizationLevel Level, bool RunProfileGen, bool IsCS,
654 std::string ProfileFile,
655 std::string ProfileRemappingFile);
657 void invokePeepholeEPCallbacks(FunctionPassManager &, OptimizationLevel);
659 // Extension Point callbacks
660 SmallVector<std::function<void(FunctionPassManager &, OptimizationLevel)>, 2>
662 SmallVector<std::function<void(LoopPassManager &, OptimizationLevel)>, 2>
663 LateLoopOptimizationsEPCallbacks;
664 SmallVector<std::function<void(LoopPassManager &, OptimizationLevel)>, 2>
665 LoopOptimizerEndEPCallbacks;
666 SmallVector<std::function<void(FunctionPassManager &, OptimizationLevel)>, 2>
667 ScalarOptimizerLateEPCallbacks;
668 SmallVector<std::function<void(CGSCCPassManager &, OptimizationLevel)>, 2>
669 CGSCCOptimizerLateEPCallbacks;
670 SmallVector<std::function<void(FunctionPassManager &, OptimizationLevel)>, 2>
671 VectorizerStartEPCallbacks;
672 SmallVector<std::function<void(FunctionPassManager &, OptimizationLevel)>, 2>
673 OptimizerLastEPCallbacks;
675 SmallVector<std::function<void(ModulePassManager &)>, 2>
676 PipelineStartEPCallbacks;
677 SmallVector<std::function<void(ModuleAnalysisManager &)>, 2>
678 ModuleAnalysisRegistrationCallbacks;
679 SmallVector<std::function<bool(StringRef, ModulePassManager &,
680 ArrayRef<PipelineElement>)>,
682 ModulePipelineParsingCallbacks;
683 SmallVector<std::function<bool(ModulePassManager &, ArrayRef<PipelineElement>,
684 bool VerifyEachPass, bool DebugLogging)>,
686 TopLevelPipelineParsingCallbacks;
688 SmallVector<std::function<void(CGSCCAnalysisManager &)>, 2>
689 CGSCCAnalysisRegistrationCallbacks;
690 SmallVector<std::function<bool(StringRef, CGSCCPassManager &,
691 ArrayRef<PipelineElement>)>,
693 CGSCCPipelineParsingCallbacks;
694 // Function callbacks
695 SmallVector<std::function<void(FunctionAnalysisManager &)>, 2>
696 FunctionAnalysisRegistrationCallbacks;
697 SmallVector<std::function<bool(StringRef, FunctionPassManager &,
698 ArrayRef<PipelineElement>)>,
700 FunctionPipelineParsingCallbacks;
702 SmallVector<std::function<void(LoopAnalysisManager &)>, 2>
703 LoopAnalysisRegistrationCallbacks;
704 SmallVector<std::function<bool(StringRef, LoopPassManager &,
705 ArrayRef<PipelineElement>)>,
707 LoopPipelineParsingCallbacks;
709 SmallVector<std::function<bool(StringRef Name, AAManager &AA)>, 2>
713 /// This utility template takes care of adding require<> and invalidate<>
714 /// passes for an analysis to a given \c PassManager. It is intended to be used
715 /// during parsing of a pass pipeline when parsing a single PipelineName.
716 /// When registering a new function analysis FancyAnalysis with the pass
717 /// pipeline name "fancy-analysis", a matching ParsePipelineCallback could look
720 /// static bool parseFunctionPipeline(StringRef Name, FunctionPassManager &FPM,
721 /// ArrayRef<PipelineElement> P) {
722 /// if (parseAnalysisUtilityPasses<FancyAnalysis>("fancy-analysis", Name,
727 template <typename AnalysisT, typename IRUnitT, typename AnalysisManagerT,
728 typename... ExtraArgTs>
729 bool parseAnalysisUtilityPasses(
730 StringRef AnalysisName, StringRef PipelineName,
731 PassManager<IRUnitT, AnalysisManagerT, ExtraArgTs...> &PM) {
732 if (!PipelineName.endswith(">"))
734 // See if this is an invalidate<> pass name
735 if (PipelineName.startswith("invalidate<")) {
736 PipelineName = PipelineName.substr(11, PipelineName.size() - 12);
737 if (PipelineName != AnalysisName)
739 PM.addPass(InvalidateAnalysisPass<AnalysisT>());
743 // See if this is a require<> pass name
744 if (PipelineName.startswith("require<")) {
745 PipelineName = PipelineName.substr(8, PipelineName.size() - 9);
746 if (PipelineName != AnalysisName)
748 PM.addPass(RequireAnalysisPass<AnalysisT, IRUnitT, AnalysisManagerT,